problem_id (string, lengths 18-22) | source (string, 1 class) | task_type (string, 1 class) | in_source_id (string, lengths 13-58) | prompt (string, lengths 1.71k-18.9k) | golden_diff (string, lengths 145-5.13k) | verification_info (string, lengths 465-23.6k) | num_tokens_prompt (int64, 556-4.1k) | num_tokens_diff (int64, 47-1.02k) |
---|---|---|---|---|---|---|---|---|
gh_patches_debug_35825 | rasdani/github-patches | git_diff | facebookresearch__hydra-2174 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add more details to Callback doc
(from zulip chat)
> When are the Callbacks executed in the hydra stack exactly? More specifically, are they executed after the launchers in the environments that the launchers provide? (Would be awesome to add a sentence about this in the docs)
</issue>
<code>
[start of hydra/experimental/callback.py]
1 # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
2 import logging
3 from typing import Any
4
5 from omegaconf import DictConfig
6
7 from hydra.core.utils import JobReturn
8
9 logger = logging.getLogger(__name__)
10
11
12 class Callback:
13 def on_run_start(self, config: DictConfig, **kwargs: Any) -> None:
14 """
15 Called in RUN mode before job starts.
16 """
17 ...
18
19 def on_run_end(self, config: DictConfig, **kwargs: Any) -> None:
20 """
21 Called in RUN mode after job ends.
22 """
23 ...
24
25 def on_multirun_start(self, config: DictConfig, **kwargs: Any) -> None:
26 """
27 Called in MULTIRUN mode before any job starts.
28 """
29 ...
30
31 def on_multirun_end(self, config: DictConfig, **kwargs: Any) -> None:
32 """
33 Called in MULTIRUN mode after all jobs end.
34 """
35 ...
36
37 def on_job_start(self, config: DictConfig, **kwargs: Any) -> None:
38 """
39 Called in both RUN and MULTIRUN modes, once for each Hydra job (before running
40 application code).
41 """
42 ...
43
44 def on_job_end(
45 self, config: DictConfig, job_return: JobReturn, **kwargs: Any
46 ) -> None:
47 """
48 Called in both RUN and MULTIRUN modes, once for each Hydra job (after running
49 application code).
50 """
51 ...
52
[end of hydra/experimental/callback.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/hydra/experimental/callback.py b/hydra/experimental/callback.py
--- a/hydra/experimental/callback.py
+++ b/hydra/experimental/callback.py
@@ -12,32 +12,37 @@
class Callback:
def on_run_start(self, config: DictConfig, **kwargs: Any) -> None:
"""
- Called in RUN mode before job starts.
+ Called in RUN mode before job/application code starts. `config` is composed with overrides.
+ Some `hydra.runtime` configs are not populated yet.
+ See hydra.core.utils.run_job for more info.
"""
...
def on_run_end(self, config: DictConfig, **kwargs: Any) -> None:
"""
- Called in RUN mode after job ends.
+ Called in RUN mode after job/application code returns.
"""
...
def on_multirun_start(self, config: DictConfig, **kwargs: Any) -> None:
"""
Called in MULTIRUN mode before any job starts.
+ When using a launcher, this will be executed on local machine before any Sweeper/Launcher is initialized.
"""
...
def on_multirun_end(self, config: DictConfig, **kwargs: Any) -> None:
"""
- Called in MULTIRUN mode after all jobs end.
+ Called in MULTIRUN mode after all jobs returns.
+ When using a launcher, this will be executed on local machine.
"""
...
def on_job_start(self, config: DictConfig, **kwargs: Any) -> None:
"""
- Called in both RUN and MULTIRUN modes, once for each Hydra job (before running
- application code).
+ Called in both RUN and MULTIRUN modes, once for each Hydra job (before running application code).
+ This is called from within `hydra.core.utils.run_job`. In the case of remote launching, this will be executed
+ on the remote server along with your application code.
"""
...
@@ -47,5 +52,10 @@
"""
Called in both RUN and MULTIRUN modes, once for each Hydra job (after running
application code).
+ This is called from within `hydra.core.utils.run_job`. In the case of remote launching, this will be executed
+ on the remote server after your application code.
+
+ `job_return` contains info that could be useful for logging or post-processing.
+ See hydra.core.utils.JobReturn for more.
"""
...
|
{"golden_diff": "diff --git a/hydra/experimental/callback.py b/hydra/experimental/callback.py\n--- a/hydra/experimental/callback.py\n+++ b/hydra/experimental/callback.py\n@@ -12,32 +12,37 @@\n class Callback:\n def on_run_start(self, config: DictConfig, **kwargs: Any) -> None:\n \"\"\"\n- Called in RUN mode before job starts.\n+ Called in RUN mode before job/application code starts. `config` is composed with overrides.\n+ Some `hydra.runtime` configs are not populated yet.\n+ See hydra.core.utils.run_job for more info.\n \"\"\"\n ...\n \n def on_run_end(self, config: DictConfig, **kwargs: Any) -> None:\n \"\"\"\n- Called in RUN mode after job ends.\n+ Called in RUN mode after job/application code returns.\n \"\"\"\n ...\n \n def on_multirun_start(self, config: DictConfig, **kwargs: Any) -> None:\n \"\"\"\n Called in MULTIRUN mode before any job starts.\n+ When using a launcher, this will be executed on local machine before any Sweeper/Launcher is initialized.\n \"\"\"\n ...\n \n def on_multirun_end(self, config: DictConfig, **kwargs: Any) -> None:\n \"\"\"\n- Called in MULTIRUN mode after all jobs end.\n+ Called in MULTIRUN mode after all jobs returns.\n+ When using a launcher, this will be executed on local machine.\n \"\"\"\n ...\n \n def on_job_start(self, config: DictConfig, **kwargs: Any) -> None:\n \"\"\"\n- Called in both RUN and MULTIRUN modes, once for each Hydra job (before running\n- application code).\n+ Called in both RUN and MULTIRUN modes, once for each Hydra job (before running application code).\n+ This is called from within `hydra.core.utils.run_job`. In the case of remote launching, this will be executed\n+ on the remote server along with your application code.\n \"\"\"\n ...\n \n@@ -47,5 +52,10 @@\n \"\"\"\n Called in both RUN and MULTIRUN modes, once for each Hydra job (after running\n application code).\n+ This is called from within `hydra.core.utils.run_job`. In the case of remote launching, this will be executed\n+ on the remote server after your application code.\n+\n+ `job_return` contains info that could be useful for logging or post-processing.\n+ See hydra.core.utils.JobReturn for more.\n \"\"\"\n ...\n", "issue": "Add more details to Callback doc\n(from zulip chat)\r\n> When are the Callbacks executed in the hydra stack exactly? More specifically, are they executed after the launchers in the environments that the launchers provide? (Would be awesome to add a sentence about this in the docs)\r\n\n", "before_files": [{"content": "# Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved\nimport logging\nfrom typing import Any\n\nfrom omegaconf import DictConfig\n\nfrom hydra.core.utils import JobReturn\n\nlogger = logging.getLogger(__name__)\n\n\nclass Callback:\n def on_run_start(self, config: DictConfig, **kwargs: Any) -> None:\n \"\"\"\n Called in RUN mode before job starts.\n \"\"\"\n ...\n\n def on_run_end(self, config: DictConfig, **kwargs: Any) -> None:\n \"\"\"\n Called in RUN mode after job ends.\n \"\"\"\n ...\n\n def on_multirun_start(self, config: DictConfig, **kwargs: Any) -> None:\n \"\"\"\n Called in MULTIRUN mode before any job starts.\n \"\"\"\n ...\n\n def on_multirun_end(self, config: DictConfig, **kwargs: Any) -> None:\n \"\"\"\n Called in MULTIRUN mode after all jobs end.\n \"\"\"\n ...\n\n def on_job_start(self, config: DictConfig, **kwargs: Any) -> None:\n \"\"\"\n Called in both RUN and MULTIRUN modes, once for each Hydra job (before running\n application code).\n \"\"\"\n ...\n\n def on_job_end(\n self, config: DictConfig, job_return: JobReturn, **kwargs: Any\n ) -> None:\n \"\"\"\n Called in both RUN and MULTIRUN modes, once for each Hydra job (after running\n application code).\n \"\"\"\n ...\n", "path": "hydra/experimental/callback.py"}]}
| 1,015 | 552 |
gh_patches_debug_29649 | rasdani/github-patches | git_diff | bridgecrewio__checkov-4897 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
CKV_AWS_325 check might not be accurate for PostgreSQL RDS instances
**Describe the issue**
The CKV_AWS_325 check expects the 'audit' log type to be enabled for RDS Cluster audit logging. However, this log type may not be applicable for PostgreSQL RDS instances, which use the 'postgresql' log type instead.
**Examples**
For a PostgreSQL RDS instance with the following Terraform configuration:
**main.tf**
```
resource "aws_rds_cluster" "main" {
# ... other configurations ...
enabled_cloudwatch_logs_exports = var.enabled_cloudwatch_logs_exports
# ... other configurations ...
}
```
**variables.tf**
```
variable "enabled_cloudwatch_logs_exports" {
default = [
"postgresql"
]
}
```
The CKV_AWS_325 check fails, even though the 'postgresql' log type is enabled for the RDS instance.
**Expected outcome**
The CKV_AWS_325 check should either not be applicable or should pass for PostgreSQL RDS instances with the 'postgresql' log type enabled.
**Version**
**Checkov Version:** Lastest
**Additional context**
This issue was discovered while analyzing a Terraform configuration for an AWS PostgreSQL RDS instance. The CKV_AWS_325 check might need to be updated to accommodate different log types for different RDS engines.
https://github.com/bridgecrewio/checkov/blob/main/checkov/terraform/checks/resource/aws/RDSClusterAuditLogging.py
</issue>
<code>
[start of checkov/terraform/checks/resource/aws/RDSClusterAuditLogging.py]
1 from __future__ import annotations
2
3 from typing import Any
4
5 from checkov.common.models.enums import CheckCategories, CheckResult
6 from checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck
7
8
9 class RDSClusterAuditLogging(BaseResourceCheck):
10 def __init__(self) -> None:
11 """
12 NIST.800-53.r5 AC-2(4), NIST.800-53.r5 AC-4(26), NIST.800-53.r5 AC-6(9), NIST.800-53.r5 AU-10,
13 NIST.800-53.r5 AU-12, NIST.800-53.r5 AU-2, NIST.800-53.r5 AU-3, NIST.800-53.r5 AU-6(3), NIST.800-53.r5 AU-6(4),
14 NIST.800-53.r5 CA-7, NIST.800-53.r5 SC-7(10), NIST.800-53.r5 SC-7(9), NIST.800-53.r5 SI-3(8),
15 NIST.800-53.r5 SI-4(20), NIST.800-53.r5 SI-7(8)
16 Database logging should be enabled
17 """
18 name = "Ensure that RDS Cluster audit logging is enabled"
19 id = "CKV_AWS_325"
20 supported_resources = ("aws_rds_cluster",)
21 categories = (CheckCategories.LOGGING,)
22 super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)
23
24 def scan_resource_conf(self, conf: dict[str, list[Any]]) -> CheckResult:
25 logs_exports = conf.get("enabled_cloudwatch_logs_exports")
26 if (
27 logs_exports
28 and isinstance(logs_exports, list)
29 and isinstance(logs_exports[0], list)
30 and "audit" in logs_exports[0]
31 ):
32 return CheckResult.PASSED
33
34 return CheckResult.FAILED
35
36
37 check = RDSClusterAuditLogging()
38
[end of checkov/terraform/checks/resource/aws/RDSClusterAuditLogging.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/checkov/terraform/checks/resource/aws/RDSClusterAuditLogging.py b/checkov/terraform/checks/resource/aws/RDSClusterAuditLogging.py
--- a/checkov/terraform/checks/resource/aws/RDSClusterAuditLogging.py
+++ b/checkov/terraform/checks/resource/aws/RDSClusterAuditLogging.py
@@ -5,6 +5,12 @@
from checkov.common.models.enums import CheckCategories, CheckResult
from checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck
+SUPPORTED_ENGINES = {
+ "aurora",
+ "aurora-mysql",
+ "mysql"
+}
+
class RDSClusterAuditLogging(BaseResourceCheck):
def __init__(self) -> None:
@@ -15,13 +21,18 @@
NIST.800-53.r5 SI-4(20), NIST.800-53.r5 SI-7(8)
Database logging should be enabled
"""
- name = "Ensure that RDS Cluster audit logging is enabled"
+ name = "Ensure that RDS Cluster audit logging is enabled for MySQL engine"
id = "CKV_AWS_325"
supported_resources = ("aws_rds_cluster",)
categories = (CheckCategories.LOGGING,)
super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)
def scan_resource_conf(self, conf: dict[str, list[Any]]) -> CheckResult:
+ engine = conf.get("engine")
+ if engine and isinstance(engine, list) and engine[0] not in SUPPORTED_ENGINES:
+ # only MySQL cluster support easy audit logging export
+ return CheckResult.UNKNOWN
+
logs_exports = conf.get("enabled_cloudwatch_logs_exports")
if (
logs_exports
|
{"golden_diff": "diff --git a/checkov/terraform/checks/resource/aws/RDSClusterAuditLogging.py b/checkov/terraform/checks/resource/aws/RDSClusterAuditLogging.py\n--- a/checkov/terraform/checks/resource/aws/RDSClusterAuditLogging.py\n+++ b/checkov/terraform/checks/resource/aws/RDSClusterAuditLogging.py\n@@ -5,6 +5,12 @@\n from checkov.common.models.enums import CheckCategories, CheckResult\n from checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck\n \n+SUPPORTED_ENGINES = {\n+ \"aurora\",\n+ \"aurora-mysql\",\n+ \"mysql\"\n+}\n+\n \n class RDSClusterAuditLogging(BaseResourceCheck):\n def __init__(self) -> None:\n@@ -15,13 +21,18 @@\n NIST.800-53.r5 SI-4(20), NIST.800-53.r5 SI-7(8)\n Database logging should be enabled\n \"\"\"\n- name = \"Ensure that RDS Cluster audit logging is enabled\"\n+ name = \"Ensure that RDS Cluster audit logging is enabled for MySQL engine\"\n id = \"CKV_AWS_325\"\n supported_resources = (\"aws_rds_cluster\",)\n categories = (CheckCategories.LOGGING,)\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n \n def scan_resource_conf(self, conf: dict[str, list[Any]]) -> CheckResult:\n+ engine = conf.get(\"engine\")\n+ if engine and isinstance(engine, list) and engine[0] not in SUPPORTED_ENGINES:\n+ # only MySQL cluster support easy audit logging export\n+ return CheckResult.UNKNOWN\n+\n logs_exports = conf.get(\"enabled_cloudwatch_logs_exports\")\n if (\n logs_exports\n", "issue": "CKV_AWS_325 check might not be accurate for PostgreSQL RDS instances\n**Describe the issue**\r\nThe CKV_AWS_325 check expects the 'audit' log type to be enabled for RDS Cluster audit logging. However, this log type may not be applicable for PostgreSQL RDS instances, which use the 'postgresql' log type instead.\r\n\r\n**Examples**\r\nFor a PostgreSQL RDS instance with the following Terraform configuration:\r\n\r\n**main.tf**\r\n\r\n```\r\nresource \"aws_rds_cluster\" \"main\" {\r\n # ... other configurations ...\r\n enabled_cloudwatch_logs_exports = var.enabled_cloudwatch_logs_exports\r\n # ... other configurations ...\r\n}\r\n```\r\n\r\n**variables.tf**\r\n\r\n```\r\nvariable \"enabled_cloudwatch_logs_exports\" {\r\n default = [\r\n \"postgresql\"\r\n ]\r\n}\r\n```\r\n\r\nThe CKV_AWS_325 check fails, even though the 'postgresql' log type is enabled for the RDS instance.\r\n\r\n**Expected outcome**\r\nThe CKV_AWS_325 check should either not be applicable or should pass for PostgreSQL RDS instances with the 'postgresql' log type enabled.\r\n\r\n**Version**\r\n\r\n**Checkov Version:** Lastest\r\n\r\n**Additional context**\r\nThis issue was discovered while analyzing a Terraform configuration for an AWS PostgreSQL RDS instance. 
The CKV_AWS_325 check might need to be updated to accommodate different log types for different RDS engines.\r\n\r\nhttps://github.com/bridgecrewio/checkov/blob/main/checkov/terraform/checks/resource/aws/RDSClusterAuditLogging.py\n", "before_files": [{"content": "from __future__ import annotations\n\nfrom typing import Any\n\nfrom checkov.common.models.enums import CheckCategories, CheckResult\nfrom checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck\n\n\nclass RDSClusterAuditLogging(BaseResourceCheck):\n def __init__(self) -> None:\n \"\"\"\n NIST.800-53.r5 AC-2(4), NIST.800-53.r5 AC-4(26), NIST.800-53.r5 AC-6(9), NIST.800-53.r5 AU-10,\n NIST.800-53.r5 AU-12, NIST.800-53.r5 AU-2, NIST.800-53.r5 AU-3, NIST.800-53.r5 AU-6(3), NIST.800-53.r5 AU-6(4),\n NIST.800-53.r5 CA-7, NIST.800-53.r5 SC-7(10), NIST.800-53.r5 SC-7(9), NIST.800-53.r5 SI-3(8),\n NIST.800-53.r5 SI-4(20), NIST.800-53.r5 SI-7(8)\n Database logging should be enabled\n \"\"\"\n name = \"Ensure that RDS Cluster audit logging is enabled\"\n id = \"CKV_AWS_325\"\n supported_resources = (\"aws_rds_cluster\",)\n categories = (CheckCategories.LOGGING,)\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n\n def scan_resource_conf(self, conf: dict[str, list[Any]]) -> CheckResult:\n logs_exports = conf.get(\"enabled_cloudwatch_logs_exports\")\n if (\n logs_exports\n and isinstance(logs_exports, list)\n and isinstance(logs_exports[0], list)\n and \"audit\" in logs_exports[0]\n ):\n return CheckResult.PASSED\n\n return CheckResult.FAILED\n\n\ncheck = RDSClusterAuditLogging()\n", "path": "checkov/terraform/checks/resource/aws/RDSClusterAuditLogging.py"}]}
| 1,445 | 405 |
gh_patches_debug_7747 | rasdani/github-patches | git_diff | Cog-Creators__Red-DiscordBot-1981 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[V3 ModLog] Error in command "case"
# Command bugs
#### Command name
[p]case
#### What cog is this command from?
ModLog
#### What were you expecting to happen?
To show me information about a case.
#### What actually happened?
I get this error:

#### How can we reproduce this issue?
1. Have a modlog with cases.
2. Do [p]case
</issue>
<code>
[start of redbot/cogs/modlog/modlog.py]
1 import discord
2
3 from redbot.core import checks, modlog, commands
4 from redbot.core.bot import Red
5 from redbot.core.i18n import Translator, cog_i18n
6 from redbot.core.utils.chat_formatting import box
7
8 _ = Translator("ModLog", __file__)
9
10
11 @cog_i18n(_)
12 class ModLog:
13 """Log for mod actions"""
14
15 def __init__(self, bot: Red):
16 self.bot = bot
17
18 @commands.group()
19 @checks.guildowner_or_permissions(administrator=True)
20 async def modlogset(self, ctx: commands.Context):
21 """Settings for the mod log"""
22 pass
23
24 @modlogset.command()
25 @commands.guild_only()
26 async def modlog(self, ctx: commands.Context, channel: discord.TextChannel = None):
27 """Sets a channel as mod log
28
29 Leaving the channel parameter empty will deactivate it"""
30 guild = ctx.guild
31 if channel:
32 if channel.permissions_for(guild.me).send_messages:
33 await modlog.set_modlog_channel(guild, channel)
34 await ctx.send(_("Mod events will be sent to {}").format(channel.mention))
35 else:
36 await ctx.send(
37 _("I do not have permissions to send messages in {}!").format(channel.mention)
38 )
39 else:
40 try:
41 await modlog.get_modlog_channel(guild)
42 except RuntimeError:
43 await ctx.send_help()
44 else:
45 await modlog.set_modlog_channel(guild, None)
46 await ctx.send(_("Mod log deactivated."))
47
48 @modlogset.command(name="cases")
49 @commands.guild_only()
50 async def set_cases(self, ctx: commands.Context, action: str = None):
51 """Enables or disables case creation for each type of mod action"""
52 guild = ctx.guild
53
54 if action is None: # No args given
55 casetypes = await modlog.get_all_casetypes(guild)
56 await ctx.send_help()
57 title = _("Current settings:")
58 msg = ""
59 for ct in casetypes:
60 enabled = await ct.is_enabled()
61 value = "enabled" if enabled else "disabled"
62 msg += "%s : %s\n" % (ct.name, value)
63
64 msg = title + "\n" + box(msg)
65 await ctx.send(msg)
66 return
67 casetype = await modlog.get_casetype(action, guild)
68 if not casetype:
69 await ctx.send(_("That action is not registered"))
70 else:
71
72 enabled = await casetype.is_enabled()
73 await casetype.set_enabled(True if not enabled else False)
74
75 msg = _("Case creation for {} actions is now {}.").format(
76 action, "enabled" if not enabled else "disabled"
77 )
78 await ctx.send(msg)
79
80 @modlogset.command()
81 @commands.guild_only()
82 async def resetcases(self, ctx: commands.Context):
83 """Resets modlog's cases"""
84 guild = ctx.guild
85 await modlog.reset_cases(guild)
86 await ctx.send(_("Cases have been reset."))
87
88 @commands.command()
89 @commands.guild_only()
90 async def case(self, ctx: commands.Context, number: int):
91 """Shows the specified case"""
92 try:
93 case = await modlog.get_case(number, ctx.guild, self.bot)
94 except RuntimeError:
95 await ctx.send(_("That case does not exist for that server"))
96 return
97 else:
98 await ctx.send(embed=await case.get_case_msg_content())
99
100 @commands.command(usage="[case] <reason>")
101 @commands.guild_only()
102 async def reason(self, ctx: commands.Context, *, reason: str):
103 """Lets you specify a reason for mod-log's cases
104
105 Please note that you can only edit cases you are
106 the owner of unless you are a mod/admin or the server owner.
107
108 If no number is specified, the latest case will be used."""
109 author = ctx.author
110 guild = ctx.guild
111 potential_case = reason.split()[0]
112 if potential_case.isdigit():
113 case = int(potential_case)
114 reason = reason.replace(potential_case, "")
115 else:
116 case = str(int(await modlog.get_next_case_number(guild)) - 1)
117 # latest case
118 try:
119 case_before = await modlog.get_case(case, guild, self.bot)
120 except RuntimeError:
121 await ctx.send(_("That case does not exist!"))
122 return
123 else:
124 if case_before.moderator is None:
125 # No mod set, so attempt to find out if the author
126 # triggered the case creation with an action
127 bot_perms = guild.me.guild_permissions
128 if bot_perms.view_audit_log:
129 case_type = await modlog.get_casetype(case_before.action_type, guild)
130 if case_type is not None and case_type.audit_type is not None:
131 audit_type = getattr(discord.AuditLogAction, case_type.audit_type)
132 if audit_type:
133 audit_case = None
134 async for entry in guild.audit_logs(action=audit_type):
135 if (
136 entry.target.id == case_before.user.id
137 and entry.action == audit_type
138 ):
139 audit_case = entry
140 break
141 if audit_case:
142 case_before.moderator = audit_case.user
143 is_guild_owner = author == guild.owner
144 is_case_author = author == case_before.moderator
145 author_is_mod = await ctx.bot.is_mod(author)
146 if not (is_guild_owner or is_case_author or author_is_mod):
147 await ctx.send(_("You are not authorized to modify that case!"))
148 return
149 to_modify = {"reason": reason}
150 if case_before.moderator != author:
151 to_modify["amended_by"] = author
152 to_modify["modified_at"] = ctx.message.created_at.timestamp()
153 await case_before.edit(to_modify)
154 await ctx.send(_("Reason has been updated."))
155
[end of redbot/cogs/modlog/modlog.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/redbot/cogs/modlog/modlog.py b/redbot/cogs/modlog/modlog.py
--- a/redbot/cogs/modlog/modlog.py
+++ b/redbot/cogs/modlog/modlog.py
@@ -95,7 +95,10 @@
await ctx.send(_("That case does not exist for that server"))
return
else:
- await ctx.send(embed=await case.get_case_msg_content())
+ if await ctx.embed_requested():
+ await ctx.send(embed=await case.message_content(embed=True))
+ else:
+ await ctx.send(await case.message_content(embed=False))
@commands.command(usage="[case] <reason>")
@commands.guild_only()
|
{"golden_diff": "diff --git a/redbot/cogs/modlog/modlog.py b/redbot/cogs/modlog/modlog.py\n--- a/redbot/cogs/modlog/modlog.py\n+++ b/redbot/cogs/modlog/modlog.py\n@@ -95,7 +95,10 @@\n await ctx.send(_(\"That case does not exist for that server\"))\n return\n else:\n- await ctx.send(embed=await case.get_case_msg_content())\n+ if await ctx.embed_requested():\n+ await ctx.send(embed=await case.message_content(embed=True))\n+ else:\n+ await ctx.send(await case.message_content(embed=False))\n \n @commands.command(usage=\"[case] <reason>\")\n @commands.guild_only()\n", "issue": "[V3 ModLog] Error in command \"case\"\n# Command bugs\r\n\r\n\r\n#### Command name\r\n\r\n[p]case\r\n\r\n#### What cog is this command from?\r\n\r\nModLog\r\n\r\n#### What were you expecting to happen?\r\n\r\nTo show me information about a case.\r\n\r\n\r\n#### What actually happened?\r\nI get this error:\r\n\r\n\r\n\r\n\r\n#### How can we reproduce this issue?\r\n\r\n1. Have a modlog with cases.\r\n2. Do [p]case\r\n\n", "before_files": [{"content": "import discord\n\nfrom redbot.core import checks, modlog, commands\nfrom redbot.core.bot import Red\nfrom redbot.core.i18n import Translator, cog_i18n\nfrom redbot.core.utils.chat_formatting import box\n\n_ = Translator(\"ModLog\", __file__)\n\n\n@cog_i18n(_)\nclass ModLog:\n \"\"\"Log for mod actions\"\"\"\n\n def __init__(self, bot: Red):\n self.bot = bot\n\n @commands.group()\n @checks.guildowner_or_permissions(administrator=True)\n async def modlogset(self, ctx: commands.Context):\n \"\"\"Settings for the mod log\"\"\"\n pass\n\n @modlogset.command()\n @commands.guild_only()\n async def modlog(self, ctx: commands.Context, channel: discord.TextChannel = None):\n \"\"\"Sets a channel as mod log\n\n Leaving the channel parameter empty will deactivate it\"\"\"\n guild = ctx.guild\n if channel:\n if channel.permissions_for(guild.me).send_messages:\n await modlog.set_modlog_channel(guild, channel)\n await ctx.send(_(\"Mod events will be sent to {}\").format(channel.mention))\n else:\n await ctx.send(\n _(\"I do not have permissions to send messages in {}!\").format(channel.mention)\n )\n else:\n try:\n await modlog.get_modlog_channel(guild)\n except RuntimeError:\n await ctx.send_help()\n else:\n await modlog.set_modlog_channel(guild, None)\n await ctx.send(_(\"Mod log deactivated.\"))\n\n @modlogset.command(name=\"cases\")\n @commands.guild_only()\n async def set_cases(self, ctx: commands.Context, action: str = None):\n \"\"\"Enables or disables case creation for each type of mod action\"\"\"\n guild = ctx.guild\n\n if action is None: # No args given\n casetypes = await modlog.get_all_casetypes(guild)\n await ctx.send_help()\n title = _(\"Current settings:\")\n msg = \"\"\n for ct in casetypes:\n enabled = await ct.is_enabled()\n value = \"enabled\" if enabled else \"disabled\"\n msg += \"%s : %s\\n\" % (ct.name, value)\n\n msg = title + \"\\n\" + box(msg)\n await ctx.send(msg)\n return\n casetype = await modlog.get_casetype(action, guild)\n if not casetype:\n await ctx.send(_(\"That action is not registered\"))\n else:\n\n enabled = await casetype.is_enabled()\n await casetype.set_enabled(True if not enabled else False)\n\n msg = _(\"Case creation for {} actions is now {}.\").format(\n action, \"enabled\" if not enabled else \"disabled\"\n )\n await ctx.send(msg)\n\n @modlogset.command()\n @commands.guild_only()\n async def resetcases(self, ctx: commands.Context):\n \"\"\"Resets modlog's cases\"\"\"\n guild = ctx.guild\n await modlog.reset_cases(guild)\n await 
ctx.send(_(\"Cases have been reset.\"))\n\n @commands.command()\n @commands.guild_only()\n async def case(self, ctx: commands.Context, number: int):\n \"\"\"Shows the specified case\"\"\"\n try:\n case = await modlog.get_case(number, ctx.guild, self.bot)\n except RuntimeError:\n await ctx.send(_(\"That case does not exist for that server\"))\n return\n else:\n await ctx.send(embed=await case.get_case_msg_content())\n\n @commands.command(usage=\"[case] <reason>\")\n @commands.guild_only()\n async def reason(self, ctx: commands.Context, *, reason: str):\n \"\"\"Lets you specify a reason for mod-log's cases\n \n Please note that you can only edit cases you are\n the owner of unless you are a mod/admin or the server owner.\n \n If no number is specified, the latest case will be used.\"\"\"\n author = ctx.author\n guild = ctx.guild\n potential_case = reason.split()[0]\n if potential_case.isdigit():\n case = int(potential_case)\n reason = reason.replace(potential_case, \"\")\n else:\n case = str(int(await modlog.get_next_case_number(guild)) - 1)\n # latest case\n try:\n case_before = await modlog.get_case(case, guild, self.bot)\n except RuntimeError:\n await ctx.send(_(\"That case does not exist!\"))\n return\n else:\n if case_before.moderator is None:\n # No mod set, so attempt to find out if the author\n # triggered the case creation with an action\n bot_perms = guild.me.guild_permissions\n if bot_perms.view_audit_log:\n case_type = await modlog.get_casetype(case_before.action_type, guild)\n if case_type is not None and case_type.audit_type is not None:\n audit_type = getattr(discord.AuditLogAction, case_type.audit_type)\n if audit_type:\n audit_case = None\n async for entry in guild.audit_logs(action=audit_type):\n if (\n entry.target.id == case_before.user.id\n and entry.action == audit_type\n ):\n audit_case = entry\n break\n if audit_case:\n case_before.moderator = audit_case.user\n is_guild_owner = author == guild.owner\n is_case_author = author == case_before.moderator\n author_is_mod = await ctx.bot.is_mod(author)\n if not (is_guild_owner or is_case_author or author_is_mod):\n await ctx.send(_(\"You are not authorized to modify that case!\"))\n return\n to_modify = {\"reason\": reason}\n if case_before.moderator != author:\n to_modify[\"amended_by\"] = author\n to_modify[\"modified_at\"] = ctx.message.created_at.timestamp()\n await case_before.edit(to_modify)\n await ctx.send(_(\"Reason has been updated.\"))\n", "path": "redbot/cogs/modlog/modlog.py"}]}
| 2,318 | 152 |
gh_patches_debug_21688 | rasdani/github-patches | git_diff | scikit-hep__pyhf-1419 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add pyhf.utils.citation to the python API
</issue>
<code>
[start of src/pyhf/utils.py]
1 import json
2 import jsonschema
3 import pkg_resources
4 from pathlib import Path
5 import yaml
6 import click
7 import hashlib
8
9 from .exceptions import InvalidSpecification
10
11 SCHEMA_CACHE = {}
12 SCHEMA_BASE = "https://scikit-hep.org/pyhf/schemas/"
13 SCHEMA_VERSION = '1.0.0'
14
15
16 def load_schema(schema_id, version=None):
17 global SCHEMA_CACHE
18 if not version:
19 version = SCHEMA_VERSION
20 try:
21 return SCHEMA_CACHE[f'{SCHEMA_BASE}{Path(version).joinpath(schema_id)}']
22 except KeyError:
23 pass
24
25 path = pkg_resources.resource_filename(
26 __name__, str(Path('schemas').joinpath(version, schema_id))
27 )
28 with open(path) as json_schema:
29 schema = json.load(json_schema)
30 SCHEMA_CACHE[schema['$id']] = schema
31 return SCHEMA_CACHE[schema['$id']]
32
33
34 # load the defs.json as it is included by $ref
35 load_schema('defs.json')
36
37
38 def validate(spec, schema_name, version=None):
39 schema = load_schema(schema_name, version=version)
40 try:
41 resolver = jsonschema.RefResolver(
42 base_uri=f"file://{pkg_resources.resource_filename(__name__, 'schemas/'):s}",
43 referrer=schema_name,
44 store=SCHEMA_CACHE,
45 )
46 validator = jsonschema.Draft6Validator(
47 schema, resolver=resolver, format_checker=None
48 )
49 return validator.validate(spec)
50 except jsonschema.ValidationError as err:
51 raise InvalidSpecification(err, schema_name)
52
53
54 def options_from_eqdelimstring(opts):
55 document = '\n'.join(
56 f"{opt.split('=', 1)[0]}: {opt.split('=', 1)[1]}" for opt in opts
57 )
58 return yaml.safe_load(document)
59
60
61 class EqDelimStringParamType(click.ParamType):
62 name = 'equal-delimited option'
63
64 def convert(self, value, param, ctx):
65 try:
66 return options_from_eqdelimstring([value])
67 except IndexError:
68 self.fail(f'{value:s} is not a valid equal-delimited string', param, ctx)
69
70
71 def digest(obj, algorithm='sha256'):
72 """
73 Get the digest for the provided object. Note: object must be JSON-serializable.
74
75 The hashing algorithms supported are in :mod:`hashlib`, part of Python's Standard Libraries.
76
77 Example:
78
79 >>> import pyhf
80 >>> obj = {'a': 2.0, 'b': 3.0, 'c': 1.0}
81 >>> pyhf.utils.digest(obj)
82 'a38f6093800189b79bc22ef677baf90c75705af2cfc7ff594159eca54eaa7928'
83 >>> pyhf.utils.digest(obj, algorithm='md5')
84 '2c0633f242928eb55c3672fed5ba8612'
85 >>> pyhf.utils.digest(obj, algorithm='sha1')
86 '49a27f499e763766c9545b294880df277be6f545'
87
88 Raises:
89 ValueError: If the object is not JSON-serializable or if the algorithm is not supported.
90
91 Args:
92 obj (:obj:`jsonable`): A JSON-serializable object to compute the digest of. Usually a :class:`~pyhf.workspace.Workspace` object.
93 algorithm (:obj:`str`): The hashing algorithm to use.
94
95 Returns:
96 digest (:obj:`str`): The digest for the JSON-serialized object provided and hash algorithm specified.
97 """
98
99 try:
100 stringified = json.dumps(obj, sort_keys=True, ensure_ascii=False).encode('utf8')
101 except TypeError:
102 raise ValueError(
103 "The supplied object is not JSON-serializable for calculating a hash."
104 )
105 try:
106 hash_alg = getattr(hashlib, algorithm)
107 except AttributeError:
108 raise ValueError(
109 f"{algorithm} is not an algorithm provided by Python's hashlib library."
110 )
111 return hash_alg(stringified).hexdigest()
112
113
114 def remove_prefix(text, prefix):
115 """
116 Remove a prefix from the beginning of the provided text.
117
118 Example:
119
120 >>> import pyhf
121 >>> pyhf.utils.remove_prefix("alpha_syst1", "alpha_")
122 'syst1'
123
124 Args:
125 text (:obj:`str`): A provided input to manipulate.
126 prefix (:obj:`str`): A prefix to remove from provided input, if it exists.
127
128 Returns:
129 stripped_text (:obj:`str`): Text with the prefix removed.
130 """
131 # NB: python3.9 can be `return text.removeprefix(prefix)`
132 if text.startswith(prefix):
133 return text[len(prefix) :]
134 return text
135
136
137 def citation(oneline=False):
138 """
139 Get the bibtex citation for pyhf
140
141 Example:
142
143 >>> import pyhf
144 >>> pyhf.utils.citation(True)
145 '@software{pyhf, author = {Lukas Heinrich and Matthew Feickert and Giordon Stark}, title = "{pyhf: v0.6.1}", version = {0.6.1}, doi = {10.5281/zenodo.1169739}, url = {https://github.com/scikit-hep/pyhf},}@article{pyhf_joss, doi = {10.21105/joss.02823}, url = {https://doi.org/10.21105/joss.02823}, year = {2021}, publisher = {The Open Journal}, volume = {6}, number = {58}, pages = {2823}, author = {Lukas Heinrich and Matthew Feickert and Giordon Stark and Kyle Cranmer}, title = {pyhf: pure-Python implementation of HistFactory statistical models}, journal = {Journal of Open Source Software}}'
146
147 Keyword Args:
148 oneline (:obj:`bool`): Whether to provide citation with new lines (default) or as a one-liner.
149
150 Returns:
151 citation (:obj:`str`): The citation for this software
152 """
153 path = Path(
154 pkg_resources.resource_filename(
155 __name__, str(Path('data').joinpath('citation.bib'))
156 )
157 )
158 with path.open() as fp:
159 # remove end-of-file newline if there is one
160 data = fp.read().strip()
161
162 if oneline:
163 data = ''.join(data.splitlines())
164 return data
165
[end of src/pyhf/utils.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/src/pyhf/utils.py b/src/pyhf/utils.py
--- a/src/pyhf/utils.py
+++ b/src/pyhf/utils.py
@@ -141,7 +141,7 @@
Example:
>>> import pyhf
- >>> pyhf.utils.citation(True)
+ >>> pyhf.utils.citation(oneline=True)
'@software{pyhf, author = {Lukas Heinrich and Matthew Feickert and Giordon Stark}, title = "{pyhf: v0.6.1}", version = {0.6.1}, doi = {10.5281/zenodo.1169739}, url = {https://github.com/scikit-hep/pyhf},}@article{pyhf_joss, doi = {10.21105/joss.02823}, url = {https://doi.org/10.21105/joss.02823}, year = {2021}, publisher = {The Open Journal}, volume = {6}, number = {58}, pages = {2823}, author = {Lukas Heinrich and Matthew Feickert and Giordon Stark and Kyle Cranmer}, title = {pyhf: pure-Python implementation of HistFactory statistical models}, journal = {Journal of Open Source Software}}'
Keyword Args:
|
{"golden_diff": "diff --git a/src/pyhf/utils.py b/src/pyhf/utils.py\n--- a/src/pyhf/utils.py\n+++ b/src/pyhf/utils.py\n@@ -141,7 +141,7 @@\n Example:\n \n >>> import pyhf\n- >>> pyhf.utils.citation(True)\n+ >>> pyhf.utils.citation(oneline=True)\n '@software{pyhf, author = {Lukas Heinrich and Matthew Feickert and Giordon Stark}, title = \"{pyhf: v0.6.1}\", version = {0.6.1}, doi = {10.5281/zenodo.1169739}, url = {https://github.com/scikit-hep/pyhf},}@article{pyhf_joss, doi = {10.21105/joss.02823}, url = {https://doi.org/10.21105/joss.02823}, year = {2021}, publisher = {The Open Journal}, volume = {6}, number = {58}, pages = {2823}, author = {Lukas Heinrich and Matthew Feickert and Giordon Stark and Kyle Cranmer}, title = {pyhf: pure-Python implementation of HistFactory statistical models}, journal = {Journal of Open Source Software}}'\n \n Keyword Args:\n", "issue": "Add pyhf.utils.citation to the python API\n\n", "before_files": [{"content": "import json\nimport jsonschema\nimport pkg_resources\nfrom pathlib import Path\nimport yaml\nimport click\nimport hashlib\n\nfrom .exceptions import InvalidSpecification\n\nSCHEMA_CACHE = {}\nSCHEMA_BASE = \"https://scikit-hep.org/pyhf/schemas/\"\nSCHEMA_VERSION = '1.0.0'\n\n\ndef load_schema(schema_id, version=None):\n global SCHEMA_CACHE\n if not version:\n version = SCHEMA_VERSION\n try:\n return SCHEMA_CACHE[f'{SCHEMA_BASE}{Path(version).joinpath(schema_id)}']\n except KeyError:\n pass\n\n path = pkg_resources.resource_filename(\n __name__, str(Path('schemas').joinpath(version, schema_id))\n )\n with open(path) as json_schema:\n schema = json.load(json_schema)\n SCHEMA_CACHE[schema['$id']] = schema\n return SCHEMA_CACHE[schema['$id']]\n\n\n# load the defs.json as it is included by $ref\nload_schema('defs.json')\n\n\ndef validate(spec, schema_name, version=None):\n schema = load_schema(schema_name, version=version)\n try:\n resolver = jsonschema.RefResolver(\n base_uri=f\"file://{pkg_resources.resource_filename(__name__, 'schemas/'):s}\",\n referrer=schema_name,\n store=SCHEMA_CACHE,\n )\n validator = jsonschema.Draft6Validator(\n schema, resolver=resolver, format_checker=None\n )\n return validator.validate(spec)\n except jsonschema.ValidationError as err:\n raise InvalidSpecification(err, schema_name)\n\n\ndef options_from_eqdelimstring(opts):\n document = '\\n'.join(\n f\"{opt.split('=', 1)[0]}: {opt.split('=', 1)[1]}\" for opt in opts\n )\n return yaml.safe_load(document)\n\n\nclass EqDelimStringParamType(click.ParamType):\n name = 'equal-delimited option'\n\n def convert(self, value, param, ctx):\n try:\n return options_from_eqdelimstring([value])\n except IndexError:\n self.fail(f'{value:s} is not a valid equal-delimited string', param, ctx)\n\n\ndef digest(obj, algorithm='sha256'):\n \"\"\"\n Get the digest for the provided object. Note: object must be JSON-serializable.\n\n The hashing algorithms supported are in :mod:`hashlib`, part of Python's Standard Libraries.\n\n Example:\n\n >>> import pyhf\n >>> obj = {'a': 2.0, 'b': 3.0, 'c': 1.0}\n >>> pyhf.utils.digest(obj)\n 'a38f6093800189b79bc22ef677baf90c75705af2cfc7ff594159eca54eaa7928'\n >>> pyhf.utils.digest(obj, algorithm='md5')\n '2c0633f242928eb55c3672fed5ba8612'\n >>> pyhf.utils.digest(obj, algorithm='sha1')\n '49a27f499e763766c9545b294880df277be6f545'\n\n Raises:\n ValueError: If the object is not JSON-serializable or if the algorithm is not supported.\n\n Args:\n obj (:obj:`jsonable`): A JSON-serializable object to compute the digest of. 
Usually a :class:`~pyhf.workspace.Workspace` object.\n algorithm (:obj:`str`): The hashing algorithm to use.\n\n Returns:\n digest (:obj:`str`): The digest for the JSON-serialized object provided and hash algorithm specified.\n \"\"\"\n\n try:\n stringified = json.dumps(obj, sort_keys=True, ensure_ascii=False).encode('utf8')\n except TypeError:\n raise ValueError(\n \"The supplied object is not JSON-serializable for calculating a hash.\"\n )\n try:\n hash_alg = getattr(hashlib, algorithm)\n except AttributeError:\n raise ValueError(\n f\"{algorithm} is not an algorithm provided by Python's hashlib library.\"\n )\n return hash_alg(stringified).hexdigest()\n\n\ndef remove_prefix(text, prefix):\n \"\"\"\n Remove a prefix from the beginning of the provided text.\n\n Example:\n\n >>> import pyhf\n >>> pyhf.utils.remove_prefix(\"alpha_syst1\", \"alpha_\")\n 'syst1'\n\n Args:\n text (:obj:`str`): A provided input to manipulate.\n prefix (:obj:`str`): A prefix to remove from provided input, if it exists.\n\n Returns:\n stripped_text (:obj:`str`): Text with the prefix removed.\n \"\"\"\n # NB: python3.9 can be `return text.removeprefix(prefix)`\n if text.startswith(prefix):\n return text[len(prefix) :]\n return text\n\n\ndef citation(oneline=False):\n \"\"\"\n Get the bibtex citation for pyhf\n\n Example:\n\n >>> import pyhf\n >>> pyhf.utils.citation(True)\n '@software{pyhf, author = {Lukas Heinrich and Matthew Feickert and Giordon Stark}, title = \"{pyhf: v0.6.1}\", version = {0.6.1}, doi = {10.5281/zenodo.1169739}, url = {https://github.com/scikit-hep/pyhf},}@article{pyhf_joss, doi = {10.21105/joss.02823}, url = {https://doi.org/10.21105/joss.02823}, year = {2021}, publisher = {The Open Journal}, volume = {6}, number = {58}, pages = {2823}, author = {Lukas Heinrich and Matthew Feickert and Giordon Stark and Kyle Cranmer}, title = {pyhf: pure-Python implementation of HistFactory statistical models}, journal = {Journal of Open Source Software}}'\n\n Keyword Args:\n oneline (:obj:`bool`): Whether to provide citation with new lines (default) or as a one-liner.\n\n Returns:\n citation (:obj:`str`): The citation for this software\n \"\"\"\n path = Path(\n pkg_resources.resource_filename(\n __name__, str(Path('data').joinpath('citation.bib'))\n )\n )\n with path.open() as fp:\n # remove end-of-file newline if there is one\n data = fp.read().strip()\n\n if oneline:\n data = ''.join(data.splitlines())\n return data\n", "path": "src/pyhf/utils.py"}]}
| 2,408 | 316 |
gh_patches_debug_30078 | rasdani/github-patches | git_diff | ansible-collections__community.general-7089 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
lookup bitwarden field=notes resulting in KeyError
### Summary
When using the community.general.bitwarden lookup there is an error trying to lookup a secure note.
Playbook:
```yaml
- debug:
msg: >-
"{{ lookup('community.general.bitwarden', 'secure note 2023', field='notes') }}"
```
Error:
```
fatal: [my.hostname.net]: FAILED! => {
"msg": "An unhandled exception occurred while running the lookup plugin 'community.general.bitwarden'. Error was a <class 'KeyError'>, original message: 'fields'. 'fields'"
}
```
With debug:
```console
exception during Jinja2 execution: Traceback (most recent call last):
File "/opt/homebrew/Cellar/ansible/8.0.0/libexec/lib/python3.11/site-packages/ansible/template/__init__.py", line 831, in _lookup
ran = instance.run(loop_terms, variables=self._available_variables, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/opt/homebrew/Cellar/ansible/8.0.0/libexec/lib/python3.11/site-packages/ansible_collections/community/general/plugins/lookup/bitwarden.py", line 159, in run
return [_bitwarden.get_field(field, term, search_field, collection_id) for term in terms]
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/opt/homebrew/Cellar/ansible/8.0.0/libexec/lib/python3.11/site-packages/ansible_collections/community/general/plugins/lookup/bitwarden.py", line 159, in <listcomp>
return [_bitwarden.get_field(field, term, search_field, collection_id) for term in terms]
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/opt/homebrew/Cellar/ansible/8.0.0/libexec/lib/python3.11/site-packages/ansible_collections/community/general/plugins/lookup/bitwarden.py", line 141, in get_field
for custom_field in match['fields']:
~~~~~^^^^^^^^^^
KeyError: 'fields'
fatal: [my.hostname.net]: FAILED! => {
"msg": "An unhandled exception occurred while running the lookup plugin 'community.general.bitwarden'. Error was a <class 'KeyError'>, original message: 'fields'. 'fields'"
}
```
Same thing when trying to do the lookup by id:
```yaml
- debug:
msg: >-
"{{ lookup('community.general.bitwarden', 'ba9f1125-e52a-4d4f-9b2b-af1c43a00bd6', search='id', field='notes') }}"
```
### Issue Type
Bug Report
### Component Name
bitwarden
### Ansible Version
```console (paste below)
$ ansible --version
ansible [core 2.15.0]
config file = /Users/sjoerd/.ansible.cfg
configured module search path = ['/Users/name/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']
ansible python module location = /opt/homebrew/Cellar/ansible/8.0.0/libexec/lib/python3.11/site-packages/ansible
ansible collection location = /Users/name/.ansible/collections:/usr/share/ansible/collections
executable location = /opt/homebrew/bin/ansible
python version = 3.11.3 (main, Apr 7 2023, 20:13:31) [Clang 14.0.0 (clang-1400.0.29.202)] (/opt/homebrew/Cellar/ansible/8.0.0/libexec/bin/python3.11)
jinja version = 3.1.2
libyaml = True
```
### Community.general Version
```console (paste below)
$ ansible-galaxy collection list community.general
# /opt/homebrew/Cellar/ansible/8.0.0/libexec/lib/python3.11/site-packages/ansible_collections
Collection Version
----------------- -------
community.general 7.0.1
```
### Configuration
```console (paste below)
$ ansible-config dump --only-changed
```
### OS / Environment
Source: MacOS
Target: CentOS Stream 9
### Steps to Reproduce
<!--- Paste example playbooks or commands between quotes below -->
```yaml (paste below)
- debug:
msg: >-
"{{ lookup('community.general.bitwarden', 'secure note 2023', field='notes') }}"
```
### Expected Results
Expected the contents of the secure not to be printed to the terminal
### Actual Results
```console (paste below)
fatal: [my.hostname.net]: FAILED! => {
"msg": "An unhandled exception occurred while running the lookup plugin 'community.general.bitwarden'. Error was a <class 'KeyError'>, original message: 'fields'. 'fields'"
}
```
### Code of Conduct
- [X] I agree to follow the Ansible Code of Conduct
</issue>
<code>
[start of plugins/lookup/bitwarden.py]
1 # -*- coding: utf-8 -*-
2 # Copyright (c) 2022, Jonathan Lung <[email protected]>
3 # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
4 # SPDX-License-Identifier: GPL-3.0-or-later
5 from __future__ import (absolute_import, division, print_function)
6 __metaclass__ = type
7
8 DOCUMENTATION = """
9 name: bitwarden
10 author:
11 - Jonathan Lung (@lungj) <[email protected]>
12 requirements:
13 - bw (command line utility)
14 - be logged into bitwarden
15 - bitwarden vault unlocked
16 - C(BW_SESSION) environment variable set
17 short_description: Retrieve secrets from Bitwarden
18 version_added: 5.4.0
19 description:
20 - Retrieve secrets from Bitwarden.
21 options:
22 _terms:
23 description: Key(s) to fetch values for from login info.
24 required: true
25 type: list
26 elements: str
27 search:
28 description: Field to retrieve, for example C(name) or C(id).
29 type: str
30 default: name
31 version_added: 5.7.0
32 field:
33 description: Field to fetch. Leave unset to fetch whole response.
34 type: str
35 collection_id:
36 description: Collection ID to filter results by collection. Leave unset to skip filtering.
37 type: str
38 version_added: 6.3.0
39 """
40
41 EXAMPLES = """
42 - name: "Get 'password' from Bitwarden record named 'a_test'"
43 ansible.builtin.debug:
44 msg: >-
45 {{ lookup('community.general.bitwarden', 'a_test', field='password') }}
46
47 - name: "Get 'password' from Bitwarden record with id 'bafba515-af11-47e6-abe3-af1200cd18b2'"
48 ansible.builtin.debug:
49 msg: >-
50 {{ lookup('community.general.bitwarden', 'bafba515-af11-47e6-abe3-af1200cd18b2', search='id', field='password') }}
51
52 - name: "Get 'password' from Bitwarden record named 'a_test' from collection"
53 ansible.builtin.debug:
54 msg: >-
55 {{ lookup('community.general.bitwarden', 'a_test', field='password', collection_id='bafba515-af11-47e6-abe3-af1200cd18b2') }}
56
57 - name: "Get full Bitwarden record named 'a_test'"
58 ansible.builtin.debug:
59 msg: >-
60 {{ lookup('community.general.bitwarden', 'a_test') }}
61
62 - name: "Get custom field 'api_key' from Bitwarden record named 'a_test'"
63 ansible.builtin.debug:
64 msg: >-
65 {{ lookup('community.general.bitwarden', 'a_test', field='api_key') }}
66 """
67
68 RETURN = """
69 _raw:
70 description: List of requested field or JSON object of list of matches.
71 type: list
72 elements: raw
73 """
74
75 from subprocess import Popen, PIPE
76
77 from ansible.errors import AnsibleError
78 from ansible.module_utils.common.text.converters import to_bytes, to_text
79 from ansible.parsing.ajson import AnsibleJSONDecoder
80 from ansible.plugins.lookup import LookupBase
81
82
83 class BitwardenException(AnsibleError):
84 pass
85
86
87 class Bitwarden(object):
88
89 def __init__(self, path='bw'):
90 self._cli_path = path
91
92 @property
93 def cli_path(self):
94 return self._cli_path
95
96 @property
97 def unlocked(self):
98 out, err = self._run(['status'], stdin="")
99 decoded = AnsibleJSONDecoder().raw_decode(out)[0]
100 return decoded['status'] == 'unlocked'
101
102 def _run(self, args, stdin=None, expected_rc=0):
103 p = Popen([self.cli_path] + args, stdout=PIPE, stderr=PIPE, stdin=PIPE)
104 out, err = p.communicate(to_bytes(stdin))
105 rc = p.wait()
106 if rc != expected_rc:
107 raise BitwardenException(err)
108 return to_text(out, errors='surrogate_or_strict'), to_text(err, errors='surrogate_or_strict')
109
110 def _get_matches(self, search_value, search_field, collection_id):
111 """Return matching records whose search_field is equal to key.
112 """
113
114 # Prepare set of params for Bitwarden CLI
115 params = ['list', 'items', '--search', search_value]
116
117 if collection_id:
118 params.extend(['--collectionid', collection_id])
119
120 out, err = self._run(params)
121
122 # This includes things that matched in different fields.
123 initial_matches = AnsibleJSONDecoder().raw_decode(out)[0]
124
125 # Filter to only include results from the right field.
126 return [item for item in initial_matches if item[search_field] == search_value]
127
128 def get_field(self, field, search_value, search_field="name", collection_id=None):
129 """Return a list of the specified field for records whose search_field match search_value
130 and filtered by collection if collection has been provided.
131
132 If field is None, return the whole record for each match.
133 """
134 matches = self._get_matches(search_value, search_field, collection_id)
135
136 if field in ['autofillOnPageLoad', 'password', 'passwordRevisionDate', 'totp', 'uris', 'username']:
137 return [match['login'][field] for match in matches]
138 elif not field:
139 return matches
140 else:
141 custom_field_matches = []
142 for match in matches:
143 for custom_field in match['fields']:
144 if custom_field['name'] == field:
145 custom_field_matches.append(custom_field['value'])
146 if matches and not custom_field_matches:
147 raise AnsibleError("Custom field {field} does not exist in {search_value}".format(field=field, search_value=search_value))
148 return custom_field_matches
149
150
151 class LookupModule(LookupBase):
152
153 def run(self, terms, variables=None, **kwargs):
154 self.set_options(var_options=variables, direct=kwargs)
155 field = self.get_option('field')
156 search_field = self.get_option('search')
157 collection_id = self.get_option('collection_id')
158 if not _bitwarden.unlocked:
159 raise AnsibleError("Bitwarden Vault locked. Run 'bw unlock'.")
160
161 return [_bitwarden.get_field(field, term, search_field, collection_id) for term in terms]
162
163
164 _bitwarden = Bitwarden()
165
[end of plugins/lookup/bitwarden.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
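For reference, a patch in the format above is what `git apply` consumes; a minimal sketch of driving that step from Python (the patch file name is hypothetical):

```python
import subprocess

# Dry-run first: --check verifies the patch applies cleanly without touching the tree
subprocess.run(["git", "apply", "--check", "fix.patch"], check=True)
# Then apply it for real
subprocess.run(["git", "apply", "fix.patch"], check=True)
```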
|
diff --git a/plugins/lookup/bitwarden.py b/plugins/lookup/bitwarden.py
--- a/plugins/lookup/bitwarden.py
+++ b/plugins/lookup/bitwarden.py
@@ -132,20 +132,29 @@
If field is None, return the whole record for each match.
"""
matches = self._get_matches(search_value, search_field, collection_id)
-
- if field in ['autofillOnPageLoad', 'password', 'passwordRevisionDate', 'totp', 'uris', 'username']:
- return [match['login'][field] for match in matches]
- elif not field:
+ if not field:
return matches
- else:
- custom_field_matches = []
- for match in matches:
+ field_matches = []
+ for match in matches:
+ # if there are no custom fields, then `match` has no key 'fields'
+ if 'fields' in match:
+ custom_field_found = False
for custom_field in match['fields']:
- if custom_field['name'] == field:
- custom_field_matches.append(custom_field['value'])
- if matches and not custom_field_matches:
- raise AnsibleError("Custom field {field} does not exist in {search_value}".format(field=field, search_value=search_value))
- return custom_field_matches
+ if field == custom_field['name']:
+ field_matches.append(custom_field['value'])
+ custom_field_found = True
+ break
+ if custom_field_found:
+ continue
+ if 'login' in match and field in match['login']:
+ field_matches.append(match['login'][field])
+ continue
+ if field in match:
+ field_matches.append(match[field])
+ continue
+ if matches and not field_matches:
+ raise AnsibleError("field {field} does not exist in {search_value}".format(field=field, search_value=search_value))
+ return field_matches
class LookupModule(LookupBase):
|
{"golden_diff": "diff --git a/plugins/lookup/bitwarden.py b/plugins/lookup/bitwarden.py\n--- a/plugins/lookup/bitwarden.py\n+++ b/plugins/lookup/bitwarden.py\n@@ -132,20 +132,29 @@\n If field is None, return the whole record for each match.\n \"\"\"\n matches = self._get_matches(search_value, search_field, collection_id)\n-\n- if field in ['autofillOnPageLoad', 'password', 'passwordRevisionDate', 'totp', 'uris', 'username']:\n- return [match['login'][field] for match in matches]\n- elif not field:\n+ if not field:\n return matches\n- else:\n- custom_field_matches = []\n- for match in matches:\n+ field_matches = []\n+ for match in matches:\n+ # if there are no custom fields, then `match` has no key 'fields'\n+ if 'fields' in match:\n+ custom_field_found = False\n for custom_field in match['fields']:\n- if custom_field['name'] == field:\n- custom_field_matches.append(custom_field['value'])\n- if matches and not custom_field_matches:\n- raise AnsibleError(\"Custom field {field} does not exist in {search_value}\".format(field=field, search_value=search_value))\n- return custom_field_matches\n+ if field == custom_field['name']:\n+ field_matches.append(custom_field['value'])\n+ custom_field_found = True\n+ break\n+ if custom_field_found:\n+ continue\n+ if 'login' in match and field in match['login']:\n+ field_matches.append(match['login'][field])\n+ continue\n+ if field in match:\n+ field_matches.append(match[field])\n+ continue\n+ if matches and not field_matches:\n+ raise AnsibleError(\"field {field} does not exist in {search_value}\".format(field=field, search_value=search_value))\n+ return field_matches\n \n \n class LookupModule(LookupBase):\n", "issue": "lookup bitwarden field=notes resulting in KeyError\n### Summary\n\nWhen using the community.general.bitwarden lookup there is an error trying to lookup a secure note.\r\n\r\nPlaybook:\r\n```yaml\r\n- debug:\r\n msg: >-\r\n \"{{ lookup('community.general.bitwarden', 'secure note 2023', field='notes') }}\"\r\n```\r\n\r\nError:\r\n```\r\nfatal: [my.hostname.net]: FAILED! => {\r\n \"msg\": \"An unhandled exception occurred while running the lookup plugin 'community.general.bitwarden'. Error was a <class 'KeyError'>, original message: 'fields'. 
'fields'\"\r\n}\r\n```\r\nWith debug:\r\n```console\r\nexception during Jinja2 execution: Traceback (most recent call last):\r\n File \"/opt/homebrew/Cellar/ansible/8.0.0/libexec/lib/python3.11/site-packages/ansible/template/__init__.py\", line 831, in _lookup\r\n ran = instance.run(loop_terms, variables=self._available_variables, **kwargs)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/opt/homebrew/Cellar/ansible/8.0.0/libexec/lib/python3.11/site-packages/ansible_collections/community/general/plugins/lookup/bitwarden.py\", line 159, in run\r\n return [_bitwarden.get_field(field, term, search_field, collection_id) for term in terms]\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/opt/homebrew/Cellar/ansible/8.0.0/libexec/lib/python3.11/site-packages/ansible_collections/community/general/plugins/lookup/bitwarden.py\", line 159, in <listcomp>\r\n return [_bitwarden.get_field(field, term, search_field, collection_id) for term in terms]\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/opt/homebrew/Cellar/ansible/8.0.0/libexec/lib/python3.11/site-packages/ansible_collections/community/general/plugins/lookup/bitwarden.py\", line 141, in get_field\r\n for custom_field in match['fields']:\r\n ~~~~~^^^^^^^^^^\r\nKeyError: 'fields'\r\nfatal: [my.hostname.net]: FAILED! => {\r\n \"msg\": \"An unhandled exception occurred while running the lookup plugin 'community.general.bitwarden'. Error was a <class 'KeyError'>, original message: 'fields'. 'fields'\"\r\n}\r\n```\r\n\r\nSame thing when trying to do the lookup by id:\r\n```yaml\r\n- debug:\r\n msg: >-\r\n \"{{ lookup('community.general.bitwarden', 'ba9f1125-e52a-4d4f-9b2b-af1c43a00bd6', search='id', field='notes') }}\"\r\n```\n\n### Issue Type\n\nBug Report\n\n### Component Name\n\nbitwarden\n\n### Ansible Version\n\n```console (paste below)\r\n$ ansible --version\r\nansible [core 2.15.0]\r\n config file = /Users/sjoerd/.ansible.cfg\r\n configured module search path = ['/Users/name/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']\r\n ansible python module location = /opt/homebrew/Cellar/ansible/8.0.0/libexec/lib/python3.11/site-packages/ansible\r\n ansible collection location = /Users/name/.ansible/collections:/usr/share/ansible/collections\r\n executable location = /opt/homebrew/bin/ansible\r\n python version = 3.11.3 (main, Apr 7 2023, 20:13:31) [Clang 14.0.0 (clang-1400.0.29.202)] (/opt/homebrew/Cellar/ansible/8.0.0/libexec/bin/python3.11)\r\n jinja version = 3.1.2\r\n libyaml = True\r\n```\r\n\n\n### Community.general Version\n\n```console (paste below)\r\n$ ansible-galaxy collection list community.general\r\n\r\n# /opt/homebrew/Cellar/ansible/8.0.0/libexec/lib/python3.11/site-packages/ansible_collections\r\nCollection Version\r\n----------------- -------\r\ncommunity.general 7.0.1\r\n```\r\n\n\n### Configuration\n\n```console (paste below)\r\n$ ansible-config dump --only-changed\r\n\r\n```\r\n\n\n### OS / Environment\n\nSource: MacOS\r\nTarget: CentOS Stream 9\r\n\n\n### Steps to Reproduce\n\n<!--- Paste example playbooks or commands between quotes below -->\r\n```yaml (paste below)\r\n- debug:\r\n msg: >-\r\n \"{{ lookup('community.general.bitwarden', 'secure note 2023', field='notes') }}\"\r\n\r\n```\r\n\n\n### Expected Results\n\nExpected the contents of the secure not to be printed to the terminal\n\n### Actual Results\n\n```console (paste below)\r\nfatal: [my.hostname.net]: FAILED! 
=> {\r\n \"msg\": \"An unhandled exception occurred while running the lookup plugin 'community.general.bitwarden'. Error was a <class 'KeyError'>, original message: 'fields'. 'fields'\"\r\n}\r\n```\r\n\n\n### Code of Conduct\n\n- [X] I agree to follow the Ansible Code of Conduct\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# Copyright (c) 2022, Jonathan Lung <[email protected]>\n# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)\n# SPDX-License-Identifier: GPL-3.0-or-later\nfrom __future__ import (absolute_import, division, print_function)\n__metaclass__ = type\n\nDOCUMENTATION = \"\"\"\n name: bitwarden\n author:\n - Jonathan Lung (@lungj) <[email protected]>\n requirements:\n - bw (command line utility)\n - be logged into bitwarden\n - bitwarden vault unlocked\n - C(BW_SESSION) environment variable set\n short_description: Retrieve secrets from Bitwarden\n version_added: 5.4.0\n description:\n - Retrieve secrets from Bitwarden.\n options:\n _terms:\n description: Key(s) to fetch values for from login info.\n required: true\n type: list\n elements: str\n search:\n description: Field to retrieve, for example C(name) or C(id).\n type: str\n default: name\n version_added: 5.7.0\n field:\n description: Field to fetch. Leave unset to fetch whole response.\n type: str\n collection_id:\n description: Collection ID to filter results by collection. Leave unset to skip filtering.\n type: str\n version_added: 6.3.0\n\"\"\"\n\nEXAMPLES = \"\"\"\n- name: \"Get 'password' from Bitwarden record named 'a_test'\"\n ansible.builtin.debug:\n msg: >-\n {{ lookup('community.general.bitwarden', 'a_test', field='password') }}\n\n- name: \"Get 'password' from Bitwarden record with id 'bafba515-af11-47e6-abe3-af1200cd18b2'\"\n ansible.builtin.debug:\n msg: >-\n {{ lookup('community.general.bitwarden', 'bafba515-af11-47e6-abe3-af1200cd18b2', search='id', field='password') }}\n\n- name: \"Get 'password' from Bitwarden record named 'a_test' from collection\"\n ansible.builtin.debug:\n msg: >-\n {{ lookup('community.general.bitwarden', 'a_test', field='password', collection_id='bafba515-af11-47e6-abe3-af1200cd18b2') }}\n\n- name: \"Get full Bitwarden record named 'a_test'\"\n ansible.builtin.debug:\n msg: >-\n {{ lookup('community.general.bitwarden', 'a_test') }}\n\n- name: \"Get custom field 'api_key' from Bitwarden record named 'a_test'\"\n ansible.builtin.debug:\n msg: >-\n {{ lookup('community.general.bitwarden', 'a_test', field='api_key') }}\n\"\"\"\n\nRETURN = \"\"\"\n _raw:\n description: List of requested field or JSON object of list of matches.\n type: list\n elements: raw\n\"\"\"\n\nfrom subprocess import Popen, PIPE\n\nfrom ansible.errors import AnsibleError\nfrom ansible.module_utils.common.text.converters import to_bytes, to_text\nfrom ansible.parsing.ajson import AnsibleJSONDecoder\nfrom ansible.plugins.lookup import LookupBase\n\n\nclass BitwardenException(AnsibleError):\n pass\n\n\nclass Bitwarden(object):\n\n def __init__(self, path='bw'):\n self._cli_path = path\n\n @property\n def cli_path(self):\n return self._cli_path\n\n @property\n def unlocked(self):\n out, err = self._run(['status'], stdin=\"\")\n decoded = AnsibleJSONDecoder().raw_decode(out)[0]\n return decoded['status'] == 'unlocked'\n\n def _run(self, args, stdin=None, expected_rc=0):\n p = Popen([self.cli_path] + args, stdout=PIPE, stderr=PIPE, stdin=PIPE)\n out, err = p.communicate(to_bytes(stdin))\n rc = p.wait()\n if rc != expected_rc:\n raise 
BitwardenException(err)\n return to_text(out, errors='surrogate_or_strict'), to_text(err, errors='surrogate_or_strict')\n\n def _get_matches(self, search_value, search_field, collection_id):\n \"\"\"Return matching records whose search_field is equal to key.\n \"\"\"\n\n # Prepare set of params for Bitwarden CLI\n params = ['list', 'items', '--search', search_value]\n\n if collection_id:\n params.extend(['--collectionid', collection_id])\n\n out, err = self._run(params)\n\n # This includes things that matched in different fields.\n initial_matches = AnsibleJSONDecoder().raw_decode(out)[0]\n\n # Filter to only include results from the right field.\n return [item for item in initial_matches if item[search_field] == search_value]\n\n def get_field(self, field, search_value, search_field=\"name\", collection_id=None):\n \"\"\"Return a list of the specified field for records whose search_field match search_value\n and filtered by collection if collection has been provided.\n\n If field is None, return the whole record for each match.\n \"\"\"\n matches = self._get_matches(search_value, search_field, collection_id)\n\n if field in ['autofillOnPageLoad', 'password', 'passwordRevisionDate', 'totp', 'uris', 'username']:\n return [match['login'][field] for match in matches]\n elif not field:\n return matches\n else:\n custom_field_matches = []\n for match in matches:\n for custom_field in match['fields']:\n if custom_field['name'] == field:\n custom_field_matches.append(custom_field['value'])\n if matches and not custom_field_matches:\n raise AnsibleError(\"Custom field {field} does not exist in {search_value}\".format(field=field, search_value=search_value))\n return custom_field_matches\n\n\nclass LookupModule(LookupBase):\n\n def run(self, terms, variables=None, **kwargs):\n self.set_options(var_options=variables, direct=kwargs)\n field = self.get_option('field')\n search_field = self.get_option('search')\n collection_id = self.get_option('collection_id')\n if not _bitwarden.unlocked:\n raise AnsibleError(\"Bitwarden Vault locked. Run 'bw unlock'.\")\n\n return [_bitwarden.get_field(field, term, search_field, collection_id) for term in terms]\n\n\n_bitwarden = Bitwarden()\n", "path": "plugins/lookup/bitwarden.py"}]}
| 3,572 | 443 |
gh_patches_debug_1795 | rasdani/github-patches | git_diff | aws__aws-cli-3683 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Passing AWS_PROFILE on the update-kubeconfig CLI
Add profile details when update-kubeconfig is called with a named profile:
#aws eks update-kubeconfig --name k8s-test --kubeconfig k8s-test.config --profile myprofile
Currently this generates:
```
user:
exec:
apiVersion: client.authentication.k8s.io/v1alpha1
args:
- token
- -i
- k8s-test
command: aws-iam-authenticator
```
It's good to add the profile details on the CLI when not using the default profile:
```
- name: aws
user:
exec:
apiVersion: client.authentication.k8s.io/v1alpha1
command: aws-iam-authenticator
args:
- "token"
- "-i"
- "k8s-test"
env:
- name: AWS_PROFILE
value: "myprofile"
```
</issue>
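In code terms, the request amounts to attaching an `env` list to the `exec` section of the generated user entry. A minimal sketch of that shape, assuming the active profile name can be read from the CLI session (the helper name here is illustrative, not part of the module below):

```python
from collections import OrderedDict

def add_profile_env(generated_user, profile):
    """Attach an AWS_PROFILE env entry to a kubeconfig user's exec section."""
    # Only emit the env block when a non-default profile was passed on the command line
    if profile:
        generated_user["user"]["exec"]["env"] = [
            OrderedDict([("name", "AWS_PROFILE"), ("value", profile)])
        ]
    return generated_user
```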
<code>
[start of awscli/customizations/eks/update_kubeconfig.py]
1 # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License"). You
4 # may not use this file except in compliance with the License. A copy of
5 # the License is located at
6 #
7 # http://aws.amazon.com/apache2.0/
8 #
9 # or in the "license" file accompanying this file. This file is
10 # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
11 # ANY KIND, either express or implied. See the License for the specific
12 # language governing permissions and limitations under the License.
13
14 import os
15 import logging
16
17 from botocore.compat import OrderedDict
18
19 from awscli.customizations.commands import BasicCommand
20 from awscli.customizations.utils import uni_print
21 from awscli.compat import is_windows
22 from awscli.compat import which
23 from awscli.customizations.eks.exceptions import EKSClusterError
24 from awscli.customizations.eks.kubeconfig import (Kubeconfig,
25 KubeconfigError,
26 KubeconfigLoader,
27 KubeconfigWriter,
28 KubeconfigValidator,
29 KubeconfigAppender)
30 from awscli.customizations.eks.ordered_yaml import ordered_yaml_dump
31
32 LOG = logging.getLogger(__name__)
33
34 DEFAULT_PATH = os.path.expanduser("~/.kube/config")
35
36 # Use the endpoint for kubernetes 1.10
37 # To get the most recent endpoint we will need to
38 # Do a check on the cluster's version number
39 API_VERSION = "client.authentication.k8s.io/v1alpha1"
40
41 AUTH_BIN = "aws-iam-authenticator"
42
43 AUTH_GOGET_PATH = ("github.com/kubernetes-sigs/"
44 "aws-iam-authenticator/cmd/aws-iam-authenticator")
45
46
47 def check_for_binary(binary):
48 return which(binary) is not None
49
50 def warn_of_missing_dependencies():
51 """
52 Check the dependencies of the kubectl configuration generated by
53 update-kubeconfig to see if any are missing.
54 If they are missing, print a warning with information on how to download
55 the missing dependency
56
57 Currently dependent on:
58 aws-iam-authenticator
59 """
60 if not check_for_binary(AUTH_BIN):
61 uni_print("Warning: {0} is not installed properly or is not in your path.\n"
62 "Refer to the AWS Documentation to download it at "
63 "https://docs.aws.amazon.com/eks/latest/userguide/configure-kubectl.html\n".format(AUTH_BIN))
64
65
66 class UpdateKubeconfigCommand(BasicCommand):
67 NAME = 'update-kubeconfig'
68
69 DESCRIPTION = BasicCommand.FROM_FILE(
70 'eks',
71 'update-kubeconfig',
72 '_description.rst'
73 )
74
75 ARG_TABLE = [
76 {
77 'name': 'name',
78 'help_text': ("The name of the cluster for which "
79 "to create a kubeconfig entry. "
80 "This cluster must exist in your account and in the "
81 "specified or configured default Region "
82 "for your AWS CLI installation."),
83 'required': True
84 },
85 {
86 'name': 'kubeconfig',
87 'help_text': ("Optionally specify a kubeconfig file to append "
88 "with your configuration. "
89 "By default, the configuration is written to the "
90 "first file path in the KUBECONFIG "
91 "environment variable (if it is set) "
92 "or the default kubeconfig path (.kube/config) "
93 "in your home directory."),
94 'required': False
95 },
96 {
97 'name': 'role-arn',
98 'help_text': ("To assume a role for cluster authentication, "
99 "specify an IAM role ARN with this option. "
100 "For example, if you created a cluster "
101 "while assuming an IAM role, "
102 "then you must also assume that role to "
103 "connect to the cluster the first time."),
104 'required': False
105 },
106 {
107 'name': 'dry-run',
108 'action': 'store_true',
109 'default': False,
110 'help_text': ("Print the merged kubeconfig to stdout instead of "
111 "writing it to the specified file."),
112 'required': False
113 },
114 {
115 'name': 'verbose',
116 'action': 'store_true',
117 'default': False,
118 'help_text': ("Print more detailed output "
119 "when writing to the kubeconfig file, "
120 "including the appended entries.")
121 }
122 ]
123
124 def _display_entries(self, entries):
125 """
126 Display entries in yaml format
127
128 :param entries: a list of OrderedDicts to be printed
129 :type entries: list
130 """
131 uni_print("Entries:\n\n")
132 for entry in entries:
133 uni_print(ordered_yaml_dump(entry))
134 uni_print("\n")
135
136 def _run_main(self, parsed_args, parsed_globals):
137 client = EKSClient(self._session,
138 parsed_args.name,
139 parsed_args.role_arn,
140 parsed_globals)
141 new_cluster_dict = client.get_cluster_entry()
142 new_user_dict = client.get_user_entry()
143
144 config_selector = KubeconfigSelector(
145 os.environ.get("KUBECONFIG", ""),
146 parsed_args.kubeconfig
147 )
148 config = config_selector.choose_kubeconfig(
149 new_cluster_dict["name"]
150 )
151 updating_existing = config.has_cluster(new_cluster_dict["name"])
152 appender = KubeconfigAppender()
153 new_context_dict = appender.insert_cluster_user_pair(config,
154 new_cluster_dict,
155 new_user_dict)
156
157 if parsed_args.dry_run:
158 uni_print(config.dump_content())
159 else:
160 writer = KubeconfigWriter()
161 writer.write_kubeconfig(config)
162
163 if updating_existing:
164 uni_print("Updated context {0} in {1}\n".format(
165 new_context_dict["name"], config.path
166 ))
167 else:
168 uni_print("Added new context {0} to {1}\n".format(
169 new_context_dict["name"], config.path
170 ))
171
172 if parsed_args.verbose:
173 self._display_entries([
174 new_context_dict,
175 new_user_dict,
176 new_cluster_dict
177 ])
178
179 warn_of_missing_dependencies()
180
181
182 class KubeconfigSelector(object):
183
184 def __init__(self, env_variable, path_in, validator=None,
185 loader=None):
186 """
187 Parse KUBECONFIG into a list of absolute paths.
188 Also replace the empty list with DEFAULT_PATH
189
190 :param env_variable: KUBECONFIG as a long string
191 :type env_variable: string
192
193 :param path_in: The path passed in through the CLI
194 :type path_in: string or None
195 """
196 if validator is None:
197 validator = KubeconfigValidator()
198 self._validator = validator
199
200 if loader is None:
201 loader = KubeconfigLoader(validator)
202 self._loader = loader
203
204 if path_in is not None:
205 # Override environment variable
206 self._paths = [self._expand_path(path_in)]
207 else:
208 # Get the list of paths from the environment variable
209 if env_variable == "":
210 env_variable = DEFAULT_PATH
211 self._paths = [self._expand_path(element)
212 for element in env_variable.split(os.pathsep)
213 if len(element.strip()) > 0]
214 if len(self._paths) == 0:
215 self._paths = [DEFAULT_PATH]
216
217 def choose_kubeconfig(self, cluster_name):
218 """
219 Choose which kubeconfig file to read from.
220 If name is already an entry in one of the $KUBECONFIG files,
221 choose that one.
222 Otherwise choose the first file.
223
224 :param cluster_name: The name of the cluster which is going to be added
225 :type cluster_name: String
226
227 :return: a chosen Kubeconfig based on above rules
228 :rtype: Kubeconfig
229 """
230 # Search for an existing entry to update
231 for candidate_path in self._paths:
232 try:
233 loaded_config = self._loader.load_kubeconfig(candidate_path)
234
235 if loaded_config.has_cluster(cluster_name):
236 LOG.debug("Found entry to update at {0}".format(
237 candidate_path
238 ))
239 return loaded_config
240 except KubeconfigError as e:
241 LOG.warning("Passing {0}:{1}".format(candidate_path, e))
242
243 # No entry was found, use the first file in KUBECONFIG
244 #
245 # Note: This could raise KubeconfigErrors if paths[0] is corrupted
246 return self._loader.load_kubeconfig(self._paths[0])
247
248 def _expand_path(self, path):
249 """ A helper to expand a path to a full absolute path. """
250 return os.path.abspath(os.path.expanduser(path))
251
252
253 class EKSClient(object):
254 def __init__(self, session, cluster_name, role_arn, parsed_globals=None):
255 self._session = session
256 self._cluster_name = cluster_name
257 self._role_arn = role_arn
258 self._cluster_description = None
259 self._globals = parsed_globals
260
261 def _get_cluster_description(self):
262 """
263 Use an eks describe-cluster call to get the cluster description
264 Cache the response in self._cluster_description.
265 describe-cluster will only be called once.
266 """
267 if self._cluster_description is None:
268 if self._globals is None:
269 client = self._session.create_client("eks")
270 else:
271 client = self._session.create_client(
272 "eks",
273 region_name=self._globals.region,
274 endpoint_url=self._globals.endpoint_url,
275 verify=self._globals.verify_ssl
276 )
277 full_description = client.describe_cluster(name=self._cluster_name)
278 self._cluster_description = full_description["cluster"]
279
280 if "status" not in self._cluster_description:
281 raise EKSClusterError("Cluster not found")
282 if self._cluster_description["status"] != "ACTIVE":
283 raise EKSClusterError("Cluster status not active")
284
285 return self._cluster_description
286
287 def get_cluster_entry(self):
288 """
289 Return a cluster entry generated using
290 the previously obtained description.
291 """
292
293 cert_data = self._get_cluster_description().get("certificateAuthority",
294 {"data": ""})["data"]
295 endpoint = self._get_cluster_description().get("endpoint")
296 arn = self._get_cluster_description().get("arn")
297
298 return OrderedDict([
299 ("cluster", OrderedDict([
300 ("certificate-authority-data", cert_data),
301 ("server", endpoint)
302 ])),
303 ("name", arn)
304 ])
305
306 def get_user_entry(self):
307 """
308 Return a user entry generated using
309 the previously obtained description.
310 """
311 generated_user = OrderedDict([
312 ("name", self._get_cluster_description().get("arn", "")),
313 ("user", OrderedDict([
314 ("exec", OrderedDict([
315 ("apiVersion", API_VERSION),
316 ("args",
317 [
318 "token",
319 "-i",
320 self._cluster_name
321 ]),
322 ("command", AUTH_BIN)
323 ]))
324 ]))
325 ])
326
327 if is_windows:
328 generated_user["user"]["exec"]["command"] += ".exe"
329
330 if self._role_arn is not None:
331 generated_user["user"]["exec"]["args"].extend([
332 "-r",
333 self._role_arn
334 ])
335
336 return generated_user
337
[end of awscli/customizations/eks/update_kubeconfig.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/awscli/customizations/eks/update_kubeconfig.py b/awscli/customizations/eks/update_kubeconfig.py
--- a/awscli/customizations/eks/update_kubeconfig.py
+++ b/awscli/customizations/eks/update_kubeconfig.py
@@ -333,4 +333,10 @@
self._role_arn
])
+ if self._session.profile:
+ generated_user["user"]["exec"]["env"] = [OrderedDict([
+ ("name", "AWS_PROFILE"),
+ ("value", self._session.profile)
+ ])]
+
return generated_user
|
{"golden_diff": "diff --git a/awscli/customizations/eks/update_kubeconfig.py b/awscli/customizations/eks/update_kubeconfig.py\n--- a/awscli/customizations/eks/update_kubeconfig.py\n+++ b/awscli/customizations/eks/update_kubeconfig.py\n@@ -333,4 +333,10 @@\n self._role_arn\n ])\n \n+ if self._session.profile:\n+ generated_user[\"user\"][\"exec\"][\"env\"] = [OrderedDict([\n+ (\"name\", \"AWS_PROFILE\"),\n+ (\"value\", self._session.profile)\n+ ])]\n+\n return generated_user\n", "issue": "Passing AWS_PROFILE on update-kubeconfig cli\nAdd profile details if calling update passing update-kubeconfig with profile details \r\n\r\n#aws eks update-kubeconfig --name k8s-test --kubeconfig k8s-test.config --profile myprofile \r\n\r\nCurrently this generates as \r\n```\r\n user:\r\n exec:\r\n apiVersion: client.authentication.k8s.io/v1alpha1\r\n args:\r\n - token\r\n - -i\r\n - k8s-test\r\n command: aws-iam-authenticator\r\n```\r\n\r\nIts good to add the profile details on the cli when not using default profile. \r\n\r\n```\r\n- name: aws\r\n user:\r\n exec:\r\n apiVersion: client.authentication.k8s.io/v1alpha1\r\n command: aws-iam-authenticator\r\n args:\r\n - \"token\"\r\n - \"-i\"\r\n - \"k8s-test\"\r\n\r\n env:\r\n - name: AWS_PROFILE\r\n value: \"myprofile\"\r\n```\n", "before_files": [{"content": "# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"). You\n# may not use this file except in compliance with the License. A copy of\n# the License is located at\n#\n# http://aws.amazon.com/apache2.0/\n#\n# or in the \"license\" file accompanying this file. This file is\n# distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n# ANY KIND, either express or implied. 
See the License for the specific\n# language governing permissions and limitations under the License.\n\nimport os\nimport logging\n\nfrom botocore.compat import OrderedDict\n\nfrom awscli.customizations.commands import BasicCommand\nfrom awscli.customizations.utils import uni_print\nfrom awscli.compat import is_windows\nfrom awscli.compat import which\nfrom awscli.customizations.eks.exceptions import EKSClusterError\nfrom awscli.customizations.eks.kubeconfig import (Kubeconfig,\n KubeconfigError,\n KubeconfigLoader,\n KubeconfigWriter,\n KubeconfigValidator,\n KubeconfigAppender)\nfrom awscli.customizations.eks.ordered_yaml import ordered_yaml_dump\n\nLOG = logging.getLogger(__name__)\n\nDEFAULT_PATH = os.path.expanduser(\"~/.kube/config\")\n\n# Use the endpoint for kubernetes 1.10\n# To get the most recent endpoint we will need to\n# Do a check on the cluster's version number\nAPI_VERSION = \"client.authentication.k8s.io/v1alpha1\"\n\nAUTH_BIN = \"aws-iam-authenticator\"\n\nAUTH_GOGET_PATH = (\"github.com/kubernetes-sigs/\"\n \"aws-iam-authenticator/cmd/aws-iam-authenticator\")\n\n\ndef check_for_binary(binary):\n return which(binary) is not None\n\ndef warn_of_missing_dependencies():\n \"\"\"\n Check the dependencies of the kubectl configuration generated by\n update-kubeconfig to see if any are missing.\n If they are missing, print a warning with information on how to download\n the missing dependency\n\n Currently dependent on:\n aws-iam-authenticator\n \"\"\"\n if not check_for_binary(AUTH_BIN):\n uni_print(\"Warning: {0} is not installed properly or is not in your path.\\n\"\n \"Refer to the AWS Documentation to download it at \"\n \"https://docs.aws.amazon.com/eks/latest/userguide/configure-kubectl.html\\n\".format(AUTH_BIN))\n\n\nclass UpdateKubeconfigCommand(BasicCommand):\n NAME = 'update-kubeconfig'\n\n DESCRIPTION = BasicCommand.FROM_FILE(\n 'eks',\n 'update-kubeconfig',\n '_description.rst'\n )\n\n ARG_TABLE = [\n {\n 'name': 'name',\n 'help_text': (\"The name of the cluster for which \"\n \"to create a kubeconfig entry. \"\n \"This cluster must exist in your account and in the \"\n \"specified or configured default Region \"\n \"for your AWS CLI installation.\"),\n 'required': True\n },\n {\n 'name': 'kubeconfig',\n 'help_text': (\"Optionally specify a kubeconfig file to append \"\n \"with your configuration. \"\n \"By default, the configuration is written to the \"\n \"first file path in the KUBECONFIG \"\n \"environment variable (if it is set) \"\n \"or the default kubeconfig path (.kube/config) \"\n \"in your home directory.\"),\n 'required': False\n },\n {\n 'name': 'role-arn',\n 'help_text': (\"To assume a role for cluster authentication, \"\n \"specify an IAM role ARN with this option. 
\"\n \"For example, if you created a cluster \"\n \"while assuming an IAM role, \"\n \"then you must also assume that role to \"\n \"connect to the cluster the first time.\"),\n 'required': False\n },\n {\n 'name': 'dry-run',\n 'action': 'store_true',\n 'default': False,\n 'help_text': (\"Print the merged kubeconfig to stdout instead of \"\n \"writing it to the specified file.\"),\n 'required': False\n },\n {\n 'name': 'verbose',\n 'action': 'store_true',\n 'default': False,\n 'help_text': (\"Print more detailed output \"\n \"when writing to the kubeconfig file, \"\n \"including the appended entries.\")\n }\n ]\n\n def _display_entries(self, entries):\n \"\"\" \n Display entries in yaml format\n\n :param entries: a list of OrderedDicts to be printed\n :type entries: list\n \"\"\"\n uni_print(\"Entries:\\n\\n\")\n for entry in entries:\n uni_print(ordered_yaml_dump(entry))\n uni_print(\"\\n\")\n\n def _run_main(self, parsed_args, parsed_globals):\n client = EKSClient(self._session,\n parsed_args.name,\n parsed_args.role_arn,\n parsed_globals)\n new_cluster_dict = client.get_cluster_entry()\n new_user_dict = client.get_user_entry()\n\n config_selector = KubeconfigSelector(\n os.environ.get(\"KUBECONFIG\", \"\"),\n parsed_args.kubeconfig\n )\n config = config_selector.choose_kubeconfig(\n new_cluster_dict[\"name\"]\n )\n updating_existing = config.has_cluster(new_cluster_dict[\"name\"])\n appender = KubeconfigAppender()\n new_context_dict = appender.insert_cluster_user_pair(config,\n new_cluster_dict,\n new_user_dict)\n\n if parsed_args.dry_run:\n uni_print(config.dump_content())\n else:\n writer = KubeconfigWriter()\n writer.write_kubeconfig(config)\n\n if updating_existing:\n uni_print(\"Updated context {0} in {1}\\n\".format(\n new_context_dict[\"name\"], config.path\n ))\n else:\n uni_print(\"Added new context {0} to {1}\\n\".format(\n new_context_dict[\"name\"], config.path\n ))\n\n if parsed_args.verbose:\n self._display_entries([\n new_context_dict,\n new_user_dict,\n new_cluster_dict\n ])\n\n warn_of_missing_dependencies()\n\n\nclass KubeconfigSelector(object):\n\n def __init__(self, env_variable, path_in, validator=None,\n loader=None):\n \"\"\"\n Parse KUBECONFIG into a list of absolute paths.\n Also replace the empty list with DEFAULT_PATH\n\n :param env_variable: KUBECONFIG as a long string\n :type env_variable: string\n\n :param path_in: The path passed in through the CLI\n :type path_in: string or None\n \"\"\"\n if validator is None:\n validator = KubeconfigValidator()\n self._validator = validator\n\n if loader is None:\n loader = KubeconfigLoader(validator)\n self._loader = loader\n\n if path_in is not None:\n # Override environment variable\n self._paths = [self._expand_path(path_in)]\n else:\n # Get the list of paths from the environment variable\n if env_variable == \"\":\n env_variable = DEFAULT_PATH\n self._paths = [self._expand_path(element)\n for element in env_variable.split(os.pathsep)\n if len(element.strip()) > 0]\n if len(self._paths) == 0:\n self._paths = [DEFAULT_PATH]\n\n def choose_kubeconfig(self, cluster_name):\n \"\"\"\n Choose which kubeconfig file to read from.\n If name is already an entry in one of the $KUBECONFIG files,\n choose that one.\n Otherwise choose the first file.\n\n :param cluster_name: The name of the cluster which is going to be added\n :type cluster_name: String\n\n :return: a chosen Kubeconfig based on above rules\n :rtype: Kubeconfig\n \"\"\"\n # Search for an existing entry to update\n for candidate_path in self._paths:\n try:\n 
loaded_config = self._loader.load_kubeconfig(candidate_path)\n\n if loaded_config.has_cluster(cluster_name):\n LOG.debug(\"Found entry to update at {0}\".format(\n candidate_path\n ))\n return loaded_config\n except KubeconfigError as e:\n LOG.warning(\"Passing {0}:{1}\".format(candidate_path, e))\n\n # No entry was found, use the first file in KUBECONFIG\n #\n # Note: This could raise KubeconfigErrors if paths[0] is corrupted\n return self._loader.load_kubeconfig(self._paths[0])\n\n def _expand_path(self, path):\n \"\"\" A helper to expand a path to a full absolute path. \"\"\"\n return os.path.abspath(os.path.expanduser(path))\n\n\nclass EKSClient(object):\n def __init__(self, session, cluster_name, role_arn, parsed_globals=None):\n self._session = session\n self._cluster_name = cluster_name\n self._role_arn = role_arn\n self._cluster_description = None\n self._globals = parsed_globals\n\n def _get_cluster_description(self):\n \"\"\"\n Use an eks describe-cluster call to get the cluster description\n Cache the response in self._cluster_description.\n describe-cluster will only be called once.\n \"\"\"\n if self._cluster_description is None:\n if self._globals is None:\n client = self._session.create_client(\"eks\")\n else:\n client = self._session.create_client(\n \"eks\", \n region_name=self._globals.region,\n endpoint_url=self._globals.endpoint_url,\n verify=self._globals.verify_ssl\n )\n full_description = client.describe_cluster(name=self._cluster_name)\n self._cluster_description = full_description[\"cluster\"]\n\n if \"status\" not in self._cluster_description:\n raise EKSClusterError(\"Cluster not found\")\n if self._cluster_description[\"status\"] != \"ACTIVE\":\n raise EKSClusterError(\"Cluster status not active\")\n\n return self._cluster_description\n\n def get_cluster_entry(self):\n \"\"\"\n Return a cluster entry generated using\n the previously obtained description.\n \"\"\"\n\n cert_data = self._get_cluster_description().get(\"certificateAuthority\",\n {\"data\": \"\"})[\"data\"]\n endpoint = self._get_cluster_description().get(\"endpoint\")\n arn = self._get_cluster_description().get(\"arn\")\n\n return OrderedDict([\n (\"cluster\", OrderedDict([\n (\"certificate-authority-data\", cert_data),\n (\"server\", endpoint)\n ])),\n (\"name\", arn)\n ])\n\n def get_user_entry(self):\n \"\"\"\n Return a user entry generated using\n the previously obtained description.\n \"\"\"\n generated_user = OrderedDict([\n (\"name\", self._get_cluster_description().get(\"arn\", \"\")),\n (\"user\", OrderedDict([\n (\"exec\", OrderedDict([\n (\"apiVersion\", API_VERSION),\n (\"args\",\n [\n \"token\",\n \"-i\",\n self._cluster_name\n ]),\n (\"command\", AUTH_BIN)\n ]))\n ]))\n ])\n\n if is_windows:\n generated_user[\"user\"][\"exec\"][\"command\"] += \".exe\"\n\n if self._role_arn is not None:\n generated_user[\"user\"][\"exec\"][\"args\"].extend([\n \"-r\",\n self._role_arn\n ])\n\n return generated_user\n", "path": "awscli/customizations/eks/update_kubeconfig.py"}]}
| 4,084 | 134 |
gh_patches_debug_39566 | rasdani/github-patches | git_diff | meltano__meltano-7010 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
bug: Missing Python executable from plugin venv causes error during `meltano install`
### Meltano Version
2.10.0
### Python Version
3.9
### Bug scope
CLI (options, error messages, logging, etc.)
### Operating System
Linux Mint 20.3
### Description
1. Install Python (e.g. using `pyenv`).
2. Use that version of Python to run `meltano install`.
3. Uninstall that version of Python.
4. Install a new version of Python (e.g. going from version 3.9.X to 3.9.Y)
5. Use that version of Python to run `meltano install` again:
```
Installing X plugins...
[...]
Need help fixing this problem? Visit http://melta.no/ for troubleshooting steps, or to
join our friendly Slack community.
[Errno 2] No such file or directory: '<project directory>/.meltano/<installed plugin>/venv/bin/python'
```
The venvs managed by Meltano assume that the underlying Python executable that `venv/bin/python` links to will not be deleted. In reality it may be, and if it is we should detect this and automatically replace the venv (as if the `--clean` flag has been provided).
### Code
_No response_
</issue>
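A minimal sketch of the kind of check that would catch this before pip is invoked — a venv whose `python` link points at a deleted interpreter (the plugin path in the comment is hypothetical):

```python
from pathlib import Path

def venv_python_missing(venv_bin_dir: Path) -> bool:
    # Path.exists() follows symlinks, so a link left dangling by a removed
    # pyenv installation reports False and should trigger a clean reinstall.
    return not (venv_bin_dir / "python").exists()

# e.g. venv_python_missing(Path(".meltano/extractors/tap-example/venv/bin"))
```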
<code>
[start of src/meltano/core/venv_service.py]
1 """Manage Python virtual environments."""
2
3 from __future__ import annotations
4
5 import asyncio
6 import hashlib
7 import logging
8 import os
9 import platform
10 import shutil
11 import subprocess
12 import sys
13 from asyncio.subprocess import Process
14 from collections import namedtuple
15 from collections.abc import Iterable
16 from pathlib import Path
17
18 from meltano.core.error import AsyncSubprocessError
19 from meltano.core.project import Project
20
21 logger = logging.getLogger(__name__)
22
23 VenvSpecs = namedtuple("VenvSpecs", ("lib_dir", "bin_dir", "site_packages_dir"))
24
25 POSIX = VenvSpecs(
26 lib_dir="lib",
27 bin_dir="bin",
28 site_packages_dir=os.path.join(
29 "lib",
30 f"python{'.'.join(str(part) for part in sys.version_info[:2])}",
31 "site-packages",
32 ),
33 )
34
35 NT = VenvSpecs(
36 lib_dir="Lib",
37 bin_dir="Scripts",
38 site_packages_dir=os.path.join("Lib", "site-packages"),
39 )
40
41 PLATFORM_SPECS = {"Linux": POSIX, "Darwin": POSIX, "Windows": NT}
42
43
44 def venv_platform_specs():
45 """Get virtual environment sub-path info for the current platform.
46
47 Raises:
48 Exception: This platform is not supported.
49
50 Returns:
51 Virtual environment sub-path info for the current platform.
52 """
53 system = platform.system()
54 try:
55 return PLATFORM_SPECS[system]
56 except KeyError as ex:
57 raise Exception(f"Platform {system!r} not supported.") from ex
58
59
60 PIP_PACKAGES = ("pip", "setuptools", "wheel")
61
62
63 class VirtualEnv:
64 """Info about a single virtual environment."""
65
66 def __init__(self, root: Path):
67 """Initialize the `VirtualEnv` instance.
68
69 Args:
70 root: The root directory of the virtual environment.
71 """
72 self.root = root.resolve()
73 self.specs = venv_platform_specs()
74
75 def __getattr__(self, key: str):
76 """Get a specific attribute from this instance.
77
78 Used to provide `VenvSpecs` attributes for this specific virtual environment.
79
80 Args:
81 key: The attribute name. Must be one of the `VenvSpecs` attributes.
82
83 Returns:
84 The root directory of this virtual environment joined to the requested
85 platform-specific path using this platform's `VenvSpecs` instance.
86 """
87 return self.root / getattr(self.specs, key)
88
89 def __str__(self):
90 """_summary_.
91
92 Returns:
93 _description_.
94 """
95 return str(self.root)
96
97
98 async def exec_async(*args, **kwargs) -> Process:
99 """Run an executable asyncronously in a subprocess.
100
101 Args:
102 args: Positional arguments for `asyncio.create_subprocess_exec`.
103 kwargs: Keyword arguments for `asyncio.create_subprocess_exec`.
104
105 Raises:
106 AsyncSubprocessError: The command failed.
107
108 Returns:
109 The subprocess.
110 """
111 run = await asyncio.create_subprocess_exec(
112 *args,
113 stdout=subprocess.PIPE,
114 stderr=subprocess.PIPE,
115 **kwargs,
116 )
117 await run.wait()
118
119 if run.returncode != 0:
120 raise AsyncSubprocessError("Command failed", run)
121
122 return run
123
124
125 def fingerprint(pip_install_args: Iterable[str]) -> str:
126 """Generate a hash identifying pip install args.
127
128 Arguments are sorted and deduplicated before the hash is generated.
129
130 Args:
131 pip_install_args: Arguments for `pip install`.
132
133 Returns:
134 The SHA256 hash hex digest of the sorted set of pip install args.
135 """
136 return hashlib.sha256(" ".join(sorted(set(pip_install_args))).encode()).hexdigest()
137
138
139 class VenvService: # noqa: WPS214
140 """Manages virtual environments.
141
142 The methods in this class are not threadsafe.
143 """
144
145 def __init__(self, project: Project, namespace: str = "", name: str = ""):
146 """Initialize the `VenvService`.
147
148 Args:
149 project: The Meltano project.
150 namespace: The namespace for the venv, e.g. a Plugin type.
151 name: The name of the venv, e.g. a Plugin name.
152 """
153 self.project = project
154 self.namespace = namespace
155 self.name = name
156 self.venv = VirtualEnv(self.project.venvs_dir(namespace, name))
157 self.python_path = self.venv.bin_dir / "python"
158 self.plugin_fingerprint_path = self.venv.root / ".meltano_plugin_fingerprint"
159
160 async def install(self, pip_install_args: list[str], clean: bool = False) -> None:
161 """Configure a virtual environment, then run pip install with the given args.
162
163 Args:
164 pip_install_args: Arguments passed to `pip install`.
165 clean: Whether to not attempt to use an existing virtual environment.
166 """
167 if not clean and self.requires_clean_install(pip_install_args):
168 logger.debug(
169 f"Packages for '{self.namespace}/{self.name}' have changed so performing a clean install."
170 )
171 clean = True
172
173 self.clean_run_files()
174 await self._pip_install(pip_install_args=pip_install_args, clean=clean)
175 self.write_fingerprint(pip_install_args)
176
177 def requires_clean_install(self, pip_install_args: list[str]) -> bool:
178 """Determine whether a clean install is needed.
179
180 Args:
181 pip_install_args: The arguments being passed to `pip install`, used
182 for fingerprinting the installation.
183
184 Returns:
185 Whether virtual environment doesn't exist or can't be reused.
186 """
187 if self.venv.site_packages_dir.joinpath("meltano_venv.pth").exists():
188 # clean up deprecated feature
189 return True
190 existing_fingerprint = self.read_fingerprint()
191 return (
192 existing_fingerprint != fingerprint(pip_install_args)
193 if existing_fingerprint
194 else True
195 )
196
197 def clean_run_files(self) -> None:
198 """Destroy cached configuration files, if they exist."""
199 try:
200 shutil.rmtree(self.project.run_dir(self.name, make_dirs=False))
201 except FileNotFoundError:
202 logger.debug("No cached configuration files to remove")
203
204 def clean(self) -> None:
205 """Destroy the virtual environment, if it exists."""
206 try:
207 shutil.rmtree(self.venv.root)
208 logger.debug(
209 "Removed old virtual environment for '%s/%s'", # noqa: WPS323
210 self.namespace,
211 self.name,
212 )
213 except FileNotFoundError:
214 # If the VirtualEnv has never been created before do nothing
215 logger.debug("No old virtual environment to remove")
216
217 async def create(self) -> Process:
218 """Create a new virtual environment.
219
220 Raises:
221 AsyncSubprocessError: The virtual environment could not be created.
222
223 Returns:
224 The Python process creating the virtual environment.
225 """
226 logger.debug(f"Creating virtual environment for '{self.namespace}/{self.name}'")
227 try:
228 return await exec_async(sys.executable, "-m", "venv", str(self.venv))
229 except AsyncSubprocessError as err:
230 raise AsyncSubprocessError(
231 f"Could not create the virtualenv for '{self.namespace}/{self.name}'",
232 err.process,
233 ) from err
234
235 async def upgrade_pip(self) -> Process:
236 """Upgrade the `pip` package to the latest version in the virtual environment.
237
238 Raises:
239 AsyncSubprocessError: Failed to upgrade pip to the latest version.
240
241 Returns:
242 The process running `pip install --upgrade ...`.
243 """
244 logger.debug(f"Upgrading pip for '{self.namespace}/{self.name}'")
245 try:
246 return await self._pip_install(["--upgrade", *PIP_PACKAGES])
247 except AsyncSubprocessError as err:
248 raise AsyncSubprocessError(
249 "Failed to upgrade pip to the latest version.", err.process
250 ) from err
251
252 def read_fingerprint(self) -> str | None:
253 """Get the fingerprint of the existing virtual environment.
254
255 Returns:
256 The fingerprint of the existing virtual environment if it exists.
257 `None` otherwise.
258 """
259 if not self.plugin_fingerprint_path.exists():
260 return None
261 with open(self.plugin_fingerprint_path) as fingerprint_file:
262 return fingerprint_file.read()
263
264 def write_fingerprint(self, pip_install_args: list[str]) -> None:
265 """Save the fingerprint for this installation.
266
267 Args:
268 pip_install_args: The arguments being passed to `pip install`.
269 """
270 with open(self.plugin_fingerprint_path, "wt") as fingerprint_file:
271 fingerprint_file.write(fingerprint(pip_install_args))
272
273 def exec_path(self, executable: str) -> Path:
274 """Return the absolute path for the given executable in the virtual environment.
275
276 Args:
277 executable: The path to the executable relative to the venv bin directory.
278
279 Returns:
280 The venv bin directory joined to the provided executable.
281 """
282 return self.venv.bin_dir / executable
283
284 async def _pip_install(
285 self, pip_install_args: list[str], clean: bool = False
286 ) -> Process:
287 """Install a package using `pip` in the proper virtual environment.
288
289 Args:
290 pip_install_args: The arguments to pass to `pip install`.
291 clean: Whether the installation should be done in a clean venv.
292
293 Raises:
294 AsyncSubprocessError: The command failed.
295
296 Returns:
297 The process running `pip install` with the provided args.
298 """
299 if clean:
300 self.clean()
301 await self.create()
302 await self.upgrade_pip()
303
304 pip_install_args_str = " ".join(pip_install_args)
305 log_msg_prefix = (
306 f"Upgrading with args {pip_install_args_str!r} in existing"
307 if "--upgrade" in pip_install_args
308 else f"Installing with args {pip_install_args_str!r} into"
309 )
310 logger.debug(
311 f"{log_msg_prefix} virtual environment for '{self.namespace}/{self.name}'"
312 )
313
314 try:
315 return await exec_async(
316 str(self.python_path), "-m", "pip", "install", *pip_install_args
317 )
318 except AsyncSubprocessError as err:
319 raise AsyncSubprocessError(
320 f"Failed to install plugin '{self.name}'.", err.process
321 ) from err
322
[end of src/meltano/core/venv_service.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/src/meltano/core/venv_service.py b/src/meltano/core/venv_service.py
--- a/src/meltano/core/venv_service.py
+++ b/src/meltano/core/venv_service.py
@@ -154,7 +154,6 @@
self.namespace = namespace
self.name = name
self.venv = VirtualEnv(self.project.venvs_dir(namespace, name))
- self.python_path = self.venv.bin_dir / "python"
self.plugin_fingerprint_path = self.venv.root / ".meltano_plugin_fingerprint"
async def install(self, pip_install_args: list[str], clean: bool = False) -> None:
@@ -184,15 +183,18 @@
Returns:
Whether virtual environment doesn't exist or can't be reused.
"""
- if self.venv.site_packages_dir.joinpath("meltano_venv.pth").exists():
- # clean up deprecated feature
- return True
- existing_fingerprint = self.read_fingerprint()
- return (
- existing_fingerprint != fingerprint(pip_install_args)
- if existing_fingerprint
- else True
- )
+ # A generator function is used to perform the checks lazily
+ def checks():
+ # The Python installation used to create this venv no longer exists
+ yield not self.exec_path("python").exists()
+ # The deprecated `meltano_venv.pth` feature is used by this venv
+ yield self.venv.site_packages_dir.joinpath("meltano_venv.pth").exists()
+ # The fingerprint of the venv does not match the pip install args
+ existing_fingerprint = self.read_fingerprint()
+ yield existing_fingerprint is None
+ yield existing_fingerprint != fingerprint(pip_install_args)
+
+ return any(checks())
def clean_run_files(self) -> None:
"""Destroy cached configuration files, if they exist."""
@@ -279,7 +281,18 @@
Returns:
The venv bin directory joined to the provided executable.
"""
- return self.venv.bin_dir / executable
+ absolute_executable = self.venv.bin_dir / executable
+ if platform.system() != "Windows":
+ return absolute_executable
+
+ # On Windows, try using the '.exe' suffixed version if it exists. Use the
+ # regular executable path as a fallback (and for backwards compatibility).
+ absolute_executable_windows = absolute_executable.with_suffix(".exe")
+ return (
+ absolute_executable_windows
+ if absolute_executable_windows.exists()
+ else absolute_executable
+ )
async def _pip_install(
self, pip_install_args: list[str], clean: bool = False
@@ -313,7 +326,7 @@
try:
return await exec_async(
- str(self.python_path), "-m", "pip", "install", *pip_install_args
+ str(self.exec_path("python")), "-m", "pip", "install", *pip_install_args
)
except AsyncSubprocessError as err:
raise AsyncSubprocessError(
|
{"golden_diff": "diff --git a/src/meltano/core/venv_service.py b/src/meltano/core/venv_service.py\n--- a/src/meltano/core/venv_service.py\n+++ b/src/meltano/core/venv_service.py\n@@ -154,7 +154,6 @@\n self.namespace = namespace\n self.name = name\n self.venv = VirtualEnv(self.project.venvs_dir(namespace, name))\n- self.python_path = self.venv.bin_dir / \"python\"\n self.plugin_fingerprint_path = self.venv.root / \".meltano_plugin_fingerprint\"\n \n async def install(self, pip_install_args: list[str], clean: bool = False) -> None:\n@@ -184,15 +183,18 @@\n Returns:\n Whether virtual environment doesn't exist or can't be reused.\n \"\"\"\n- if self.venv.site_packages_dir.joinpath(\"meltano_venv.pth\").exists():\n- # clean up deprecated feature\n- return True\n- existing_fingerprint = self.read_fingerprint()\n- return (\n- existing_fingerprint != fingerprint(pip_install_args)\n- if existing_fingerprint\n- else True\n- )\n+ # A generator function is used to perform the checks lazily\n+ def checks():\n+ # The Python installation used to create this venv no longer exists\n+ yield not self.exec_path(\"python\").exists()\n+ # The deprecated `meltano_venv.pth` feature is used by this venv\n+ yield self.venv.site_packages_dir.joinpath(\"meltano_venv.pth\").exists()\n+ # The fingerprint of the venv does not match the pip install args\n+ existing_fingerprint = self.read_fingerprint()\n+ yield existing_fingerprint is None\n+ yield existing_fingerprint != fingerprint(pip_install_args)\n+\n+ return any(checks())\n \n def clean_run_files(self) -> None:\n \"\"\"Destroy cached configuration files, if they exist.\"\"\"\n@@ -279,7 +281,18 @@\n Returns:\n The venv bin directory joined to the provided executable.\n \"\"\"\n- return self.venv.bin_dir / executable\n+ absolute_executable = self.venv.bin_dir / executable\n+ if platform.system() != \"Windows\":\n+ return absolute_executable\n+\n+ # On Windows, try using the '.exe' suffixed version if it exists. Use the\n+ # regular executable path as a fallback (and for backwards compatibility).\n+ absolute_executable_windows = absolute_executable.with_suffix(\".exe\")\n+ return (\n+ absolute_executable_windows\n+ if absolute_executable_windows.exists()\n+ else absolute_executable\n+ )\n \n async def _pip_install(\n self, pip_install_args: list[str], clean: bool = False\n@@ -313,7 +326,7 @@\n \n try:\n return await exec_async(\n- str(self.python_path), \"-m\", \"pip\", \"install\", *pip_install_args\n+ str(self.exec_path(\"python\")), \"-m\", \"pip\", \"install\", *pip_install_args\n )\n except AsyncSubprocessError as err:\n raise AsyncSubprocessError(\n", "issue": "bug: Missing Python executable from plugin venv causes error during `meltano install`\n### Meltano Version\n\n2.10.0\n\n### Python Version\n\n3.9\n\n### Bug scope\n\nCLI (options, error messages, logging, etc.)\n\n### Operating System\n\nLinux Mint 20.3\n\n### Description\n\n1. Install Python (e.g. using `pyenv`).\r\n2. Use that version of Python to run `meltano install`.\r\n3. Uninstall that version of Python.\r\n4. Install a new version of Python (e.g. going from version 3.9.X to 3.9.Y)\r\n5. Use that version of Python to run `meltano install` again:\r\n\r\n```\r\nInstalling X plugins...\r\n[...]\r\n\r\nNeed help fixing this problem? 
Visit http://melta.no/ for troubleshooting steps, or to\r\njoin our friendly Slack community.\r\n\r\n[Errno 2] No such file or directory: '<project directory>/.meltano/<installed plugin>/venv/bin/python'\r\n```\r\n\r\nThe venvs managed by Meltano assume that the underlying Python executable that `venv/bin/python` links to will not be deleted. In reality it may be, and if it is we should detect this and automatically replace the venv (as if the `--clean` flag has been provided).\n\n### Code\n\n_No response_\n", "before_files": [{"content": "\"\"\"Manage Python virtual environments.\"\"\"\n\nfrom __future__ import annotations\n\nimport asyncio\nimport hashlib\nimport logging\nimport os\nimport platform\nimport shutil\nimport subprocess\nimport sys\nfrom asyncio.subprocess import Process\nfrom collections import namedtuple\nfrom collections.abc import Iterable\nfrom pathlib import Path\n\nfrom meltano.core.error import AsyncSubprocessError\nfrom meltano.core.project import Project\n\nlogger = logging.getLogger(__name__)\n\nVenvSpecs = namedtuple(\"VenvSpecs\", (\"lib_dir\", \"bin_dir\", \"site_packages_dir\"))\n\nPOSIX = VenvSpecs(\n lib_dir=\"lib\",\n bin_dir=\"bin\",\n site_packages_dir=os.path.join(\n \"lib\",\n f\"python{'.'.join(str(part) for part in sys.version_info[:2])}\",\n \"site-packages\",\n ),\n)\n\nNT = VenvSpecs(\n lib_dir=\"Lib\",\n bin_dir=\"Scripts\",\n site_packages_dir=os.path.join(\"Lib\", \"site-packages\"),\n)\n\nPLATFORM_SPECS = {\"Linux\": POSIX, \"Darwin\": POSIX, \"Windows\": NT}\n\n\ndef venv_platform_specs():\n \"\"\"Get virtual environment sub-path info for the current platform.\n\n Raises:\n Exception: This platform is not supported.\n\n Returns:\n Virtual environment sub-path info for the current platform.\n \"\"\"\n system = platform.system()\n try:\n return PLATFORM_SPECS[system]\n except KeyError as ex:\n raise Exception(f\"Platform {system!r} not supported.\") from ex\n\n\nPIP_PACKAGES = (\"pip\", \"setuptools\", \"wheel\")\n\n\nclass VirtualEnv:\n \"\"\"Info about a single virtual environment.\"\"\"\n\n def __init__(self, root: Path):\n \"\"\"Initialize the `VirtualEnv` instance.\n\n Args:\n root: The root directory of the virtual environment.\n \"\"\"\n self.root = root.resolve()\n self.specs = venv_platform_specs()\n\n def __getattr__(self, key: str):\n \"\"\"Get a specific attribute from this instance.\n\n Used to provide `VenvSpecs` attributes for this specific virtual environment.\n\n Args:\n key: The attribute name. 
Must be one of the `VenvSpecs` attributes.\n\n Returns:\n The root directory of this virtual environment joined to the requested\n platform-specific path using this platform's `VenvSpecs` instance.\n \"\"\"\n return self.root / getattr(self.specs, key)\n\n def __str__(self):\n \"\"\"_summary_.\n\n Returns:\n _description_.\n \"\"\"\n return str(self.root)\n\n\nasync def exec_async(*args, **kwargs) -> Process:\n \"\"\"Run an executable asyncronously in a subprocess.\n\n Args:\n args: Positional arguments for `asyncio.create_subprocess_exec`.\n kwargs: Keyword arguments for `asyncio.create_subprocess_exec`.\n\n Raises:\n AsyncSubprocessError: The command failed.\n\n Returns:\n The subprocess.\n \"\"\"\n run = await asyncio.create_subprocess_exec(\n *args,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n **kwargs,\n )\n await run.wait()\n\n if run.returncode != 0:\n raise AsyncSubprocessError(\"Command failed\", run)\n\n return run\n\n\ndef fingerprint(pip_install_args: Iterable[str]) -> str:\n \"\"\"Generate a hash identifying pip install args.\n\n Arguments are sorted and deduplicated before the hash is generated.\n\n Args:\n pip_install_args: Arguments for `pip install`.\n\n Returns:\n The SHA256 hash hex digest of the sorted set of pip install args.\n \"\"\"\n return hashlib.sha256(\" \".join(sorted(set(pip_install_args))).encode()).hexdigest()\n\n\nclass VenvService: # noqa: WPS214\n \"\"\"Manages virtual environments.\n\n The methods in this class are not threadsafe.\n \"\"\"\n\n def __init__(self, project: Project, namespace: str = \"\", name: str = \"\"):\n \"\"\"Initialize the `VenvService`.\n\n Args:\n project: The Meltano project.\n namespace: The namespace for the venv, e.g. a Plugin type.\n name: The name of the venv, e.g. a Plugin name.\n \"\"\"\n self.project = project\n self.namespace = namespace\n self.name = name\n self.venv = VirtualEnv(self.project.venvs_dir(namespace, name))\n self.python_path = self.venv.bin_dir / \"python\"\n self.plugin_fingerprint_path = self.venv.root / \".meltano_plugin_fingerprint\"\n\n async def install(self, pip_install_args: list[str], clean: bool = False) -> None:\n \"\"\"Configure a virtual environment, then run pip install with the given args.\n\n Args:\n pip_install_args: Arguments passed to `pip install`.\n clean: Whether to not attempt to use an existing virtual environment.\n \"\"\"\n if not clean and self.requires_clean_install(pip_install_args):\n logger.debug(\n f\"Packages for '{self.namespace}/{self.name}' have changed so performing a clean install.\"\n )\n clean = True\n\n self.clean_run_files()\n await self._pip_install(pip_install_args=pip_install_args, clean=clean)\n self.write_fingerprint(pip_install_args)\n\n def requires_clean_install(self, pip_install_args: list[str]) -> bool:\n \"\"\"Determine whether a clean install is needed.\n\n Args:\n pip_install_args: The arguments being passed to `pip install`, used\n for fingerprinting the installation.\n\n Returns:\n Whether virtual environment doesn't exist or can't be reused.\n \"\"\"\n if self.venv.site_packages_dir.joinpath(\"meltano_venv.pth\").exists():\n # clean up deprecated feature\n return True\n existing_fingerprint = self.read_fingerprint()\n return (\n existing_fingerprint != fingerprint(pip_install_args)\n if existing_fingerprint\n else True\n )\n\n def clean_run_files(self) -> None:\n \"\"\"Destroy cached configuration files, if they exist.\"\"\"\n try:\n shutil.rmtree(self.project.run_dir(self.name, make_dirs=False))\n except FileNotFoundError:\n 
logger.debug(\"No cached configuration files to remove\")\n\n def clean(self) -> None:\n \"\"\"Destroy the virtual environment, if it exists.\"\"\"\n try:\n shutil.rmtree(self.venv.root)\n logger.debug(\n \"Removed old virtual environment for '%s/%s'\", # noqa: WPS323\n self.namespace,\n self.name,\n )\n except FileNotFoundError:\n # If the VirtualEnv has never been created before do nothing\n logger.debug(\"No old virtual environment to remove\")\n\n async def create(self) -> Process:\n \"\"\"Create a new virtual environment.\n\n Raises:\n AsyncSubprocessError: The virtual environment could not be created.\n\n Returns:\n The Python process creating the virtual environment.\n \"\"\"\n logger.debug(f\"Creating virtual environment for '{self.namespace}/{self.name}'\")\n try:\n return await exec_async(sys.executable, \"-m\", \"venv\", str(self.venv))\n except AsyncSubprocessError as err:\n raise AsyncSubprocessError(\n f\"Could not create the virtualenv for '{self.namespace}/{self.name}'\",\n err.process,\n ) from err\n\n async def upgrade_pip(self) -> Process:\n \"\"\"Upgrade the `pip` package to the latest version in the virtual environment.\n\n Raises:\n AsyncSubprocessError: Failed to upgrade pip to the latest version.\n\n Returns:\n The process running `pip install --upgrade ...`.\n \"\"\"\n logger.debug(f\"Upgrading pip for '{self.namespace}/{self.name}'\")\n try:\n return await self._pip_install([\"--upgrade\", *PIP_PACKAGES])\n except AsyncSubprocessError as err:\n raise AsyncSubprocessError(\n \"Failed to upgrade pip to the latest version.\", err.process\n ) from err\n\n def read_fingerprint(self) -> str | None:\n \"\"\"Get the fingerprint of the existing virtual environment.\n\n Returns:\n The fingerprint of the existing virtual environment if it exists.\n `None` otherwise.\n \"\"\"\n if not self.plugin_fingerprint_path.exists():\n return None\n with open(self.plugin_fingerprint_path) as fingerprint_file:\n return fingerprint_file.read()\n\n def write_fingerprint(self, pip_install_args: list[str]) -> None:\n \"\"\"Save the fingerprint for this installation.\n\n Args:\n pip_install_args: The arguments being passed to `pip install`.\n \"\"\"\n with open(self.plugin_fingerprint_path, \"wt\") as fingerprint_file:\n fingerprint_file.write(fingerprint(pip_install_args))\n\n def exec_path(self, executable: str) -> Path:\n \"\"\"Return the absolute path for the given executable in the virtual environment.\n\n Args:\n executable: The path to the executable relative to the venv bin directory.\n\n Returns:\n The venv bin directory joined to the provided executable.\n \"\"\"\n return self.venv.bin_dir / executable\n\n async def _pip_install(\n self, pip_install_args: list[str], clean: bool = False\n ) -> Process:\n \"\"\"Install a package using `pip` in the proper virtual environment.\n\n Args:\n pip_install_args: The arguments to pass to `pip install`.\n clean: Whether the installation should be done in a clean venv.\n\n Raises:\n AsyncSubprocessError: The command failed.\n\n Returns:\n The process running `pip install` with the provided args.\n \"\"\"\n if clean:\n self.clean()\n await self.create()\n await self.upgrade_pip()\n\n pip_install_args_str = \" \".join(pip_install_args)\n log_msg_prefix = (\n f\"Upgrading with args {pip_install_args_str!r} in existing\"\n if \"--upgrade\" in pip_install_args\n else f\"Installing with args {pip_install_args_str!r} into\"\n )\n logger.debug(\n f\"{log_msg_prefix} virtual environment for '{self.namespace}/{self.name}'\"\n )\n\n try:\n return await 
exec_async(\n str(self.python_path), \"-m\", \"pip\", \"install\", *pip_install_args\n )\n except AsyncSubprocessError as err:\n raise AsyncSubprocessError(\n f\"Failed to install plugin '{self.name}'.\", err.process\n ) from err\n", "path": "src/meltano/core/venv_service.py"}]}
| 3,922 | 710 |
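A minimal standalone sketch of the lazy-check pattern used in the Meltano patch above: each reason to rebuild a virtual environment becomes one yielded boolean, and `any()` stops at the first check that fires. The function name, parameters, and example path are illustrative assumptions, not Meltano's actual API.

```python
from pathlib import Path
from typing import Optional


def needs_clean_install(venv_bin: Path,
                        expected_fingerprint: str,
                        stored_fingerprint: Optional[str]) -> bool:
    """Return True if the virtual environment should be rebuilt from scratch."""
    def checks():
        # The interpreter the venv was created from no longer exists.
        yield not (venv_bin / "python").exists()
        # No fingerprint was ever recorded for this venv.
        yield stored_fingerprint is None
        # The recorded fingerprint no longer matches the requested install args.
        yield stored_fingerprint != expected_fingerprint

    # any() short-circuits, so later checks never run once one is true.
    return any(checks())


# A venv whose python binary has disappeared is flagged for a clean install.
print(needs_clean_install(Path("/tmp/missing-venv/bin"), "abc123", "abc123"))  # True (path does not exist)
```

The same short-circuit structure keeps the cheap existence check ahead of anything that has to read files, which matches the design choice in the patch of reading the fingerprint only in the later yields.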
gh_patches_debug_20571
|
rasdani/github-patches
|
git_diff
|
python__peps-2689
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Link topic page(s) in the rendered topic header
As a follow-up to #2579 implementing #2572, it would be very useful to have the `Topic` header in the rendered PEPs actually link to said topic(s); this would be easy to implement (so long as we keep the display names consistent with the URLs, minus caps/whitespace).
</issue>
<code>
[start of pep_sphinx_extensions/pep_processor/transforms/pep_headers.py]
1 from pathlib import Path
2 import re
3
4 from docutils import nodes
5 from docutils import transforms
6 from sphinx import errors
7
8 from pep_sphinx_extensions.pep_processor.transforms import pep_zero
9 from pep_sphinx_extensions.pep_processor.transforms.pep_zero import _mask_email
10
11
12 class PEPParsingError(errors.SphinxError):
13 pass
14
15
16 # PEPHeaders is identical to docutils.transforms.peps.Headers excepting bdfl-delegate, sponsor & superseeded-by
17 class PEPHeaders(transforms.Transform):
18 """Process fields in a PEP's initial RFC-2822 header."""
19
20 # Run before pep_processor.transforms.pep_title.PEPTitle
21 default_priority = 330
22
23 def apply(self) -> None:
24 if not Path(self.document["source"]).match("pep-*"):
25 return # not a PEP file, exit early
26
27 if not len(self.document):
28 raise PEPParsingError("Document tree is empty.")
29
30 header = self.document[0]
31 if not isinstance(header, nodes.field_list) or "rfc2822" not in header["classes"]:
32 raise PEPParsingError("Document does not begin with an RFC-2822 header; it is not a PEP.")
33
34 # PEP number should be the first field
35 pep_field = header[0]
36 if pep_field[0].astext().lower() != "pep":
37 raise PEPParsingError("Document does not contain an RFC-2822 'PEP' header!")
38
39 # Extract PEP number
40 value = pep_field[1].astext()
41 try:
42 pep_num = int(value)
43 except ValueError:
44 raise PEPParsingError(f"'PEP' header must contain an integer. '{value}' is invalid!")
45
46 # Special processing for PEP 0.
47 if pep_num == 0:
48 pending = nodes.pending(pep_zero.PEPZero)
49 self.document.insert(1, pending)
50 self.document.note_pending(pending)
51
52 # If there are less than two headers in the preamble, or if Title is absent
53 if len(header) < 2 or header[1][0].astext().lower() != "title":
54 raise PEPParsingError("No title!")
55
56 fields_to_remove = []
57 for field in header:
58 name = field[0].astext().lower()
59 body = field[1]
60 if len(body) == 0:
61 # body is empty
62 continue
63 elif len(body) > 1:
64 msg = f"PEP header field body contains multiple elements:\n{field.pformat(level=1)}"
65 raise PEPParsingError(msg)
66 elif not isinstance(body[0], nodes.paragraph): # len(body) == 1
67 msg = f"PEP header field body may only contain a single paragraph:\n{field.pformat(level=1)}"
68 raise PEPParsingError(msg)
69
70 para = body[0]
71 if name in {"author", "bdfl-delegate", "pep-delegate", "sponsor"}:
72 # mask emails
73 for node in para:
74 if not isinstance(node, nodes.reference):
75 continue
76 node.replace_self(_mask_email(node))
77 elif name in {"discussions-to", "resolution", "post-history"}:
78 # Prettify mailing list and Discourse links
79 for node in para:
80 if (not isinstance(node, nodes.reference)
81 or not node["refuri"]):
82 continue
83 # Have known mailto links link to their main list pages
84 if node["refuri"].lower().startswith("mailto:"):
85 node["refuri"] = _generate_list_url(node["refuri"])
86 parts = node["refuri"].lower().split("/")
87 if len(parts) <= 2 or parts[2] not in LINK_PRETTIFIERS:
88 continue
89 pretty_title = _make_link_pretty(str(node["refuri"]))
90 if name == "post-history":
91 node["reftitle"] = pretty_title
92 else:
93 node[0] = nodes.Text(pretty_title)
94 elif name in {"replaces", "superseded-by", "requires"}:
95 # replace PEP numbers with normalised list of links to PEPs
96 new_body = []
97 for pep_str in re.split(r",?\s+", body.astext()):
98 target = self.document.settings.pep_url.format(int(pep_str))
99 new_body += [nodes.reference("", pep_str, refuri=target), nodes.Text(", ")]
100 para[:] = new_body[:-1] # drop trailing space
101 elif name in {"last-modified", "content-type", "version"}:
102 # Mark unneeded fields
103 fields_to_remove.append(field)
104
105 # Remove any trailing commas and whitespace in the headers
106 if para and isinstance(para[-1], nodes.Text):
107 last_node = para[-1]
108 if last_node.astext().strip() == ",":
109 last_node.parent.remove(last_node)
110 else:
111 para[-1] = last_node.rstrip().rstrip(",")
112
113 # Remove unneeded fields
114 for field in fields_to_remove:
115 field.parent.remove(field)
116
117
118 def _generate_list_url(mailto: str) -> str:
119 list_name_domain = mailto.lower().removeprefix("mailto:").strip()
120 list_name = list_name_domain.split("@")[0]
121
122 if list_name_domain.endswith("@googlegroups.com"):
123 return f"https://groups.google.com/g/{list_name}"
124
125 if not list_name_domain.endswith("@python.org"):
126 return mailto
127
128 # Active lists not yet on Mailman3; this URL will redirect if/when they are
129 if list_name in {"csv", "db-sig", "doc-sig", "python-list", "web-sig"}:
130 return f"https://mail.python.org/mailman/listinfo/{list_name}"
131 # Retired lists that are closed for posting, so only the archive matters
132 if list_name in {"import-sig", "python-3000"}:
133 return f"https://mail.python.org/pipermail/{list_name}/"
134 # The remaining lists (and any new ones) are all on Mailman3/Hyperkitty
135 return f"https://mail.python.org/archives/list/{list_name}@python.org/"
136
137
138 def _process_list_url(parts: list[str]) -> tuple[str, str]:
139 item_type = "list"
140
141 # HyperKitty (Mailman3) archive structure is
142 # https://mail.python.org/archives/list/<list_name>/thread/<id>
143 if "archives" in parts:
144 list_name = (
145 parts[parts.index("archives") + 2].removesuffix("@python.org"))
146 if len(parts) > 6 and parts[6] in {"message", "thread"}:
147 item_type = parts[6]
148
149 # Mailman3 list info structure is
150 # https://mail.python.org/mailman3/lists/<list_name>.python.org/
151 elif "mailman3" in parts:
152 list_name = (
153 parts[parts.index("mailman3") + 2].removesuffix(".python.org"))
154
155 # Pipermail (Mailman) archive structure is
156 # https://mail.python.org/pipermail/<list_name>/<month>-<year>/<id>
157 elif "pipermail" in parts:
158 list_name = parts[parts.index("pipermail") + 1]
159 item_type = "message" if len(parts) > 6 else "list"
160
161 # Mailman listinfo structure is
162 # https://mail.python.org/mailman/listinfo/<list_name>
163 elif "listinfo" in parts:
164 list_name = parts[parts.index("listinfo") + 1]
165
166 # Not a link to a mailing list, message or thread
167 else:
168 raise ValueError(
169 f"{'/'.join(parts)} not a link to a list, message or thread")
170
171 return list_name, item_type
172
173
174 def _process_discourse_url(parts: list[str]) -> tuple[str, str]:
175 item_name = "discourse"
176
177 if len(parts) < 5 or ("t" not in parts and "c" not in parts):
178 raise ValueError(
179 f"{'/'.join(parts)} not a link to a Discourse thread or category")
180
181 first_subpart = parts[4]
182 has_title = not first_subpart.isnumeric()
183
184 if "t" in parts:
185 item_type = "post" if len(parts) > (5 + has_title) else "thread"
186 elif "c" in parts:
187 item_type = "category"
188 if has_title:
189 item_name = f"{first_subpart.replace('-', ' ')} {item_name}"
190
191 return item_name, item_type
192
193
194 # Domains supported for pretty URL parsing
195 LINK_PRETTIFIERS = {
196 "mail.python.org": _process_list_url,
197 "discuss.python.org": _process_discourse_url,
198 }
199
200
201 def _process_pretty_url(url: str) -> tuple[str, str]:
202 parts = url.lower().strip().strip("/").split("/")
203 try:
204 item_name, item_type = LINK_PRETTIFIERS[parts[2]](parts)
205 except KeyError as error:
206 raise ValueError(
207 f"{url} not a link to a recognized domain to prettify") from error
208 item_name = item_name.title().replace("Sig", "SIG").replace("Pep", "PEP")
209 return item_name, item_type
210
211
212 def _make_link_pretty(url: str) -> str:
213 item_name, item_type = _process_pretty_url(url)
214 return f"{item_name} {item_type}"
215
[end of pep_sphinx_extensions/pep_processor/transforms/pep_headers.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/pep_sphinx_extensions/pep_processor/transforms/pep_headers.py b/pep_sphinx_extensions/pep_processor/transforms/pep_headers.py
--- a/pep_sphinx_extensions/pep_processor/transforms/pep_headers.py
+++ b/pep_sphinx_extensions/pep_processor/transforms/pep_headers.py
@@ -98,6 +98,17 @@
target = self.document.settings.pep_url.format(int(pep_str))
new_body += [nodes.reference("", pep_str, refuri=target), nodes.Text(", ")]
para[:] = new_body[:-1] # drop trailing space
+ elif name == "topic":
+ new_body = []
+ for topic_name in body.astext().split(","):
+ if topic_name:
+ target = f"/topic/{topic_name.lower().strip()}/"
+ new_body += [
+ nodes.reference("", topic_name, refuri=target),
+ nodes.Text(", "),
+ ]
+ if new_body:
+ para[:] = new_body[:-1] # Drop trailing space/comma
elif name in {"last-modified", "content-type", "version"}:
# Mark unneeded fields
fields_to_remove.append(field)
|
{"golden_diff": "diff --git a/pep_sphinx_extensions/pep_processor/transforms/pep_headers.py b/pep_sphinx_extensions/pep_processor/transforms/pep_headers.py\n--- a/pep_sphinx_extensions/pep_processor/transforms/pep_headers.py\n+++ b/pep_sphinx_extensions/pep_processor/transforms/pep_headers.py\n@@ -98,6 +98,17 @@\n target = self.document.settings.pep_url.format(int(pep_str))\n new_body += [nodes.reference(\"\", pep_str, refuri=target), nodes.Text(\", \")]\n para[:] = new_body[:-1] # drop trailing space\n+ elif name == \"topic\":\n+ new_body = []\n+ for topic_name in body.astext().split(\",\"):\n+ if topic_name:\n+ target = f\"/topic/{topic_name.lower().strip()}/\"\n+ new_body += [\n+ nodes.reference(\"\", topic_name, refuri=target),\n+ nodes.Text(\", \"),\n+ ]\n+ if new_body:\n+ para[:] = new_body[:-1] # Drop trailing space/comma\n elif name in {\"last-modified\", \"content-type\", \"version\"}:\n # Mark unneeded fields\n fields_to_remove.append(field)\n", "issue": "Link topic page(s) in the rendered topic header\nAs a followup to #2579 implementing #2572 , it would be very useful to have the `Topic` header in the rendered PEPs actually link to said topic(s), and would be easy to implement (so long as we keep the display names consistent with the URLs, minus caps/whitespace).\n", "before_files": [{"content": "from pathlib import Path\nimport re\n\nfrom docutils import nodes\nfrom docutils import transforms\nfrom sphinx import errors\n\nfrom pep_sphinx_extensions.pep_processor.transforms import pep_zero\nfrom pep_sphinx_extensions.pep_processor.transforms.pep_zero import _mask_email\n\n\nclass PEPParsingError(errors.SphinxError):\n pass\n\n\n# PEPHeaders is identical to docutils.transforms.peps.Headers excepting bdfl-delegate, sponsor & superseeded-by\nclass PEPHeaders(transforms.Transform):\n \"\"\"Process fields in a PEP's initial RFC-2822 header.\"\"\"\n\n # Run before pep_processor.transforms.pep_title.PEPTitle\n default_priority = 330\n\n def apply(self) -> None:\n if not Path(self.document[\"source\"]).match(\"pep-*\"):\n return # not a PEP file, exit early\n\n if not len(self.document):\n raise PEPParsingError(\"Document tree is empty.\")\n\n header = self.document[0]\n if not isinstance(header, nodes.field_list) or \"rfc2822\" not in header[\"classes\"]:\n raise PEPParsingError(\"Document does not begin with an RFC-2822 header; it is not a PEP.\")\n\n # PEP number should be the first field\n pep_field = header[0]\n if pep_field[0].astext().lower() != \"pep\":\n raise PEPParsingError(\"Document does not contain an RFC-2822 'PEP' header!\")\n\n # Extract PEP number\n value = pep_field[1].astext()\n try:\n pep_num = int(value)\n except ValueError:\n raise PEPParsingError(f\"'PEP' header must contain an integer. 
'{value}' is invalid!\")\n\n # Special processing for PEP 0.\n if pep_num == 0:\n pending = nodes.pending(pep_zero.PEPZero)\n self.document.insert(1, pending)\n self.document.note_pending(pending)\n\n # If there are less than two headers in the preamble, or if Title is absent\n if len(header) < 2 or header[1][0].astext().lower() != \"title\":\n raise PEPParsingError(\"No title!\")\n\n fields_to_remove = []\n for field in header:\n name = field[0].astext().lower()\n body = field[1]\n if len(body) == 0:\n # body is empty\n continue\n elif len(body) > 1:\n msg = f\"PEP header field body contains multiple elements:\\n{field.pformat(level=1)}\"\n raise PEPParsingError(msg)\n elif not isinstance(body[0], nodes.paragraph): # len(body) == 1\n msg = f\"PEP header field body may only contain a single paragraph:\\n{field.pformat(level=1)}\"\n raise PEPParsingError(msg)\n\n para = body[0]\n if name in {\"author\", \"bdfl-delegate\", \"pep-delegate\", \"sponsor\"}:\n # mask emails\n for node in para:\n if not isinstance(node, nodes.reference):\n continue\n node.replace_self(_mask_email(node))\n elif name in {\"discussions-to\", \"resolution\", \"post-history\"}:\n # Prettify mailing list and Discourse links\n for node in para:\n if (not isinstance(node, nodes.reference)\n or not node[\"refuri\"]):\n continue\n # Have known mailto links link to their main list pages\n if node[\"refuri\"].lower().startswith(\"mailto:\"):\n node[\"refuri\"] = _generate_list_url(node[\"refuri\"])\n parts = node[\"refuri\"].lower().split(\"/\")\n if len(parts) <= 2 or parts[2] not in LINK_PRETTIFIERS:\n continue\n pretty_title = _make_link_pretty(str(node[\"refuri\"]))\n if name == \"post-history\":\n node[\"reftitle\"] = pretty_title\n else:\n node[0] = nodes.Text(pretty_title)\n elif name in {\"replaces\", \"superseded-by\", \"requires\"}:\n # replace PEP numbers with normalised list of links to PEPs\n new_body = []\n for pep_str in re.split(r\",?\\s+\", body.astext()):\n target = self.document.settings.pep_url.format(int(pep_str))\n new_body += [nodes.reference(\"\", pep_str, refuri=target), nodes.Text(\", \")]\n para[:] = new_body[:-1] # drop trailing space\n elif name in {\"last-modified\", \"content-type\", \"version\"}:\n # Mark unneeded fields\n fields_to_remove.append(field)\n\n # Remove any trailing commas and whitespace in the headers\n if para and isinstance(para[-1], nodes.Text):\n last_node = para[-1]\n if last_node.astext().strip() == \",\":\n last_node.parent.remove(last_node)\n else:\n para[-1] = last_node.rstrip().rstrip(\",\")\n\n # Remove unneeded fields\n for field in fields_to_remove:\n field.parent.remove(field)\n\n\ndef _generate_list_url(mailto: str) -> str:\n list_name_domain = mailto.lower().removeprefix(\"mailto:\").strip()\n list_name = list_name_domain.split(\"@\")[0]\n\n if list_name_domain.endswith(\"@googlegroups.com\"):\n return f\"https://groups.google.com/g/{list_name}\"\n\n if not list_name_domain.endswith(\"@python.org\"):\n return mailto\n\n # Active lists not yet on Mailman3; this URL will redirect if/when they are\n if list_name in {\"csv\", \"db-sig\", \"doc-sig\", \"python-list\", \"web-sig\"}:\n return f\"https://mail.python.org/mailman/listinfo/{list_name}\"\n # Retired lists that are closed for posting, so only the archive matters\n if list_name in {\"import-sig\", \"python-3000\"}:\n return f\"https://mail.python.org/pipermail/{list_name}/\"\n # The remaining lists (and any new ones) are all on Mailman3/Hyperkitty\n return 
f\"https://mail.python.org/archives/list/{list_name}@python.org/\"\n\n\ndef _process_list_url(parts: list[str]) -> tuple[str, str]:\n item_type = \"list\"\n\n # HyperKitty (Mailman3) archive structure is\n # https://mail.python.org/archives/list/<list_name>/thread/<id>\n if \"archives\" in parts:\n list_name = (\n parts[parts.index(\"archives\") + 2].removesuffix(\"@python.org\"))\n if len(parts) > 6 and parts[6] in {\"message\", \"thread\"}:\n item_type = parts[6]\n\n # Mailman3 list info structure is\n # https://mail.python.org/mailman3/lists/<list_name>.python.org/\n elif \"mailman3\" in parts:\n list_name = (\n parts[parts.index(\"mailman3\") + 2].removesuffix(\".python.org\"))\n\n # Pipermail (Mailman) archive structure is\n # https://mail.python.org/pipermail/<list_name>/<month>-<year>/<id>\n elif \"pipermail\" in parts:\n list_name = parts[parts.index(\"pipermail\") + 1]\n item_type = \"message\" if len(parts) > 6 else \"list\"\n\n # Mailman listinfo structure is\n # https://mail.python.org/mailman/listinfo/<list_name>\n elif \"listinfo\" in parts:\n list_name = parts[parts.index(\"listinfo\") + 1]\n\n # Not a link to a mailing list, message or thread\n else:\n raise ValueError(\n f\"{'/'.join(parts)} not a link to a list, message or thread\")\n\n return list_name, item_type\n\n\ndef _process_discourse_url(parts: list[str]) -> tuple[str, str]:\n item_name = \"discourse\"\n\n if len(parts) < 5 or (\"t\" not in parts and \"c\" not in parts):\n raise ValueError(\n f\"{'/'.join(parts)} not a link to a Discourse thread or category\")\n\n first_subpart = parts[4]\n has_title = not first_subpart.isnumeric()\n\n if \"t\" in parts:\n item_type = \"post\" if len(parts) > (5 + has_title) else \"thread\"\n elif \"c\" in parts:\n item_type = \"category\"\n if has_title:\n item_name = f\"{first_subpart.replace('-', ' ')} {item_name}\"\n\n return item_name, item_type\n\n\n# Domains supported for pretty URL parsing\nLINK_PRETTIFIERS = {\n \"mail.python.org\": _process_list_url,\n \"discuss.python.org\": _process_discourse_url,\n}\n\n\ndef _process_pretty_url(url: str) -> tuple[str, str]:\n parts = url.lower().strip().strip(\"/\").split(\"/\")\n try:\n item_name, item_type = LINK_PRETTIFIERS[parts[2]](parts)\n except KeyError as error:\n raise ValueError(\n f\"{url} not a link to a recognized domain to prettify\") from error\n item_name = item_name.title().replace(\"Sig\", \"SIG\").replace(\"Pep\", \"PEP\")\n return item_name, item_type\n\n\ndef _make_link_pretty(url: str) -> str:\n item_name, item_type = _process_pretty_url(url)\n return f\"{item_name} {item_type}\"\n", "path": "pep_sphinx_extensions/pep_processor/transforms/pep_headers.py"}]}
| 3,263 | 274 |
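A self-contained sketch of the display-name-to-URL convention the patch above relies on: the Topic header value is split on commas and each name maps to `/topic/<lowercased name>/`. The helper name and the sample topics are assumptions for illustration; the real transform builds docutils `reference` nodes as shown in the diff.

```python
def topic_links(topic_field: str) -> list[tuple[str, str]]:
    """Map a comma-separated Topic header value to (display name, URL) pairs."""
    links = []
    for name in topic_field.split(","):
        name = name.strip()
        if name:
            # Display names stay consistent with the URLs minus case and
            # whitespace, which is the constraint the issue calls out.
            links.append((name, f"/topic/{name.lower()}/"))
    return links


print(topic_links("Governance, Packaging"))
# [('Governance', '/topic/governance/'), ('Packaging', '/topic/packaging/')]
```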
gh_patches_debug_9538
|
rasdani/github-patches
|
git_diff
|
aws-cloudformation__cfn-lint-481
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Can't run in Travis CI xenial
I was testing the new Travis CI image:
https://blog.travis-ci.com/2018-11-08-xenial-release
https://docs.travis-ci.com/user/reference/xenial/
My install script ran `pip install --user --upgrade cfn-lint` and then `cfn-lint my.cfn.yaml` which gave this error:
```Traceback (most recent call last):
File "/home/travis/.local/bin/cfn-lint", line 7, in <module>
from cfnlint.__main__ import main
File "/home/travis/.local/lib/python2.7/site-packages/cfnlint/__main__.py", line 19, in <module>
import cfnlint.core
File "/home/travis/.local/lib/python2.7/site-packages/cfnlint/core.py", line 24, in <module>
import cfnlint.maintenance
File "/home/travis/.local/lib/python2.7/site-packages/cfnlint/maintenance.py", line 19, in <module>
import requests
File "/usr/lib/python2.7/dist-packages/requests/__init__.py", line 58, in <module>
from . import utils
File "/usr/lib/python2.7/dist-packages/requests/utils.py", line 25, in <module>
from . import certs
ImportError: cannot import name certs
```
I "fixed" this problem by just changing to Python3 in my build script:
```
pyenv global 3.7
pip3 install --user --upgrade cfn-lint
cfn-lint stack.cfn.yaml
```
But there might be some other lingering issue here
</issue>
<code>
[start of setup.py]
1 """
2 Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
3
4 Permission is hereby granted, free of charge, to any person obtaining a copy of this
5 software and associated documentation files (the "Software"), to deal in the Software
6 without restriction, including without limitation the rights to use, copy, modify,
7 merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
8 permit persons to whom the Software is furnished to do so.
9
10 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
11 INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
12 PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
13 HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
14 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
15 SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
16 """
17 import codecs
18 import re
19 from setuptools import find_packages
20 from setuptools import setup
21
22
23 def get_version(filename):
24 with codecs.open(filename, 'r', 'utf-8') as fp:
25 contents = fp.read()
26 return re.search(r"__version__ = ['\"]([^'\"]+)['\"]", contents).group(1)
27
28
29 version = get_version('src/cfnlint/version.py')
30
31
32 with open('README.md') as f:
33 readme = f.read()
34
35 setup(
36 name='cfn-lint',
37 version=version,
38 description=('checks cloudformation for practices and behaviour \
39 that could potentially be improved'),
40 long_description=readme,
41 long_description_content_type="text/markdown",
42 keywords='aws, lint',
43 author='kddejong',
44 author_email='[email protected]',
45 url='https://github.com/awslabs/cfn-python-lint',
46 package_dir={'': 'src'},
47 package_data={'cfnlint': [
48 'data/CloudSpecs/*.json',
49 'data/AdditionalSpecs/*.json',
50 'data/Serverless/*.json',
51 'data/CfnLintCli/config/schema.json'
52 ]},
53 packages=find_packages('src'),
54 zip_safe=False,
55 install_requires=['pyyaml', 'six', 'requests', 'aws-sam-translator>=1.6.0', 'jsonpatch', 'jsonschema~=2.6.0', 'pathlib2'],
56 python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*',
57 entry_points={
58 'console_scripts': [
59 'cfn-lint = cfnlint.__main__:main'
60 ]
61 },
62 license='MIT no attribution',
63 test_suite="unittest",
64 classifiers=[
65 'Development Status :: 5 - Production/Stable',
66 'Intended Audience :: Developers',
67 'License :: OSI Approved :: MIT License',
68 'Natural Language :: English',
69 'Operating System :: OS Independent',
70 'Programming Language :: Python :: 2',
71 'Programming Language :: Python :: 2.7',
72 'Programming Language :: Python :: 3',
73 'Programming Language :: Python :: 3.4',
74 'Programming Language :: Python :: 3.5',
75 'Programming Language :: Python :: 3.6',
76 ],
77 )
78
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -52,7 +52,15 @@
]},
packages=find_packages('src'),
zip_safe=False,
- install_requires=['pyyaml', 'six', 'requests', 'aws-sam-translator>=1.6.0', 'jsonpatch', 'jsonschema~=2.6.0', 'pathlib2'],
+ install_requires=[
+ 'pyyaml',
+ 'six~=1.11',
+ 'requests>=2.15.0',
+ 'aws-sam-translator>=1.8.0',
+ 'jsonpatch',
+ 'jsonschema~=2.6',
+ 'pathlib2>=2.3.0;python_version<"3.4"'
+ ],
python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*',
entry_points={
'console_scripts': [
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -52,7 +52,15 @@\n ]},\n packages=find_packages('src'),\n zip_safe=False,\n- install_requires=['pyyaml', 'six', 'requests', 'aws-sam-translator>=1.6.0', 'jsonpatch', 'jsonschema~=2.6.0', 'pathlib2'],\n+ install_requires=[\n+ 'pyyaml',\n+ 'six~=1.11',\n+ 'requests>=2.15.0',\n+ 'aws-sam-translator>=1.8.0',\n+ 'jsonpatch',\n+ 'jsonschema~=2.6',\n+ 'pathlib2>=2.3.0;python_version<\"3.4\"'\n+ ],\n python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*',\n entry_points={\n 'console_scripts': [\n", "issue": "Can't run in Travis CI xenial\nI was testing the new Travis CI image:\r\n\r\nhttps://blog.travis-ci.com/2018-11-08-xenial-release\r\nhttps://docs.travis-ci.com/user/reference/xenial/\r\n\r\nMy install script ran `pip install --user --upgrade cfn-lint` and then `cfn-lint my.cfn.yaml` which gave this error:\r\n\r\n```Traceback (most recent call last):\r\n File \"/home/travis/.local/bin/cfn-lint\", line 7, in <module>\r\n from cfnlint.__main__ import main\r\n File \"/home/travis/.local/lib/python2.7/site-packages/cfnlint/__main__.py\", line 19, in <module>\r\n import cfnlint.core\r\n File \"/home/travis/.local/lib/python2.7/site-packages/cfnlint/core.py\", line 24, in <module>\r\n import cfnlint.maintenance\r\n File \"/home/travis/.local/lib/python2.7/site-packages/cfnlint/maintenance.py\", line 19, in <module>\r\n import requests\r\n File \"/usr/lib/python2.7/dist-packages/requests/__init__.py\", line 58, in <module>\r\n from . import utils\r\n File \"/usr/lib/python2.7/dist-packages/requests/utils.py\", line 25, in <module>\r\n from . import certs\r\nImportError: cannot import name certs\r\n```\r\n\r\nI \"fixed\" this problem by just changing to Python3 in my build script:\r\n\r\n```\r\npyenv global 3.7\r\npip3 install --user --upgrade cfn-lint\r\ncfn-lint stack.cfn.yaml\r\n```\r\n\r\nBut there might be some other lingering issue here\n", "before_files": [{"content": "\"\"\"\n Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n\n Permission is hereby granted, free of charge, to any person obtaining a copy of this\n software and associated documentation files (the \"Software\"), to deal in the Software\n without restriction, including without limitation the rights to use, copy, modify,\n merge, publish, distribute, sublicense, and/or sell copies of the Software, and to\n permit persons to whom the Software is furnished to do so.\n\n THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,\n INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A\n PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT\n HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION\n OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\"\"\"\nimport codecs\nimport re\nfrom setuptools import find_packages\nfrom setuptools import setup\n\n\ndef get_version(filename):\n with codecs.open(filename, 'r', 'utf-8') as fp:\n contents = fp.read()\n return re.search(r\"__version__ = ['\\\"]([^'\\\"]+)['\\\"]\", contents).group(1)\n\n\nversion = get_version('src/cfnlint/version.py')\n\n\nwith open('README.md') as f:\n readme = f.read()\n\nsetup(\n name='cfn-lint',\n version=version,\n description=('checks cloudformation for practices and behaviour \\\n that could potentially be improved'),\n long_description=readme,\n long_description_content_type=\"text/markdown\",\n keywords='aws, lint',\n author='kddejong',\n author_email='[email protected]',\n url='https://github.com/awslabs/cfn-python-lint',\n package_dir={'': 'src'},\n package_data={'cfnlint': [\n 'data/CloudSpecs/*.json',\n 'data/AdditionalSpecs/*.json',\n 'data/Serverless/*.json',\n 'data/CfnLintCli/config/schema.json'\n ]},\n packages=find_packages('src'),\n zip_safe=False,\n install_requires=['pyyaml', 'six', 'requests', 'aws-sam-translator>=1.6.0', 'jsonpatch', 'jsonschema~=2.6.0', 'pathlib2'],\n python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*',\n entry_points={\n 'console_scripts': [\n 'cfn-lint = cfnlint.__main__:main'\n ]\n },\n license='MIT no attribution',\n test_suite=\"unittest\",\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: MIT License',\n 'Natural Language :: English',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n ],\n)\n", "path": "setup.py"}]}
| 1,761 | 221 |
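The fix above is essentially a dependency-pinning exercise, so a short generic sketch of the same patterns may help: a compatible-release pin (`~=`), a floor pin (`>=`), and a PEP 508 environment marker that restricts a backport to old interpreters. The list below mirrors the shape of the patch but is an illustrative example, not the project's authoritative requirements.

```python
# A generic install_requires list illustrating the pinning styles used above.
install_requires = [
    "six~=1.11",            # compatible release: >=1.11, <2.0
    "requests>=2.15.0",     # floor only: require a recent enough copy from PyPI
    "jsonschema~=2.6",
    'pathlib2>=2.3.0;python_version<"3.4"',  # marker: skipped on modern Pythons
]

for requirement in install_requires:
    print(requirement)
```

Presumably the floor on `requests` is what resolves the Travis failure: it makes pip install its own newer copy rather than reusing the broken distribution-packaged one that raised `cannot import name certs`.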
gh_patches_debug_30618
|
rasdani/github-patches
|
git_diff
|
qutebrowser__qutebrowser-4191
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Improvements for content->host-block-lists
- [ ] Handle local directories (read every file in the directory)
- [ ] Handle local paths without `file://`
</issue>
<code>
[start of qutebrowser/browser/adblock.py]
1 # vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
2
3 # Copyright 2014-2018 Florian Bruhin (The Compiler) <[email protected]>
4 #
5 # This file is part of qutebrowser.
6 #
7 # qutebrowser is free software: you can redistribute it and/or modify
8 # it under the terms of the GNU General Public License as published by
9 # the Free Software Foundation, either version 3 of the License, or
10 # (at your option) any later version.
11 #
12 # qutebrowser is distributed in the hope that it will be useful,
13 # but WITHOUT ANY WARRANTY; without even the implied warranty of
14 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 # GNU General Public License for more details.
16 #
17 # You should have received a copy of the GNU General Public License
18 # along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
19
20 """Functions related to ad blocking."""
21
22 import io
23 import os.path
24 import functools
25 import posixpath
26 import zipfile
27
28 from qutebrowser.browser import downloads
29 from qutebrowser.config import config
30 from qutebrowser.utils import objreg, standarddir, log, message
31 from qutebrowser.commands import cmdutils
32
33
34 def _guess_zip_filename(zf):
35 """Guess which file to use inside a zip file.
36
37 Args:
38 zf: A ZipFile instance.
39 """
40 files = zf.namelist()
41 if len(files) == 1:
42 return files[0]
43 else:
44 for e in files:
45 if posixpath.splitext(e)[0].lower() == 'hosts':
46 return e
47 raise FileNotFoundError("No hosts file found in zip")
48
49
50 def get_fileobj(byte_io):
51 """Get a usable file object to read the hosts file from."""
52 byte_io.seek(0) # rewind downloaded file
53 if zipfile.is_zipfile(byte_io):
54 byte_io.seek(0) # rewind what zipfile.is_zipfile did
55 zf = zipfile.ZipFile(byte_io)
56 filename = _guess_zip_filename(zf)
57 byte_io = zf.open(filename, mode='r')
58 else:
59 byte_io.seek(0) # rewind what zipfile.is_zipfile did
60 return byte_io
61
62
63 def _is_whitelisted_url(url):
64 """Check if the given URL is on the adblock whitelist.
65
66 Args:
67 url: The URL to check as QUrl.
68 """
69 for pattern in config.val.content.host_blocking.whitelist:
70 if pattern.matches(url):
71 return True
72 return False
73
74
75 class _FakeDownload:
76
77 """A download stub to use on_download_finished with local files."""
78
79 def __init__(self, fileobj):
80 self.basename = os.path.basename(fileobj.name)
81 self.fileobj = fileobj
82 self.successful = True
83
84
85 class HostBlocker:
86
87 """Manage blocked hosts based from /etc/hosts-like files.
88
89 Attributes:
90 _blocked_hosts: A set of blocked hosts.
91 _config_blocked_hosts: A set of blocked hosts from ~/.config.
92 _in_progress: The DownloadItems which are currently downloading.
93 _done_count: How many files have been read successfully.
94 _local_hosts_file: The path to the blocked-hosts file.
95 _config_hosts_file: The path to a blocked-hosts in ~/.config
96 """
97
98 def __init__(self):
99 self._blocked_hosts = set()
100 self._config_blocked_hosts = set()
101 self._in_progress = []
102 self._done_count = 0
103
104 data_dir = standarddir.data()
105 self._local_hosts_file = os.path.join(data_dir, 'blocked-hosts')
106 self._update_files()
107
108 config_dir = standarddir.config()
109 self._config_hosts_file = os.path.join(config_dir, 'blocked-hosts')
110
111 config.instance.changed.connect(self._update_files)
112
113 def is_blocked(self, url):
114 """Check if the given URL (as QUrl) is blocked."""
115 if not config.val.content.host_blocking.enabled:
116 return False
117 host = url.host()
118 return ((host in self._blocked_hosts or
119 host in self._config_blocked_hosts) and
120 not _is_whitelisted_url(url))
121
122 def _read_hosts_file(self, filename, target):
123 """Read hosts from the given filename.
124
125 Args:
126 filename: The file to read.
127 target: The set to store the hosts in.
128
129 Return:
130 True if a read was attempted, False otherwise
131 """
132 if not os.path.exists(filename):
133 return False
134
135 try:
136 with open(filename, 'r', encoding='utf-8') as f:
137 for line in f:
138 target.add(line.strip())
139 except (OSError, UnicodeDecodeError):
140 log.misc.exception("Failed to read host blocklist!")
141
142 return True
143
144 def read_hosts(self):
145 """Read hosts from the existing blocked-hosts file."""
146 self._blocked_hosts = set()
147
148 self._read_hosts_file(self._config_hosts_file,
149 self._config_blocked_hosts)
150
151 found = self._read_hosts_file(self._local_hosts_file,
152 self._blocked_hosts)
153
154 if not found:
155 args = objreg.get('args')
156 if (config.val.content.host_blocking.lists and
157 args.basedir is None and
158 config.val.content.host_blocking.enabled):
159 message.info("Run :adblock-update to get adblock lists.")
160
161 @cmdutils.register(instance='host-blocker')
162 def adblock_update(self):
163 """Update the adblock block lists.
164
165 This updates `~/.local/share/qutebrowser/blocked-hosts` with downloaded
166 host lists and re-reads `~/.config/qutebrowser/blocked-hosts`.
167 """
168 self._read_hosts_file(self._config_hosts_file,
169 self._config_blocked_hosts)
170 self._blocked_hosts = set()
171 self._done_count = 0
172 download_manager = objreg.get('qtnetwork-download-manager')
173 for url in config.val.content.host_blocking.lists:
174 if url.scheme() == 'file':
175 filename = url.toLocalFile()
176 try:
177 fileobj = open(filename, 'rb')
178 except OSError as e:
179 message.error("adblock: Error while reading {}: {}".format(
180 filename, e.strerror))
181 continue
182 download = _FakeDownload(fileobj)
183 self._in_progress.append(download)
184 self._on_download_finished(download)
185 else:
186 fobj = io.BytesIO()
187 fobj.name = 'adblock: ' + url.host()
188 target = downloads.FileObjDownloadTarget(fobj)
189 download = download_manager.get(url, target=target,
190 auto_remove=True)
191 self._in_progress.append(download)
192 download.finished.connect(
193 functools.partial(self._on_download_finished, download))
194
195 def _parse_line(self, line):
196 """Parse a line from a host file.
197
198 Args:
199 line: The bytes object to parse.
200
201 Returns:
202 True if parsing succeeded, False otherwise.
203 """
204 if line.startswith(b'#'):
205 # Ignoring comments early so we don't have to care about
206 # encoding errors in them.
207 return True
208
209 try:
210 line = line.decode('utf-8')
211 except UnicodeDecodeError:
212 log.misc.error("Failed to decode: {!r}".format(line))
213 return False
214
215 # Remove comments
216 try:
217 hash_idx = line.index('#')
218 line = line[:hash_idx]
219 except ValueError:
220 pass
221
222 line = line.strip()
223 # Skip empty lines
224 if not line:
225 return True
226
227 parts = line.split()
228 if len(parts) == 1:
229 # "one host per line" format
230 hosts = [parts[0]]
231 else:
232 # /etc/hosts format
233 hosts = parts[1:]
234
235 for host in hosts:
236 if ('.' in host and
237 not host.endswith('.localdomain') and
238 host != '0.0.0.0'):
239 self._blocked_hosts.add(host)
240
241 return True
242
243 def _merge_file(self, byte_io):
244 """Read and merge host files.
245
246 Args:
247 byte_io: The BytesIO object of the completed download.
248
249 Return:
250 A set of the merged hosts.
251 """
252 error_count = 0
253 line_count = 0
254 try:
255 f = get_fileobj(byte_io)
256 except (OSError, zipfile.BadZipFile, zipfile.LargeZipFile,
257 LookupError) as e:
258 message.error("adblock: Error while reading {}: {} - {}".format(
259 byte_io.name, e.__class__.__name__, e))
260 return
261
262 for line in f:
263 line_count += 1
264 ok = self._parse_line(line)
265 if not ok:
266 error_count += 1
267
268 log.misc.debug("{}: read {} lines".format(byte_io.name, line_count))
269 if error_count > 0:
270 message.error("adblock: {} read errors for {}".format(
271 error_count, byte_io.name))
272
273 def _on_lists_downloaded(self):
274 """Install block lists after files have been downloaded."""
275 with open(self._local_hosts_file, 'w', encoding='utf-8') as f:
276 for host in sorted(self._blocked_hosts):
277 f.write(host + '\n')
278 message.info("adblock: Read {} hosts from {} sources.".format(
279 len(self._blocked_hosts), self._done_count))
280
281 @config.change_filter('content.host_blocking.lists')
282 def _update_files(self):
283 """Update files when the config changed."""
284 if not config.val.content.host_blocking.lists:
285 try:
286 os.remove(self._local_hosts_file)
287 except FileNotFoundError:
288 pass
289 except OSError as e:
290 log.misc.exception("Failed to delete hosts file: {}".format(e))
291
292 def _on_download_finished(self, download):
293 """Check if all downloads are finished and if so, trigger reading.
294
295 Arguments:
296 download: The finished DownloadItem.
297 """
298 self._in_progress.remove(download)
299 if download.successful:
300 self._done_count += 1
301 try:
302 self._merge_file(download.fileobj)
303 finally:
304 download.fileobj.close()
305 if not self._in_progress:
306 try:
307 self._on_lists_downloaded()
308 except OSError:
309 log.misc.exception("Failed to write host block list!")
310
[end of qutebrowser/browser/adblock.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/qutebrowser/browser/adblock.py b/qutebrowser/browser/adblock.py
--- a/qutebrowser/browser/adblock.py
+++ b/qutebrowser/browser/adblock.py
@@ -173,15 +173,12 @@
for url in config.val.content.host_blocking.lists:
if url.scheme() == 'file':
filename = url.toLocalFile()
- try:
- fileobj = open(filename, 'rb')
- except OSError as e:
- message.error("adblock: Error while reading {}: {}".format(
- filename, e.strerror))
- continue
- download = _FakeDownload(fileobj)
- self._in_progress.append(download)
- self._on_download_finished(download)
+ if os.path.isdir(filename):
+ for filenames in os.scandir(filename):
+ if filenames.is_file():
+ self._import_local(filenames.path)
+ else:
+ self._import_local(filename)
else:
fobj = io.BytesIO()
fobj.name = 'adblock: ' + url.host()
@@ -192,6 +189,22 @@
download.finished.connect(
functools.partial(self._on_download_finished, download))
+ def _import_local(self, filename):
+ """Adds the contents of a file to the blocklist.
+
+ Args:
+ filename: path to a local file to import.
+ """
+ try:
+ fileobj = open(filename, 'rb')
+ except OSError as e:
+ message.error("adblock: Error while reading {}: {}".format(
+ filename, e.strerror))
+ return
+ download = _FakeDownload(fileobj)
+ self._in_progress.append(download)
+ self._on_download_finished(download)
+
def _parse_line(self, line):
"""Parse a line from a host file.
|
{"golden_diff": "diff --git a/qutebrowser/browser/adblock.py b/qutebrowser/browser/adblock.py\n--- a/qutebrowser/browser/adblock.py\n+++ b/qutebrowser/browser/adblock.py\n@@ -173,15 +173,12 @@\n for url in config.val.content.host_blocking.lists:\n if url.scheme() == 'file':\n filename = url.toLocalFile()\n- try:\n- fileobj = open(filename, 'rb')\n- except OSError as e:\n- message.error(\"adblock: Error while reading {}: {}\".format(\n- filename, e.strerror))\n- continue\n- download = _FakeDownload(fileobj)\n- self._in_progress.append(download)\n- self._on_download_finished(download)\n+ if os.path.isdir(filename):\n+ for filenames in os.scandir(filename):\n+ if filenames.is_file():\n+ self._import_local(filenames.path)\n+ else:\n+ self._import_local(filename)\n else:\n fobj = io.BytesIO()\n fobj.name = 'adblock: ' + url.host()\n@@ -192,6 +189,22 @@\n download.finished.connect(\n functools.partial(self._on_download_finished, download))\n \n+ def _import_local(self, filename):\n+ \"\"\"Adds the contents of a file to the blocklist.\n+\n+ Args:\n+ filename: path to a local file to import.\n+ \"\"\"\n+ try:\n+ fileobj = open(filename, 'rb')\n+ except OSError as e:\n+ message.error(\"adblock: Error while reading {}: {}\".format(\n+ filename, e.strerror))\n+ return\n+ download = _FakeDownload(fileobj)\n+ self._in_progress.append(download)\n+ self._on_download_finished(download)\n+\n def _parse_line(self, line):\n \"\"\"Parse a line from a host file.\n", "issue": "Improvements for content->host-block-lists\n- [ ] Handle local directories (read every file in the directory)\n- [ ] Handle local paths without `file://`\n\n", "before_files": [{"content": "# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:\n\n# Copyright 2014-2018 Florian Bruhin (The Compiler) <[email protected]>\n#\n# This file is part of qutebrowser.\n#\n# qutebrowser is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# qutebrowser is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with qutebrowser. 
If not, see <http://www.gnu.org/licenses/>.\n\n\"\"\"Functions related to ad blocking.\"\"\"\n\nimport io\nimport os.path\nimport functools\nimport posixpath\nimport zipfile\n\nfrom qutebrowser.browser import downloads\nfrom qutebrowser.config import config\nfrom qutebrowser.utils import objreg, standarddir, log, message\nfrom qutebrowser.commands import cmdutils\n\n\ndef _guess_zip_filename(zf):\n \"\"\"Guess which file to use inside a zip file.\n\n Args:\n zf: A ZipFile instance.\n \"\"\"\n files = zf.namelist()\n if len(files) == 1:\n return files[0]\n else:\n for e in files:\n if posixpath.splitext(e)[0].lower() == 'hosts':\n return e\n raise FileNotFoundError(\"No hosts file found in zip\")\n\n\ndef get_fileobj(byte_io):\n \"\"\"Get a usable file object to read the hosts file from.\"\"\"\n byte_io.seek(0) # rewind downloaded file\n if zipfile.is_zipfile(byte_io):\n byte_io.seek(0) # rewind what zipfile.is_zipfile did\n zf = zipfile.ZipFile(byte_io)\n filename = _guess_zip_filename(zf)\n byte_io = zf.open(filename, mode='r')\n else:\n byte_io.seek(0) # rewind what zipfile.is_zipfile did\n return byte_io\n\n\ndef _is_whitelisted_url(url):\n \"\"\"Check if the given URL is on the adblock whitelist.\n\n Args:\n url: The URL to check as QUrl.\n \"\"\"\n for pattern in config.val.content.host_blocking.whitelist:\n if pattern.matches(url):\n return True\n return False\n\n\nclass _FakeDownload:\n\n \"\"\"A download stub to use on_download_finished with local files.\"\"\"\n\n def __init__(self, fileobj):\n self.basename = os.path.basename(fileobj.name)\n self.fileobj = fileobj\n self.successful = True\n\n\nclass HostBlocker:\n\n \"\"\"Manage blocked hosts based from /etc/hosts-like files.\n\n Attributes:\n _blocked_hosts: A set of blocked hosts.\n _config_blocked_hosts: A set of blocked hosts from ~/.config.\n _in_progress: The DownloadItems which are currently downloading.\n _done_count: How many files have been read successfully.\n _local_hosts_file: The path to the blocked-hosts file.\n _config_hosts_file: The path to a blocked-hosts in ~/.config\n \"\"\"\n\n def __init__(self):\n self._blocked_hosts = set()\n self._config_blocked_hosts = set()\n self._in_progress = []\n self._done_count = 0\n\n data_dir = standarddir.data()\n self._local_hosts_file = os.path.join(data_dir, 'blocked-hosts')\n self._update_files()\n\n config_dir = standarddir.config()\n self._config_hosts_file = os.path.join(config_dir, 'blocked-hosts')\n\n config.instance.changed.connect(self._update_files)\n\n def is_blocked(self, url):\n \"\"\"Check if the given URL (as QUrl) is blocked.\"\"\"\n if not config.val.content.host_blocking.enabled:\n return False\n host = url.host()\n return ((host in self._blocked_hosts or\n host in self._config_blocked_hosts) and\n not _is_whitelisted_url(url))\n\n def _read_hosts_file(self, filename, target):\n \"\"\"Read hosts from the given filename.\n\n Args:\n filename: The file to read.\n target: The set to store the hosts in.\n\n Return:\n True if a read was attempted, False otherwise\n \"\"\"\n if not os.path.exists(filename):\n return False\n\n try:\n with open(filename, 'r', encoding='utf-8') as f:\n for line in f:\n target.add(line.strip())\n except (OSError, UnicodeDecodeError):\n log.misc.exception(\"Failed to read host blocklist!\")\n\n return True\n\n def read_hosts(self):\n \"\"\"Read hosts from the existing blocked-hosts file.\"\"\"\n self._blocked_hosts = set()\n\n self._read_hosts_file(self._config_hosts_file,\n self._config_blocked_hosts)\n\n found = 
self._read_hosts_file(self._local_hosts_file,\n self._blocked_hosts)\n\n if not found:\n args = objreg.get('args')\n if (config.val.content.host_blocking.lists and\n args.basedir is None and\n config.val.content.host_blocking.enabled):\n message.info(\"Run :adblock-update to get adblock lists.\")\n\n @cmdutils.register(instance='host-blocker')\n def adblock_update(self):\n \"\"\"Update the adblock block lists.\n\n This updates `~/.local/share/qutebrowser/blocked-hosts` with downloaded\n host lists and re-reads `~/.config/qutebrowser/blocked-hosts`.\n \"\"\"\n self._read_hosts_file(self._config_hosts_file,\n self._config_blocked_hosts)\n self._blocked_hosts = set()\n self._done_count = 0\n download_manager = objreg.get('qtnetwork-download-manager')\n for url in config.val.content.host_blocking.lists:\n if url.scheme() == 'file':\n filename = url.toLocalFile()\n try:\n fileobj = open(filename, 'rb')\n except OSError as e:\n message.error(\"adblock: Error while reading {}: {}\".format(\n filename, e.strerror))\n continue\n download = _FakeDownload(fileobj)\n self._in_progress.append(download)\n self._on_download_finished(download)\n else:\n fobj = io.BytesIO()\n fobj.name = 'adblock: ' + url.host()\n target = downloads.FileObjDownloadTarget(fobj)\n download = download_manager.get(url, target=target,\n auto_remove=True)\n self._in_progress.append(download)\n download.finished.connect(\n functools.partial(self._on_download_finished, download))\n\n def _parse_line(self, line):\n \"\"\"Parse a line from a host file.\n\n Args:\n line: The bytes object to parse.\n\n Returns:\n True if parsing succeeded, False otherwise.\n \"\"\"\n if line.startswith(b'#'):\n # Ignoring comments early so we don't have to care about\n # encoding errors in them.\n return True\n\n try:\n line = line.decode('utf-8')\n except UnicodeDecodeError:\n log.misc.error(\"Failed to decode: {!r}\".format(line))\n return False\n\n # Remove comments\n try:\n hash_idx = line.index('#')\n line = line[:hash_idx]\n except ValueError:\n pass\n\n line = line.strip()\n # Skip empty lines\n if not line:\n return True\n\n parts = line.split()\n if len(parts) == 1:\n # \"one host per line\" format\n hosts = [parts[0]]\n else:\n # /etc/hosts format\n hosts = parts[1:]\n\n for host in hosts:\n if ('.' 
in host and\n not host.endswith('.localdomain') and\n host != '0.0.0.0'):\n self._blocked_hosts.add(host)\n\n return True\n\n def _merge_file(self, byte_io):\n \"\"\"Read and merge host files.\n\n Args:\n byte_io: The BytesIO object of the completed download.\n\n Return:\n A set of the merged hosts.\n \"\"\"\n error_count = 0\n line_count = 0\n try:\n f = get_fileobj(byte_io)\n except (OSError, zipfile.BadZipFile, zipfile.LargeZipFile,\n LookupError) as e:\n message.error(\"adblock: Error while reading {}: {} - {}\".format(\n byte_io.name, e.__class__.__name__, e))\n return\n\n for line in f:\n line_count += 1\n ok = self._parse_line(line)\n if not ok:\n error_count += 1\n\n log.misc.debug(\"{}: read {} lines\".format(byte_io.name, line_count))\n if error_count > 0:\n message.error(\"adblock: {} read errors for {}\".format(\n error_count, byte_io.name))\n\n def _on_lists_downloaded(self):\n \"\"\"Install block lists after files have been downloaded.\"\"\"\n with open(self._local_hosts_file, 'w', encoding='utf-8') as f:\n for host in sorted(self._blocked_hosts):\n f.write(host + '\\n')\n message.info(\"adblock: Read {} hosts from {} sources.\".format(\n len(self._blocked_hosts), self._done_count))\n\n @config.change_filter('content.host_blocking.lists')\n def _update_files(self):\n \"\"\"Update files when the config changed.\"\"\"\n if not config.val.content.host_blocking.lists:\n try:\n os.remove(self._local_hosts_file)\n except FileNotFoundError:\n pass\n except OSError as e:\n log.misc.exception(\"Failed to delete hosts file: {}\".format(e))\n\n def _on_download_finished(self, download):\n \"\"\"Check if all downloads are finished and if so, trigger reading.\n\n Arguments:\n download: The finished DownloadItem.\n \"\"\"\n self._in_progress.remove(download)\n if download.successful:\n self._done_count += 1\n try:\n self._merge_file(download.fileobj)\n finally:\n download.fileobj.close()\n if not self._in_progress:\n try:\n self._on_lists_downloaded()\n except OSError:\n log.misc.exception(\"Failed to write host block list!\")\n", "path": "qutebrowser/browser/adblock.py"}]}
| 3,637 | 407 |
gh_patches_debug_1400
|
rasdani/github-patches
|
git_diff
|
conda__conda-7241
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
conda's configuration context is not initialized in conda.exports
root cause of https://github.com/conda-forge/conda-smithy/issues/762
</issue>
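A minimal sketch of the failure mode (illustrative only, assuming a working conda installation; it is not part of the repository): `conda/exports.py` reads values such as `conda.base.context.context.subdir` at import time, so the global `context` object must already have its configuration loaded. The patch further below addresses this by calling `reset_context()` as soon as `conda.exports` is imported.

```
# Illustrative: what conda.exports relies on at import time.
from conda.base.context import context, reset_context

reset_context()          # load .condarc files and CONDA_* environment variables
print(context.subdir)    # e.g. 'linux-64' once the context has been initialized
```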
<code>
[start of conda/exports.py]
1 # -*- coding: utf-8 -*-
2 from __future__ import absolute_import, division, print_function, unicode_literals
3
4 from collections import Hashable
5 from logging import getLogger
6 import threading
7 from warnings import warn
8
9 log = getLogger(__name__)
10
11 from . import CondaError # NOQA
12 CondaError = CondaError
13
14 from . import compat, plan # NOQA
15 compat, plan = compat, plan
16
17 from .core.solve import Solver # NOQA
18 Solver = Solver
19
20 from .plan import display_actions # NOQA
21 display_actions = display_actions
22
23 from .cli.common import specs_from_args, spec_from_line, specs_from_url # NOQA
24 from .cli.conda_argparse import add_parser_prefix, add_parser_channels # NOQA
25 add_parser_channels, add_parser_prefix = add_parser_channels, add_parser_prefix
26 specs_from_args, spec_from_line = specs_from_args, spec_from_line
27 specs_from_url = specs_from_url
28
29 from .cli.conda_argparse import ArgumentParser # NOQA
30 ArgumentParser = ArgumentParser
31
32 from .common.compat import PY3, StringIO, input, iteritems, string_types, text_type # NOQA
33 PY3, StringIO, input, iteritems, string_types, text_type = PY3, StringIO, input, iteritems, string_types, text_type # NOQA
34 from .gateways.connection.session import CondaSession # NOQA
35 CondaSession = CondaSession
36
37 from .common.toposort import _toposort # NOQA
38 _toposort = _toposort
39
40 from .gateways.disk.link import lchmod # NOQA
41 lchmod = lchmod
42
43 from .gateways.connection.download import TmpDownload # NOQA
44
45 TmpDownload = TmpDownload
46 handle_proxy_407 = lambda x, y: warn("handle_proxy_407 is deprecated. "
47 "Now handled by CondaSession.")
48 from .core.index import dist_str_in_index, fetch_index, get_index # NOQA
49 dist_str_in_index, fetch_index, get_index = dist_str_in_index, fetch_index, get_index # NOQA
50 from .core.package_cache_data import download, rm_fetched # NOQA
51 download, rm_fetched = download, rm_fetched
52
53 from .install import package_cache, prefix_placeholder, symlink_conda # NOQA
54 package_cache, prefix_placeholder, symlink_conda = package_cache, prefix_placeholder, symlink_conda
55
56 from .gateways.disk.delete import delete_trash, move_to_trash # NOQA
57 delete_trash, move_to_trash = delete_trash, move_to_trash
58
59 from .core.prefix_data import is_linked, linked, linked_data # NOQA
60 is_linked, linked, linked_data = is_linked, linked, linked_data
61
62 from .misc import untracked, walk_prefix # NOQA
63 untracked, walk_prefix = untracked, walk_prefix
64
65 from .resolve import MatchSpec, ResolvePackageNotFound, Resolve, Unsatisfiable # NOQA
66 MatchSpec, Resolve = MatchSpec, Resolve
67 Unsatisfiable = Unsatisfiable
68 NoPackagesFound = NoPackagesFoundError = ResolvePackageNotFound
69
70 from .utils import hashsum_file, human_bytes, unix_path_to_win, url_path # NOQA
71 from .common.path import win_path_to_unix # NOQA
72 hashsum_file, human_bytes = hashsum_file, human_bytes
73 unix_path_to_win = unix_path_to_win
74 win_path_to_unix, url_path = win_path_to_unix, url_path
75
76 from .gateways.disk.read import compute_md5sum # NOQA
77 md5_file = compute_md5sum
78
79 from .models.version import VersionOrder, normalized_version # NOQA
80 VersionOrder, normalized_version = VersionOrder, normalized_version # NOQA
81
82 import conda.base.context # NOQA
83 from .base.context import get_prefix, non_x86_linux_machines, sys_rc_path # NOQA
84 non_x86_linux_machines, sys_rc_path = non_x86_linux_machines, sys_rc_path
85 get_prefix = get_prefix
86
87 from ._vendor.auxlib.entity import EntityEncoder # NOQA
88 EntityEncoder = EntityEncoder
89 from .base.constants import DEFAULT_CHANNELS, DEFAULT_CHANNELS_WIN, DEFAULT_CHANNELS_UNIX # NOQA
90 DEFAULT_CHANNELS, DEFAULT_CHANNELS_WIN, DEFAULT_CHANNELS_UNIX = DEFAULT_CHANNELS, DEFAULT_CHANNELS_WIN, DEFAULT_CHANNELS_UNIX # NOQA
91 get_default_urls = lambda: DEFAULT_CHANNELS
92
93 arch_name = conda.base.context.context.arch_name
94 binstar_upload = conda.base.context.context.anaconda_upload
95 bits = conda.base.context.context.bits
96 default_prefix = conda.base.context.context.default_prefix
97 default_python = conda.base.context.context.default_python
98 envs_dirs = conda.base.context.context.envs_dirs
99 pkgs_dirs = conda.base.context.context.pkgs_dirs
100 platform = conda.base.context.context.platform
101 root_dir = conda.base.context.context.root_prefix
102 root_writable = conda.base.context.context.root_writable
103 subdir = conda.base.context.context.subdir
104 conda_private = conda.base.context.context.conda_private
105 from .models.channel import get_conda_build_local_url # NOQA
106 get_rc_urls = lambda: list(conda.base.context.context.channels)
107 get_local_urls = lambda: list(get_conda_build_local_url()) or []
108 load_condarc = lambda fn: conda.base.context.reset_context([fn])
109 from .exceptions import PaddingError, LinkError, CondaOSError, PathNotFoundError # NOQA
110 PaddingError = PaddingError
111 LinkError = LinkError
112 CondaOSError = CondaOSError
113 # PathNotFoundError is the conda 4.4.x name for it - let's plan ahead.
114 PathNotFoundError = CondaFileNotFoundError = PathNotFoundError
115 from .gateways.disk.link import CrossPlatformStLink # NOQA
116 CrossPlatformStLink = CrossPlatformStLink
117
118 from .models.enums import FileMode # NOQA
119 FileMode = FileMode
120 from .models.enums import PathType # NOQA
121 PathType = PathType
122
123 from .models.records import PackageRecord # NOQA
124 PackageRecord = IndexRecord = PackageRecord
125
126 from .compat import TemporaryDirectory # NOQA
127 TemporaryDirectory = TemporaryDirectory
128
129 from .gateways.subprocess import ACTIVE_SUBPROCESSES, subprocess_call # NOQA
130 ACTIVE_SUBPROCESSES, subprocess_call = ACTIVE_SUBPROCESSES, subprocess_call
131
132 from .core.subdir_data import cache_fn_url # NOQA
133 cache_fn_url = cache_fn_url
134
135
136 class Completer(object): # pragma: no cover
137 def get_items(self):
138 return self._get_items()
139
140 def __contains__(self, item):
141 return True
142
143 def __iter__(self):
144 return iter(self.get_items())
145
146
147 class InstalledPackages(object):
148 pass
149
150
151 class memoized(object): # pragma: no cover
152 """Decorator. Caches a function's return value each time it is called.
153 If called later with the same arguments, the cached value is returned
154 (not reevaluated).
155 """
156 def __init__(self, func):
157 self.func = func
158 self.cache = {}
159 self.lock = threading.Lock()
160
161 def __call__(self, *args, **kw):
162 newargs = []
163 for arg in args:
164 if isinstance(arg, list):
165 newargs.append(tuple(arg))
166 elif not isinstance(arg, Hashable):
167 # uncacheable. a list, for instance.
168 # better to not cache than blow up.
169 return self.func(*args, **kw)
170 else:
171 newargs.append(arg)
172 newargs = tuple(newargs)
173 key = (newargs, frozenset(sorted(kw.items())))
174 with self.lock:
175 if key in self.cache:
176 return self.cache[key]
177 else:
178 value = self.func(*args, **kw)
179 self.cache[key] = value
180 return value
181
182
183 from .gateways.disk.delete import rm_rf as _rm_rf # NOQA
184 from .core.prefix_data import delete_prefix_from_linked_data # NOQA
185
186
187 def rm_rf(path, max_retries=5, trash=True):
188 _rm_rf(path, max_retries, trash)
189 delete_prefix_from_linked_data(path)
190
191
192 # ######################
193 # signature.py
194 # ######################
195 KEYS = None
196 KEYS_DIR = None
197
198
199 def hash_file(_):
200 return None # pragma: no cover
201
202
203 def verify(_):
204 return False # pragma: no cover
205
206
207 from .plan import execute_actions, execute_instructions, execute_plan, install_actions # NOQA
208 execute_actions, execute_instructions = execute_actions, execute_instructions
209 execute_plan, install_actions = execute_plan, install_actions
210
[end of conda/exports.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/conda/exports.py b/conda/exports.py
--- a/conda/exports.py
+++ b/conda/exports.py
@@ -11,6 +11,9 @@
from . import CondaError # NOQA
CondaError = CondaError
+from .base.context import reset_context # NOQA
+reset_context() # initialize context when conda.exports is imported
+
from . import compat, plan # NOQA
compat, plan = compat, plan
|
{"golden_diff": "diff --git a/conda/exports.py b/conda/exports.py\n--- a/conda/exports.py\n+++ b/conda/exports.py\n@@ -11,6 +11,9 @@\n from . import CondaError # NOQA\n CondaError = CondaError\n \n+from .base.context import reset_context # NOQA\n+reset_context() # initialize context when conda.exports is imported\n+\n from . import compat, plan # NOQA\n compat, plan = compat, plan\n", "issue": "conda's configuration context is not initialized in conda.exports\nroot cause of https://github.com/conda-forge/conda-smithy/issues/762\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nfrom collections import Hashable\nfrom logging import getLogger\nimport threading\nfrom warnings import warn\n\nlog = getLogger(__name__)\n\nfrom . import CondaError # NOQA\nCondaError = CondaError\n\nfrom . import compat, plan # NOQA\ncompat, plan = compat, plan\n\nfrom .core.solve import Solver # NOQA\nSolver = Solver\n\nfrom .plan import display_actions # NOQA\ndisplay_actions = display_actions\n\nfrom .cli.common import specs_from_args, spec_from_line, specs_from_url # NOQA\nfrom .cli.conda_argparse import add_parser_prefix, add_parser_channels # NOQA\nadd_parser_channels, add_parser_prefix = add_parser_channels, add_parser_prefix\nspecs_from_args, spec_from_line = specs_from_args, spec_from_line\nspecs_from_url = specs_from_url\n\nfrom .cli.conda_argparse import ArgumentParser # NOQA\nArgumentParser = ArgumentParser\n\nfrom .common.compat import PY3, StringIO, input, iteritems, string_types, text_type # NOQA\nPY3, StringIO, input, iteritems, string_types, text_type = PY3, StringIO, input, iteritems, string_types, text_type # NOQA\nfrom .gateways.connection.session import CondaSession # NOQA\nCondaSession = CondaSession\n\nfrom .common.toposort import _toposort # NOQA\n_toposort = _toposort\n\nfrom .gateways.disk.link import lchmod # NOQA\nlchmod = lchmod\n\nfrom .gateways.connection.download import TmpDownload # NOQA\n\nTmpDownload = TmpDownload\nhandle_proxy_407 = lambda x, y: warn(\"handle_proxy_407 is deprecated. 
\"\n \"Now handled by CondaSession.\")\nfrom .core.index import dist_str_in_index, fetch_index, get_index # NOQA\ndist_str_in_index, fetch_index, get_index = dist_str_in_index, fetch_index, get_index # NOQA\nfrom .core.package_cache_data import download, rm_fetched # NOQA\ndownload, rm_fetched = download, rm_fetched\n\nfrom .install import package_cache, prefix_placeholder, symlink_conda # NOQA\npackage_cache, prefix_placeholder, symlink_conda = package_cache, prefix_placeholder, symlink_conda\n\nfrom .gateways.disk.delete import delete_trash, move_to_trash # NOQA\ndelete_trash, move_to_trash = delete_trash, move_to_trash\n\nfrom .core.prefix_data import is_linked, linked, linked_data # NOQA\nis_linked, linked, linked_data = is_linked, linked, linked_data\n\nfrom .misc import untracked, walk_prefix # NOQA\nuntracked, walk_prefix = untracked, walk_prefix\n\nfrom .resolve import MatchSpec, ResolvePackageNotFound, Resolve, Unsatisfiable # NOQA\nMatchSpec, Resolve = MatchSpec, Resolve\nUnsatisfiable = Unsatisfiable\nNoPackagesFound = NoPackagesFoundError = ResolvePackageNotFound\n\nfrom .utils import hashsum_file, human_bytes, unix_path_to_win, url_path # NOQA\nfrom .common.path import win_path_to_unix # NOQA\nhashsum_file, human_bytes = hashsum_file, human_bytes\nunix_path_to_win = unix_path_to_win\nwin_path_to_unix, url_path = win_path_to_unix, url_path\n\nfrom .gateways.disk.read import compute_md5sum # NOQA\nmd5_file = compute_md5sum\n\nfrom .models.version import VersionOrder, normalized_version # NOQA\nVersionOrder, normalized_version = VersionOrder, normalized_version # NOQA\n\nimport conda.base.context # NOQA\nfrom .base.context import get_prefix, non_x86_linux_machines, sys_rc_path # NOQA\nnon_x86_linux_machines, sys_rc_path = non_x86_linux_machines, sys_rc_path\nget_prefix = get_prefix\n\nfrom ._vendor.auxlib.entity import EntityEncoder # NOQA\nEntityEncoder = EntityEncoder\nfrom .base.constants import DEFAULT_CHANNELS, DEFAULT_CHANNELS_WIN, DEFAULT_CHANNELS_UNIX # NOQA\nDEFAULT_CHANNELS, DEFAULT_CHANNELS_WIN, DEFAULT_CHANNELS_UNIX = DEFAULT_CHANNELS, DEFAULT_CHANNELS_WIN, DEFAULT_CHANNELS_UNIX # NOQA\nget_default_urls = lambda: DEFAULT_CHANNELS\n\narch_name = conda.base.context.context.arch_name\nbinstar_upload = conda.base.context.context.anaconda_upload\nbits = conda.base.context.context.bits\ndefault_prefix = conda.base.context.context.default_prefix\ndefault_python = conda.base.context.context.default_python\nenvs_dirs = conda.base.context.context.envs_dirs\npkgs_dirs = conda.base.context.context.pkgs_dirs\nplatform = conda.base.context.context.platform\nroot_dir = conda.base.context.context.root_prefix\nroot_writable = conda.base.context.context.root_writable\nsubdir = conda.base.context.context.subdir\nconda_private = conda.base.context.context.conda_private\nfrom .models.channel import get_conda_build_local_url # NOQA\nget_rc_urls = lambda: list(conda.base.context.context.channels)\nget_local_urls = lambda: list(get_conda_build_local_url()) or []\nload_condarc = lambda fn: conda.base.context.reset_context([fn])\nfrom .exceptions import PaddingError, LinkError, CondaOSError, PathNotFoundError # NOQA\nPaddingError = PaddingError\nLinkError = LinkError\nCondaOSError = CondaOSError\n# PathNotFoundError is the conda 4.4.x name for it - let's plan ahead.\nPathNotFoundError = CondaFileNotFoundError = PathNotFoundError\nfrom .gateways.disk.link import CrossPlatformStLink # NOQA\nCrossPlatformStLink = CrossPlatformStLink\n\nfrom .models.enums import FileMode # NOQA\nFileMode = 
FileMode\nfrom .models.enums import PathType # NOQA\nPathType = PathType\n\nfrom .models.records import PackageRecord # NOQA\nPackageRecord = IndexRecord = PackageRecord\n\nfrom .compat import TemporaryDirectory # NOQA\nTemporaryDirectory = TemporaryDirectory\n\nfrom .gateways.subprocess import ACTIVE_SUBPROCESSES, subprocess_call # NOQA\nACTIVE_SUBPROCESSES, subprocess_call = ACTIVE_SUBPROCESSES, subprocess_call\n\nfrom .core.subdir_data import cache_fn_url # NOQA\ncache_fn_url = cache_fn_url\n\n\nclass Completer(object): # pragma: no cover\n def get_items(self):\n return self._get_items()\n\n def __contains__(self, item):\n return True\n\n def __iter__(self):\n return iter(self.get_items())\n\n\nclass InstalledPackages(object):\n pass\n\n\nclass memoized(object): # pragma: no cover\n \"\"\"Decorator. Caches a function's return value each time it is called.\n If called later with the same arguments, the cached value is returned\n (not reevaluated).\n \"\"\"\n def __init__(self, func):\n self.func = func\n self.cache = {}\n self.lock = threading.Lock()\n\n def __call__(self, *args, **kw):\n newargs = []\n for arg in args:\n if isinstance(arg, list):\n newargs.append(tuple(arg))\n elif not isinstance(arg, Hashable):\n # uncacheable. a list, for instance.\n # better to not cache than blow up.\n return self.func(*args, **kw)\n else:\n newargs.append(arg)\n newargs = tuple(newargs)\n key = (newargs, frozenset(sorted(kw.items())))\n with self.lock:\n if key in self.cache:\n return self.cache[key]\n else:\n value = self.func(*args, **kw)\n self.cache[key] = value\n return value\n\n\nfrom .gateways.disk.delete import rm_rf as _rm_rf # NOQA\nfrom .core.prefix_data import delete_prefix_from_linked_data # NOQA\n\n\ndef rm_rf(path, max_retries=5, trash=True):\n _rm_rf(path, max_retries, trash)\n delete_prefix_from_linked_data(path)\n\n\n# ######################\n# signature.py\n# ######################\nKEYS = None\nKEYS_DIR = None\n\n\ndef hash_file(_):\n return None # pragma: no cover\n\n\ndef verify(_):\n return False # pragma: no cover\n\n\nfrom .plan import execute_actions, execute_instructions, execute_plan, install_actions # NOQA\nexecute_actions, execute_instructions = execute_actions, execute_instructions\nexecute_plan, install_actions = execute_plan, install_actions\n", "path": "conda/exports.py"}]}
| 2,978 | 111 |
gh_patches_debug_62284
|
rasdani/github-patches
|
git_diff
|
SciTools__cartopy-228
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
coastlines drawn under filled contours
When using matplotlib v1.3.x and cartopy v0.7.x (development master from github) coastlines and other features are drawn underneath filled contours unless:
```
ax.coastlines(zorder=1)
```
is used to manually raise them above. I suspect this may be a matplotlib issue, but thought it would be best to raise it here for the cartopy experts to take a look at first.
</issue>
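A short reproduction sketch with synthetic data (the grid values are assumed, not taken from the report): filled contours plus coastlines on a `PlateCarree` map. The patch further below notes that the `matplotlib.collections.PathCollection.zorder` class attribute, from which `FeatureArtist` took its default, was removed after mpl v1.2.0, and hard-codes the default zorder to 1 instead, so the `zorder=1` workaround from the report is no longer needed.

```
# Illustrative reproduction with synthetic data.
import numpy as np
import matplotlib.pyplot as plt
import cartopy.crs as ccrs

lons = np.linspace(-180, 180, 73)
lats = np.linspace(-90, 90, 37)
data = np.cos(np.radians(lats))[:, None] * np.sin(np.radians(lons))[None, :]

ax = plt.axes(projection=ccrs.PlateCarree())
ax.contourf(lons, lats, data, transform=ccrs.PlateCarree())
ax.coastlines()   # with the patched default zorder, no explicit zorder=1 is needed
plt.show()
```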
<code>
[start of lib/cartopy/mpl/feature_artist.py]
1 # (C) British Crown Copyright 2011 - 2012, Met Office
2 #
3 # This file is part of cartopy.
4 #
5 # cartopy is free software: you can redistribute it and/or modify it under
6 # the terms of the GNU Lesser General Public License as published by the
7 # Free Software Foundation, either version 3 of the License, or
8 # (at your option) any later version.
9 #
10 # cartopy is distributed in the hope that it will be useful,
11 # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 # GNU Lesser General Public License for more details.
14 #
15 # You should have received a copy of the GNU Lesser General Public License
16 # along with cartopy. If not, see <http://www.gnu.org/licenses/>.
17 """
18 This module defines the :class:`FeatureArtist` class, for drawing
19 :class:`Feature` instances with matplotlib.
20
21 """
22 import warnings
23 import weakref
24
25 import matplotlib.artist
26 import matplotlib.cbook
27 import matplotlib.collections
28 import matplotlib.lines
29
30 import cartopy.mpl.patch
31
32
33 class FeatureArtist(matplotlib.artist.Artist):
34 """
35 A subclass of :class:`~matplotlib.artist.Artist` capable of
36 drawing a :class:`cartopy.feature.Feature`.
37
38 """
39 _geometry_to_path_cache = weakref.WeakKeyDictionary()
40 """
41 A nested mapping from geometry and target projection to the
42 resulting transformed matplotlib paths::
43
44 {geom: {target_projection: list_of_paths}}
45
46 This provides a significant boost when producing multiple maps of the
47 same projection.
48
49 """
50 def __init__(self, feature, **kwargs):
51 """
52 Args:
53
54 * feature:
55 an instance of :class:`cartopy.feature.Feature` to draw.
56 * kwargs:
57 keyword arguments to be used when drawing the feature. These
58 will override those shared with the feature.
59
60 """
61 super(FeatureArtist, self).__init__()
62
63 if kwargs is None:
64 kwargs = {}
65 self._kwargs = dict(kwargs)
66
67 # Set default zorder so that features are drawn before
68 # lines e.g. contours but after images.
69 # Note that the zorder of Patch, PatchCollection and PathCollection
70 # are all 1 by default. Assuming equal zorder drawing takes place in
71 # the following order: collections, patches, lines (default zorder=2),
72 # text (default zorder=3), then other artists e.g. FeatureArtist.
73 if self._kwargs.get('zorder') is not None:
74 self.set_zorder(self._kwargs['zorder'])
75 elif feature.kwargs.get('zorder') is not None:
76 self.set_zorder(feature.kwargs['zorder'])
77 else:
78 self.set_zorder(matplotlib.collections.PathCollection.zorder)
79
80 self._feature = feature
81
82 @matplotlib.artist.allow_rasterization
83 def draw(self, renderer, *args, **kwargs):
84 """
85 Draws the geometries of the feature that intersect with the extent of
86 the :class:`cartopy.mpl.GeoAxes` instance to which this
87 object has been added.
88
89 """
90 if not self.get_visible():
91 return
92
93 ax = self.get_axes()
94 feature_crs = self._feature.crs
95
96 # Get geometries that we need to draw.
97 extent = None
98 try:
99 extent = ax.get_extent(feature_crs)
100 except ValueError:
101 warnings.warn('Unable to determine extent. Defaulting to global.')
102 geoms = self._feature.intersecting_geometries(extent)
103
104 # Project (if necessary) and convert geometries to matplotlib paths.
105 paths = []
106 key = ax.projection
107 for geom in geoms:
108 mapping = FeatureArtist._geometry_to_path_cache.setdefault(geom,
109 {})
110 geom_paths = mapping.get(key)
111 if geom_paths is None:
112 if ax.projection != feature_crs:
113 projected_geom = ax.projection.project_geometry(
114 geom, feature_crs)
115 else:
116 projected_geom = geom
117 geom_paths = cartopy.mpl.patch.geos_to_path(
118 projected_geom)
119 mapping[key] = geom_paths
120 paths.extend(geom_paths)
121
122 # Build path collection and draw it.
123 transform = ax.projection._as_mpl_transform(ax)
124 # Combine all the keyword args in priority order
125 final_kwargs = dict(self._feature.kwargs)
126 final_kwargs.update(self._kwargs)
127 final_kwargs.update(kwargs)
128 c = matplotlib.collections.PathCollection(paths,
129 transform=transform,
130 **final_kwargs)
131 c.set_clip_path(ax.patch)
132 return c.draw(renderer)
133
[end of lib/cartopy/mpl/feature_artist.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/lib/cartopy/mpl/feature_artist.py b/lib/cartopy/mpl/feature_artist.py
--- a/lib/cartopy/mpl/feature_artist.py
+++ b/lib/cartopy/mpl/feature_artist.py
@@ -75,7 +75,10 @@
elif feature.kwargs.get('zorder') is not None:
self.set_zorder(feature.kwargs['zorder'])
else:
- self.set_zorder(matplotlib.collections.PathCollection.zorder)
+ # The class attribute matplotlib.collections.PathCollection.zorder
+ # was removed after mpl v1.2.0, so the hard-coded value of 1 is
+ # used instead.
+ self.set_zorder(1)
self._feature = feature
|
{"golden_diff": "diff --git a/lib/cartopy/mpl/feature_artist.py b/lib/cartopy/mpl/feature_artist.py\n--- a/lib/cartopy/mpl/feature_artist.py\n+++ b/lib/cartopy/mpl/feature_artist.py\n@@ -75,7 +75,10 @@\n elif feature.kwargs.get('zorder') is not None:\n self.set_zorder(feature.kwargs['zorder'])\n else:\n- self.set_zorder(matplotlib.collections.PathCollection.zorder)\n+ # The class attribute matplotlib.collections.PathCollection.zorder\n+ # was removed after mpl v1.2.0, so the hard-coded value of 1 is\n+ # used instead.\n+ self.set_zorder(1)\n \n self._feature = feature\n", "issue": "coastlines drawn under filled contours\nWhen using matplotlib v1.3.x and cartopy v0.7.x (development master from github) coastlines and other features are drawn underneath filled contours unless:\n\n```\nax.coastlines(zorder=1)\n```\n\nis used to manually raise them above. I suspect this may be a matplotlib issue, but thought it would be best to raise it here for the cartopy experts to take a look at first.\n\n", "before_files": [{"content": "# (C) British Crown Copyright 2011 - 2012, Met Office\n#\n# This file is part of cartopy.\n#\n# cartopy is free software: you can redistribute it and/or modify it under\n# the terms of the GNU Lesser General Public License as published by the\n# Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# cartopy is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Lesser General Public License for more details.\n#\n# You should have received a copy of the GNU Lesser General Public License\n# along with cartopy. If not, see <http://www.gnu.org/licenses/>.\n\"\"\"\nThis module defines the :class:`FeatureArtist` class, for drawing\n:class:`Feature` instances with matplotlib.\n\n\"\"\"\nimport warnings\nimport weakref\n\nimport matplotlib.artist\nimport matplotlib.cbook\nimport matplotlib.collections\nimport matplotlib.lines\n\nimport cartopy.mpl.patch\n\n\nclass FeatureArtist(matplotlib.artist.Artist):\n \"\"\"\n A subclass of :class:`~matplotlib.artist.Artist` capable of\n drawing a :class:`cartopy.feature.Feature`.\n\n \"\"\"\n _geometry_to_path_cache = weakref.WeakKeyDictionary()\n \"\"\"\n A nested mapping from geometry and target projection to the\n resulting transformed matplotlib paths::\n\n {geom: {target_projection: list_of_paths}}\n\n This provides a significant boost when producing multiple maps of the\n same projection.\n\n \"\"\"\n def __init__(self, feature, **kwargs):\n \"\"\"\n Args:\n\n * feature:\n an instance of :class:`cartopy.feature.Feature` to draw.\n * kwargs:\n keyword arguments to be used when drawing the feature. These\n will override those shared with the feature.\n\n \"\"\"\n super(FeatureArtist, self).__init__()\n\n if kwargs is None:\n kwargs = {}\n self._kwargs = dict(kwargs)\n\n # Set default zorder so that features are drawn before\n # lines e.g. contours but after images.\n # Note that the zorder of Patch, PatchCollection and PathCollection\n # are all 1 by default. Assuming equal zorder drawing takes place in\n # the following order: collections, patches, lines (default zorder=2),\n # text (default zorder=3), then other artists e.g. 
FeatureArtist.\n if self._kwargs.get('zorder') is not None:\n self.set_zorder(self._kwargs['zorder'])\n elif feature.kwargs.get('zorder') is not None:\n self.set_zorder(feature.kwargs['zorder'])\n else:\n self.set_zorder(matplotlib.collections.PathCollection.zorder)\n\n self._feature = feature\n\n @matplotlib.artist.allow_rasterization\n def draw(self, renderer, *args, **kwargs):\n \"\"\"\n Draws the geometries of the feature that intersect with the extent of\n the :class:`cartopy.mpl.GeoAxes` instance to which this\n object has been added.\n\n \"\"\"\n if not self.get_visible():\n return\n\n ax = self.get_axes()\n feature_crs = self._feature.crs\n\n # Get geometries that we need to draw.\n extent = None\n try:\n extent = ax.get_extent(feature_crs)\n except ValueError:\n warnings.warn('Unable to determine extent. Defaulting to global.')\n geoms = self._feature.intersecting_geometries(extent)\n\n # Project (if necessary) and convert geometries to matplotlib paths.\n paths = []\n key = ax.projection\n for geom in geoms:\n mapping = FeatureArtist._geometry_to_path_cache.setdefault(geom,\n {})\n geom_paths = mapping.get(key)\n if geom_paths is None:\n if ax.projection != feature_crs:\n projected_geom = ax.projection.project_geometry(\n geom, feature_crs)\n else:\n projected_geom = geom\n geom_paths = cartopy.mpl.patch.geos_to_path(\n projected_geom)\n mapping[key] = geom_paths\n paths.extend(geom_paths)\n\n # Build path collection and draw it.\n transform = ax.projection._as_mpl_transform(ax)\n # Combine all the keyword args in priority order\n final_kwargs = dict(self._feature.kwargs)\n final_kwargs.update(self._kwargs)\n final_kwargs.update(kwargs)\n c = matplotlib.collections.PathCollection(paths,\n transform=transform,\n **final_kwargs)\n c.set_clip_path(ax.patch)\n return c.draw(renderer)\n", "path": "lib/cartopy/mpl/feature_artist.py"}]}
| 1,924 | 162 |
gh_patches_debug_9163
|
rasdani/github-patches
|
git_diff
|
opsdroid__opsdroid-1540
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add 3.8 support to setup.py
We've been running CI against 3.8 for a while now, so we should update the metadata in `setup.py` to explicitly state that we support 3.8.
We should also update the [support table](https://github.com/opsdroid/opsdroid/blob/master/docs/maintaining/supported-python-versions.md) to say we support 3.8.
</issue>
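The change itself is a one-line addition to the trove classifiers in `setup.py`, shown in the patch further below. As a sketch of how the advertised support can be checked afterwards (illustrative only, and it assumes an installed opsdroid package rather than being part of the change):

```
# Illustrative check of the classifiers advertised by the installed package.
from importlib.metadata import metadata   # standard library from Python 3.8

classifiers = metadata("opsdroid").get_all("Classifier")
print([c for c in classifiers if c.startswith("Programming Language :: Python :: 3.")])
```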
<code>
[start of setup.py]
1 #!/usr/bin/env python3
2 import os
3 from setuptools import setup, find_packages
4 from setuptools.command.build_py import build_py
5 from setuptools.command.sdist import sdist
6 from setuptools.command.develop import develop
7 import versioneer
8
9 PACKAGE_NAME = "opsdroid"
10 HERE = os.path.abspath(os.path.dirname(__file__))
11 README = open(os.path.join(HERE, "README.md"), encoding="utf8").read()
12
13 PACKAGES = find_packages(
14 exclude=["tests", "tests.*", "modules", "modules.*", "docs", "docs.*"]
15 )
16
17
18 # For now we simply define the install_requires based on the contents
19 # of requirements.txt. In the future, install_requires may become much
20 # looser than the (automatically) resolved requirements.txt.
21 with open(os.path.join(HERE, "requirements.txt"), "r") as fh:
22 REQUIRES = [line.strip() for line in fh]
23
24
25 class Develop(develop):
26 """Custom `develop` command to always build mo files on install -e."""
27
28 def run(self):
29 self.run_command("compile_catalog")
30 develop.run(self) # old style class
31
32
33 class BuildPy(build_py):
34 """Custom `build_py` command to always build mo files for wheels."""
35
36 def run(self):
37 self.run_command("compile_catalog")
38 build_py.run(self) # old style class
39
40
41 class Sdist(sdist):
42 """Custom `sdist` command to ensure that mo files are always created."""
43
44 def run(self):
45 self.run_command("compile_catalog")
46 sdist.run(self) # old style class
47
48
49 setup(
50 name=PACKAGE_NAME,
51 version=versioneer.get_version(),
52 license="Apache License 2.0",
53 url="https://opsdroid.github.io/",
54 download_url="https://github.com/opsdroid/opsdroid/releases",
55 author="Jacob Tomlinson",
56 author_email="[email protected]",
57 description="An open source ChatOps bot framework.",
58 long_description=README,
59 long_description_content_type="text/markdown",
60 packages=PACKAGES,
61 include_package_data=True,
62 zip_safe=False,
63 platforms="any",
64 classifiers=[
65 "Development Status :: 4 - Beta",
66 "Environment :: Console",
67 "Framework :: AsyncIO",
68 "Intended Audience :: Developers",
69 "Intended Audience :: System Administrators",
70 "Intended Audience :: Information Technology",
71 "License :: OSI Approved :: Apache Software License",
72 "Programming Language :: Python",
73 "Programming Language :: Python :: 3",
74 "Programming Language :: Python :: 3 :: Only",
75 "Programming Language :: Python :: 3.6",
76 "Programming Language :: Python :: 3.7",
77 "Topic :: Communications :: Chat",
78 "Topic :: Scientific/Engineering :: Artificial Intelligence",
79 "Topic :: Software Development :: Libraries :: Python Modules",
80 ],
81 install_requires=REQUIRES,
82 test_suite="tests",
83 keywords=[
84 "bot",
85 "bot-framework",
86 "opsdroid",
87 "botkit",
88 "python3",
89 "asyncio",
90 "chatops",
91 "devops",
92 "nlu",
93 ],
94 setup_requires=["Babel"],
95 cmdclass=versioneer.get_cmdclass(
96 {"sdist": Sdist, "build_py": BuildPy, "develop": Develop}
97 ),
98 entry_points={"console_scripts": ["opsdroid = opsdroid.cli:cli"]},
99 )
100
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -74,6 +74,7 @@
"Programming Language :: Python :: 3 :: Only",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
+ "Programming Language :: Python :: 3.8",
"Topic :: Communications :: Chat",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
"Topic :: Software Development :: Libraries :: Python Modules",
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -74,6 +74,7 @@\n \"Programming Language :: Python :: 3 :: Only\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n+ \"Programming Language :: Python :: 3.8\",\n \"Topic :: Communications :: Chat\",\n \"Topic :: Scientific/Engineering :: Artificial Intelligence\",\n \"Topic :: Software Development :: Libraries :: Python Modules\",\n", "issue": "Add 3.8 support to setup.py\nWe've been running CI against 3.8 for a while now, we should update the metadata in `setup.py` to explicitly state we support 3.8.\r\n\r\nWe should also update the [support table](https://github.com/opsdroid/opsdroid/blob/master/docs/maintaining/supported-python-versions.md) to say we support 3.8.\n", "before_files": [{"content": "#!/usr/bin/env python3\nimport os\nfrom setuptools import setup, find_packages\nfrom setuptools.command.build_py import build_py\nfrom setuptools.command.sdist import sdist\nfrom setuptools.command.develop import develop\nimport versioneer\n\nPACKAGE_NAME = \"opsdroid\"\nHERE = os.path.abspath(os.path.dirname(__file__))\nREADME = open(os.path.join(HERE, \"README.md\"), encoding=\"utf8\").read()\n\nPACKAGES = find_packages(\n exclude=[\"tests\", \"tests.*\", \"modules\", \"modules.*\", \"docs\", \"docs.*\"]\n)\n\n\n# For now we simply define the install_requires based on the contents\n# of requirements.txt. In the future, install_requires may become much\n# looser than the (automatically) resolved requirements.txt.\nwith open(os.path.join(HERE, \"requirements.txt\"), \"r\") as fh:\n REQUIRES = [line.strip() for line in fh]\n\n\nclass Develop(develop):\n \"\"\"Custom `develop` command to always build mo files on install -e.\"\"\"\n\n def run(self):\n self.run_command(\"compile_catalog\")\n develop.run(self) # old style class\n\n\nclass BuildPy(build_py):\n \"\"\"Custom `build_py` command to always build mo files for wheels.\"\"\"\n\n def run(self):\n self.run_command(\"compile_catalog\")\n build_py.run(self) # old style class\n\n\nclass Sdist(sdist):\n \"\"\"Custom `sdist` command to ensure that mo files are always created.\"\"\"\n\n def run(self):\n self.run_command(\"compile_catalog\")\n sdist.run(self) # old style class\n\n\nsetup(\n name=PACKAGE_NAME,\n version=versioneer.get_version(),\n license=\"Apache License 2.0\",\n url=\"https://opsdroid.github.io/\",\n download_url=\"https://github.com/opsdroid/opsdroid/releases\",\n author=\"Jacob Tomlinson\",\n author_email=\"[email protected]\",\n description=\"An open source ChatOps bot framework.\",\n long_description=README,\n long_description_content_type=\"text/markdown\",\n packages=PACKAGES,\n include_package_data=True,\n zip_safe=False,\n platforms=\"any\",\n classifiers=[\n \"Development Status :: 4 - Beta\",\n \"Environment :: Console\",\n \"Framework :: AsyncIO\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: System Administrators\",\n \"Intended Audience :: Information Technology\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3 :: Only\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Topic :: Communications :: Chat\",\n \"Topic :: Scientific/Engineering :: Artificial Intelligence\",\n \"Topic :: Software Development :: Libraries :: Python Modules\",\n ],\n install_requires=REQUIRES,\n test_suite=\"tests\",\n 
keywords=[\n \"bot\",\n \"bot-framework\",\n \"opsdroid\",\n \"botkit\",\n \"python3\",\n \"asyncio\",\n \"chatops\",\n \"devops\",\n \"nlu\",\n ],\n setup_requires=[\"Babel\"],\n cmdclass=versioneer.get_cmdclass(\n {\"sdist\": Sdist, \"build_py\": BuildPy, \"develop\": Develop}\n ),\n entry_points={\"console_scripts\": [\"opsdroid = opsdroid.cli:cli\"]},\n)\n", "path": "setup.py"}]}
| 1,553 | 112 |
gh_patches_debug_14665
|
rasdani/github-patches
|
git_diff
|
getsentry__sentry-9517
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
SVG static assets broken and not displayed
Every asset with svg type is displayed as broken in the web interface.

We installed sentry 9 with python and nginx as proxy on CentOS 7.2
When I look the content-type returned, svg files are not recognized.
```
$ curl -v https://sentry/_static/1533634345/sentry/dist/php.svg
(...)
< Content-Type: application/octet-stream
```
If I check the file on server, the svg is not recognized.
```
$ file php.svg
php.svg: HTML document, ASCII text, with very long lines, with no line terminators
```
If we serve the file directly with nginx, the correct mime type is sent as _image/svg+xml_
</issue>
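A small sketch of the underlying Python behaviour (illustrative only, not part of the repository): Sentry registers extra mime types through the standard `mimetypes` module, and on a host whose mime registry has no entry for `.svg` (as on the CentOS 7 box in the report) the guessed type comes back empty and the response falls back to `application/octet-stream` unless the type is registered explicitly, which is what the patch further below does for `image/svg+xml`.

```
# Illustrative: registering the svg type by hand, mirroring the patched loop.
import mimetypes

print(mimetypes.guess_type("php.svg"))        # (None, None) where the registry lacks .svg
mimetypes.add_type("image/svg+xml", ".svg")
print(mimetypes.guess_type("php.svg"))        # ('image/svg+xml', None)
```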
<code>
[start of src/sentry/runner/settings.py]
1 """
2 sentry.runner.settings
3 ~~~~~~~~~~~~~~~~~~~~~~
4
5 :copyright: (c) 2015 by the Sentry Team, see AUTHORS for more details.
6 :license: BSD, see LICENSE for more details.
7 """
8 from __future__ import absolute_import, print_function
9
10 import os
11 import click
12
13 DEFAULT_SETTINGS_MODULE = 'sentry.conf.server'
14 DEFAULT_SETTINGS_CONF = 'config.yml'
15 DEFAULT_SETTINGS_OVERRIDE = 'sentry.conf.py'
16
17
18 def generate_secret_key():
19 from django.utils.crypto import get_random_string
20 chars = u'abcdefghijklmnopqrstuvwxyz0123456789!@#%^&*(-_=+)'
21 return get_random_string(50, chars)
22
23
24 def load_config_template(path, version='default'):
25 from pkg_resources import resource_string
26 return resource_string('sentry', 'data/config/%s.%s' % (path, version)).decode('utf8')
27
28
29 def generate_settings(dev=False):
30 """
31 This command is run when ``default_path`` doesn't exist, or ``init`` is
32 run and returns a string representing the default data to put into their
33 settings file.
34 """
35 context = {
36 'secret_key': generate_secret_key(),
37 'debug_flag': dev,
38 'mail.backend': 'console' if dev else 'smtp',
39 }
40
41 py = load_config_template(DEFAULT_SETTINGS_OVERRIDE, 'default') % context
42 yaml = load_config_template(DEFAULT_SETTINGS_CONF, 'default') % context
43 return py, yaml
44
45
46 def get_sentry_conf():
47 """
48 Fetch the SENTRY_CONF value, either from the click context
49 if available, or SENTRY_CONF environment variable.
50 """
51 try:
52 ctx = click.get_current_context()
53 return ctx.obj['config']
54 except (RuntimeError, KeyError, TypeError):
55 try:
56 return os.environ['SENTRY_CONF']
57 except KeyError:
58 return '~/.sentry'
59
60
61 def discover_configs():
62 """
63 Discover the locations of three configuration components:
64 * Config directory (~/.sentry)
65 * Optional python config file (~/.sentry/sentry.conf.py)
66 * Optional yaml config (~/.sentry/config.yml)
67 """
68 try:
69 config = os.environ['SENTRY_CONF']
70 except KeyError:
71 config = '~/.sentry'
72
73 config = os.path.expanduser(config)
74
75 # This is the old, now deprecated code path where SENTRY_CONF is pointed directly
76 # to a python file
77 if config.endswith(('.py', '.conf')) or os.path.isfile(config):
78 return (os.path.dirname(config), config, None, )
79
80 return (
81 config, os.path.join(config, DEFAULT_SETTINGS_OVERRIDE),
82 os.path.join(config, DEFAULT_SETTINGS_CONF),
83 )
84
85
86 def configure(ctx, py, yaml, skip_service_validation=False):
87 """
88 Given the two different config files, set up the environment.
89
90 NOTE: Will only execute once, so it's safe to call multiple times.
91 """
92 global __installed
93 if __installed:
94 return
95
96 # Make sure that our warnings are always displayed
97 import warnings
98 warnings.filterwarnings('default', '', Warning, r'^sentry')
99
100 # Add in additional mimetypes that are useful for our static files
101 # which aren't common in default system registries
102 import mimetypes
103 for type, ext in (
104 ('application/json', 'map'), ('application/font-woff', 'woff'),
105 ('application/font-woff2', 'woff2'), ('application/vnd.ms-fontobject', 'eot'),
106 ('application/x-font-ttf', 'ttf'), ('application/x-font-ttf',
107 'ttc'), ('font/opentype', 'otf'),
108 ):
109 mimetypes.add_type(type, '.' + ext)
110
111 from .importer import install
112
113 if yaml is None:
114 # `yaml` will be None when SENTRY_CONF is pointed
115 # directly to a file, in which case, this file must exist
116 if not os.path.exists(py):
117 if ctx:
118 raise click.ClickException(
119 "Configuration file does not exist. Use 'sentry init' to initialize the file."
120 )
121 raise ValueError(
122 "Configuration file does not exist at '%s'" % click.format_filename(py)
123 )
124 elif not os.path.exists(yaml) and not os.path.exists(py):
125 if ctx:
126 raise click.ClickException(
127 "Configuration file does not exist. Use 'sentry init' to initialize the file."
128 )
129 raise ValueError("Configuration file does not exist at '%s'" % click.format_filename(yaml))
130
131 # Add autoreload for config.yml file if needed
132 if yaml is not None and os.path.exists(yaml):
133 from sentry.utils.uwsgi import reload_on_change
134 reload_on_change(yaml)
135
136 os.environ['DJANGO_SETTINGS_MODULE'] = 'sentry_config'
137
138 install('sentry_config', py, DEFAULT_SETTINGS_MODULE)
139
140 # HACK: we need to force access of django.conf.settings to
141 # ensure we don't hit any import-driven recursive behavior
142 from django.conf import settings
143 hasattr(settings, 'INSTALLED_APPS')
144
145 from .initializer import initialize_app, on_configure
146 initialize_app(
147 {
148 'config_path': py,
149 'settings': settings,
150 'options': yaml,
151 },
152 skip_service_validation=skip_service_validation
153 )
154 on_configure({'settings': settings})
155
156 __installed = True
157
158
159 __installed = False
160
[end of src/sentry/runner/settings.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/src/sentry/runner/settings.py b/src/sentry/runner/settings.py
--- a/src/sentry/runner/settings.py
+++ b/src/sentry/runner/settings.py
@@ -101,10 +101,14 @@
# which aren't common in default system registries
import mimetypes
for type, ext in (
- ('application/json', 'map'), ('application/font-woff', 'woff'),
- ('application/font-woff2', 'woff2'), ('application/vnd.ms-fontobject', 'eot'),
- ('application/x-font-ttf', 'ttf'), ('application/x-font-ttf',
- 'ttc'), ('font/opentype', 'otf'),
+ ('application/json', 'map'),
+ ('application/font-woff', 'woff'),
+ ('application/font-woff2', 'woff2'),
+ ('application/vnd.ms-fontobject', 'eot'),
+ ('application/x-font-ttf', 'ttf'),
+ ('application/x-font-ttf', 'ttc'),
+ ('font/opentype', 'otf'),
+ ('image/svg+xml', 'svg'),
):
mimetypes.add_type(type, '.' + ext)
|
{"golden_diff": "diff --git a/src/sentry/runner/settings.py b/src/sentry/runner/settings.py\n--- a/src/sentry/runner/settings.py\n+++ b/src/sentry/runner/settings.py\n@@ -101,10 +101,14 @@\n # which aren't common in default system registries\n import mimetypes\n for type, ext in (\n- ('application/json', 'map'), ('application/font-woff', 'woff'),\n- ('application/font-woff2', 'woff2'), ('application/vnd.ms-fontobject', 'eot'),\n- ('application/x-font-ttf', 'ttf'), ('application/x-font-ttf',\n- 'ttc'), ('font/opentype', 'otf'),\n+ ('application/json', 'map'),\n+ ('application/font-woff', 'woff'),\n+ ('application/font-woff2', 'woff2'),\n+ ('application/vnd.ms-fontobject', 'eot'),\n+ ('application/x-font-ttf', 'ttf'),\n+ ('application/x-font-ttf', 'ttc'),\n+ ('font/opentype', 'otf'),\n+ ('image/svg+xml', 'svg'),\n ):\n mimetypes.add_type(type, '.' + ext)\n", "issue": "SVG static assets broken and not displayed\nEvery asset with svg type are displayed as broken in web interface.\r\n\r\n\r\n\r\nWe installed sentry 9 with python and nginx as proxy on CentOS 7.2\r\n\r\nWhen I look the content-type returned, svg files are not recognized.\r\n\r\n```\r\n$ curl -v https://sentry/_static/1533634345/sentry/dist/php.svg\r\n(...)\r\n< Content-Type: application/octet-stream\r\n```\r\n\r\nIf I check the file on server, the svg is not recognized.\r\n\r\n```\r\n$ file php.svg \r\nphp.svg: HTML document, ASCII text, with very long lines, with no line terminators\r\n```\r\n\r\nIf we serve the file directly with nginx, the correct mime type is sent as _image/svg+xml_\r\n\n", "before_files": [{"content": "\"\"\"\nsentry.runner.settings\n~~~~~~~~~~~~~~~~~~~~~~\n\n:copyright: (c) 2015 by the Sentry Team, see AUTHORS for more details.\n:license: BSD, see LICENSE for more details.\n\"\"\"\nfrom __future__ import absolute_import, print_function\n\nimport os\nimport click\n\nDEFAULT_SETTINGS_MODULE = 'sentry.conf.server'\nDEFAULT_SETTINGS_CONF = 'config.yml'\nDEFAULT_SETTINGS_OVERRIDE = 'sentry.conf.py'\n\n\ndef generate_secret_key():\n from django.utils.crypto import get_random_string\n chars = u'abcdefghijklmnopqrstuvwxyz0123456789!@#%^&*(-_=+)'\n return get_random_string(50, chars)\n\n\ndef load_config_template(path, version='default'):\n from pkg_resources import resource_string\n return resource_string('sentry', 'data/config/%s.%s' % (path, version)).decode('utf8')\n\n\ndef generate_settings(dev=False):\n \"\"\"\n This command is run when ``default_path`` doesn't exist, or ``init`` is\n run and returns a string representing the default data to put into their\n settings file.\n \"\"\"\n context = {\n 'secret_key': generate_secret_key(),\n 'debug_flag': dev,\n 'mail.backend': 'console' if dev else 'smtp',\n }\n\n py = load_config_template(DEFAULT_SETTINGS_OVERRIDE, 'default') % context\n yaml = load_config_template(DEFAULT_SETTINGS_CONF, 'default') % context\n return py, yaml\n\n\ndef get_sentry_conf():\n \"\"\"\n Fetch the SENTRY_CONF value, either from the click context\n if available, or SENTRY_CONF environment variable.\n \"\"\"\n try:\n ctx = click.get_current_context()\n return ctx.obj['config']\n except (RuntimeError, KeyError, TypeError):\n try:\n return os.environ['SENTRY_CONF']\n except KeyError:\n return '~/.sentry'\n\n\ndef discover_configs():\n \"\"\"\n Discover the locations of three configuration components:\n * Config directory (~/.sentry)\n * Optional python config file (~/.sentry/sentry.conf.py)\n * Optional yaml config (~/.sentry/config.yml)\n \"\"\"\n try:\n config = os.environ['SENTRY_CONF']\n 
except KeyError:\n config = '~/.sentry'\n\n config = os.path.expanduser(config)\n\n # This is the old, now deprecated code path where SENTRY_CONF is pointed directly\n # to a python file\n if config.endswith(('.py', '.conf')) or os.path.isfile(config):\n return (os.path.dirname(config), config, None, )\n\n return (\n config, os.path.join(config, DEFAULT_SETTINGS_OVERRIDE),\n os.path.join(config, DEFAULT_SETTINGS_CONF),\n )\n\n\ndef configure(ctx, py, yaml, skip_service_validation=False):\n \"\"\"\n Given the two different config files, set up the environment.\n\n NOTE: Will only execute once, so it's safe to call multiple times.\n \"\"\"\n global __installed\n if __installed:\n return\n\n # Make sure that our warnings are always displayed\n import warnings\n warnings.filterwarnings('default', '', Warning, r'^sentry')\n\n # Add in additional mimetypes that are useful for our static files\n # which aren't common in default system registries\n import mimetypes\n for type, ext in (\n ('application/json', 'map'), ('application/font-woff', 'woff'),\n ('application/font-woff2', 'woff2'), ('application/vnd.ms-fontobject', 'eot'),\n ('application/x-font-ttf', 'ttf'), ('application/x-font-ttf',\n 'ttc'), ('font/opentype', 'otf'),\n ):\n mimetypes.add_type(type, '.' + ext)\n\n from .importer import install\n\n if yaml is None:\n # `yaml` will be None when SENTRY_CONF is pointed\n # directly to a file, in which case, this file must exist\n if not os.path.exists(py):\n if ctx:\n raise click.ClickException(\n \"Configuration file does not exist. Use 'sentry init' to initialize the file.\"\n )\n raise ValueError(\n \"Configuration file does not exist at '%s'\" % click.format_filename(py)\n )\n elif not os.path.exists(yaml) and not os.path.exists(py):\n if ctx:\n raise click.ClickException(\n \"Configuration file does not exist. Use 'sentry init' to initialize the file.\"\n )\n raise ValueError(\"Configuration file does not exist at '%s'\" % click.format_filename(yaml))\n\n # Add autoreload for config.yml file if needed\n if yaml is not None and os.path.exists(yaml):\n from sentry.utils.uwsgi import reload_on_change\n reload_on_change(yaml)\n\n os.environ['DJANGO_SETTINGS_MODULE'] = 'sentry_config'\n\n install('sentry_config', py, DEFAULT_SETTINGS_MODULE)\n\n # HACK: we need to force access of django.conf.settings to\n # ensure we don't hit any import-driven recursive behavior\n from django.conf import settings\n hasattr(settings, 'INSTALLED_APPS')\n\n from .initializer import initialize_app, on_configure\n initialize_app(\n {\n 'config_path': py,\n 'settings': settings,\n 'options': yaml,\n },\n skip_service_validation=skip_service_validation\n )\n on_configure({'settings': settings})\n\n __installed = True\n\n\n__installed = False\n", "path": "src/sentry/runner/settings.py"}]}
| 2,307 | 265 |
gh_patches_debug_28156
|
rasdani/github-patches
|
git_diff
|
nltk__nltk-3080
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
AttributeError: 'itertools.chain' object has no attribute 'lookup'
nltk version: 3.7.
python version: 3.9.15
From the following code:
```
padded_line = [list(pad_both_ends(word_tokenize(input_text), n=N))]
train_data, vocab = padded_everygram_pipeline(order=N, text=padded_line)
lm = Lidstone(gamma=0.2, vocabulary=vocab, order=N)
lm.fit(text=train_data)
```
where input_text is a long string and N is the order of the Ngram model. From the last line of code above, I get the following error:
>lm.fit(text=train_data)
> File "env\lib\site-packages\nltk\lm\api.py", line 110, in fit
> self.counts.update(self.vocab.lookup(sent) for sent in text)
> File "env\lib\site-packages\nltk\lm\counter.py", line 115, in update
> for sent in ngram_text:
> File "env\lib\site-packages\nltk\lm\api.py", line 110, in <genexpr>
> self.counts.update(self.vocab.lookup(sent) for sent in text)
> AttributeError: 'itertools.chain' object has no attribute 'lookup'
Can anyone fix this error for me?
</issue>
<code>
[start of nltk/lm/api.py]
1 # Natural Language Toolkit: Language Models
2 #
3 # Copyright (C) 2001-2022 NLTK Project
4 # Authors: Ilia Kurenkov <[email protected]>
5 # URL: <https://www.nltk.org/>
6 # For license information, see LICENSE.TXT
7 """Language Model Interface."""
8
9 import random
10 from abc import ABCMeta, abstractmethod
11 from bisect import bisect
12 from itertools import accumulate
13
14 from nltk.lm.counter import NgramCounter
15 from nltk.lm.util import log_base2
16 from nltk.lm.vocabulary import Vocabulary
17
18
19 class Smoothing(metaclass=ABCMeta):
20 """Ngram Smoothing Interface
21
22 Implements Chen & Goodman 1995's idea that all smoothing algorithms have
23 certain features in common. This should ideally allow smoothing algorithms to
24 work both with Backoff and Interpolation.
25 """
26
27 def __init__(self, vocabulary, counter):
28 """
29 :param vocabulary: The Ngram vocabulary object.
30 :type vocabulary: nltk.lm.vocab.Vocabulary
31 :param counter: The counts of the vocabulary items.
32 :type counter: nltk.lm.counter.NgramCounter
33 """
34 self.vocab = vocabulary
35 self.counts = counter
36
37 @abstractmethod
38 def unigram_score(self, word):
39 raise NotImplementedError()
40
41 @abstractmethod
42 def alpha_gamma(self, word, context):
43 raise NotImplementedError()
44
45
46 def _mean(items):
47 """Return average (aka mean) for sequence of items."""
48 return sum(items) / len(items)
49
50
51 def _random_generator(seed_or_generator):
52 if isinstance(seed_or_generator, random.Random):
53 return seed_or_generator
54 return random.Random(seed_or_generator)
55
56
57 def _weighted_choice(population, weights, random_generator=None):
58 """Like random.choice, but with weights.
59
60 Heavily inspired by python 3.6 `random.choices`.
61 """
62 if not population:
63 raise ValueError("Can't choose from empty population")
64 if len(population) != len(weights):
65 raise ValueError("The number of weights does not match the population")
66 cum_weights = list(accumulate(weights))
67 total = cum_weights[-1]
68 threshold = random_generator.random()
69 return population[bisect(cum_weights, total * threshold)]
70
71
72 class LanguageModel(metaclass=ABCMeta):
73 """ABC for Language Models.
74
75 Cannot be directly instantiated itself.
76
77 """
78
79 def __init__(self, order, vocabulary=None, counter=None):
80 """Creates new LanguageModel.
81
82 :param vocabulary: If provided, this vocabulary will be used instead
83 of creating a new one when training.
84 :type vocabulary: `nltk.lm.Vocabulary` or None
85 :param counter: If provided, use this object to count ngrams.
86 :type vocabulary: `nltk.lm.NgramCounter` or None
87 :param ngrams_fn: If given, defines how sentences in training text are turned to ngram
88 sequences.
89 :type ngrams_fn: function or None
90 :param pad_fn: If given, defines how sentences in training text are padded.
91 :type pad_fn: function or None
92 """
93 self.order = order
94 self.vocab = Vocabulary() if vocabulary is None else vocabulary
95 self.counts = NgramCounter() if counter is None else counter
96
97 def fit(self, text, vocabulary_text=None):
98 """Trains the model on a text.
99
100 :param text: Training text as a sequence of sentences.
101
102 """
103 if not self.vocab:
104 if vocabulary_text is None:
105 raise ValueError(
106 "Cannot fit without a vocabulary or text to create it from."
107 )
108 self.vocab.update(vocabulary_text)
109 self.counts.update(self.vocab.lookup(sent) for sent in text)
110
111 def score(self, word, context=None):
112 """Masks out of vocab (OOV) words and computes their model score.
113
114 For model-specific logic of calculating scores, see the `unmasked_score`
115 method.
116 """
117 return self.unmasked_score(
118 self.vocab.lookup(word), self.vocab.lookup(context) if context else None
119 )
120
121 @abstractmethod
122 def unmasked_score(self, word, context=None):
123 """Score a word given some optional context.
124
125 Concrete models are expected to provide an implementation.
126 Note that this method does not mask its arguments with the OOV label.
127 Use the `score` method for that.
128
129 :param str word: Word for which we want the score
130 :param tuple(str) context: Context the word is in.
131 If `None`, compute unigram score.
132 :param context: tuple(str) or None
133 :rtype: float
134 """
135 raise NotImplementedError()
136
137 def logscore(self, word, context=None):
138 """Evaluate the log score of this word in this context.
139
140 The arguments are the same as for `score` and `unmasked_score`.
141
142 """
143 return log_base2(self.score(word, context))
144
145 def context_counts(self, context):
146 """Helper method for retrieving counts for a given context.
147
148 Assumes context has been checked and oov words in it masked.
149 :type context: tuple(str) or None
150
151 """
152 return (
153 self.counts[len(context) + 1][context] if context else self.counts.unigrams
154 )
155
156 def entropy(self, text_ngrams):
157 """Calculate cross-entropy of model for given evaluation text.
158
159 :param Iterable(tuple(str)) text_ngrams: A sequence of ngram tuples.
160 :rtype: float
161
162 """
163 return -1 * _mean(
164 [self.logscore(ngram[-1], ngram[:-1]) for ngram in text_ngrams]
165 )
166
167 def perplexity(self, text_ngrams):
168 """Calculates the perplexity of the given text.
169
170 This is simply 2 ** cross-entropy for the text, so the arguments are the same.
171
172 """
173 return pow(2.0, self.entropy(text_ngrams))
174
175 def generate(self, num_words=1, text_seed=None, random_seed=None):
176 """Generate words from the model.
177
178 :param int num_words: How many words to generate. By default 1.
179 :param text_seed: Generation can be conditioned on preceding context.
180 :param random_seed: A random seed or an instance of `random.Random`. If provided,
181 makes the random sampling part of generation reproducible.
182 :return: One (str) word or a list of words generated from model.
183
184 Examples:
185
186 >>> from nltk.lm import MLE
187 >>> lm = MLE(2)
188 >>> lm.fit([[("a", "b"), ("b", "c")]], vocabulary_text=['a', 'b', 'c'])
189 >>> lm.fit([[("a",), ("b",), ("c",)]])
190 >>> lm.generate(random_seed=3)
191 'a'
192 >>> lm.generate(text_seed=['a'])
193 'b'
194
195 """
196 text_seed = [] if text_seed is None else list(text_seed)
197 random_generator = _random_generator(random_seed)
198 # This is the base recursion case.
199 if num_words == 1:
200 context = (
201 text_seed[-self.order + 1 :]
202 if len(text_seed) >= self.order
203 else text_seed
204 )
205 samples = self.context_counts(self.vocab.lookup(context))
206 while context and not samples:
207 context = context[1:] if len(context) > 1 else []
208 samples = self.context_counts(self.vocab.lookup(context))
209 # Sorting samples achieves two things:
210 # - reproducible randomness when sampling
211 # - turns Mapping into Sequence which `_weighted_choice` expects
212 samples = sorted(samples)
213 return _weighted_choice(
214 samples,
215 tuple(self.score(w, context) for w in samples),
216 random_generator,
217 )
218 # We build up text one word at a time using the preceding context.
219 generated = []
220 for _ in range(num_words):
221 generated.append(
222 self.generate(
223 num_words=1,
224 text_seed=text_seed + generated,
225 random_seed=random_generator,
226 )
227 )
228 return generated
229
[end of nltk/lm/api.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/nltk/lm/api.py b/nltk/lm/api.py
--- a/nltk/lm/api.py
+++ b/nltk/lm/api.py
@@ -7,6 +7,7 @@
"""Language Model Interface."""
import random
+import warnings
from abc import ABCMeta, abstractmethod
from bisect import bisect
from itertools import accumulate
@@ -83,7 +84,7 @@
of creating a new one when training.
:type vocabulary: `nltk.lm.Vocabulary` or None
:param counter: If provided, use this object to count ngrams.
- :type vocabulary: `nltk.lm.NgramCounter` or None
+ :type counter: `nltk.lm.NgramCounter` or None
:param ngrams_fn: If given, defines how sentences in training text are turned to ngram
sequences.
:type ngrams_fn: function or None
@@ -91,6 +92,12 @@
:type pad_fn: function or None
"""
self.order = order
+ if vocabulary and not isinstance(vocabulary, Vocabulary):
+ warnings.warn(
+ f"The `vocabulary` argument passed to {self.__class__.__name__!r} "
+ "must be an instance of `nltk.lm.Vocabulary`.",
+ stacklevel=3,
+ )
self.vocab = Vocabulary() if vocabulary is None else vocabulary
self.counts = NgramCounter() if counter is None else counter
|
{"golden_diff": "diff --git a/nltk/lm/api.py b/nltk/lm/api.py\n--- a/nltk/lm/api.py\n+++ b/nltk/lm/api.py\n@@ -7,6 +7,7 @@\n \"\"\"Language Model Interface.\"\"\"\n \n import random\n+import warnings\n from abc import ABCMeta, abstractmethod\n from bisect import bisect\n from itertools import accumulate\n@@ -83,7 +84,7 @@\n of creating a new one when training.\n :type vocabulary: `nltk.lm.Vocabulary` or None\n :param counter: If provided, use this object to count ngrams.\n- :type vocabulary: `nltk.lm.NgramCounter` or None\n+ :type counter: `nltk.lm.NgramCounter` or None\n :param ngrams_fn: If given, defines how sentences in training text are turned to ngram\n sequences.\n :type ngrams_fn: function or None\n@@ -91,6 +92,12 @@\n :type pad_fn: function or None\n \"\"\"\n self.order = order\n+ if vocabulary and not isinstance(vocabulary, Vocabulary):\n+ warnings.warn(\n+ f\"The `vocabulary` argument passed to {self.__class__.__name__!r} \"\n+ \"must be an instance of `nltk.lm.Vocabulary`.\",\n+ stacklevel=3,\n+ )\n self.vocab = Vocabulary() if vocabulary is None else vocabulary\n self.counts = NgramCounter() if counter is None else counter\n", "issue": "AttributeError: 'itertools.chain' object has no attribute 'lookup'\nnltk version: 3.7.\r\npython version: 3.9.15\r\nFrom the following code:\r\n\r\n```\r\npadded_line = [list(pad_both_ends(word_tokenize(input_text), n=N))]\r\ntrain_data, vocab = padded_everygram_pipeline(order=N, text=padded_line)\r\nlm = Lidstone(gamma=0.2, vocabulary=vocab, order=N)\r\nlm.fit(text=train_data)\r\n```\r\n\r\nwhere input_text is a long string and N as in the Ngram model. From the last line of code above, I get the following error:\r\n\r\n>lm.fit(text=train_data)\r\n> File \"env\\lib\\site-packages\\nltk\\lm\\api.py\", line 110, in fit\r\n> self.counts.update(self.vocab.lookup(sent) for sent in text)\r\n> File \"env\\lib\\site-packages\\nltk\\lm\\counter.py\", line 115, in update\r\n> for sent in ngram_text:\r\n> File \"env\\lib\\site-packages\\nltk\\lm\\api.py\", line 110, in <genexpr>\r\n> self.counts.update(self.vocab.lookup(sent) for sent in text)\r\n> AttributeError: 'itertools.chain' object has no attribute 'lookup'\r\n\r\nCan anyone fix this error for me?\n", "before_files": [{"content": "# Natural Language Toolkit: Language Models\n#\n# Copyright (C) 2001-2022 NLTK Project\n# Authors: Ilia Kurenkov <[email protected]>\n# URL: <https://www.nltk.org/>\n# For license information, see LICENSE.TXT\n\"\"\"Language Model Interface.\"\"\"\n\nimport random\nfrom abc import ABCMeta, abstractmethod\nfrom bisect import bisect\nfrom itertools import accumulate\n\nfrom nltk.lm.counter import NgramCounter\nfrom nltk.lm.util import log_base2\nfrom nltk.lm.vocabulary import Vocabulary\n\n\nclass Smoothing(metaclass=ABCMeta):\n \"\"\"Ngram Smoothing Interface\n\n Implements Chen & Goodman 1995's idea that all smoothing algorithms have\n certain features in common. 
This should ideally allow smoothing algorithms to\n work both with Backoff and Interpolation.\n \"\"\"\n\n def __init__(self, vocabulary, counter):\n \"\"\"\n :param vocabulary: The Ngram vocabulary object.\n :type vocabulary: nltk.lm.vocab.Vocabulary\n :param counter: The counts of the vocabulary items.\n :type counter: nltk.lm.counter.NgramCounter\n \"\"\"\n self.vocab = vocabulary\n self.counts = counter\n\n @abstractmethod\n def unigram_score(self, word):\n raise NotImplementedError()\n\n @abstractmethod\n def alpha_gamma(self, word, context):\n raise NotImplementedError()\n\n\ndef _mean(items):\n \"\"\"Return average (aka mean) for sequence of items.\"\"\"\n return sum(items) / len(items)\n\n\ndef _random_generator(seed_or_generator):\n if isinstance(seed_or_generator, random.Random):\n return seed_or_generator\n return random.Random(seed_or_generator)\n\n\ndef _weighted_choice(population, weights, random_generator=None):\n \"\"\"Like random.choice, but with weights.\n\n Heavily inspired by python 3.6 `random.choices`.\n \"\"\"\n if not population:\n raise ValueError(\"Can't choose from empty population\")\n if len(population) != len(weights):\n raise ValueError(\"The number of weights does not match the population\")\n cum_weights = list(accumulate(weights))\n total = cum_weights[-1]\n threshold = random_generator.random()\n return population[bisect(cum_weights, total * threshold)]\n\n\nclass LanguageModel(metaclass=ABCMeta):\n \"\"\"ABC for Language Models.\n\n Cannot be directly instantiated itself.\n\n \"\"\"\n\n def __init__(self, order, vocabulary=None, counter=None):\n \"\"\"Creates new LanguageModel.\n\n :param vocabulary: If provided, this vocabulary will be used instead\n of creating a new one when training.\n :type vocabulary: `nltk.lm.Vocabulary` or None\n :param counter: If provided, use this object to count ngrams.\n :type vocabulary: `nltk.lm.NgramCounter` or None\n :param ngrams_fn: If given, defines how sentences in training text are turned to ngram\n sequences.\n :type ngrams_fn: function or None\n :param pad_fn: If given, defines how sentences in training text are padded.\n :type pad_fn: function or None\n \"\"\"\n self.order = order\n self.vocab = Vocabulary() if vocabulary is None else vocabulary\n self.counts = NgramCounter() if counter is None else counter\n\n def fit(self, text, vocabulary_text=None):\n \"\"\"Trains the model on a text.\n\n :param text: Training text as a sequence of sentences.\n\n \"\"\"\n if not self.vocab:\n if vocabulary_text is None:\n raise ValueError(\n \"Cannot fit without a vocabulary or text to create it from.\"\n )\n self.vocab.update(vocabulary_text)\n self.counts.update(self.vocab.lookup(sent) for sent in text)\n\n def score(self, word, context=None):\n \"\"\"Masks out of vocab (OOV) words and computes their model score.\n\n For model-specific logic of calculating scores, see the `unmasked_score`\n method.\n \"\"\"\n return self.unmasked_score(\n self.vocab.lookup(word), self.vocab.lookup(context) if context else None\n )\n\n @abstractmethod\n def unmasked_score(self, word, context=None):\n \"\"\"Score a word given some optional context.\n\n Concrete models are expected to provide an implementation.\n Note that this method does not mask its arguments with the OOV label.\n Use the `score` method for that.\n\n :param str word: Word for which we want the score\n :param tuple(str) context: Context the word is in.\n If `None`, compute unigram score.\n :param context: tuple(str) or None\n :rtype: float\n \"\"\"\n raise 
NotImplementedError()\n\n def logscore(self, word, context=None):\n \"\"\"Evaluate the log score of this word in this context.\n\n The arguments are the same as for `score` and `unmasked_score`.\n\n \"\"\"\n return log_base2(self.score(word, context))\n\n def context_counts(self, context):\n \"\"\"Helper method for retrieving counts for a given context.\n\n Assumes context has been checked and oov words in it masked.\n :type context: tuple(str) or None\n\n \"\"\"\n return (\n self.counts[len(context) + 1][context] if context else self.counts.unigrams\n )\n\n def entropy(self, text_ngrams):\n \"\"\"Calculate cross-entropy of model for given evaluation text.\n\n :param Iterable(tuple(str)) text_ngrams: A sequence of ngram tuples.\n :rtype: float\n\n \"\"\"\n return -1 * _mean(\n [self.logscore(ngram[-1], ngram[:-1]) for ngram in text_ngrams]\n )\n\n def perplexity(self, text_ngrams):\n \"\"\"Calculates the perplexity of the given text.\n\n This is simply 2 ** cross-entropy for the text, so the arguments are the same.\n\n \"\"\"\n return pow(2.0, self.entropy(text_ngrams))\n\n def generate(self, num_words=1, text_seed=None, random_seed=None):\n \"\"\"Generate words from the model.\n\n :param int num_words: How many words to generate. By default 1.\n :param text_seed: Generation can be conditioned on preceding context.\n :param random_seed: A random seed or an instance of `random.Random`. If provided,\n makes the random sampling part of generation reproducible.\n :return: One (str) word or a list of words generated from model.\n\n Examples:\n\n >>> from nltk.lm import MLE\n >>> lm = MLE(2)\n >>> lm.fit([[(\"a\", \"b\"), (\"b\", \"c\")]], vocabulary_text=['a', 'b', 'c'])\n >>> lm.fit([[(\"a\",), (\"b\",), (\"c\",)]])\n >>> lm.generate(random_seed=3)\n 'a'\n >>> lm.generate(text_seed=['a'])\n 'b'\n\n \"\"\"\n text_seed = [] if text_seed is None else list(text_seed)\n random_generator = _random_generator(random_seed)\n # This is the base recursion case.\n if num_words == 1:\n context = (\n text_seed[-self.order + 1 :]\n if len(text_seed) >= self.order\n else text_seed\n )\n samples = self.context_counts(self.vocab.lookup(context))\n while context and not samples:\n context = context[1:] if len(context) > 1 else []\n samples = self.context_counts(self.vocab.lookup(context))\n # Sorting samples achieves two things:\n # - reproducible randomness when sampling\n # - turns Mapping into Sequence which `_weighted_choice` expects\n samples = sorted(samples)\n return _weighted_choice(\n samples,\n tuple(self.score(w, context) for w in samples),\n random_generator,\n )\n # We build up text one word at a time using the preceding context.\n generated = []\n for _ in range(num_words):\n generated.append(\n self.generate(\n num_words=1,\n text_seed=text_seed + generated,\n random_seed=random_generator,\n )\n )\n return generated\n", "path": "nltk/lm/api.py"}]}
| 3,152 | 331 |
gh_patches_debug_30665
|
rasdani/github-patches
|
git_diff
|
open-telemetry__opentelemetry-python-contrib-1542
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add readthedocs documentation for elasticsearch instrumentation
Part of [1491](https://github.com/open-telemetry/opentelemetry-python-contrib/issues/1491)
</issue>
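For reference, exposing this package on readthedocs in a Sphinx-based docs tree usually needs little more than a stub page that pulls in the module docstring below via autodoc, plus a toctree entry pointing at it. A hypothetical stub; the file path, heading, and options are assumptions rather than contents of the repository:

```rst
OpenTelemetry Elasticsearch Instrumentation
============================================

.. automodule:: opentelemetry.instrumentation.elasticsearch
    :members:
    :undoc-members:
    :show-inheritance:
```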
<code>
[start of instrumentation/opentelemetry-instrumentation-elasticsearch/src/opentelemetry/instrumentation/elasticsearch/__init__.py]
1 # Copyright The OpenTelemetry Authors
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """
16 This library allows tracing HTTP elasticsearch made by the
17 `elasticsearch <https://elasticsearch-py.readthedocs.io/en/master/>`_ library.
18
19 Usage
20 -----
21
22 .. code-block:: python
23
24 from opentelemetry.instrumentation.elasticsearch import ElasticsearchInstrumentor
25 import elasticsearch
26
27
28 # instrument elasticsearch
29 ElasticsearchInstrumentor().instrument()
30
31 # Using elasticsearch as normal now will automatically generate spans
32 es = elasticsearch.Elasticsearch()
33 es.index(index='my-index', doc_type='my-type', id=1, body={'my': 'data', 'timestamp': datetime.now()})
34 es.get(index='my-index', doc_type='my-type', id=1)
35
36 Elasticsearch instrumentation prefixes operation names with the string "Elasticsearch". This
37 can be changed to a different string by either setting the `OTEL_PYTHON_ELASTICSEARCH_NAME_PREFIX`
38 environment variable or by passing the prefix as an argument to the instrumentor. For example,
39
40
41 .. code-block:: python
42
43 ElasticsearchInstrumentor("my-custom-prefix").instrument()
44
45
46 The `instrument` method accepts the following keyword args:
47
48 tracer_provider (TracerProvider) - an optional tracer provider
49 request_hook (Callable) - a function with extra user-defined logic to be performed before performing the request
50 this function signature is:
51 def request_hook(span: Span, method: str, url: str, kwargs)
52 response_hook (Callable) - a function with extra user-defined logic to be performed after performing the request
53 this function signature is:
54 def response_hook(span: Span, response: dict)
55
56 for example:
57
58 .. code: python
59
60 from opentelemetry.instrumentation.elasticsearch import ElasticsearchInstrumentor
61 import elasticsearch
62
63 def request_hook(span, method, url, kwargs):
64 if span and span.is_recording():
65 span.set_attribute("custom_user_attribute_from_request_hook", "some-value")
66
67 def response_hook(span, response):
68 if span and span.is_recording():
69 span.set_attribute("custom_user_attribute_from_response_hook", "some-value")
70
71 # instrument elasticsearch with request and response hooks
72 ElasticsearchInstrumentor().instrument(request_hook=request_hook, response_hook=response_hook)
73
74 # Using elasticsearch as normal now will automatically generate spans,
75 # including user custom attributes added from the hooks
76 es = elasticsearch.Elasticsearch()
77 es.index(index='my-index', doc_type='my-type', id=1, body={'my': 'data', 'timestamp': datetime.now()})
78 es.get(index='my-index', doc_type='my-type', id=1)
79
80 API
81 ---
82 """
83
84 import re
85 from logging import getLogger
86 from os import environ
87 from typing import Collection
88
89 import elasticsearch
90 import elasticsearch.exceptions
91 from wrapt import wrap_function_wrapper as _wrap
92
93 from opentelemetry.instrumentation.elasticsearch.package import _instruments
94 from opentelemetry.instrumentation.elasticsearch.version import __version__
95 from opentelemetry.instrumentation.instrumentor import BaseInstrumentor
96 from opentelemetry.instrumentation.utils import unwrap
97 from opentelemetry.semconv.trace import SpanAttributes
98 from opentelemetry.trace import SpanKind, get_tracer
99
100 logger = getLogger(__name__)
101
102
103 # Values to add as tags from the actual
104 # payload returned by Elasticsearch, if any.
105 _ATTRIBUTES_FROM_RESULT = [
106 "found",
107 "timed_out",
108 "took",
109 ]
110
111 _DEFAULT_OP_NAME = "request"
112
113
114 class ElasticsearchInstrumentor(BaseInstrumentor):
115 """An instrumentor for elasticsearch
116 See `BaseInstrumentor`
117 """
118
119 def __init__(self, span_name_prefix=None):
120 if not span_name_prefix:
121 span_name_prefix = environ.get(
122 "OTEL_PYTHON_ELASTICSEARCH_NAME_PREFIX",
123 "Elasticsearch",
124 )
125 self._span_name_prefix = span_name_prefix.strip()
126 super().__init__()
127
128 def instrumentation_dependencies(self) -> Collection[str]:
129 return _instruments
130
131 def _instrument(self, **kwargs):
132 """
133 Instruments Elasticsearch module
134 """
135 tracer_provider = kwargs.get("tracer_provider")
136 tracer = get_tracer(__name__, __version__, tracer_provider)
137 request_hook = kwargs.get("request_hook")
138 response_hook = kwargs.get("response_hook")
139 _wrap(
140 elasticsearch,
141 "Transport.perform_request",
142 _wrap_perform_request(
143 tracer, self._span_name_prefix, request_hook, response_hook
144 ),
145 )
146
147 def _uninstrument(self, **kwargs):
148 unwrap(elasticsearch.Transport, "perform_request")
149
150
151 _regex_doc_url = re.compile(r"/_doc/([^/]+)")
152
153 # search api https://www.elastic.co/guide/en/elasticsearch/reference/current/search-search.html
154 _regex_search_url = re.compile(r"/([^/]+)/_search[/]?")
155
156
157 def _wrap_perform_request(
158 tracer, span_name_prefix, request_hook=None, response_hook=None
159 ):
160 # pylint: disable=R0912,R0914
161 def wrapper(wrapped, _, args, kwargs):
162 method = url = None
163 try:
164 method, url, *_ = args
165 except IndexError:
166 logger.warning(
167 "expected perform_request to receive two positional arguments. "
168 "Got %d",
169 len(args),
170 )
171
172 op_name = span_name_prefix + (url or method or _DEFAULT_OP_NAME)
173
174 doc_id = None
175 search_target = None
176
177 if url:
178 # TODO: This regex-based solution avoids creating an unbounded number of span names, but should be replaced by instrumenting individual Elasticsearch methods instead of Transport.perform_request()
179 # A limitation of the regex is that only the '_doc' mapping type is supported. Mapping types are deprecated since Elasticsearch 7
180 # https://github.com/open-telemetry/opentelemetry-python-contrib/issues/708
181 match = _regex_doc_url.search(url)
182 if match is not None:
183 # Remove the full document ID from the URL
184 doc_span = match.span()
185 op_name = (
186 span_name_prefix
187 + url[: doc_span[0]]
188 + "/_doc/:id"
189 + url[doc_span[1] :]
190 )
191 # Put the document ID in attributes
192 doc_id = match.group(1)
193 match = _regex_search_url.search(url)
194 if match is not None:
195 op_name = span_name_prefix + "/<target>/_search"
196 search_target = match.group(1)
197
198 params = kwargs.get("params", {})
199 body = kwargs.get("body", None)
200
201 with tracer.start_as_current_span(
202 op_name,
203 kind=SpanKind.CLIENT,
204 ) as span:
205
206 if callable(request_hook):
207 request_hook(span, method, url, kwargs)
208
209 if span.is_recording():
210 attributes = {
211 SpanAttributes.DB_SYSTEM: "elasticsearch",
212 }
213 if url:
214 attributes["elasticsearch.url"] = url
215 if method:
216 attributes["elasticsearch.method"] = method
217 if body:
218 attributes[SpanAttributes.DB_STATEMENT] = str(body)
219 if params:
220 attributes["elasticsearch.params"] = str(params)
221 if doc_id:
222 attributes["elasticsearch.id"] = doc_id
223 if search_target:
224 attributes["elasticsearch.target"] = search_target
225 for key, value in attributes.items():
226 span.set_attribute(key, value)
227
228 rv = wrapped(*args, **kwargs)
229 if isinstance(rv, dict) and span.is_recording():
230 for member in _ATTRIBUTES_FROM_RESULT:
231 if member in rv:
232 span.set_attribute(
233 f"elasticsearch.{member}",
234 str(rv[member]),
235 )
236
237 if callable(response_hook):
238 response_hook(span, rv)
239 return rv
240
241 return wrapper
242
[end of instrumentation/opentelemetry-instrumentation-elasticsearch/src/opentelemetry/instrumentation/elasticsearch/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/instrumentation/opentelemetry-instrumentation-elasticsearch/src/opentelemetry/instrumentation/elasticsearch/__init__.py b/instrumentation/opentelemetry-instrumentation-elasticsearch/src/opentelemetry/instrumentation/elasticsearch/__init__.py
--- a/instrumentation/opentelemetry-instrumentation-elasticsearch/src/opentelemetry/instrumentation/elasticsearch/__init__.py
+++ b/instrumentation/opentelemetry-instrumentation-elasticsearch/src/opentelemetry/instrumentation/elasticsearch/__init__.py
@@ -34,7 +34,7 @@
es.get(index='my-index', doc_type='my-type', id=1)
Elasticsearch instrumentation prefixes operation names with the string "Elasticsearch". This
-can be changed to a different string by either setting the `OTEL_PYTHON_ELASTICSEARCH_NAME_PREFIX`
+can be changed to a different string by either setting the OTEL_PYTHON_ELASTICSEARCH_NAME_PREFIX
environment variable or by passing the prefix as an argument to the instrumentor. For example,
@@ -42,16 +42,15 @@
ElasticsearchInstrumentor("my-custom-prefix").instrument()
-
-The `instrument` method accepts the following keyword args:
-
+The instrument() method accepts the following keyword args:
tracer_provider (TracerProvider) - an optional tracer provider
request_hook (Callable) - a function with extra user-defined logic to be performed before performing the request
- this function signature is:
- def request_hook(span: Span, method: str, url: str, kwargs)
+this function signature is:
+def request_hook(span: Span, method: str, url: str, kwargs)
+
response_hook (Callable) - a function with extra user-defined logic to be performed after performing the request
- this function signature is:
- def response_hook(span: Span, response: dict)
+this function signature is:
+def response_hook(span: Span, response: dict)
for example:
|
{"golden_diff": "diff --git a/instrumentation/opentelemetry-instrumentation-elasticsearch/src/opentelemetry/instrumentation/elasticsearch/__init__.py b/instrumentation/opentelemetry-instrumentation-elasticsearch/src/opentelemetry/instrumentation/elasticsearch/__init__.py\n--- a/instrumentation/opentelemetry-instrumentation-elasticsearch/src/opentelemetry/instrumentation/elasticsearch/__init__.py\n+++ b/instrumentation/opentelemetry-instrumentation-elasticsearch/src/opentelemetry/instrumentation/elasticsearch/__init__.py\n@@ -34,7 +34,7 @@\n es.get(index='my-index', doc_type='my-type', id=1)\n \n Elasticsearch instrumentation prefixes operation names with the string \"Elasticsearch\". This\n-can be changed to a different string by either setting the `OTEL_PYTHON_ELASTICSEARCH_NAME_PREFIX`\n+can be changed to a different string by either setting the OTEL_PYTHON_ELASTICSEARCH_NAME_PREFIX\n environment variable or by passing the prefix as an argument to the instrumentor. For example,\n \n \n@@ -42,16 +42,15 @@\n \n ElasticsearchInstrumentor(\"my-custom-prefix\").instrument()\n \n-\n-The `instrument` method accepts the following keyword args:\n-\n+The instrument() method accepts the following keyword args:\n tracer_provider (TracerProvider) - an optional tracer provider\n request_hook (Callable) - a function with extra user-defined logic to be performed before performing the request\n- this function signature is:\n- def request_hook(span: Span, method: str, url: str, kwargs)\n+this function signature is:\n+def request_hook(span: Span, method: str, url: str, kwargs)\n+\n response_hook (Callable) - a function with extra user-defined logic to be performed after performing the request\n- this function signature is:\n- def response_hook(span: Span, response: dict)\n+this function signature is:\n+def response_hook(span: Span, response: dict)\n \n for example:\n", "issue": "Add readthedocs documentation for elasticsearch instrumentation\nPart of [1491](https://github.com/open-telemetry/opentelemetry-python-contrib/issues/1491)\n", "before_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nThis library allows tracing HTTP elasticsearch made by the\n`elasticsearch <https://elasticsearch-py.readthedocs.io/en/master/>`_ library.\n\nUsage\n-----\n\n.. code-block:: python\n\n from opentelemetry.instrumentation.elasticsearch import ElasticsearchInstrumentor\n import elasticsearch\n\n\n # instrument elasticsearch\n ElasticsearchInstrumentor().instrument()\n\n # Using elasticsearch as normal now will automatically generate spans\n es = elasticsearch.Elasticsearch()\n es.index(index='my-index', doc_type='my-type', id=1, body={'my': 'data', 'timestamp': datetime.now()})\n es.get(index='my-index', doc_type='my-type', id=1)\n\nElasticsearch instrumentation prefixes operation names with the string \"Elasticsearch\". 
This\ncan be changed to a different string by either setting the `OTEL_PYTHON_ELASTICSEARCH_NAME_PREFIX`\nenvironment variable or by passing the prefix as an argument to the instrumentor. For example,\n\n\n.. code-block:: python\n\n ElasticsearchInstrumentor(\"my-custom-prefix\").instrument()\n\n\nThe `instrument` method accepts the following keyword args:\n\ntracer_provider (TracerProvider) - an optional tracer provider\nrequest_hook (Callable) - a function with extra user-defined logic to be performed before performing the request\n this function signature is:\n def request_hook(span: Span, method: str, url: str, kwargs)\nresponse_hook (Callable) - a function with extra user-defined logic to be performed after performing the request\n this function signature is:\n def response_hook(span: Span, response: dict)\n\nfor example:\n\n.. code: python\n\n from opentelemetry.instrumentation.elasticsearch import ElasticsearchInstrumentor\n import elasticsearch\n\n def request_hook(span, method, url, kwargs):\n if span and span.is_recording():\n span.set_attribute(\"custom_user_attribute_from_request_hook\", \"some-value\")\n\n def response_hook(span, response):\n if span and span.is_recording():\n span.set_attribute(\"custom_user_attribute_from_response_hook\", \"some-value\")\n\n # instrument elasticsearch with request and response hooks\n ElasticsearchInstrumentor().instrument(request_hook=request_hook, response_hook=response_hook)\n\n # Using elasticsearch as normal now will automatically generate spans,\n # including user custom attributes added from the hooks\n es = elasticsearch.Elasticsearch()\n es.index(index='my-index', doc_type='my-type', id=1, body={'my': 'data', 'timestamp': datetime.now()})\n es.get(index='my-index', doc_type='my-type', id=1)\n\nAPI\n---\n\"\"\"\n\nimport re\nfrom logging import getLogger\nfrom os import environ\nfrom typing import Collection\n\nimport elasticsearch\nimport elasticsearch.exceptions\nfrom wrapt import wrap_function_wrapper as _wrap\n\nfrom opentelemetry.instrumentation.elasticsearch.package import _instruments\nfrom opentelemetry.instrumentation.elasticsearch.version import __version__\nfrom opentelemetry.instrumentation.instrumentor import BaseInstrumentor\nfrom opentelemetry.instrumentation.utils import unwrap\nfrom opentelemetry.semconv.trace import SpanAttributes\nfrom opentelemetry.trace import SpanKind, get_tracer\n\nlogger = getLogger(__name__)\n\n\n# Values to add as tags from the actual\n# payload returned by Elasticsearch, if any.\n_ATTRIBUTES_FROM_RESULT = [\n \"found\",\n \"timed_out\",\n \"took\",\n]\n\n_DEFAULT_OP_NAME = \"request\"\n\n\nclass ElasticsearchInstrumentor(BaseInstrumentor):\n \"\"\"An instrumentor for elasticsearch\n See `BaseInstrumentor`\n \"\"\"\n\n def __init__(self, span_name_prefix=None):\n if not span_name_prefix:\n span_name_prefix = environ.get(\n \"OTEL_PYTHON_ELASTICSEARCH_NAME_PREFIX\",\n \"Elasticsearch\",\n )\n self._span_name_prefix = span_name_prefix.strip()\n super().__init__()\n\n def instrumentation_dependencies(self) -> Collection[str]:\n return _instruments\n\n def _instrument(self, **kwargs):\n \"\"\"\n Instruments Elasticsearch module\n \"\"\"\n tracer_provider = kwargs.get(\"tracer_provider\")\n tracer = get_tracer(__name__, __version__, tracer_provider)\n request_hook = kwargs.get(\"request_hook\")\n response_hook = kwargs.get(\"response_hook\")\n _wrap(\n elasticsearch,\n \"Transport.perform_request\",\n _wrap_perform_request(\n tracer, self._span_name_prefix, request_hook, response_hook\n ),\n 
)\n\n def _uninstrument(self, **kwargs):\n unwrap(elasticsearch.Transport, \"perform_request\")\n\n\n_regex_doc_url = re.compile(r\"/_doc/([^/]+)\")\n\n# search api https://www.elastic.co/guide/en/elasticsearch/reference/current/search-search.html\n_regex_search_url = re.compile(r\"/([^/]+)/_search[/]?\")\n\n\ndef _wrap_perform_request(\n tracer, span_name_prefix, request_hook=None, response_hook=None\n):\n # pylint: disable=R0912,R0914\n def wrapper(wrapped, _, args, kwargs):\n method = url = None\n try:\n method, url, *_ = args\n except IndexError:\n logger.warning(\n \"expected perform_request to receive two positional arguments. \"\n \"Got %d\",\n len(args),\n )\n\n op_name = span_name_prefix + (url or method or _DEFAULT_OP_NAME)\n\n doc_id = None\n search_target = None\n\n if url:\n # TODO: This regex-based solution avoids creating an unbounded number of span names, but should be replaced by instrumenting individual Elasticsearch methods instead of Transport.perform_request()\n # A limitation of the regex is that only the '_doc' mapping type is supported. Mapping types are deprecated since Elasticsearch 7\n # https://github.com/open-telemetry/opentelemetry-python-contrib/issues/708\n match = _regex_doc_url.search(url)\n if match is not None:\n # Remove the full document ID from the URL\n doc_span = match.span()\n op_name = (\n span_name_prefix\n + url[: doc_span[0]]\n + \"/_doc/:id\"\n + url[doc_span[1] :]\n )\n # Put the document ID in attributes\n doc_id = match.group(1)\n match = _regex_search_url.search(url)\n if match is not None:\n op_name = span_name_prefix + \"/<target>/_search\"\n search_target = match.group(1)\n\n params = kwargs.get(\"params\", {})\n body = kwargs.get(\"body\", None)\n\n with tracer.start_as_current_span(\n op_name,\n kind=SpanKind.CLIENT,\n ) as span:\n\n if callable(request_hook):\n request_hook(span, method, url, kwargs)\n\n if span.is_recording():\n attributes = {\n SpanAttributes.DB_SYSTEM: \"elasticsearch\",\n }\n if url:\n attributes[\"elasticsearch.url\"] = url\n if method:\n attributes[\"elasticsearch.method\"] = method\n if body:\n attributes[SpanAttributes.DB_STATEMENT] = str(body)\n if params:\n attributes[\"elasticsearch.params\"] = str(params)\n if doc_id:\n attributes[\"elasticsearch.id\"] = doc_id\n if search_target:\n attributes[\"elasticsearch.target\"] = search_target\n for key, value in attributes.items():\n span.set_attribute(key, value)\n\n rv = wrapped(*args, **kwargs)\n if isinstance(rv, dict) and span.is_recording():\n for member in _ATTRIBUTES_FROM_RESULT:\n if member in rv:\n span.set_attribute(\n f\"elasticsearch.{member}\",\n str(rv[member]),\n )\n\n if callable(response_hook):\n response_hook(span, rv)\n return rv\n\n return wrapper\n", "path": "instrumentation/opentelemetry-instrumentation-elasticsearch/src/opentelemetry/instrumentation/elasticsearch/__init__.py"}]}
| 3,007 | 418 |
gh_patches_debug_63641
|
rasdani/github-patches
|
git_diff
|
bookwyrm-social__bookwyrm-566
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Feature request - block quote formatting in reviews
I published [a review](https://bookwyrm.social/user/darius/review/14497) just now with a markdown-style block quote in it. The second-to-last paragraph is formatted, at least when I wrote it in the dialog box, like this:
```
> "I was expecting to go to prison for the rest of my life," Ellsberg wryly said later, "and Ithiel wanted to know whether I understood that I'd never get another dollar from the federal government."
```
But it renders as regular text. I'm guessing a lot of reviewers will want to excerpt their books, so block quotes seem like a good thing to include!
I would expect it to render more like it does here on Github:
> "I was expecting to go to prison for the rest of my life," Ellsberg wryly said later, "and Ithiel wanted to know whether I understood that I'd never get another dollar from the federal government."
</issue>
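The behaviour in the report is explained by the whitelist in `InputHtmlParser` below: any `<blockquote>` element is dropped because the tag is not in `allowed_tags`, leaving only the quoted text. A small sketch of the difference, assuming the class as shown and extending the whitelist at runtime purely for illustration:

```python
from bookwyrm.sanitize_html import InputHtmlParser

html = '<p>Great book.</p><blockquote>Quoted passage.</blockquote>'

stripped = InputHtmlParser()
stripped.feed(html)
print(stripped.get_output())  # <p>Great book.</p>Quoted passage.

kept = InputHtmlParser()
kept.allowed_tags.append('blockquote')  # extend the whitelist, as the issue suggests
kept.feed(html)
print(kept.get_output())  # <p>Great book.</p><blockquote>Quoted passage.</blockquote>
```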
<code>
[start of bookwyrm/sanitize_html.py]
1 ''' html parser to clean up incoming text from unknown sources '''
2 from html.parser import HTMLParser
3
4 class InputHtmlParser(HTMLParser):#pylint: disable=abstract-method
5 ''' Removes any html that isn't allowed_tagsed from a block '''
6
7 def __init__(self):
8 HTMLParser.__init__(self)
9 self.allowed_tags = [
10 'p', 'br',
11 'b', 'i', 'strong', 'em', 'pre',
12 'a', 'span', 'ul', 'ol', 'li'
13 ]
14 self.tag_stack = []
15 self.output = []
16 # if the html appears invalid, we just won't allow any at all
17 self.allow_html = True
18
19
20 def handle_starttag(self, tag, attrs):
21 ''' check if the tag is valid '''
22 if self.allow_html and tag in self.allowed_tags:
23 self.output.append(('tag', self.get_starttag_text()))
24 self.tag_stack.append(tag)
25 else:
26 self.output.append(('data', ''))
27
28
29 def handle_endtag(self, tag):
30 ''' keep the close tag '''
31 if not self.allow_html or tag not in self.allowed_tags:
32 self.output.append(('data', ''))
33 return
34
35 if not self.tag_stack or self.tag_stack[-1] != tag:
36 # the end tag doesn't match the most recent start tag
37 self.allow_html = False
38 self.output.append(('data', ''))
39 return
40
41 self.tag_stack = self.tag_stack[:-1]
42 self.output.append(('tag', '</%s>' % tag))
43
44
45 def handle_data(self, data):
46 ''' extract the answer, if we're in an answer tag '''
47 self.output.append(('data', data))
48
49
50 def get_output(self):
51 ''' convert the output from a list of tuples to a string '''
52 if self.tag_stack:
53 self.allow_html = False
54 if not self.allow_html:
55 return ''.join(v for (k, v) in self.output if k == 'data')
56 return ''.join(v for (k, v) in self.output)
57
[end of bookwyrm/sanitize_html.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/bookwyrm/sanitize_html.py b/bookwyrm/sanitize_html.py
--- a/bookwyrm/sanitize_html.py
+++ b/bookwyrm/sanitize_html.py
@@ -7,7 +7,7 @@
def __init__(self):
HTMLParser.__init__(self)
self.allowed_tags = [
- 'p', 'br',
+ 'p', 'blockquote', 'br',
'b', 'i', 'strong', 'em', 'pre',
'a', 'span', 'ul', 'ol', 'li'
]
|
{"golden_diff": "diff --git a/bookwyrm/sanitize_html.py b/bookwyrm/sanitize_html.py\n--- a/bookwyrm/sanitize_html.py\n+++ b/bookwyrm/sanitize_html.py\n@@ -7,7 +7,7 @@\n def __init__(self):\n HTMLParser.__init__(self)\n self.allowed_tags = [\n- 'p', 'br',\n+ 'p', 'blockquote', 'br',\n 'b', 'i', 'strong', 'em', 'pre',\n 'a', 'span', 'ul', 'ol', 'li'\n ]\n", "issue": "Feature request - block quote formatting in reviews\nI published [a review](https://bookwyrm.social/user/darius/review/14497) just now with a markdown-style block quote in it. The second-to-last paragraph is formatted, at least when I wrote it in the dialog box, like this:\r\n\r\n```\r\n> \"I was expecting to go to prison for the rest of my life,\" Ellsberg wryly said later, \"and Ithiel wanted to know whether I understood that I'd never get another dollar from the federal government.\"\r\n```\r\n\r\nBut it renders as regular text. I'm guessing a lot of reviewers will want to excerpt their books, so block quotes seem like a good thing to include!\r\n\r\nI would expect it to render more like it does here on Github:\r\n\r\n> \"I was expecting to go to prison for the rest of my life,\" Ellsberg wryly said later, \"and Ithiel wanted to know whether I understood that I'd never get another dollar from the federal government.\"\n", "before_files": [{"content": "''' html parser to clean up incoming text from unknown sources '''\nfrom html.parser import HTMLParser\n\nclass InputHtmlParser(HTMLParser):#pylint: disable=abstract-method\n ''' Removes any html that isn't allowed_tagsed from a block '''\n\n def __init__(self):\n HTMLParser.__init__(self)\n self.allowed_tags = [\n 'p', 'br',\n 'b', 'i', 'strong', 'em', 'pre',\n 'a', 'span', 'ul', 'ol', 'li'\n ]\n self.tag_stack = []\n self.output = []\n # if the html appears invalid, we just won't allow any at all\n self.allow_html = True\n\n\n def handle_starttag(self, tag, attrs):\n ''' check if the tag is valid '''\n if self.allow_html and tag in self.allowed_tags:\n self.output.append(('tag', self.get_starttag_text()))\n self.tag_stack.append(tag)\n else:\n self.output.append(('data', ''))\n\n\n def handle_endtag(self, tag):\n ''' keep the close tag '''\n if not self.allow_html or tag not in self.allowed_tags:\n self.output.append(('data', ''))\n return\n\n if not self.tag_stack or self.tag_stack[-1] != tag:\n # the end tag doesn't match the most recent start tag\n self.allow_html = False\n self.output.append(('data', ''))\n return\n\n self.tag_stack = self.tag_stack[:-1]\n self.output.append(('tag', '</%s>' % tag))\n\n\n def handle_data(self, data):\n ''' extract the answer, if we're in an answer tag '''\n self.output.append(('data', data))\n\n\n def get_output(self):\n ''' convert the output from a list of tuples to a string '''\n if self.tag_stack:\n self.allow_html = False\n if not self.allow_html:\n return ''.join(v for (k, v) in self.output if k == 'data')\n return ''.join(v for (k, v) in self.output)\n", "path": "bookwyrm/sanitize_html.py"}]}
| 1,303 | 124 |
gh_patches_debug_20702
|
rasdani/github-patches
|
git_diff
|
python-discord__bot-1206
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
feature suggestion: add url in title instead of description in !pep command
Currently the link produced by the !pep command is placed in the embed description, but with the discord.Embed.url parameter the title itself can become a link (as shown in the second image).


</issue>
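The suggestion maps onto a one-line change in how the embed is built: discord.py's `Embed` accepts a `url` keyword that turns the title into a hyperlink. A minimal sketch, assuming the discord.py Embed API used by the cog and PEP 8 as a stand-in value:

```python
from discord import Embed

pep_number = 8
base_pep_url = "http://www.python.org/dev/peps/pep-"

# Current behaviour: the link is a separate markdown line in the description.
before = Embed(
    title=f"**PEP {pep_number} - Style Guide for Python Code**",
    description=f"[Link]({base_pep_url}{pep_number:04})",
)

# Suggested behaviour: Embed.url makes the title itself clickable.
after = Embed(
    title=f"**PEP {pep_number} - Style Guide for Python Code**",
    url=f"{base_pep_url}{pep_number:04}",
)
```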
<code>
[start of bot/exts/utils/utils.py]
1 import difflib
2 import logging
3 import re
4 import unicodedata
5 from email.parser import HeaderParser
6 from io import StringIO
7 from typing import Tuple, Union
8
9 from discord import Colour, Embed, utils
10 from discord.ext.commands import BadArgument, Cog, Context, clean_content, command, has_any_role
11
12 from bot.bot import Bot
13 from bot.constants import Channels, MODERATION_ROLES, STAFF_ROLES
14 from bot.decorators import in_whitelist
15 from bot.pagination import LinePaginator
16 from bot.utils import messages
17
18 log = logging.getLogger(__name__)
19
20 ZEN_OF_PYTHON = """\
21 Beautiful is better than ugly.
22 Explicit is better than implicit.
23 Simple is better than complex.
24 Complex is better than complicated.
25 Flat is better than nested.
26 Sparse is better than dense.
27 Readability counts.
28 Special cases aren't special enough to break the rules.
29 Although practicality beats purity.
30 Errors should never pass silently.
31 Unless explicitly silenced.
32 In the face of ambiguity, refuse the temptation to guess.
33 There should be one-- and preferably only one --obvious way to do it.
34 Although that way may not be obvious at first unless you're Dutch.
35 Now is better than never.
36 Although never is often better than *right* now.
37 If the implementation is hard to explain, it's a bad idea.
38 If the implementation is easy to explain, it may be a good idea.
39 Namespaces are one honking great idea -- let's do more of those!
40 """
41
42 ICON_URL = "https://www.python.org/static/opengraph-icon-200x200.png"
43
44
45 class Utils(Cog):
46 """A selection of utilities which don't have a clear category."""
47
48 def __init__(self, bot: Bot):
49 self.bot = bot
50
51 self.base_pep_url = "http://www.python.org/dev/peps/pep-"
52 self.base_github_pep_url = "https://raw.githubusercontent.com/python/peps/master/pep-"
53
54 @command(name='pep', aliases=('get_pep', 'p'))
55 async def pep_command(self, ctx: Context, pep_number: str) -> None:
56 """Fetches information about a PEP and sends it to the channel."""
57 if pep_number.isdigit():
58 pep_number = int(pep_number)
59 else:
60 await ctx.send_help(ctx.command)
61 return
62
63 # Handle PEP 0 directly because it's not in .rst or .txt so it can't be accessed like other PEPs.
64 if pep_number == 0:
65 return await self.send_pep_zero(ctx)
66
67 possible_extensions = ['.txt', '.rst']
68 found_pep = False
69 for extension in possible_extensions:
70 # Attempt to fetch the PEP
71 pep_url = f"{self.base_github_pep_url}{pep_number:04}{extension}"
72 log.trace(f"Requesting PEP {pep_number} with {pep_url}")
73 response = await self.bot.http_session.get(pep_url)
74
75 if response.status == 200:
76 log.trace("PEP found")
77 found_pep = True
78
79 pep_content = await response.text()
80
81 # Taken from https://github.com/python/peps/blob/master/pep0/pep.py#L179
82 pep_header = HeaderParser().parse(StringIO(pep_content))
83
84 # Assemble the embed
85 pep_embed = Embed(
86 title=f"**PEP {pep_number} - {pep_header['Title']}**",
87 description=f"[Link]({self.base_pep_url}{pep_number:04})",
88 )
89
90 pep_embed.set_thumbnail(url=ICON_URL)
91
92 # Add the interesting information
93 fields_to_check = ("Status", "Python-Version", "Created", "Type")
94 for field in fields_to_check:
95 # Check for a PEP metadata field that is present but has an empty value
96 # embed field values can't contain an empty string
97 if pep_header.get(field, ""):
98 pep_embed.add_field(name=field, value=pep_header[field])
99
100 elif response.status != 404:
101 # any response except 200 and 404 is expected
102 found_pep = True # actually not, but it's easier to display this way
103 log.trace(f"The user requested PEP {pep_number}, but the response had an unexpected status code: "
104 f"{response.status}.\n{response.text}")
105
106 error_message = "Unexpected HTTP error during PEP search. Please let us know."
107 pep_embed = Embed(title="Unexpected error", description=error_message)
108 pep_embed.colour = Colour.red()
109 break
110
111 if not found_pep:
112 log.trace("PEP was not found")
113 not_found = f"PEP {pep_number} does not exist."
114 pep_embed = Embed(title="PEP not found", description=not_found)
115 pep_embed.colour = Colour.red()
116
117 await ctx.message.channel.send(embed=pep_embed)
118
119 @command()
120 @in_whitelist(channels=(Channels.bot_commands,), roles=STAFF_ROLES)
121 async def charinfo(self, ctx: Context, *, characters: str) -> None:
122 """Shows you information on up to 50 unicode characters."""
123 match = re.match(r"<(a?):(\w+):(\d+)>", characters)
124 if match:
125 return await messages.send_denial(
126 ctx,
127 "**Non-Character Detected**\n"
128 "Only unicode characters can be processed, but a custom Discord emoji "
129 "was found. Please remove it and try again."
130 )
131
132 if len(characters) > 50:
133 return await messages.send_denial(ctx, f"Too many characters ({len(characters)}/50)")
134
135 def get_info(char: str) -> Tuple[str, str]:
136 digit = f"{ord(char):x}"
137 if len(digit) <= 4:
138 u_code = f"\\u{digit:>04}"
139 else:
140 u_code = f"\\U{digit:>08}"
141 url = f"https://www.compart.com/en/unicode/U+{digit:>04}"
142 name = f"[{unicodedata.name(char, '')}]({url})"
143 info = f"`{u_code.ljust(10)}`: {name} - {utils.escape_markdown(char)}"
144 return info, u_code
145
146 char_list, raw_list = zip(*(get_info(c) for c in characters))
147 embed = Embed().set_author(name="Character Info")
148
149 if len(characters) > 1:
150 # Maximum length possible is 502 out of 1024, so there's no need to truncate.
151 embed.add_field(name='Full Raw Text', value=f"`{''.join(raw_list)}`", inline=False)
152
153 await LinePaginator.paginate(char_list, ctx, embed, max_lines=10, max_size=2000, empty=False)
154
155 @command()
156 async def zen(self, ctx: Context, *, search_value: Union[int, str, None] = None) -> None:
157 """
158 Show the Zen of Python.
159
160 Without any arguments, the full Zen will be produced.
161 If an integer is provided, the line with that index will be produced.
162 If a string is provided, the line which matches best will be produced.
163 """
164 embed = Embed(
165 colour=Colour.blurple(),
166 title="The Zen of Python",
167 description=ZEN_OF_PYTHON
168 )
169
170 if search_value is None:
171 embed.title += ", by Tim Peters"
172 await ctx.send(embed=embed)
173 return
174
175 zen_lines = ZEN_OF_PYTHON.splitlines()
176
177 # handle if it's an index int
178 if isinstance(search_value, int):
179 upper_bound = len(zen_lines) - 1
180 lower_bound = -1 * upper_bound
181 if not (lower_bound <= search_value <= upper_bound):
182 raise BadArgument(f"Please provide an index between {lower_bound} and {upper_bound}.")
183
184 embed.title += f" (line {search_value % len(zen_lines)}):"
185 embed.description = zen_lines[search_value]
186 await ctx.send(embed=embed)
187 return
188
189 # Try to handle first exact word due difflib.SequenceMatched may use some other similar word instead
190 # exact word.
191 for i, line in enumerate(zen_lines):
192 for word in line.split():
193 if word.lower() == search_value.lower():
194 embed.title += f" (line {i}):"
195 embed.description = line
196 await ctx.send(embed=embed)
197 return
198
199 # handle if it's a search string and not exact word
200 matcher = difflib.SequenceMatcher(None, search_value.lower())
201
202 best_match = ""
203 match_index = 0
204 best_ratio = 0
205
206 for index, line in enumerate(zen_lines):
207 matcher.set_seq2(line.lower())
208
209 # the match ratio needs to be adjusted because, naturally,
210 # longer lines will have worse ratios than shorter lines when
211 # fuzzy searching for keywords. this seems to work okay.
212 adjusted_ratio = (len(line) - 5) ** 0.5 * matcher.ratio()
213
214 if adjusted_ratio > best_ratio:
215 best_ratio = adjusted_ratio
216 best_match = line
217 match_index = index
218
219 if not best_match:
220 raise BadArgument("I didn't get a match! Please try again with a different search term.")
221
222 embed.title += f" (line {match_index}):"
223 embed.description = best_match
224 await ctx.send(embed=embed)
225
226 @command(aliases=("poll",))
227 @has_any_role(*MODERATION_ROLES)
228 async def vote(self, ctx: Context, title: clean_content(fix_channel_mentions=True), *options: str) -> None:
229 """
230 Build a quick voting poll with matching reactions with the provided options.
231
232 A maximum of 20 options can be provided, as Discord supports a max of 20
233 reactions on a single message.
234 """
235 if len(title) > 256:
236 raise BadArgument("The title cannot be longer than 256 characters.")
237 if len(options) < 2:
238 raise BadArgument("Please provide at least 2 options.")
239 if len(options) > 20:
240 raise BadArgument("I can only handle 20 options!")
241
242 codepoint_start = 127462 # represents "regional_indicator_a" unicode value
243 options = {chr(i): f"{chr(i)} - {v}" for i, v in enumerate(options, start=codepoint_start)}
244 embed = Embed(title=title, description="\n".join(options.values()))
245 message = await ctx.send(embed=embed)
246 for reaction in options:
247 await message.add_reaction(reaction)
248
249 async def send_pep_zero(self, ctx: Context) -> None:
250 """Send information about PEP 0."""
251 pep_embed = Embed(
252 title="**PEP 0 - Index of Python Enhancement Proposals (PEPs)**",
253 description="[Link](https://www.python.org/dev/peps/)"
254 )
255 pep_embed.set_thumbnail(url=ICON_URL)
256 pep_embed.add_field(name="Status", value="Active")
257 pep_embed.add_field(name="Created", value="13-Jul-2000")
258 pep_embed.add_field(name="Type", value="Informational")
259
260 await ctx.send(embed=pep_embed)
261
262
263 def setup(bot: Bot) -> None:
264 """Load the Utils cog."""
265 bot.add_cog(Utils(bot))
266
[end of bot/exts/utils/utils.py]
</code>
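The length adjustment in the `zen` fuzzy search above (`(len(line) - 5) ** 0.5 * matcher.ratio()`) exists because raw `difflib` ratios penalise long lines when matching a short keyword. A small sketch of the effect, with made-up inputs:

```python
import difflib

query = "beautiful"
matcher = difflib.SequenceMatcher(None, query.lower())

for line in ("Beautiful is better than ugly.",
             "If the implementation is hard to explain, it's a bad idea."):
    matcher.set_seq2(line.lower())
    raw = matcher.ratio()
    # Same adjustment as in the cog: compensate for line length.
    adjusted = (len(line) - 5) ** 0.5 * raw
    print(f"raw={raw:.3f} adjusted={adjusted:.3f} :: {line}")
```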
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/bot/exts/utils/utils.py b/bot/exts/utils/utils.py
--- a/bot/exts/utils/utils.py
+++ b/bot/exts/utils/utils.py
@@ -84,7 +84,7 @@
# Assemble the embed
pep_embed = Embed(
title=f"**PEP {pep_number} - {pep_header['Title']}**",
- description=f"[Link]({self.base_pep_url}{pep_number:04})",
+ url=f"{self.base_pep_url}{pep_number:04}"
)
pep_embed.set_thumbnail(url=ICON_URL)
@@ -250,7 +250,7 @@
"""Send information about PEP 0."""
pep_embed = Embed(
title="**PEP 0 - Index of Python Enhancement Proposals (PEPs)**",
- description="[Link](https://www.python.org/dev/peps/)"
+ url="https://www.python.org/dev/peps/"
)
pep_embed.set_thumbnail(url=ICON_URL)
pep_embed.add_field(name="Status", value="Active")
|
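The diff above moves the PEP link out of the embed description and into the embed's `url` field, which makes the embed title itself clickable. A minimal sketch of the difference, assuming discord.py's `Embed` API (the PEP number and URL here are illustrative):

```python
from discord import Embed

# Before: the hyperlink lives in the description body as markdown.
before = Embed(
    title="**PEP 8 - Style Guide for Python Code**",
    description="[Link](https://www.python.org/dev/peps/pep-0008)",
)

# After: the title itself becomes the hyperlink.
after = Embed(
    title="**PEP 8 - Style Guide for Python Code**",
    url="https://www.python.org/dev/peps/pep-0008",
)
```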
{"golden_diff": "diff --git a/bot/exts/utils/utils.py b/bot/exts/utils/utils.py\n--- a/bot/exts/utils/utils.py\n+++ b/bot/exts/utils/utils.py\n@@ -84,7 +84,7 @@\n # Assemble the embed\n pep_embed = Embed(\n title=f\"**PEP {pep_number} - {pep_header['Title']}**\",\n- description=f\"[Link]({self.base_pep_url}{pep_number:04})\",\n+ url=f\"{self.base_pep_url}{pep_number:04}\"\n )\n \n pep_embed.set_thumbnail(url=ICON_URL)\n@@ -250,7 +250,7 @@\n \"\"\"Send information about PEP 0.\"\"\"\n pep_embed = Embed(\n title=\"**PEP 0 - Index of Python Enhancement Proposals (PEPs)**\",\n- description=\"[Link](https://www.python.org/dev/peps/)\"\n+ url=\"https://www.python.org/dev/peps/\"\n )\n pep_embed.set_thumbnail(url=ICON_URL)\n pep_embed.add_field(name=\"Status\", value=\"Active\")\n", "issue": "feature suggestion: add url in title instead of description in !pep command\ncurrently the link when you use the !pep command is located in the description, but with the discord.Embed.url parameter, the title is now able to become a link (as shown in second image)\r\n\r\n\r\n\r\n\n", "before_files": [{"content": "import difflib\nimport logging\nimport re\nimport unicodedata\nfrom email.parser import HeaderParser\nfrom io import StringIO\nfrom typing import Tuple, Union\n\nfrom discord import Colour, Embed, utils\nfrom discord.ext.commands import BadArgument, Cog, Context, clean_content, command, has_any_role\n\nfrom bot.bot import Bot\nfrom bot.constants import Channels, MODERATION_ROLES, STAFF_ROLES\nfrom bot.decorators import in_whitelist\nfrom bot.pagination import LinePaginator\nfrom bot.utils import messages\n\nlog = logging.getLogger(__name__)\n\nZEN_OF_PYTHON = \"\"\"\\\nBeautiful is better than ugly.\nExplicit is better than implicit.\nSimple is better than complex.\nComplex is better than complicated.\nFlat is better than nested.\nSparse is better than dense.\nReadability counts.\nSpecial cases aren't special enough to break the rules.\nAlthough practicality beats purity.\nErrors should never pass silently.\nUnless explicitly silenced.\nIn the face of ambiguity, refuse the temptation to guess.\nThere should be one-- and preferably only one --obvious way to do it.\nAlthough that way may not be obvious at first unless you're Dutch.\nNow is better than never.\nAlthough never is often better than *right* now.\nIf the implementation is hard to explain, it's a bad idea.\nIf the implementation is easy to explain, it may be a good idea.\nNamespaces are one honking great idea -- let's do more of those!\n\"\"\"\n\nICON_URL = \"https://www.python.org/static/opengraph-icon-200x200.png\"\n\n\nclass Utils(Cog):\n \"\"\"A selection of utilities which don't have a clear category.\"\"\"\n\n def __init__(self, bot: Bot):\n self.bot = bot\n\n self.base_pep_url = \"http://www.python.org/dev/peps/pep-\"\n self.base_github_pep_url = \"https://raw.githubusercontent.com/python/peps/master/pep-\"\n\n @command(name='pep', aliases=('get_pep', 'p'))\n async def pep_command(self, ctx: Context, pep_number: str) -> None:\n \"\"\"Fetches information about a PEP and sends it to the channel.\"\"\"\n if pep_number.isdigit():\n pep_number = int(pep_number)\n else:\n await ctx.send_help(ctx.command)\n return\n\n # Handle PEP 0 directly because it's not in .rst or .txt so it can't be accessed like other PEPs.\n if pep_number == 0:\n return await self.send_pep_zero(ctx)\n\n possible_extensions = ['.txt', '.rst']\n found_pep = False\n for extension in possible_extensions:\n # Attempt to fetch the PEP\n pep_url = 
f\"{self.base_github_pep_url}{pep_number:04}{extension}\"\n log.trace(f\"Requesting PEP {pep_number} with {pep_url}\")\n response = await self.bot.http_session.get(pep_url)\n\n if response.status == 200:\n log.trace(\"PEP found\")\n found_pep = True\n\n pep_content = await response.text()\n\n # Taken from https://github.com/python/peps/blob/master/pep0/pep.py#L179\n pep_header = HeaderParser().parse(StringIO(pep_content))\n\n # Assemble the embed\n pep_embed = Embed(\n title=f\"**PEP {pep_number} - {pep_header['Title']}**\",\n description=f\"[Link]({self.base_pep_url}{pep_number:04})\",\n )\n\n pep_embed.set_thumbnail(url=ICON_URL)\n\n # Add the interesting information\n fields_to_check = (\"Status\", \"Python-Version\", \"Created\", \"Type\")\n for field in fields_to_check:\n # Check for a PEP metadata field that is present but has an empty value\n # embed field values can't contain an empty string\n if pep_header.get(field, \"\"):\n pep_embed.add_field(name=field, value=pep_header[field])\n\n elif response.status != 404:\n # any response except 200 and 404 is expected\n found_pep = True # actually not, but it's easier to display this way\n log.trace(f\"The user requested PEP {pep_number}, but the response had an unexpected status code: \"\n f\"{response.status}.\\n{response.text}\")\n\n error_message = \"Unexpected HTTP error during PEP search. Please let us know.\"\n pep_embed = Embed(title=\"Unexpected error\", description=error_message)\n pep_embed.colour = Colour.red()\n break\n\n if not found_pep:\n log.trace(\"PEP was not found\")\n not_found = f\"PEP {pep_number} does not exist.\"\n pep_embed = Embed(title=\"PEP not found\", description=not_found)\n pep_embed.colour = Colour.red()\n\n await ctx.message.channel.send(embed=pep_embed)\n\n @command()\n @in_whitelist(channels=(Channels.bot_commands,), roles=STAFF_ROLES)\n async def charinfo(self, ctx: Context, *, characters: str) -> None:\n \"\"\"Shows you information on up to 50 unicode characters.\"\"\"\n match = re.match(r\"<(a?):(\\w+):(\\d+)>\", characters)\n if match:\n return await messages.send_denial(\n ctx,\n \"**Non-Character Detected**\\n\"\n \"Only unicode characters can be processed, but a custom Discord emoji \"\n \"was found. 
Please remove it and try again.\"\n )\n\n if len(characters) > 50:\n return await messages.send_denial(ctx, f\"Too many characters ({len(characters)}/50)\")\n\n def get_info(char: str) -> Tuple[str, str]:\n digit = f\"{ord(char):x}\"\n if len(digit) <= 4:\n u_code = f\"\\\\u{digit:>04}\"\n else:\n u_code = f\"\\\\U{digit:>08}\"\n url = f\"https://www.compart.com/en/unicode/U+{digit:>04}\"\n name = f\"[{unicodedata.name(char, '')}]({url})\"\n info = f\"`{u_code.ljust(10)}`: {name} - {utils.escape_markdown(char)}\"\n return info, u_code\n\n char_list, raw_list = zip(*(get_info(c) for c in characters))\n embed = Embed().set_author(name=\"Character Info\")\n\n if len(characters) > 1:\n # Maximum length possible is 502 out of 1024, so there's no need to truncate.\n embed.add_field(name='Full Raw Text', value=f\"`{''.join(raw_list)}`\", inline=False)\n\n await LinePaginator.paginate(char_list, ctx, embed, max_lines=10, max_size=2000, empty=False)\n\n @command()\n async def zen(self, ctx: Context, *, search_value: Union[int, str, None] = None) -> None:\n \"\"\"\n Show the Zen of Python.\n\n Without any arguments, the full Zen will be produced.\n If an integer is provided, the line with that index will be produced.\n If a string is provided, the line which matches best will be produced.\n \"\"\"\n embed = Embed(\n colour=Colour.blurple(),\n title=\"The Zen of Python\",\n description=ZEN_OF_PYTHON\n )\n\n if search_value is None:\n embed.title += \", by Tim Peters\"\n await ctx.send(embed=embed)\n return\n\n zen_lines = ZEN_OF_PYTHON.splitlines()\n\n # handle if it's an index int\n if isinstance(search_value, int):\n upper_bound = len(zen_lines) - 1\n lower_bound = -1 * upper_bound\n if not (lower_bound <= search_value <= upper_bound):\n raise BadArgument(f\"Please provide an index between {lower_bound} and {upper_bound}.\")\n\n embed.title += f\" (line {search_value % len(zen_lines)}):\"\n embed.description = zen_lines[search_value]\n await ctx.send(embed=embed)\n return\n\n # Try to handle first exact word due difflib.SequenceMatched may use some other similar word instead\n # exact word.\n for i, line in enumerate(zen_lines):\n for word in line.split():\n if word.lower() == search_value.lower():\n embed.title += f\" (line {i}):\"\n embed.description = line\n await ctx.send(embed=embed)\n return\n\n # handle if it's a search string and not exact word\n matcher = difflib.SequenceMatcher(None, search_value.lower())\n\n best_match = \"\"\n match_index = 0\n best_ratio = 0\n\n for index, line in enumerate(zen_lines):\n matcher.set_seq2(line.lower())\n\n # the match ratio needs to be adjusted because, naturally,\n # longer lines will have worse ratios than shorter lines when\n # fuzzy searching for keywords. this seems to work okay.\n adjusted_ratio = (len(line) - 5) ** 0.5 * matcher.ratio()\n\n if adjusted_ratio > best_ratio:\n best_ratio = adjusted_ratio\n best_match = line\n match_index = index\n\n if not best_match:\n raise BadArgument(\"I didn't get a match! 
Please try again with a different search term.\")\n\n embed.title += f\" (line {match_index}):\"\n embed.description = best_match\n await ctx.send(embed=embed)\n\n @command(aliases=(\"poll\",))\n @has_any_role(*MODERATION_ROLES)\n async def vote(self, ctx: Context, title: clean_content(fix_channel_mentions=True), *options: str) -> None:\n \"\"\"\n Build a quick voting poll with matching reactions with the provided options.\n\n A maximum of 20 options can be provided, as Discord supports a max of 20\n reactions on a single message.\n \"\"\"\n if len(title) > 256:\n raise BadArgument(\"The title cannot be longer than 256 characters.\")\n if len(options) < 2:\n raise BadArgument(\"Please provide at least 2 options.\")\n if len(options) > 20:\n raise BadArgument(\"I can only handle 20 options!\")\n\n codepoint_start = 127462 # represents \"regional_indicator_a\" unicode value\n options = {chr(i): f\"{chr(i)} - {v}\" for i, v in enumerate(options, start=codepoint_start)}\n embed = Embed(title=title, description=\"\\n\".join(options.values()))\n message = await ctx.send(embed=embed)\n for reaction in options:\n await message.add_reaction(reaction)\n\n async def send_pep_zero(self, ctx: Context) -> None:\n \"\"\"Send information about PEP 0.\"\"\"\n pep_embed = Embed(\n title=\"**PEP 0 - Index of Python Enhancement Proposals (PEPs)**\",\n description=\"[Link](https://www.python.org/dev/peps/)\"\n )\n pep_embed.set_thumbnail(url=ICON_URL)\n pep_embed.add_field(name=\"Status\", value=\"Active\")\n pep_embed.add_field(name=\"Created\", value=\"13-Jul-2000\")\n pep_embed.add_field(name=\"Type\", value=\"Informational\")\n\n await ctx.send(embed=pep_embed)\n\n\ndef setup(bot: Bot) -> None:\n \"\"\"Load the Utils cog.\"\"\"\n bot.add_cog(Utils(bot))\n", "path": "bot/exts/utils/utils.py"}]}
| 3,925 | 246 |
gh_patches_debug_21318
|
rasdani/github-patches
|
git_diff
|
openai__gym-1400
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
why MultiDiscrete dtype=int8
MultiDiscrete dtype is hardcoded as int8.
Is this a bug? What happens when we need more than 256 categories?
```
space = MultiDiscrete([7, 1024])
space.sample()
```
https://github.com/openai/gym/blob/e944885e3b31a10fb6973093b39ff7682ef3aa3d/gym/spaces/multi_discrete.py#L10
</issue>
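For context on why the dtype matters: with a small integer dtype, a count such as 1024 cannot be represented and wraps around when cast, so a space like `MultiDiscrete([7, 1024])` could never cover its full range. A minimal sketch, assuming standard NumPy casting behaviour:

```python
import numpy as np

nvec = np.array([7, 1024])

# Casting to a small dtype silently wraps the out-of-range count: 1024 -> 0.
print(np.iinfo(np.int8).max)   # 127
print(nvec.astype(np.int8))    # [7 0]

# A 64-bit dtype (what the accepted fix switches to) keeps the counts intact.
print(nvec.astype(np.int64))   # [   7 1024]
```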
<code>
[start of gym/spaces/multi_discrete.py]
1 import gym
2 import numpy as np
3 from .space import Space
4
5
6 class MultiDiscrete(Space):
7 """
8 - The multi-discrete action space consists of a series of discrete action spaces with different number of actions in eachs
9 - It is useful to represent game controllers or keyboards where each key can be represented as a discrete action space
10 - It is parametrized by passing an array of positive integers specifying number of actions for each discrete action space
11
12 Note: A value of 0 always need to represent the NOOP action.
13
14 e.g. Nintendo Game Controller
15 - Can be conceptualized as 3 discrete action spaces:
16
17 1) Arrow Keys: Discrete 5 - NOOP[0], UP[1], RIGHT[2], DOWN[3], LEFT[4] - params: min: 0, max: 4
18 2) Button A: Discrete 2 - NOOP[0], Pressed[1] - params: min: 0, max: 1
19 3) Button B: Discrete 2 - NOOP[0], Pressed[1] - params: min: 0, max: 1
20
21 - Can be initialized as
22
23 MultiDiscrete([ 5, 2, 2 ])
24
25 """
26 def __init__(self, nvec):
27
28 """
29 nvec: vector of counts of each categorical variable
30 """
31 assert (np.array(nvec) > 0).all(), 'nvec (counts) have to be positive'
32 self.nvec = np.asarray(nvec, dtype=np.uint32)
33
34 super(MultiDiscrete, self).__init__(self.nvec.shape, np.uint32)
35 self.np_random = np.random.RandomState()
36
37 def seed(self, seed):
38 self.np_random.seed(seed)
39
40 def sample(self):
41 return (self.np_random.random_sample(self.nvec.shape) * self.nvec).astype(self.dtype)
42
43 def contains(self, x):
44 # if nvec is uint32 and space dtype is uint32, then 0 <= x < self.nvec guarantees that x
45 # is within correct bounds for space dtype (even though x does not have to be unsigned)
46 return (0 <= x).all() and (x < self.nvec).all()
47
48 def to_jsonable(self, sample_n):
49 return [sample.tolist() for sample in sample_n]
50
51 def from_jsonable(self, sample_n):
52 return np.array(sample_n)
53
54 def __repr__(self):
55 return "MultiDiscrete({})".format(self.nvec)
56
57 def __eq__(self, other):
58 return np.all(self.nvec == other.nvec)
59
[end of gym/spaces/multi_discrete.py]
[start of gym/spaces/space.py]
1 import numpy as np
2
3
4 class Space(object):
5 """Defines the observation and action spaces, so you can write generic
6 code that applies to any Env. For example, you can choose a random
7 action.
8 """
9 def __init__(self, shape=None, dtype=None):
10 import numpy as np # takes about 300-400ms to import, so we load lazily
11 self.shape = None if shape is None else tuple(shape)
12 self.dtype = None if dtype is None else np.dtype(dtype)
13
14 def sample(self):
15 """
16 Uniformly randomly sample a random element of this space
17 """
18 raise NotImplementedError
19
20 def seed(self, seed):
21 """Set the seed for this space's pseudo-random number generator. """
22 raise NotImplementedError
23
24 def contains(self, x):
25 """
26 Return boolean specifying if x is a valid
27 member of this space
28 """
29 raise NotImplementedError
30
31 def __contains__(self, x):
32 return self.contains(x)
33
34 def to_jsonable(self, sample_n):
35 """Convert a batch of samples from this space to a JSONable data type."""
36 # By default, assume identity is JSONable
37 return sample_n
38
39 def from_jsonable(self, sample_n):
40 """Convert a JSONable data type to a batch of samples from this space."""
41 # By default, assume identity is JSONable
42 return sample_n
43
[end of gym/spaces/space.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/gym/spaces/multi_discrete.py b/gym/spaces/multi_discrete.py
--- a/gym/spaces/multi_discrete.py
+++ b/gym/spaces/multi_discrete.py
@@ -1,4 +1,3 @@
-import gym
import numpy as np
from .space import Space
@@ -29,9 +28,9 @@
nvec: vector of counts of each categorical variable
"""
assert (np.array(nvec) > 0).all(), 'nvec (counts) have to be positive'
- self.nvec = np.asarray(nvec, dtype=np.uint32)
+ self.nvec = np.asarray(nvec, dtype=np.int64)
- super(MultiDiscrete, self).__init__(self.nvec.shape, np.uint32)
+ super(MultiDiscrete, self).__init__(self.nvec.shape, np.int64)
self.np_random = np.random.RandomState()
def seed(self, seed):
diff --git a/gym/spaces/space.py b/gym/spaces/space.py
--- a/gym/spaces/space.py
+++ b/gym/spaces/space.py
@@ -1,6 +1,3 @@
-import numpy as np
-
-
class Space(object):
"""Defines the observation and action spaces, so you can write generic
code that applies to any Env. For example, you can choose a random
|
{"golden_diff": "diff --git a/gym/spaces/multi_discrete.py b/gym/spaces/multi_discrete.py\n--- a/gym/spaces/multi_discrete.py\n+++ b/gym/spaces/multi_discrete.py\n@@ -1,4 +1,3 @@\n-import gym\n import numpy as np\n from .space import Space\n \n@@ -29,9 +28,9 @@\n nvec: vector of counts of each categorical variable\n \"\"\"\n assert (np.array(nvec) > 0).all(), 'nvec (counts) have to be positive'\n- self.nvec = np.asarray(nvec, dtype=np.uint32)\n+ self.nvec = np.asarray(nvec, dtype=np.int64)\n \n- super(MultiDiscrete, self).__init__(self.nvec.shape, np.uint32)\n+ super(MultiDiscrete, self).__init__(self.nvec.shape, np.int64)\n self.np_random = np.random.RandomState()\n \n def seed(self, seed):\ndiff --git a/gym/spaces/space.py b/gym/spaces/space.py\n--- a/gym/spaces/space.py\n+++ b/gym/spaces/space.py\n@@ -1,6 +1,3 @@\n-import numpy as np\n-\n-\n class Space(object):\n \"\"\"Defines the observation and action spaces, so you can write generic\n code that applies to any Env. For example, you can choose a random\n", "issue": "why MultiDiscrete dtype=int8 \nMultiDiscrete dtype is hardcoded as int8. \r\nIs this a bug ? What happens when we need more than 256 category \r\n```\r\nspace = MultiDiscrete([7, 1024])\r\nspace.sample()\r\n```\r\n\r\nhttps://github.com/openai/gym/blob/e944885e3b31a10fb6973093b39ff7682ef3aa3d/gym/spaces/multi_discrete.py#L10\n", "before_files": [{"content": "import gym\nimport numpy as np\nfrom .space import Space\n\n\nclass MultiDiscrete(Space):\n \"\"\"\n - The multi-discrete action space consists of a series of discrete action spaces with different number of actions in eachs\n - It is useful to represent game controllers or keyboards where each key can be represented as a discrete action space\n - It is parametrized by passing an array of positive integers specifying number of actions for each discrete action space\n\n Note: A value of 0 always need to represent the NOOP action.\n\n e.g. 
Nintendo Game Controller\n - Can be conceptualized as 3 discrete action spaces:\n\n 1) Arrow Keys: Discrete 5 - NOOP[0], UP[1], RIGHT[2], DOWN[3], LEFT[4] - params: min: 0, max: 4\n 2) Button A: Discrete 2 - NOOP[0], Pressed[1] - params: min: 0, max: 1\n 3) Button B: Discrete 2 - NOOP[0], Pressed[1] - params: min: 0, max: 1\n\n - Can be initialized as\n\n MultiDiscrete([ 5, 2, 2 ])\n\n \"\"\"\n def __init__(self, nvec):\n \n \"\"\"\n nvec: vector of counts of each categorical variable\n \"\"\"\n assert (np.array(nvec) > 0).all(), 'nvec (counts) have to be positive'\n self.nvec = np.asarray(nvec, dtype=np.uint32)\n\n super(MultiDiscrete, self).__init__(self.nvec.shape, np.uint32)\n self.np_random = np.random.RandomState()\n\n def seed(self, seed):\n self.np_random.seed(seed)\n\n def sample(self):\n return (self.np_random.random_sample(self.nvec.shape) * self.nvec).astype(self.dtype)\n\n def contains(self, x):\n # if nvec is uint32 and space dtype is uint32, then 0 <= x < self.nvec guarantees that x\n # is within correct bounds for space dtype (even though x does not have to be unsigned)\n return (0 <= x).all() and (x < self.nvec).all()\n\n def to_jsonable(self, sample_n):\n return [sample.tolist() for sample in sample_n]\n\n def from_jsonable(self, sample_n):\n return np.array(sample_n)\n\n def __repr__(self):\n return \"MultiDiscrete({})\".format(self.nvec)\n\n def __eq__(self, other):\n return np.all(self.nvec == other.nvec)\n", "path": "gym/spaces/multi_discrete.py"}, {"content": "import numpy as np\n\n\nclass Space(object):\n \"\"\"Defines the observation and action spaces, so you can write generic\n code that applies to any Env. For example, you can choose a random\n action.\n \"\"\"\n def __init__(self, shape=None, dtype=None):\n import numpy as np # takes about 300-400ms to import, so we load lazily\n self.shape = None if shape is None else tuple(shape)\n self.dtype = None if dtype is None else np.dtype(dtype)\n\n def sample(self):\n \"\"\"\n Uniformly randomly sample a random element of this space\n \"\"\"\n raise NotImplementedError\n\n def seed(self, seed):\n \"\"\"Set the seed for this space's pseudo-random number generator. \"\"\"\n raise NotImplementedError\n\n def contains(self, x):\n \"\"\"\n Return boolean specifying if x is a valid\n member of this space\n \"\"\"\n raise NotImplementedError\n\n def __contains__(self, x):\n return self.contains(x)\n\n def to_jsonable(self, sample_n):\n \"\"\"Convert a batch of samples from this space to a JSONable data type.\"\"\"\n # By default, assume identity is JSONable\n return sample_n\n\n def from_jsonable(self, sample_n):\n \"\"\"Convert a JSONable data type to a batch of samples from this space.\"\"\"\n # By default, assume identity is JSONable\n return sample_n\n", "path": "gym/spaces/space.py"}]}
| 1,763 | 314 |
gh_patches_debug_56609
|
rasdani/github-patches
|
git_diff
|
spacetelescope__jwql-678
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Upgrade Django to 3.0
Django 3.0 is out, and since it is a major release, we should consider upgrading to this.
</issue>
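The pin in `setup.py` below (`'django>=2.0,<3.0'`) is what keeps the project on the 2.x series, so relaxing or dropping the upper bound is the prerequisite for this upgrade. A small sketch of how that specifier behaves, assuming the `packaging` library:

```python
from packaging.specifiers import SpecifierSet

pin = SpecifierSet(">=2.0,<3.0")
print("3.0" in pin)     # False -- Django 3.0 is excluded by the current pin
print("2.2.9" in pin)   # True  -- only the 2.x series satisfies it

# Widening or removing the upper bound lets the resolver pick Django 3.x.
print("3.0" in SpecifierSet(">=2.0"))  # True
```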
<code>
[start of setup.py]
1 import numpy as np
2 from setuptools import setup
3 from setuptools import find_packages
4
5 VERSION = '0.24.0'
6
7 AUTHORS = 'Matthew Bourque, Lauren Chambers, Misty Cracraft, Mike Engesser, Mees Fix, Joe Filippazzo, Bryan Hilbert, '
8 AUTHORS += 'Graham Kanarek, Teagan King, Catherine Martlin, Maria Pena-Guerrero, Johannes Sahlmann, Ben Sunnquist'
9
10 DESCRIPTION = 'The James Webb Space Telescope Quicklook Project'
11
12 DEPENDENCY_LINKS = ['git+https://github.com/spacetelescope/jwst_reffiles#egg=jwst_reffiles']
13
14 REQUIRES = [
15 'asdf>=2.3.3',
16 'astropy>=3.2.1',
17 'astroquery>=0.3.9',
18 'authlib',
19 'bokeh>=1.0,<1.4',
20 'codecov',
21 'crds',
22 'cryptography',
23 'django>=2.0,<3.0',
24 'flake8',
25 'inflection',
26 'ipython',
27 'jinja2',
28 'jsonschema',
29 'jwedb>=0.0.3',
30 'jwst',
31 'matplotlib',
32 'nodejs',
33 'numpy',
34 'numpydoc',
35 'pandas',
36 'psycopg2',
37 'pysiaf',
38 'pytest',
39 'pytest-cov',
40 'scipy',
41 'sphinx',
42 'sqlalchemy',
43 'stsci_rtd_theme',
44 'twine',
45 'wtforms'
46 ]
47
48 setup(
49 name='jwql',
50 version=VERSION,
51 description=DESCRIPTION,
52 url='https://github.com/spacetelescope/jwql.git',
53 author=AUTHORS,
54 author_email='[email protected]',
55 license='BSD',
56 keywords=['astronomy', 'python'],
57 classifiers=['Programming Language :: Python'],
58 packages=find_packages(),
59 install_requires=REQUIRES,
60 dependency_links=DEPENDENCY_LINKS,
61 include_package_data=True,
62 include_dirs=[np.get_include()],
63 )
64
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -20,7 +20,7 @@
'codecov',
'crds',
'cryptography',
- 'django>=2.0,<3.0',
+ 'django',
'flake8',
'inflection',
'ipython',
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -20,7 +20,7 @@\n 'codecov',\n 'crds',\n 'cryptography',\n- 'django>=2.0,<3.0',\n+ 'django',\n 'flake8',\n 'inflection',\n 'ipython',\n", "issue": "Upgrade Django to 3.0\nDjango 3.0 is out, and since it is a major release, we should consider upgrading to this.\n", "before_files": [{"content": "import numpy as np\nfrom setuptools import setup\nfrom setuptools import find_packages\n\nVERSION = '0.24.0'\n\nAUTHORS = 'Matthew Bourque, Lauren Chambers, Misty Cracraft, Mike Engesser, Mees Fix, Joe Filippazzo, Bryan Hilbert, '\nAUTHORS += 'Graham Kanarek, Teagan King, Catherine Martlin, Maria Pena-Guerrero, Johannes Sahlmann, Ben Sunnquist'\n\nDESCRIPTION = 'The James Webb Space Telescope Quicklook Project'\n\nDEPENDENCY_LINKS = ['git+https://github.com/spacetelescope/jwst_reffiles#egg=jwst_reffiles']\n\nREQUIRES = [\n 'asdf>=2.3.3',\n 'astropy>=3.2.1',\n 'astroquery>=0.3.9',\n 'authlib',\n 'bokeh>=1.0,<1.4',\n 'codecov',\n 'crds',\n 'cryptography',\n 'django>=2.0,<3.0',\n 'flake8',\n 'inflection',\n 'ipython',\n 'jinja2',\n 'jsonschema',\n 'jwedb>=0.0.3',\n 'jwst',\n 'matplotlib',\n 'nodejs',\n 'numpy',\n 'numpydoc',\n 'pandas',\n 'psycopg2',\n 'pysiaf',\n 'pytest',\n 'pytest-cov',\n 'scipy',\n 'sphinx',\n 'sqlalchemy',\n 'stsci_rtd_theme',\n 'twine',\n 'wtforms'\n]\n\nsetup(\n name='jwql',\n version=VERSION,\n description=DESCRIPTION,\n url='https://github.com/spacetelescope/jwql.git',\n author=AUTHORS,\n author_email='[email protected]',\n license='BSD',\n keywords=['astronomy', 'python'],\n classifiers=['Programming Language :: Python'],\n packages=find_packages(),\n install_requires=REQUIRES,\n dependency_links=DEPENDENCY_LINKS,\n include_package_data=True,\n include_dirs=[np.get_include()],\n)\n", "path": "setup.py"}]}
| 1,141 | 80 |
gh_patches_debug_47926
|
rasdani/github-patches
|
git_diff
|
uccser__cs-unplugged-717
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Workaround Crowdin bug where integer yaml keys are not preserved
When downloading in-context localisation files, integer keys in YAML files are not preserved. This is only an issue in the file `topics/content/en/programming-challenges-structure-difficulties.yaml`, which uses the difficulty number as the key.
As a workaround, we can use the string value of the integer as the key, i.e. `"0"` instead of `0`
</issue>
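The distinction is easy to reproduce with a YAML parser: an unquoted `0` loads as an integer key, while a quoted `"0"` loads as a string key that survives tools which only preserve string keys, and the loader can still recover the number with `int()`. A minimal sketch, assuming PyYAML (the level names are illustrative):

```python
import yaml

# Unquoted keys come back as integers...
print(yaml.safe_load("0: beginner\n1: growing"))
# {0: 'beginner', 1: 'growing'}

# ...while quoted keys stay as strings.
levels = yaml.safe_load('"0": beginner\n"1": growing')
print(levels)                                   # {'0': 'beginner', '1': 'growing'}
print({int(k): v for k, v in levels.items()})   # back to integer levels
```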
<code>
[start of csunplugged/topics/management/commands/_ProgrammingChallengesStructureLoader.py]
1 """Custom loader for loading structure of programming challenges."""
2
3 import os
4 from django.db import transaction
5 from utils.errors.MissingRequiredFieldError import MissingRequiredFieldError
6 from utils.TranslatableModelLoader import TranslatableModelLoader
7 from topics.models import ProgrammingChallengeLanguage, ProgrammingChallengeDifficulty
8
9
10 class ProgrammingChallengesStructureLoader(TranslatableModelLoader):
11 """Custom loader for loading structure of programming challenges."""
12
13 @transaction.atomic
14 def load(self):
15 """Load the content for structure of programming challenges.
16
17 Raises:
18 MissingRequiredFieldError: when no object can be found with the matching
19 attribute.
20 """
21 structure = self.load_yaml_file(self.structure_file_path)
22
23 prog_languages = structure.get("languages", None)
24 difficulty_levels = structure.get("difficulties", None)
25 if None in [prog_languages, difficulty_levels]:
26 raise MissingRequiredFieldError(
27 self.structure_file_path,
28 ["lanugages", "difficulties"],
29 "Programming Challenge Structure"
30 )
31
32 # Add "-languages" to the structure filename
33 prog_languages_translation_filename = "{}-languages.yaml".format(
34 os.path.splitext(self.structure_filename)[0]
35 )
36 prog_languages_translations = self.get_yaml_translations(
37 prog_languages_translation_filename,
38 required_slugs=prog_languages.keys(),
39 required_fields=["name"]
40 )
41
42 for (prog_language, prog_language_data) in prog_languages.items():
43
44 if prog_language_data is None:
45 raise MissingRequiredFieldError(
46 self.structure_file_path,
47 ["number"],
48 "Programming Challenge Language"
49 )
50
51 # Check for required fields
52 prog_language_number = prog_language_data.get("number", None)
53 if prog_language_number is None:
54 raise MissingRequiredFieldError(
55 self.structure_file_path,
56 ["number"],
57 "Programming Challenge Language"
58 )
59
60 # Check if icon is given
61 if "icon" in prog_language_data:
62 prog_language_icon = prog_language_data["icon"]
63 else:
64 prog_language_icon = None
65
66 new_prog_language = ProgrammingChallengeLanguage(
67 slug=prog_language,
68 number=prog_language_number,
69 icon=prog_language_icon
70 )
71
72 translations = prog_languages_translations.get(prog_language, dict())
73 self.populate_translations(new_prog_language, translations)
74 self.mark_translation_availability(new_prog_language, required_fields=["name"])
75 new_prog_language.save()
76
77 self.log("Added programming language: {}".format(new_prog_language.__str__()))
78
79 # Add "-languages" to the structure filename
80 difficulties_translation_filename = "{}-difficulties.yaml".format(
81 os.path.splitext(self.structure_filename)[0]
82 )
83 difficulties_translations = self.get_yaml_translations(
84 difficulties_translation_filename,
85 required_slugs=difficulty_levels,
86 required_fields=["name"],
87 )
88
89 for difficulty in difficulty_levels:
90
91 new_difficulty = ProgrammingChallengeDifficulty(
92 level=difficulty,
93 )
94
95 translations = difficulties_translations.get(difficulty, dict())
96 self.populate_translations(new_difficulty, translations)
97 self.mark_translation_availability(new_difficulty, required_fields=["name"])
98 new_difficulty.save()
99
100 self.log("Added programming difficulty level: {}".format(new_difficulty.__str__()))
101
102 self.log("")
103
[end of csunplugged/topics/management/commands/_ProgrammingChallengesStructureLoader.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/csunplugged/topics/management/commands/_ProgrammingChallengesStructureLoader.py b/csunplugged/topics/management/commands/_ProgrammingChallengesStructureLoader.py
--- a/csunplugged/topics/management/commands/_ProgrammingChallengesStructureLoader.py
+++ b/csunplugged/topics/management/commands/_ProgrammingChallengesStructureLoader.py
@@ -89,7 +89,7 @@
for difficulty in difficulty_levels:
new_difficulty = ProgrammingChallengeDifficulty(
- level=difficulty,
+ level=int(difficulty),
)
translations = difficulties_translations.get(difficulty, dict())
|
{"golden_diff": "diff --git a/csunplugged/topics/management/commands/_ProgrammingChallengesStructureLoader.py b/csunplugged/topics/management/commands/_ProgrammingChallengesStructureLoader.py\n--- a/csunplugged/topics/management/commands/_ProgrammingChallengesStructureLoader.py\n+++ b/csunplugged/topics/management/commands/_ProgrammingChallengesStructureLoader.py\n@@ -89,7 +89,7 @@\n for difficulty in difficulty_levels:\n \n new_difficulty = ProgrammingChallengeDifficulty(\n- level=difficulty,\n+ level=int(difficulty),\n )\n \n translations = difficulties_translations.get(difficulty, dict())\n", "issue": "Workaround Crowdin bug where integer yaml keys are not preserved\nWhen downloading in-context localisation files, integer keys in YAML files are not preserved. This is only an issue in the file `topics/content/en/programming-challenges-structure-difficulties.yaml`, which uses the difficulty number as the key.\r\n\r\nAs a work around, we can use the string value of the integer as the key, i.e. `\"0\"` instead of `0`\n", "before_files": [{"content": "\"\"\"Custom loader for loading structure of programming challenges.\"\"\"\n\nimport os\nfrom django.db import transaction\nfrom utils.errors.MissingRequiredFieldError import MissingRequiredFieldError\nfrom utils.TranslatableModelLoader import TranslatableModelLoader\nfrom topics.models import ProgrammingChallengeLanguage, ProgrammingChallengeDifficulty\n\n\nclass ProgrammingChallengesStructureLoader(TranslatableModelLoader):\n \"\"\"Custom loader for loading structure of programming challenges.\"\"\"\n\n @transaction.atomic\n def load(self):\n \"\"\"Load the content for structure of programming challenges.\n\n Raises:\n MissingRequiredFieldError: when no object can be found with the matching\n attribute.\n \"\"\"\n structure = self.load_yaml_file(self.structure_file_path)\n\n prog_languages = structure.get(\"languages\", None)\n difficulty_levels = structure.get(\"difficulties\", None)\n if None in [prog_languages, difficulty_levels]:\n raise MissingRequiredFieldError(\n self.structure_file_path,\n [\"lanugages\", \"difficulties\"],\n \"Programming Challenge Structure\"\n )\n\n # Add \"-languages\" to the structure filename\n prog_languages_translation_filename = \"{}-languages.yaml\".format(\n os.path.splitext(self.structure_filename)[0]\n )\n prog_languages_translations = self.get_yaml_translations(\n prog_languages_translation_filename,\n required_slugs=prog_languages.keys(),\n required_fields=[\"name\"]\n )\n\n for (prog_language, prog_language_data) in prog_languages.items():\n\n if prog_language_data is None:\n raise MissingRequiredFieldError(\n self.structure_file_path,\n [\"number\"],\n \"Programming Challenge Language\"\n )\n\n # Check for required fields\n prog_language_number = prog_language_data.get(\"number\", None)\n if prog_language_number is None:\n raise MissingRequiredFieldError(\n self.structure_file_path,\n [\"number\"],\n \"Programming Challenge Language\"\n )\n\n # Check if icon is given\n if \"icon\" in prog_language_data:\n prog_language_icon = prog_language_data[\"icon\"]\n else:\n prog_language_icon = None\n\n new_prog_language = ProgrammingChallengeLanguage(\n slug=prog_language,\n number=prog_language_number,\n icon=prog_language_icon\n )\n\n translations = prog_languages_translations.get(prog_language, dict())\n self.populate_translations(new_prog_language, translations)\n self.mark_translation_availability(new_prog_language, required_fields=[\"name\"])\n new_prog_language.save()\n\n self.log(\"Added programming 
language: {}\".format(new_prog_language.__str__()))\n\n # Add \"-languages\" to the structure filename\n difficulties_translation_filename = \"{}-difficulties.yaml\".format(\n os.path.splitext(self.structure_filename)[0]\n )\n difficulties_translations = self.get_yaml_translations(\n difficulties_translation_filename,\n required_slugs=difficulty_levels,\n required_fields=[\"name\"],\n )\n\n for difficulty in difficulty_levels:\n\n new_difficulty = ProgrammingChallengeDifficulty(\n level=difficulty,\n )\n\n translations = difficulties_translations.get(difficulty, dict())\n self.populate_translations(new_difficulty, translations)\n self.mark_translation_availability(new_difficulty, required_fields=[\"name\"])\n new_difficulty.save()\n\n self.log(\"Added programming difficulty level: {}\".format(new_difficulty.__str__()))\n\n self.log(\"\")\n", "path": "csunplugged/topics/management/commands/_ProgrammingChallengesStructureLoader.py"}]}
| 1,537 | 133 |
gh_patches_debug_21496
|
rasdani/github-patches
|
git_diff
|
getnikola__nikola-2682
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
CATEGORY_ALLOW_HIERARCHIES does not work
### Environment (if reporting a bug)
**Python Version:**
3.5.2
**Nikola Version:**
7.8.3 and master
**Operating System:**
Ubuntu 16.04
### Description:
I installed nikola in virtualenv from pip.
Then I set CATEGORY_ALLOW_HIERARCHIES to True in conf.py.
I created a new post and edited the category to be ".. category: code/python"
The error when building site:
```
TaskError - taskid:render_taxonomies:output/categories/cat_code/index.html
PythonAction Error
Traceback (most recent call last):
File "/home/denin/Workspace/nikola/nikola3/lib/python3.5/site-packages/doit/action.py", line 403, in execute
returned_value = self.py_callable(*self.args, **kwargs)
File "/home/denin/Workspace/nikola/nikola3/lib/python3.5/site-packages/nikola/nikola.py", line 1396, in render_template
template_name, None, local_context)
File "/home/denin/Workspace/nikola/nikola3/lib/python3.5/site-packages/nikola/plugins/template/mako.py", line 117, in render_template
data = template.render_unicode(**context)
File "/home/denin/Workspace/nikola/nikola3/lib/python3.5/site-packages/mako/template.py", line 471, in render_unicode
as_unicode=True)
File "/home/denin/Workspace/nikola/nikola3/lib/python3.5/site-packages/mako/runtime.py", line 838, in _render
**_kwargs_for_callable(callable_, data))
File "/home/denin/Workspace/nikola/nikola3/lib/python3.5/site-packages/mako/runtime.py", line 873, in _render_context
_exec_template(inherit, lclcontext, args=args, kwargs=kwargs)
File "/home/denin/Workspace/nikola/nikola3/lib/python3.5/site-packages/mako/runtime.py", line 899, in _exec_template
callable_(context, *args, **kwargs)
File "/home/denin/Workspace/nikola/nikola3/my_first_site/cache/.mako.tmp/base.tmpl.py", line 127, in render_body
context['self'].content(**pageargs)
File "/home/denin/Workspace/nikola/nikola3/my_first_site/cache/.mako.tmp/tag.tmpl.py", line 137, in render_content
for name, link in subcategories:
ValueError: too many values to unpack (expected 2)
```
</issue>
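The `ValueError` at the bottom of that traceback is a plain unpacking mismatch: the `tag.tmpl` template iterates the subcategories with two loop targets, but the context is filled with 4-tuples. A minimal sketch of the failure mode, with made-up values:

```python
# What the template expects: (name, link) pairs.
pairs = [("python", "/categories/cat_code-python/")]
for name, link in pairs:
    print(name, link)          # fine

# What the context actually provides: 4-tuples per subcategory.
quads = [("python", "/categories/cat_code-python/",
          "code/python", ["code", "python"])]
try:
    for name, link in quads:
        print(name, link)
except ValueError as exc:
    print(exc)                 # too many values to unpack (expected 2)
```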
<code>
[start of nikola/plugins/task/categories.py]
1 # -*- coding: utf-8 -*-
2
3 # Copyright © 2012-2017 Roberto Alsina and others.
4
5 # Permission is hereby granted, free of charge, to any
6 # person obtaining a copy of this software and associated
7 # documentation files (the "Software"), to deal in the
8 # Software without restriction, including without limitation
9 # the rights to use, copy, modify, merge, publish,
10 # distribute, sublicense, and/or sell copies of the
11 # Software, and to permit persons to whom the Software is
12 # furnished to do so, subject to the following conditions:
13 #
14 # The above copyright notice and this permission notice
15 # shall be included in all copies or substantial portions of
16 # the Software.
17 #
18 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
19 # KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
20 # WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
21 # PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS
22 # OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
23 # OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
24 # OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
25 # SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26
27 """Render the category pages and feeds."""
28
29 from __future__ import unicode_literals
30
31 from nikola.plugin_categories import Taxonomy
32 from nikola import utils
33
34
35 class ClassifyCategories(Taxonomy):
36 """Classify the posts by categories."""
37
38 name = "classify_categories"
39
40 classification_name = "category"
41 overview_page_variable_name = "categories"
42 overview_page_items_variable_name = "cat_items"
43 overview_page_hierarchy_variable_name = "cat_hierarchy"
44 more_than_one_classifications_per_post = False
45 has_hierarchy = True
46 include_posts_from_subhierarchies = True
47 include_posts_into_hierarchy_root = False
48 show_list_as_subcategories_list = False
49 generate_atom_feeds_for_post_lists = True
50 template_for_classification_overview = "tags.tmpl"
51 always_disable_rss = False
52 apply_to_posts = True
53 apply_to_pages = False
54 minimum_post_count_per_classification_in_overview = 1
55 omit_empty_classifications = True
56 also_create_classifications_from_other_languages = True
57 path_handler_docstrings = {
58 'category_index': """A link to the category index.
59
60 Example:
61
62 link://category_index => /categories/index.html""",
63 'category': """A link to a category. Takes page number as optional keyword argument.
64
65 Example:
66
67 link://category/dogs => /categories/dogs.html""",
68 'category_atom': """A link to a category's Atom feed.
69
70 Example:
71
72 link://category_atom/dogs => /categories/dogs.atom""",
73 'category_rss': """A link to a category's RSS feed.
74
75 Example:
76
77 link://category_rss/dogs => /categories/dogs.xml""",
78 }
79
80 def set_site(self, site):
81 """Set site, which is a Nikola instance."""
82 super(ClassifyCategories, self).set_site(site)
83 self.show_list_as_index = self.site.config['CATEGORY_PAGES_ARE_INDEXES']
84 self.template_for_single_list = "tagindex.tmpl" if self.show_list_as_index else "tag.tmpl"
85
86 def is_enabled(self, lang=None):
87 """Return True if this taxonomy is enabled, or False otherwise."""
88 return True
89
90 def classify(self, post, lang):
91 """Classify the given post for the given language."""
92 cat = post.meta('category', lang=lang).strip()
93 return [cat] if cat else []
94
95 def get_classification_friendly_name(self, classification, lang, only_last_component=False):
96 """Extract a friendly name from the classification."""
97 classification = self.extract_hierarchy(classification)
98 return classification[-1] if classification else ''
99
100 def get_overview_path(self, lang, dest_type='page'):
101 """A path handler for the list of all classifications."""
102 if self.site.config['CATEGORIES_INDEX_PATH'](lang):
103 path = self.site.config['CATEGORIES_INDEX_PATH'](lang)
104 if path.endswith('/index'): # TODO: remove in v8
105 utils.LOGGER.warn("CATEGORIES_INDEX_PATH for language {0} is missing a .html extension. Please update your configuration!".format(lang))
106 path += '.html'
107 return [_f for _f in [path] if _f], 'never'
108 else:
109 return [_f for _f in [self.site.config['CATEGORY_PATH'](lang)] if _f], 'always'
110
111 def slugify_tag_name(self, name, lang):
112 """Slugify a tag name."""
113 if self.site.config['SLUG_TAG_PATH']:
114 name = utils.slugify(name, lang)
115 return name
116
117 def slugify_category_name(self, path, lang):
118 """Slugify a category name."""
119 if lang is None: # TODO: remove in v8
120 utils.LOGGER.warn("ClassifyCategories.slugify_category_name() called without language!")
121 lang = ''
122 if self.site.config['CATEGORY_OUTPUT_FLAT_HIERARCHY']:
123 path = path[-1:] # only the leaf
124 result = [self.slugify_tag_name(part, lang) for part in path]
125 result[0] = self.site.config['CATEGORY_PREFIX'] + result[0]
126 if not self.site.config['PRETTY_URLS']:
127 result = ['-'.join(result)]
128 return result
129
130 def get_path(self, classification, lang, dest_type='page'):
131 """A path handler for the given classification."""
132 return [_f for _f in [self.site.config['CATEGORY_PATH'](lang)] if _f] + self.slugify_category_name(classification, lang), 'auto'
133
134 def extract_hierarchy(self, classification):
135 """Given a classification, return a list of parts in the hierarchy."""
136 return utils.parse_escaped_hierarchical_category_name(classification)
137
138 def recombine_classification_from_hierarchy(self, hierarchy):
139 """Given a list of parts in the hierarchy, return the classification string."""
140 return utils.join_hierarchical_category_path(hierarchy)
141
142 def provide_overview_context_and_uptodate(self, lang):
143 """Provide data for the context and the uptodate list for the list of all classifiations."""
144 kw = {
145 'category_path': self.site.config['CATEGORY_PATH'],
146 'category_prefix': self.site.config['CATEGORY_PREFIX'],
147 "category_pages_are_indexes": self.site.config['CATEGORY_PAGES_ARE_INDEXES'],
148 "tzinfo": self.site.tzinfo,
149 "category_pages_descriptions": self.site.config['CATEGORY_PAGES_DESCRIPTIONS'],
150 "category_pages_titles": self.site.config['CATEGORY_PAGES_TITLES'],
151 }
152 context = {
153 "title": self.site.MESSAGES[lang]["Categories"],
154 "description": self.site.MESSAGES[lang]["Categories"],
155 "pagekind": ["list", "tags_page"],
156 }
157 kw.update(context)
158 return context, kw
159
160 def provide_context_and_uptodate(self, cat, lang, node=None):
161 """Provide data for the context and the uptodate list for the list of the given classifiation."""
162 cat_path = self.extract_hierarchy(cat)
163 kw = {
164 'category_path': self.site.config['CATEGORY_PATH'],
165 'category_prefix': self.site.config['CATEGORY_PREFIX'],
166 "category_pages_are_indexes": self.site.config['CATEGORY_PAGES_ARE_INDEXES'],
167 "tzinfo": self.site.tzinfo,
168 "category_pages_descriptions": self.site.config['CATEGORY_PAGES_DESCRIPTIONS'],
169 "category_pages_titles": self.site.config['CATEGORY_PAGES_TITLES'],
170 }
171 posts = self.site.posts_per_classification[self.classification_name][lang]
172 children = [child for child in node.children if len([post for post in posts.get(child.classification_name, []) if self.site.config['SHOW_UNTRANSLATED_POSTS'] or post.is_translation_available(lang)]) > 0]
173 subcats = [(child.name, self.site.link(self.classification_name, child.classification_name, lang), child.classification_name, child.classification_path) for child in children]
174 friendly_name = self.get_classification_friendly_name(cat, lang)
175 context = {
176 "title": self.site.config['CATEGORY_PAGES_TITLES'].get(lang, {}).get(cat, self.site.MESSAGES[lang]["Posts about %s"] % friendly_name),
177 "description": self.site.config['CATEGORY_PAGES_DESCRIPTIONS'].get(lang, {}).get(cat),
178 "kind": "category",
179 "pagekind": ["tag_page", "index" if self.show_list_as_index else "list"],
180 "tag": friendly_name,
181 "category": cat,
182 "category_path": cat_path,
183 "subcategories": subcats,
184 }
185 if self.show_list_as_index:
186 context["rss_link"] = """<link rel="alternate" type="application/rss+xml" type="application/rss+xml" title="RSS for tag {0} ({1})" href="{2}">""".format(friendly_name, lang, self.site.link("category_rss", cat, lang))
187 kw.update(context)
188 return context, kw
189
[end of nikola/plugins/task/categories.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/nikola/plugins/task/categories.py b/nikola/plugins/task/categories.py
--- a/nikola/plugins/task/categories.py
+++ b/nikola/plugins/task/categories.py
@@ -170,7 +170,7 @@
}
posts = self.site.posts_per_classification[self.classification_name][lang]
children = [child for child in node.children if len([post for post in posts.get(child.classification_name, []) if self.site.config['SHOW_UNTRANSLATED_POSTS'] or post.is_translation_available(lang)]) > 0]
- subcats = [(child.name, self.site.link(self.classification_name, child.classification_name, lang), child.classification_name, child.classification_path) for child in children]
+ subcats = [(child.name, self.site.link(self.classification_name, child.classification_name, lang)) for child in children]
friendly_name = self.get_classification_friendly_name(cat, lang)
context = {
"title": self.site.config['CATEGORY_PAGES_TITLES'].get(lang, {}).get(cat, self.site.MESSAGES[lang]["Posts about %s"] % friendly_name),
|
{"golden_diff": "diff --git a/nikola/plugins/task/categories.py b/nikola/plugins/task/categories.py\n--- a/nikola/plugins/task/categories.py\n+++ b/nikola/plugins/task/categories.py\n@@ -170,7 +170,7 @@\n }\n posts = self.site.posts_per_classification[self.classification_name][lang]\n children = [child for child in node.children if len([post for post in posts.get(child.classification_name, []) if self.site.config['SHOW_UNTRANSLATED_POSTS'] or post.is_translation_available(lang)]) > 0]\n- subcats = [(child.name, self.site.link(self.classification_name, child.classification_name, lang), child.classification_name, child.classification_path) for child in children]\n+ subcats = [(child.name, self.site.link(self.classification_name, child.classification_name, lang)) for child in children]\n friendly_name = self.get_classification_friendly_name(cat, lang)\n context = {\n \"title\": self.site.config['CATEGORY_PAGES_TITLES'].get(lang, {}).get(cat, self.site.MESSAGES[lang][\"Posts about %s\"] % friendly_name),\n", "issue": "CATEGORY_ALLOW_HIERARCHIES does not work\n\r\n\r\n### Environment (if reporting a bug)\r\n\r\n**Python Version:**\r\n3.5.2\r\n\r\n**Nikola Version:**\r\n7.8.3 and master\r\n\r\n**Operating System:**\r\nUbuntu 16.04\r\n\r\n### Description:\r\n\r\nI installed nikola in virtualenv from pip.\r\nThen I set CATEGORY_ALLOW_HIERARCHIES to True in conf.py.\r\nI created new post and edit category to be \".. category: code/python\"\r\nThe error when building site:\r\n```\r\nTaskError - taskid:render_taxonomies:output/categories/cat_code/index.html\r\nPythonAction Error\r\nTraceback (most recent call last):\r\n File \"/home/denin/Workspace/nikola/nikola3/lib/python3.5/site-packages/doit/action.py\", line 403, in execute\r\n returned_value = self.py_callable(*self.args, **kwargs)\r\n File \"/home/denin/Workspace/nikola/nikola3/lib/python3.5/site-packages/nikola/nikola.py\", line 1396, in render_template\r\n template_name, None, local_context)\r\n File \"/home/denin/Workspace/nikola/nikola3/lib/python3.5/site-packages/nikola/plugins/template/mako.py\", line 117, in render_template\r\n data = template.render_unicode(**context)\r\n File \"/home/denin/Workspace/nikola/nikola3/lib/python3.5/site-packages/mako/template.py\", line 471, in render_unicode\r\n as_unicode=True)\r\n File \"/home/denin/Workspace/nikola/nikola3/lib/python3.5/site-packages/mako/runtime.py\", line 838, in _render\r\n **_kwargs_for_callable(callable_, data))\r\n File \"/home/denin/Workspace/nikola/nikola3/lib/python3.5/site-packages/mako/runtime.py\", line 873, in _render_context\r\n _exec_template(inherit, lclcontext, args=args, kwargs=kwargs)\r\n File \"/home/denin/Workspace/nikola/nikola3/lib/python3.5/site-packages/mako/runtime.py\", line 899, in _exec_template\r\n callable_(context, *args, **kwargs)\r\n File \"/home/denin/Workspace/nikola/nikola3/my_first_site/cache/.mako.tmp/base.tmpl.py\", line 127, in render_body\r\n context['self'].content(**pageargs)\r\n File \"/home/denin/Workspace/nikola/nikola3/my_first_site/cache/.mako.tmp/tag.tmpl.py\", line 137, in render_content\r\n for name, link in subcategories:\r\nValueError: too many values to unpack (expected 2)\r\n```\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Copyright \u00a9 2012-2017 Roberto Alsina and others.\n\n# Permission is hereby granted, free of charge, to any\n# person obtaining a copy of this software and associated\n# documentation files (the \"Software\"), to deal in the\n# Software without restriction, including without 
limitation\n# the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the\n# Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice\n# shall be included in all copies or substantial portions of\n# the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY\n# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE\n# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR\n# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS\n# OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR\n# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR\n# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\n\"\"\"Render the category pages and feeds.\"\"\"\n\nfrom __future__ import unicode_literals\n\nfrom nikola.plugin_categories import Taxonomy\nfrom nikola import utils\n\n\nclass ClassifyCategories(Taxonomy):\n \"\"\"Classify the posts by categories.\"\"\"\n\n name = \"classify_categories\"\n\n classification_name = \"category\"\n overview_page_variable_name = \"categories\"\n overview_page_items_variable_name = \"cat_items\"\n overview_page_hierarchy_variable_name = \"cat_hierarchy\"\n more_than_one_classifications_per_post = False\n has_hierarchy = True\n include_posts_from_subhierarchies = True\n include_posts_into_hierarchy_root = False\n show_list_as_subcategories_list = False\n generate_atom_feeds_for_post_lists = True\n template_for_classification_overview = \"tags.tmpl\"\n always_disable_rss = False\n apply_to_posts = True\n apply_to_pages = False\n minimum_post_count_per_classification_in_overview = 1\n omit_empty_classifications = True\n also_create_classifications_from_other_languages = True\n path_handler_docstrings = {\n 'category_index': \"\"\"A link to the category index.\n\nExample:\n\nlink://category_index => /categories/index.html\"\"\",\n 'category': \"\"\"A link to a category. 
Takes page number as optional keyword argument.\n\nExample:\n\nlink://category/dogs => /categories/dogs.html\"\"\",\n 'category_atom': \"\"\"A link to a category's Atom feed.\n\nExample:\n\nlink://category_atom/dogs => /categories/dogs.atom\"\"\",\n 'category_rss': \"\"\"A link to a category's RSS feed.\n\nExample:\n\nlink://category_rss/dogs => /categories/dogs.xml\"\"\",\n }\n\n def set_site(self, site):\n \"\"\"Set site, which is a Nikola instance.\"\"\"\n super(ClassifyCategories, self).set_site(site)\n self.show_list_as_index = self.site.config['CATEGORY_PAGES_ARE_INDEXES']\n self.template_for_single_list = \"tagindex.tmpl\" if self.show_list_as_index else \"tag.tmpl\"\n\n def is_enabled(self, lang=None):\n \"\"\"Return True if this taxonomy is enabled, or False otherwise.\"\"\"\n return True\n\n def classify(self, post, lang):\n \"\"\"Classify the given post for the given language.\"\"\"\n cat = post.meta('category', lang=lang).strip()\n return [cat] if cat else []\n\n def get_classification_friendly_name(self, classification, lang, only_last_component=False):\n \"\"\"Extract a friendly name from the classification.\"\"\"\n classification = self.extract_hierarchy(classification)\n return classification[-1] if classification else ''\n\n def get_overview_path(self, lang, dest_type='page'):\n \"\"\"A path handler for the list of all classifications.\"\"\"\n if self.site.config['CATEGORIES_INDEX_PATH'](lang):\n path = self.site.config['CATEGORIES_INDEX_PATH'](lang)\n if path.endswith('/index'): # TODO: remove in v8\n utils.LOGGER.warn(\"CATEGORIES_INDEX_PATH for language {0} is missing a .html extension. Please update your configuration!\".format(lang))\n path += '.html'\n return [_f for _f in [path] if _f], 'never'\n else:\n return [_f for _f in [self.site.config['CATEGORY_PATH'](lang)] if _f], 'always'\n\n def slugify_tag_name(self, name, lang):\n \"\"\"Slugify a tag name.\"\"\"\n if self.site.config['SLUG_TAG_PATH']:\n name = utils.slugify(name, lang)\n return name\n\n def slugify_category_name(self, path, lang):\n \"\"\"Slugify a category name.\"\"\"\n if lang is None: # TODO: remove in v8\n utils.LOGGER.warn(\"ClassifyCategories.slugify_category_name() called without language!\")\n lang = ''\n if self.site.config['CATEGORY_OUTPUT_FLAT_HIERARCHY']:\n path = path[-1:] # only the leaf\n result = [self.slugify_tag_name(part, lang) for part in path]\n result[0] = self.site.config['CATEGORY_PREFIX'] + result[0]\n if not self.site.config['PRETTY_URLS']:\n result = ['-'.join(result)]\n return result\n\n def get_path(self, classification, lang, dest_type='page'):\n \"\"\"A path handler for the given classification.\"\"\"\n return [_f for _f in [self.site.config['CATEGORY_PATH'](lang)] if _f] + self.slugify_category_name(classification, lang), 'auto'\n\n def extract_hierarchy(self, classification):\n \"\"\"Given a classification, return a list of parts in the hierarchy.\"\"\"\n return utils.parse_escaped_hierarchical_category_name(classification)\n\n def recombine_classification_from_hierarchy(self, hierarchy):\n \"\"\"Given a list of parts in the hierarchy, return the classification string.\"\"\"\n return utils.join_hierarchical_category_path(hierarchy)\n\n def provide_overview_context_and_uptodate(self, lang):\n \"\"\"Provide data for the context and the uptodate list for the list of all classifiations.\"\"\"\n kw = {\n 'category_path': self.site.config['CATEGORY_PATH'],\n 'category_prefix': self.site.config['CATEGORY_PREFIX'],\n \"category_pages_are_indexes\": 
self.site.config['CATEGORY_PAGES_ARE_INDEXES'],\n \"tzinfo\": self.site.tzinfo,\n \"category_pages_descriptions\": self.site.config['CATEGORY_PAGES_DESCRIPTIONS'],\n \"category_pages_titles\": self.site.config['CATEGORY_PAGES_TITLES'],\n }\n context = {\n \"title\": self.site.MESSAGES[lang][\"Categories\"],\n \"description\": self.site.MESSAGES[lang][\"Categories\"],\n \"pagekind\": [\"list\", \"tags_page\"],\n }\n kw.update(context)\n return context, kw\n\n def provide_context_and_uptodate(self, cat, lang, node=None):\n \"\"\"Provide data for the context and the uptodate list for the list of the given classifiation.\"\"\"\n cat_path = self.extract_hierarchy(cat)\n kw = {\n 'category_path': self.site.config['CATEGORY_PATH'],\n 'category_prefix': self.site.config['CATEGORY_PREFIX'],\n \"category_pages_are_indexes\": self.site.config['CATEGORY_PAGES_ARE_INDEXES'],\n \"tzinfo\": self.site.tzinfo,\n \"category_pages_descriptions\": self.site.config['CATEGORY_PAGES_DESCRIPTIONS'],\n \"category_pages_titles\": self.site.config['CATEGORY_PAGES_TITLES'],\n }\n posts = self.site.posts_per_classification[self.classification_name][lang]\n children = [child for child in node.children if len([post for post in posts.get(child.classification_name, []) if self.site.config['SHOW_UNTRANSLATED_POSTS'] or post.is_translation_available(lang)]) > 0]\n subcats = [(child.name, self.site.link(self.classification_name, child.classification_name, lang), child.classification_name, child.classification_path) for child in children]\n friendly_name = self.get_classification_friendly_name(cat, lang)\n context = {\n \"title\": self.site.config['CATEGORY_PAGES_TITLES'].get(lang, {}).get(cat, self.site.MESSAGES[lang][\"Posts about %s\"] % friendly_name),\n \"description\": self.site.config['CATEGORY_PAGES_DESCRIPTIONS'].get(lang, {}).get(cat),\n \"kind\": \"category\",\n \"pagekind\": [\"tag_page\", \"index\" if self.show_list_as_index else \"list\"],\n \"tag\": friendly_name,\n \"category\": cat,\n \"category_path\": cat_path,\n \"subcategories\": subcats,\n }\n if self.show_list_as_index:\n context[\"rss_link\"] = \"\"\"<link rel=\"alternate\" type=\"application/rss+xml\" type=\"application/rss+xml\" title=\"RSS for tag {0} ({1})\" href=\"{2}\">\"\"\".format(friendly_name, lang, self.site.link(\"category_rss\", cat, lang))\n kw.update(context)\n return context, kw\n", "path": "nikola/plugins/task/categories.py"}]}
| 3,537 | 243 |
| gh_patches_debug_13056 | rasdani/github-patches | git_diff | pypa__pip-392 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
"pip freeze -r" returns ValueError() if requirements file contains --find-links
It seems that pip freeze should not pass --find-links or --index-url to distribute/setuptools when inlined in a requirements file. Here is an easy way to repro the problem:
1) echo "--find-links http://foo.bar/" > /tmp/req.txt
2) pip freeze -r /tmp/req.txt
"""
Exception:
Traceback (most recent call last):
File "/Users/hpfennig/work/pip/pip/basecommand.py", line 95, in main
self.run(options, args)
File "/Users/hpfennig/work/pip/pip/commands/freeze.py", line 92, in run
line_req = InstallRequirement.from_line(line)
File "/Users/hpfennig/work/pip/pip/req.py", line 105, in from_line
return cls(req, comes_from, url=url)
File "/Users/hpfennig/work/pip/pip/req.py", line 39, in **init**
req = pkg_resources.Requirement.parse(req)
File "/Users/hpfennig/.virtualenvs/pip-bug-virt-env/lib/python2.7/site-packages/setuptools-0.6c11-py2.7.egg/pkg_resources.py", line 2510, in parse
reqs = list(parse_requirements(s))
File "/Users/hpfennig/.virtualenvs/pip-bug-virt-env/lib/python2.7/site-packages/setuptools-0.6c11-py2.7.egg/pkg_resources.py", line 2436, in parse_requirements
line, p, specs = scan_list(VERSION,LINE_END,line,p,(1,2),"version spec")
File "/Users/hpfennig/.virtualenvs/pip-bug-virt-env/lib/python2.7/site-packages/setuptools-0.6c11-py2.7.egg/pkg_resources.py", line 2404, in scan_list
raise ValueError("Expected "+item_name+" in",line,"at",line[p:])
ValueError: ('Expected version spec in', '--find-links http://foo.bar/', 'at', ' http://foo.bar/')
Storing complete log in /Users/hpfennig/.pip/pip.log
"""
I have a fix, but I am not sure this is the correct way to go about it. Would be happy to do an official pull request if needed.
diff --git a/pip/commands/freeze.py b/pip/commands/freeze.py
index 01b5df9..03ac80f 100644
--- a/pip/commands/freeze.py
+++ b/pip/commands/freeze.py
@@ -85,7 +85,9 @@ class FreezeCommand(Command):
elif (line.startswith('-r') or line.startswith('--requirement')
or line.startswith('-Z') or line.startswith('--always-unzip')
or line.startswith('-f') or line.startswith('-i')
- or line.startswith('--extra-index-url')):
+                  or line.startswith('--extra-index-url')
+                  or line.startswith('--find-links')
+                  or line.startswith('--index-url')):
f.write(line)
continue
else:
</issue>
<code>
[start of pip/commands/freeze.py]
1 import re
2 import sys
3 import pkg_resources
4 import pip
5 from pip.req import InstallRequirement
6 from pip.log import logger
7 from pip.basecommand import Command
8 from pip.util import get_installed_distributions
9
10
11 class FreezeCommand(Command):
12 name = 'freeze'
13 usage = '%prog [OPTIONS]'
14 summary = 'Output all currently installed packages (exact versions) to stdout'
15
16 def __init__(self):
17 super(FreezeCommand, self).__init__()
18 self.parser.add_option(
19 '-r', '--requirement',
20 dest='requirement',
21 action='store',
22 default=None,
23 metavar='FILENAME',
24 help='Use the given requirements file as a hint about how to generate the new frozen requirements')
25 self.parser.add_option(
26 '-f', '--find-links',
27 dest='find_links',
28 action='append',
29 default=[],
30 metavar='URL',
31 help='URL for finding packages, which will be added to the frozen requirements file')
32 self.parser.add_option(
33 '-l', '--local',
34 dest='local',
35 action='store_true',
36 default=False,
37 help='If in a virtualenv, do not report globally-installed packages')
38
39 def setup_logging(self):
40 logger.move_stdout_to_stderr()
41
42 def run(self, options, args):
43 requirement = options.requirement
44 find_links = options.find_links or []
45 local_only = options.local
46 ## FIXME: Obviously this should be settable:
47 find_tags = False
48 skip_match = None
49
50 skip_regex = options.skip_requirements_regex
51 if skip_regex:
52 skip_match = re.compile(skip_regex)
53
54 dependency_links = []
55
56 f = sys.stdout
57
58 for dist in pkg_resources.working_set:
59 if dist.has_metadata('dependency_links.txt'):
60 dependency_links.extend(dist.get_metadata_lines('dependency_links.txt'))
61 for link in find_links:
62 if '#egg=' in link:
63 dependency_links.append(link)
64 for link in find_links:
65 f.write('-f %s\n' % link)
66 installations = {}
67 for dist in get_installed_distributions(local_only=local_only):
68 req = pip.FrozenRequirement.from_dist(dist, dependency_links, find_tags=find_tags)
69 installations[req.name] = req
70 if requirement:
71 req_f = open(requirement)
72 for line in req_f:
73 if not line.strip() or line.strip().startswith('#'):
74 f.write(line)
75 continue
76 if skip_match and skip_match.search(line):
77 f.write(line)
78 continue
79 elif line.startswith('-e') or line.startswith('--editable'):
80 if line.startswith('-e'):
81 line = line[2:].strip()
82 else:
83 line = line[len('--editable'):].strip().lstrip('=')
84 line_req = InstallRequirement.from_editable(line, default_vcs=options.default_vcs)
85 elif (line.startswith('-r') or line.startswith('--requirement')
86 or line.startswith('-Z') or line.startswith('--always-unzip')
87 or line.startswith('-f') or line.startswith('-i')
88 or line.startswith('--extra-index-url')):
89 f.write(line)
90 continue
91 else:
92 line_req = InstallRequirement.from_line(line)
93 if not line_req.name:
94 logger.notify("Skipping line because it's not clear what it would install: %s"
95 % line.strip())
96 logger.notify(" (add #egg=PackageName to the URL to avoid this warning)")
97 continue
98 if line_req.name not in installations:
99 logger.warn("Requirement file contains %s, but that package is not installed"
100 % line.strip())
101 continue
102 f.write(str(installations[line_req.name]))
103 del installations[line_req.name]
104 f.write('## The following requirements were added by pip --freeze:\n')
105 for installation in sorted(installations.values(), key=lambda x: x.name):
106 f.write(str(installation))
107
108
109 FreezeCommand()
110
[end of pip/commands/freeze.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/pip/commands/freeze.py b/pip/commands/freeze.py
--- a/pip/commands/freeze.py
+++ b/pip/commands/freeze.py
@@ -85,7 +85,9 @@
elif (line.startswith('-r') or line.startswith('--requirement')
or line.startswith('-Z') or line.startswith('--always-unzip')
or line.startswith('-f') or line.startswith('-i')
- or line.startswith('--extra-index-url')):
+ or line.startswith('--extra-index-url')
+ or line.startswith('--find-links')
+ or line.startswith('--index-url')):
f.write(line)
continue
else:
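For readers who want the effect of this fix in isolation, the sketch below reproduces just the patched check as a stand-alone function. The option prefixes are taken from the patched condition above; `is_option_line` is a name invented here for illustration and does not exist in pip itself.
```
# Stand-alone sketch of the patched behaviour: requirement-file lines that are
# really pip options must be copied through verbatim instead of being parsed
# as requirements (parsing them is what raised the ValueError in the report).
OPTION_PREFIXES = (
    '-r', '--requirement',
    '-Z', '--always-unzip',
    '-f', '-i',
    '--extra-index-url',
    '--find-links',   # newly skipped by the patch
    '--index-url',    # newly skipped by the patch
)

def is_option_line(line):
    """True if a requirements-file line is an option flag rather than a requirement."""
    return line.lstrip().startswith(OPTION_PREFIXES)

if __name__ == '__main__':
    for line in ('--find-links http://foo.bar/', 'Nikola==6.4.0'):
        print(line, '->', 'copied as-is' if is_option_line(line) else 'parsed as a requirement')
```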
|
{"golden_diff": "diff --git a/pip/commands/freeze.py b/pip/commands/freeze.py\n--- a/pip/commands/freeze.py\n+++ b/pip/commands/freeze.py\n@@ -85,7 +85,9 @@\n elif (line.startswith('-r') or line.startswith('--requirement')\n or line.startswith('-Z') or line.startswith('--always-unzip')\n or line.startswith('-f') or line.startswith('-i')\n- or line.startswith('--extra-index-url')):\n+ or line.startswith('--extra-index-url')\n+ or line.startswith('--find-links')\n+ or line.startswith('--index-url')):\n f.write(line)\n continue\n else:\n", "issue": "\"pip freeze -r\" returns ValueError() if requirements file contains --find-links\nIt seems that pip freeze should not pass --find-links or --index-url to distribute/setuptools when inlined in a requirements file. Here is an easy way to repro the problem:\n\n1) echo \"--find-links http://foo.bar/\" > /tmp/req.txt\n2) pip freeze -r /tmp/req.txt\n\n\"\"\"\nException:\nTraceback (most recent call last):\n File \"/Users/hpfennig/work/pip/pip/basecommand.py\", line 95, in main\n self.run(options, args)\n File \"/Users/hpfennig/work/pip/pip/commands/freeze.py\", line 92, in run\n line_req = InstallRequirement.from_line(line)\n File \"/Users/hpfennig/work/pip/pip/req.py\", line 105, in from_line\n return cls(req, comes_from, url=url)\n File \"/Users/hpfennig/work/pip/pip/req.py\", line 39, in **init**\n req = pkg_resources.Requirement.parse(req)\n File \"/Users/hpfennig/.virtualenvs/pip-bug-virt-env/lib/python2.7/site-packages/setuptools-0.6c11-py2.7.egg/pkg_resources.py\", line 2510, in parse\n reqs = list(parse_requirements(s))\n File \"/Users/hpfennig/.virtualenvs/pip-bug-virt-env/lib/python2.7/site-packages/setuptools-0.6c11-py2.7.egg/pkg_resources.py\", line 2436, in parse_requirements\n line, p, specs = scan_list(VERSION,LINE_END,line,p,(1,2),\"version spec\")\n File \"/Users/hpfennig/.virtualenvs/pip-bug-virt-env/lib/python2.7/site-packages/setuptools-0.6c11-py2.7.egg/pkg_resources.py\", line 2404, in scan_list\n raise ValueError(\"Expected \"+item_name+\" in\",line,\"at\",line[p:])\nValueError: ('Expected version spec in', '--find-links http://foo.bar/', 'at', ' http://foo.bar/')\n\nStoring complete log in /Users/hpfennig/.pip/pip.log\n\"\"\"\n\nI have a fix, but I am not sure this is the correct way to go about it. 
Would be happy to do an official pull request if needed.\n\ndiff --git a/pip/commands/freeze.py b/pip/commands/freeze.py\nindex 01b5df9..03ac80f 100644\n--- a/pip/commands/freeze.py\n+++ b/pip/commands/freeze.py\n@@ -85,7 +85,9 @@ class FreezeCommand(Command):\n elif (line.startswith('-r') or line.startswith('--requirement')\n or line.startswith('-Z') or line.startswith('--always-unzip')\n or line.startswith('-f') or line.startswith('-i')\n- or line.startswith('--extra-index-url')):\n- or line.startswith('--extra-index-url')\n- or line.startswith('--find-links')\n- or line.startswith('--index-url')):\n f.write(line)\n continue\n else:\n\n", "before_files": [{"content": "import re\nimport sys\nimport pkg_resources\nimport pip\nfrom pip.req import InstallRequirement\nfrom pip.log import logger\nfrom pip.basecommand import Command\nfrom pip.util import get_installed_distributions\n\n\nclass FreezeCommand(Command):\n name = 'freeze'\n usage = '%prog [OPTIONS]'\n summary = 'Output all currently installed packages (exact versions) to stdout'\n\n def __init__(self):\n super(FreezeCommand, self).__init__()\n self.parser.add_option(\n '-r', '--requirement',\n dest='requirement',\n action='store',\n default=None,\n metavar='FILENAME',\n help='Use the given requirements file as a hint about how to generate the new frozen requirements')\n self.parser.add_option(\n '-f', '--find-links',\n dest='find_links',\n action='append',\n default=[],\n metavar='URL',\n help='URL for finding packages, which will be added to the frozen requirements file')\n self.parser.add_option(\n '-l', '--local',\n dest='local',\n action='store_true',\n default=False,\n help='If in a virtualenv, do not report globally-installed packages')\n\n def setup_logging(self):\n logger.move_stdout_to_stderr()\n\n def run(self, options, args):\n requirement = options.requirement\n find_links = options.find_links or []\n local_only = options.local\n ## FIXME: Obviously this should be settable:\n find_tags = False\n skip_match = None\n\n skip_regex = options.skip_requirements_regex\n if skip_regex:\n skip_match = re.compile(skip_regex)\n\n dependency_links = []\n\n f = sys.stdout\n\n for dist in pkg_resources.working_set:\n if dist.has_metadata('dependency_links.txt'):\n dependency_links.extend(dist.get_metadata_lines('dependency_links.txt'))\n for link in find_links:\n if '#egg=' in link:\n dependency_links.append(link)\n for link in find_links:\n f.write('-f %s\\n' % link)\n installations = {}\n for dist in get_installed_distributions(local_only=local_only):\n req = pip.FrozenRequirement.from_dist(dist, dependency_links, find_tags=find_tags)\n installations[req.name] = req\n if requirement:\n req_f = open(requirement)\n for line in req_f:\n if not line.strip() or line.strip().startswith('#'):\n f.write(line)\n continue\n if skip_match and skip_match.search(line):\n f.write(line)\n continue\n elif line.startswith('-e') or line.startswith('--editable'):\n if line.startswith('-e'):\n line = line[2:].strip()\n else:\n line = line[len('--editable'):].strip().lstrip('=')\n line_req = InstallRequirement.from_editable(line, default_vcs=options.default_vcs)\n elif (line.startswith('-r') or line.startswith('--requirement')\n or line.startswith('-Z') or line.startswith('--always-unzip')\n or line.startswith('-f') or line.startswith('-i')\n or line.startswith('--extra-index-url')):\n f.write(line)\n continue\n else:\n line_req = InstallRequirement.from_line(line)\n if not line_req.name:\n logger.notify(\"Skipping line because it's not clear what it 
would install: %s\"\n % line.strip())\n logger.notify(\" (add #egg=PackageName to the URL to avoid this warning)\")\n continue\n if line_req.name not in installations:\n logger.warn(\"Requirement file contains %s, but that package is not installed\"\n % line.strip())\n continue\n f.write(str(installations[line_req.name]))\n del installations[line_req.name]\n f.write('## The following requirements were added by pip --freeze:\\n')\n for installation in sorted(installations.values(), key=lambda x: x.name):\n f.write(str(installation))\n\n\nFreezeCommand()\n", "path": "pip/commands/freeze.py"}]}
| 2,291 | 144 |
| gh_patches_debug_11571 | rasdani/github-patches | git_diff | microsoft__hi-ml-78 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Make the package tag-line on PyPi more concrete
Javier pointed out that our tagline, _Microsoft Health Intelligence AzureML helpers_, on https://pypi.org/manage/project/hi-ml/releases/ is too generic.
</issue>
<code>
[start of setup.py]
1 # ------------------------------------------------------------------------------------------
2 # Copyright (c) Microsoft Corporation. All rights reserved.
3 # Licensed under the MIT License (MIT). See LICENSE in the repo root for license information.
4 # ------------------------------------------------------------------------------------------
5
6 """A setuptools based setup module.
7
8 See:
9 https://packaging.python.org/guides/distributing-packages-using-setuptools/
10 """
11
12 import os
13 from math import floor
14 import pathlib
15 from random import random
16 from setuptools import setup, find_packages # type: ignore
17
18
19 here = pathlib.Path(__file__).parent.resolve()
20
21 # Get the long description from the README file
22 long_description = (here / 'README.md').read_text(encoding='utf-8')
23
24 version = ''
25
26 # If running from a GitHub Action then a standard set of environment variables will be
27 # populated (https://docs.github.com/en/actions/reference/environment-variables#default-environment-variables).
28 # In particular, GITHUB_REF is the branch or tag ref that triggered the workflow.
29 # If this was triggered by a tagged commit then GITHUB_REF will be: 'ref/tags/new_tag'.
30 # Extract this tag and use it as a version string
31 # See also:
32 # https://packaging.python.org/guides/publishing-package-distribution-releases-using-github-actions-ci-cd-workflows/
33 # https://github.com/pypa/gh-action-pypi-publish
34 GITHUB_REF_TAG_COMMIT = 'refs/tags/'
35
36 github_ref = os.getenv('GITHUB_REF')
37 if github_ref and github_ref.startswith(GITHUB_REF_TAG_COMMIT):
38 version = github_ref[len(GITHUB_REF_TAG_COMMIT):]
39
40 # Otherwise, if running from a GitHub Action, but not a tagged commit then GITHUB_RUN_NUMBER will be populated.
41 # Use this as a post release number. For example if GITHUB_RUN_NUMBER = 124 then the version string will be
42 # '0.1.2.post124'. Although this is discouraged, see:
43 # https://www.python.org/dev/peps/pep-0440/#post-releases
44 # it is necessary here to avoid duplicate packages in Test.PyPI.
45 if not version:
46 # TODO: Replace this with more principled package version management for the package wheels built during local test
47 # runs, one which circumvents AzureML's apparent package caching:
48 build_number = os.getenv('GITHUB_RUN_NUMBER')
49 if build_number:
50 version = '0.1.0.post' + build_number
51 else:
52 default_random_version_number = floor(random() * 10_000_000_000)
53 version = f'0.1.0.post{str(default_random_version_number)}'
54
55 (here / 'latest_version.txt').write_text(version)
56
57 # Read run_requirements.txt to get install_requires
58 install_requires = (here / 'run_requirements.txt').read_text().split("\n")
59 # Remove any whitespace and blank lines
60 install_requires = [line.strip() for line in install_requires if line.strip()]
61
62 setup(
63 name='hi-ml',
64 version=version,
65 description='Microsoft Health Intelligence AzureML helpers',
66 long_description=long_description,
67 long_description_content_type='text/markdown',
68 url='https://github.com/microsoft/hi-ml',
69 author="Microsoft Research Cambridge InnerEye Team ",
70 author_email="[email protected]",
71 classifiers=[
72 'Development Status :: 3 - Alpha',
73 'Intended Audience :: Science/Research',
74 "Topic :: Scientific/Engineering :: Medical Science Apps.",
75 'License :: OSI Approved :: MIT License',
76 'Programming Language :: Python :: 3.7'
77 ],
78 keywords='InnerEye, HealthIntelligence, AzureML',
79 license='MIT License',
80 packages=find_packages(where="src"),
81 package_dir={"": "src"},
82 include_package_data=True,
83 install_requires=install_requires,
84 scripts=['src/health/azure/run_tensorboard.py']
85 )
86
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -59,10 +59,12 @@
# Remove any whitespace and blank lines
install_requires = [line.strip() for line in install_requires if line.strip()]
+description = 'Microsoft Health Intelligence package to elevate and monitor scripts to an AzureML workspace'
+
setup(
name='hi-ml',
version=version,
- description='Microsoft Health Intelligence AzureML helpers',
+ description=description,
long_description=long_description,
long_description_content_type='text/markdown',
url='https://github.com/microsoft/hi-ml',
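Not much code is involved here; for context, a minimal sketch of the metadata block after the patch, keeping only the fields relevant to the PyPI tagline (everything else in the real `setup.py` is elided, and the `long_description` value below is a placeholder):
```
from setuptools import setup

# Short tagline shown next to the project name on PyPI; the patch replaces the
# generic "AzureML helpers" wording with a description of what the package does.
description = 'Microsoft Health Intelligence package to elevate and monitor scripts to an AzureML workspace'

setup(
    name='hi-ml',
    description=description,
    long_description='(contents of README.md)',  # the real setup.py reads README.md
    long_description_content_type='text/markdown',
)
```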
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -59,10 +59,12 @@\n # Remove any whitespace and blank lines\n install_requires = [line.strip() for line in install_requires if line.strip()]\n \n+description = 'Microsoft Health Intelligence package to elevate and monitor scripts to an AzureML workspace'\n+\n setup(\n name='hi-ml',\n version=version,\n- description='Microsoft Health Intelligence AzureML helpers',\n+ description=description,\n long_description=long_description,\n long_description_content_type='text/markdown',\n url='https://github.com/microsoft/hi-ml',\n", "issue": "Make the package tag-line on PyPi more concrete\nJavier pointed out that our tagline, _Microsoft Health Intelligence AzureML helpers_, on https://pypi.org/manage/project/hi-ml/releases/ is too generic.\n", "before_files": [{"content": "# ------------------------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License (MIT). See LICENSE in the repo root for license information.\n# ------------------------------------------------------------------------------------------\n\n\"\"\"A setuptools based setup module.\n\nSee:\nhttps://packaging.python.org/guides/distributing-packages-using-setuptools/\n\"\"\"\n\nimport os\nfrom math import floor\nimport pathlib\nfrom random import random\nfrom setuptools import setup, find_packages # type: ignore\n\n\nhere = pathlib.Path(__file__).parent.resolve()\n\n# Get the long description from the README file\nlong_description = (here / 'README.md').read_text(encoding='utf-8')\n\nversion = ''\n\n# If running from a GitHub Action then a standard set of environment variables will be\n# populated (https://docs.github.com/en/actions/reference/environment-variables#default-environment-variables).\n# In particular, GITHUB_REF is the branch or tag ref that triggered the workflow.\n# If this was triggered by a tagged commit then GITHUB_REF will be: 'ref/tags/new_tag'.\n# Extract this tag and use it as a version string\n# See also:\n# https://packaging.python.org/guides/publishing-package-distribution-releases-using-github-actions-ci-cd-workflows/\n# https://github.com/pypa/gh-action-pypi-publish\nGITHUB_REF_TAG_COMMIT = 'refs/tags/'\n\ngithub_ref = os.getenv('GITHUB_REF')\nif github_ref and github_ref.startswith(GITHUB_REF_TAG_COMMIT):\n version = github_ref[len(GITHUB_REF_TAG_COMMIT):]\n\n# Otherwise, if running from a GitHub Action, but not a tagged commit then GITHUB_RUN_NUMBER will be populated.\n# Use this as a post release number. For example if GITHUB_RUN_NUMBER = 124 then the version string will be\n# '0.1.2.post124'. 
Although this is discouraged, see:\n# https://www.python.org/dev/peps/pep-0440/#post-releases\n# it is necessary here to avoid duplicate packages in Test.PyPI.\nif not version:\n # TODO: Replace this with more principled package version management for the package wheels built during local test\n # runs, one which circumvents AzureML's apparent package caching:\n build_number = os.getenv('GITHUB_RUN_NUMBER')\n if build_number:\n version = '0.1.0.post' + build_number\n else:\n default_random_version_number = floor(random() * 10_000_000_000)\n version = f'0.1.0.post{str(default_random_version_number)}'\n\n(here / 'latest_version.txt').write_text(version)\n\n# Read run_requirements.txt to get install_requires\ninstall_requires = (here / 'run_requirements.txt').read_text().split(\"\\n\")\n# Remove any whitespace and blank lines\ninstall_requires = [line.strip() for line in install_requires if line.strip()]\n\nsetup(\n name='hi-ml',\n version=version,\n description='Microsoft Health Intelligence AzureML helpers',\n long_description=long_description,\n long_description_content_type='text/markdown',\n url='https://github.com/microsoft/hi-ml',\n author=\"Microsoft Research Cambridge InnerEye Team \",\n author_email=\"[email protected]\",\n classifiers=[\n 'Development Status :: 3 - Alpha',\n 'Intended Audience :: Science/Research',\n \"Topic :: Scientific/Engineering :: Medical Science Apps.\",\n 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python :: 3.7'\n ],\n keywords='InnerEye, HealthIntelligence, AzureML',\n license='MIT License',\n packages=find_packages(where=\"src\"),\n package_dir={\"\": \"src\"},\n include_package_data=True,\n install_requires=install_requires,\n scripts=['src/health/azure/run_tensorboard.py']\n)\n", "path": "setup.py"}]}
| 1,565 | 141 |
| gh_patches_debug_904 | rasdani/github-patches | git_diff | aws-cloudformation__cfn-lint-1081 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Error running cfn-lint with pipe (|)
cfn-lint version: *v0.23.0*
Hello, we have a problem running cfn-lint with the find command. Only this version is affected as far as we know.
We keep a couple of templates in a folder and lint them like this:
```
find ./templates -type f | xargs cfn-lint -f parseable -c I -t
```
It worked flawlessly before but with the new update we are getting this error:
> 2019-08-02 15:37:01,818 - cfnlint.decode - ERROR - Template file not found: None
None:1:1:1:2:E0000:Template file not found: None
Splitting the files in separated lines with `xargs -L 1` doesn't help.
If you run the cfn-lint command on it's own it works as expected.
This example **doesn't** work:
```
find ./templates -type f | xargs -t cfn-lint -f parseable -c I -t
cfn-lint -f parseable -c I -t ./templates/t1.yml ./templates/t2.yml ./templates/t3.yml
2019-08-02 15:50:20,891 - cfnlint.decode - ERROR - Template file not found: None
None:1:1:1:2:E0000:Template file not found: None
```
This example works:
```
cfn-lint -f parseable -c I -t ./templates/t1.yml ./templates/t2.yml ./templates/t3.yml
```
Regards TT
</issue>
<code>
[start of src/cfnlint/core.py]
1 """
2 Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
3
4 Permission is hereby granted, free of charge, to any person obtaining a copy of this
5 software and associated documentation files (the "Software"), to deal in the Software
6 without restriction, including without limitation the rights to use, copy, modify,
7 merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
8 permit persons to whom the Software is furnished to do so.
9
10 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
11 INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
12 PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
13 HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
14 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
15 SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
16 """
17 import logging
18 import os
19 import sys
20 from jsonschema.exceptions import ValidationError
21 from cfnlint import RulesCollection
22 import cfnlint.config
23 import cfnlint.formatters
24 import cfnlint.decode
25 import cfnlint.maintenance
26 from cfnlint.helpers import REGIONS
27
28
29 LOGGER = logging.getLogger('cfnlint')
30 DEFAULT_RULESDIR = os.path.join(os.path.dirname(__file__), 'rules')
31
32
33 class CfnLintExitException(Exception):
34 """Generic exception used when the cli should exit"""
35 def __init__(self, msg=None, exit_code=1):
36 if msg is None:
37 msg = 'process failed with exit code %s' % exit_code
38 super(CfnLintExitException, self).__init__(msg)
39 self.exit_code = exit_code
40
41
42 class InvalidRegionException(CfnLintExitException):
43 """When an unsupported/invalid region is supplied"""
44
45
46 class UnexpectedRuleException(CfnLintExitException):
47 """When processing a rule fails in an unexpected way"""
48
49
50 def run_cli(filename, template, rules, regions, override_spec):
51 """Process args and run"""
52
53 if override_spec:
54 cfnlint.helpers.override_specs(override_spec)
55
56 return run_checks(filename, template, rules, regions)
57
58
59 def get_exit_code(matches):
60 """ Determine exit code """
61 exit_code = 0
62 for match in matches:
63 if match.rule.id[0] == 'I':
64 exit_code = exit_code | 8
65 elif match.rule.id[0] == 'W':
66 exit_code = exit_code | 4
67 elif match.rule.id[0] == 'E':
68 exit_code = exit_code | 2
69
70 return exit_code
71
72
73 def get_formatter(fmt):
74 """ Get Formatter"""
75 formatter = {}
76 if fmt:
77 if fmt == 'quiet':
78 formatter = cfnlint.formatters.QuietFormatter()
79 elif fmt == 'parseable':
80 # pylint: disable=bad-option-value
81 formatter = cfnlint.formatters.ParseableFormatter()
82 elif fmt == 'json':
83 formatter = cfnlint.formatters.JsonFormatter()
84 else:
85 formatter = cfnlint.formatters.Formatter()
86
87 return formatter
88
89
90 def get_rules(rulesdir, ignore_rules, include_rules, configure_rules=None, include_experimental=False):
91 """Get rules"""
92 rules = RulesCollection(ignore_rules, include_rules, configure_rules, include_experimental)
93 rules_dirs = [DEFAULT_RULESDIR] + rulesdir
94 try:
95 for rules_dir in rules_dirs:
96 rules.create_from_directory(rules_dir)
97 except OSError as e:
98 raise UnexpectedRuleException('Tried to append rules but got an error: %s' % str(e), 1)
99 return rules
100
101
102 def configure_logging(debug_logging):
103 """ Backwards compatibility for integrators """
104 LOGGER.info('Update your integrations to use "cfnlint.config.configure_logging" instead')
105 cfnlint.config.configure_logging(debug_logging, False)
106
107
108 def get_args_filenames(cli_args):
109 """ Get Template Configuration items and set them as default values"""
110 try:
111 config = cfnlint.config.ConfigMixIn(cli_args)
112 except ValidationError as e:
113 LOGGER.error('Error parsing config file: %s', str(e))
114 exit(1)
115
116 fmt = config.format
117 formatter = get_formatter(fmt)
118
119 if config.update_specs:
120 cfnlint.maintenance.update_resource_specs()
121 exit(0)
122
123 if config.update_documentation:
124 # Get ALL rules (ignore the CLI settings))
125 documentation_rules = cfnlint.core.get_rules([], [], ['I', 'E', 'W'], {}, True)
126 cfnlint.maintenance.update_documentation(documentation_rules)
127 exit(0)
128
129 if config.update_iam_policies:
130 cfnlint.maintenance.update_iam_policies()
131 exit(0)
132
133 if config.listrules:
134 rules = cfnlint.core.get_rules(
135 config.append_rules,
136 config.ignore_checks,
137 config.include_checks,
138 config.configure_rules
139 )
140 print(rules)
141 exit(0)
142
143 if not sys.stdin.isatty():
144 return(config, [None], formatter)
145
146 if not config.templates:
147 # Not specified, print the help
148 config.parser.print_help()
149 exit(1)
150
151 return(config, config.templates, formatter)
152
153
154 def get_template_rules(filename, args):
155 """ Get Template Configuration items and set them as default values"""
156
157 (template, matches) = cfnlint.decode.decode(filename, args.ignore_bad_template)
158
159 if matches:
160 return(template, [], matches)
161
162 args.template_args = template
163
164 rules = cfnlint.core.get_rules(
165 args.append_rules,
166 args.ignore_checks,
167 args.include_checks,
168 args.configure_rules,
169 args.include_experimental,
170 )
171
172 return(template, rules, [])
173
174
175 def run_checks(filename, template, rules, regions):
176 """Run Checks against the template"""
177 if regions:
178 if not set(regions).issubset(set(REGIONS)):
179 unsupported_regions = list(set(regions).difference(set(REGIONS)))
180 msg = 'Regions %s are unsupported. Supported regions are %s' % (unsupported_regions, REGIONS)
181 raise InvalidRegionException(msg, 32)
182
183 matches = []
184
185 runner = cfnlint.Runner(rules, filename, template, regions)
186 matches.extend(runner.transform())
187 # Only do rule analysis if Transform was successful
188 if not matches:
189 try:
190 matches.extend(runner.run())
191 except Exception as err: # pylint: disable=W0703
192 msg = 'Tried to process rules on file %s but got an error: %s' % (filename, str(err))
193 UnexpectedRuleException(msg, 1)
194 matches.sort(key=lambda x: (x.filename, x.linenumber, x.rule.id))
195
196 return(matches)
197
[end of src/cfnlint/core.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/src/cfnlint/core.py b/src/cfnlint/core.py
--- a/src/cfnlint/core.py
+++ b/src/cfnlint/core.py
@@ -140,7 +140,7 @@
print(rules)
exit(0)
- if not sys.stdin.isatty():
+ if not sys.stdin.isatty() and not config.templates:
return(config, [None], formatter)
if not config.templates:
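To make the one-line change easier to reason about, here is a stand-alone sketch of the decision it implements. `Config` below is a stand-in class invented for illustration; in cfn-lint the real object is the parsed `ConfigMixIn`.
```
import sys

class Config:
    """Stand-in for cfn-lint's parsed CLI configuration."""
    def __init__(self, templates):
        self.templates = templates  # paths given via -t/--template

def filenames_to_lint(config):
    # Old behaviour: any non-tty stdin (e.g. `find ... | xargs cfn-lint -t ...`)
    # made cfn-lint ignore the -t arguments and try to read a template from
    # stdin, failing with "Template file not found: None".
    # New behaviour: stdin is only used when no templates were passed explicitly.
    if not sys.stdin.isatty() and not config.templates:
        return [None]  # sentinel meaning "read the template from stdin"
    return config.templates

if __name__ == '__main__':
    print(filenames_to_lint(Config(['./templates/t1.yml', './templates/t2.yml'])))
```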
|
{"golden_diff": "diff --git a/src/cfnlint/core.py b/src/cfnlint/core.py\n--- a/src/cfnlint/core.py\n+++ b/src/cfnlint/core.py\n@@ -140,7 +140,7 @@\n print(rules)\n exit(0)\n \n- if not sys.stdin.isatty():\n+ if not sys.stdin.isatty() and not config.templates:\n return(config, [None], formatter)\n \n if not config.templates:\n", "issue": "Error running cfn-lint with pipe (|)\ncfn-lint version: *v0.23.0*\r\n\r\nHello we have a problem running cfn-lint with find command. Only this version is affected as far as we know.\r\nWe are keeping couple of template is a folder and linting them like that:\r\n```\r\nfind ./templates -type f | xargs cfn-lint -f parseable -c I -t\r\n```\r\n\r\nIt worked flawlessly before but with the new update we are getting this error:\r\n\r\n> 2019-08-02 15:37:01,818 - cfnlint.decode - ERROR - Template file not found: None\r\nNone:1:1:1:2:E0000:Template file not found: None\r\n\r\nSplitting the files in separated lines with `xargs -L 1` doesn't help.\r\n\r\nIf you run the cfn-lint command on it's own it works as expected.\r\n\r\nThis example **doesn't** work:\r\n```\r\nfind ./templates -type f | xargs -t cfn-lint -f parseable -c I -t\r\ncfn-lint -f parseable -c I -t ./templates/t1.yml ./templates/t2.yml ./templates/t3.yml \r\n2019-08-02 15:50:20,891 - cfnlint.decode - ERROR - Template file not found: None\r\nNone:1:1:1:2:E0000:Template file not found: None\r\n```\r\nThis example works:\r\n```\r\ncfn-lint -f parseable -c I -t ./templates/t1.yml ./templates/t2.yml ./templates/t3.yml\r\n ```\r\n\r\nRegards TT\n", "before_files": [{"content": "\"\"\"\n Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n\n Permission is hereby granted, free of charge, to any person obtaining a copy of this\n software and associated documentation files (the \"Software\"), to deal in the Software\n without restriction, including without limitation the rights to use, copy, modify,\n merge, publish, distribute, sublicense, and/or sell copies of the Software, and to\n permit persons to whom the Software is furnished to do so.\n\n THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,\n INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A\n PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT\n HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION\n OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\"\"\"\nimport logging\nimport os\nimport sys\nfrom jsonschema.exceptions import ValidationError\nfrom cfnlint import RulesCollection\nimport cfnlint.config\nimport cfnlint.formatters\nimport cfnlint.decode\nimport cfnlint.maintenance\nfrom cfnlint.helpers import REGIONS\n\n\nLOGGER = logging.getLogger('cfnlint')\nDEFAULT_RULESDIR = os.path.join(os.path.dirname(__file__), 'rules')\n\n\nclass CfnLintExitException(Exception):\n \"\"\"Generic exception used when the cli should exit\"\"\"\n def __init__(self, msg=None, exit_code=1):\n if msg is None:\n msg = 'process failed with exit code %s' % exit_code\n super(CfnLintExitException, self).__init__(msg)\n self.exit_code = exit_code\n\n\nclass InvalidRegionException(CfnLintExitException):\n \"\"\"When an unsupported/invalid region is supplied\"\"\"\n\n\nclass UnexpectedRuleException(CfnLintExitException):\n \"\"\"When processing a rule fails in an unexpected way\"\"\"\n\n\ndef run_cli(filename, template, rules, regions, override_spec):\n \"\"\"Process args and run\"\"\"\n\n if override_spec:\n cfnlint.helpers.override_specs(override_spec)\n\n return run_checks(filename, template, rules, regions)\n\n\ndef get_exit_code(matches):\n \"\"\" Determine exit code \"\"\"\n exit_code = 0\n for match in matches:\n if match.rule.id[0] == 'I':\n exit_code = exit_code | 8\n elif match.rule.id[0] == 'W':\n exit_code = exit_code | 4\n elif match.rule.id[0] == 'E':\n exit_code = exit_code | 2\n\n return exit_code\n\n\ndef get_formatter(fmt):\n \"\"\" Get Formatter\"\"\"\n formatter = {}\n if fmt:\n if fmt == 'quiet':\n formatter = cfnlint.formatters.QuietFormatter()\n elif fmt == 'parseable':\n # pylint: disable=bad-option-value\n formatter = cfnlint.formatters.ParseableFormatter()\n elif fmt == 'json':\n formatter = cfnlint.formatters.JsonFormatter()\n else:\n formatter = cfnlint.formatters.Formatter()\n\n return formatter\n\n\ndef get_rules(rulesdir, ignore_rules, include_rules, configure_rules=None, include_experimental=False):\n \"\"\"Get rules\"\"\"\n rules = RulesCollection(ignore_rules, include_rules, configure_rules, include_experimental)\n rules_dirs = [DEFAULT_RULESDIR] + rulesdir\n try:\n for rules_dir in rules_dirs:\n rules.create_from_directory(rules_dir)\n except OSError as e:\n raise UnexpectedRuleException('Tried to append rules but got an error: %s' % str(e), 1)\n return rules\n\n\ndef configure_logging(debug_logging):\n \"\"\" Backwards compatibility for integrators \"\"\"\n LOGGER.info('Update your integrations to use \"cfnlint.config.configure_logging\" instead')\n cfnlint.config.configure_logging(debug_logging, False)\n\n\ndef get_args_filenames(cli_args):\n \"\"\" Get Template Configuration items and set them as default values\"\"\"\n try:\n config = cfnlint.config.ConfigMixIn(cli_args)\n except ValidationError as e:\n LOGGER.error('Error parsing config file: %s', str(e))\n exit(1)\n\n fmt = config.format\n formatter = get_formatter(fmt)\n\n if config.update_specs:\n cfnlint.maintenance.update_resource_specs()\n exit(0)\n\n if config.update_documentation:\n # Get ALL rules (ignore the CLI settings))\n documentation_rules = cfnlint.core.get_rules([], [], ['I', 'E', 'W'], {}, True)\n cfnlint.maintenance.update_documentation(documentation_rules)\n exit(0)\n\n if 
config.update_iam_policies:\n cfnlint.maintenance.update_iam_policies()\n exit(0)\n\n if config.listrules:\n rules = cfnlint.core.get_rules(\n config.append_rules,\n config.ignore_checks,\n config.include_checks,\n config.configure_rules\n )\n print(rules)\n exit(0)\n\n if not sys.stdin.isatty():\n return(config, [None], formatter)\n\n if not config.templates:\n # Not specified, print the help\n config.parser.print_help()\n exit(1)\n\n return(config, config.templates, formatter)\n\n\ndef get_template_rules(filename, args):\n \"\"\" Get Template Configuration items and set them as default values\"\"\"\n\n (template, matches) = cfnlint.decode.decode(filename, args.ignore_bad_template)\n\n if matches:\n return(template, [], matches)\n\n args.template_args = template\n\n rules = cfnlint.core.get_rules(\n args.append_rules,\n args.ignore_checks,\n args.include_checks,\n args.configure_rules,\n args.include_experimental,\n )\n\n return(template, rules, [])\n\n\ndef run_checks(filename, template, rules, regions):\n \"\"\"Run Checks against the template\"\"\"\n if regions:\n if not set(regions).issubset(set(REGIONS)):\n unsupported_regions = list(set(regions).difference(set(REGIONS)))\n msg = 'Regions %s are unsupported. Supported regions are %s' % (unsupported_regions, REGIONS)\n raise InvalidRegionException(msg, 32)\n\n matches = []\n\n runner = cfnlint.Runner(rules, filename, template, regions)\n matches.extend(runner.transform())\n # Only do rule analysis if Transform was successful\n if not matches:\n try:\n matches.extend(runner.run())\n except Exception as err: # pylint: disable=W0703\n msg = 'Tried to process rules on file %s but got an error: %s' % (filename, str(err))\n UnexpectedRuleException(msg, 1)\n matches.sort(key=lambda x: (x.filename, x.linenumber, x.rule.id))\n\n return(matches)\n", "path": "src/cfnlint/core.py"}]}
| 2,863 | 99 |
| gh_patches_debug_4369 | rasdani/github-patches | git_diff | openvinotoolkit__training_extensions-2672 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
POT -> PTQ transition in docs
Steps to be done:
- Rename POT -> PTQ
- Replace POT docs links with NNCF.PTQ docs
- Mention that the output model would be quantized to INT8
Related pages:
https://openvinotoolkit.github.io/training_extensions/stable/guide/explanation/additional_features/models_optimization.html
https://openvinotoolkit.github.io/training_extensions/stable/guide/tutorials/base/how_to_train/classification.html#optimization and other tasks
</issue>
<code>
[start of src/otx/cli/tools/eval.py]
1 """Model quality evaluation tool."""
2
3 # Copyright (C) 2021 Intel Corporation
4 #
5 # Licensed under the Apache License, Version 2.0 (the "License");
6 # you may not use this file except in compliance with the License.
7 # You may obtain a copy of the License at
8 #
9 # http://www.apache.org/licenses/LICENSE-2.0
10 #
11 # Unless required by applicable law or agreed to in writing,
12 # software distributed under the License is distributed on an "AS IS" BASIS,
13 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 # See the License for the specific language governing permissions
15 # and limitations under the License.
16
17 import json
18 from pathlib import Path
19
20 # Update environment variables for CLI use
21 import otx.cli # noqa: F401
22 from otx.api.entities.inference_parameters import InferenceParameters
23 from otx.api.entities.model_template import TaskType
24 from otx.api.entities.resultset import ResultSetEntity
25 from otx.api.entities.subset import Subset
26 from otx.api.entities.task_environment import TaskEnvironment
27 from otx.cli.manager import ConfigManager
28 from otx.cli.utils.importing import get_impl_class
29 from otx.cli.utils.io import read_model
30 from otx.cli.utils.nncf import is_checkpoint_nncf
31 from otx.cli.utils.parser import (
32 add_hyper_parameters_sub_parser,
33 get_override_param,
34 get_parser_and_hprams_data,
35 )
36 from otx.core.data.adapter import get_dataset_adapter
37 from otx.utils.logger import config_logger
38
39 # pylint: disable=too-many-locals
40
41
42 def get_args():
43 """Parses command line arguments."""
44 parser, hyper_parameters, params = get_parser_and_hprams_data()
45
46 parser.add_argument(
47 "--test-data-roots",
48 help="Comma-separated paths to test data folders.",
49 )
50 parser.add_argument(
51 "--load-weights",
52 help="Load model weights from previously saved checkpoint."
53 "It could be a trained/optimized model (POT only) or exported model.",
54 )
55 parser.add_argument(
56 "-o",
57 "--output",
58 help="Location where the intermediate output of the task will be stored.",
59 )
60 parser.add_argument(
61 "--workspace",
62 help="Path to the workspace where the command will run.",
63 default=None,
64 )
65 parser.add_argument(
66 "--data",
67 type=str,
68 default=None,
69 help="The data.yaml path want to use in train task.",
70 )
71
72 add_hyper_parameters_sub_parser(parser, hyper_parameters, modes=("INFERENCE",))
73 override_param = get_override_param(params)
74
75 return parser.parse_args(), override_param
76
77
78 def check_label_schemas(label_schema_a, label_schema_b):
79 """Checks that both passed label schemas have labels with the same names.
80
81 If it is False that it raises RuntimeError.
82 """
83
84 for model_label, snapshot_label in zip(label_schema_a.get_labels(False), label_schema_b.get_labels(False)):
85 if model_label.name != snapshot_label.name:
86 raise RuntimeError(
87 "Labels schemas from model and dataset are different: " f"\n{label_schema_a} \n\tvs\n{label_schema_b}"
88 )
89
90
91 def main():
92 """Main function that is used for model evaluation."""
93
94 # Dynamically create an argument parser based on override parameters.
95 args, override_param = get_args()
96
97 config_manager = ConfigManager(args, workspace_root=args.workspace, mode="eval")
98 config_logger(config_manager.output_path / "otx.log", "INFO")
99 # Auto-Configuration for model template
100 config_manager.configure_template()
101
102 if not args.load_weights and config_manager.check_workspace():
103 latest_model_path = (
104 config_manager.workspace_root / "outputs" / "latest_trained_model" / "models" / "weights.pth"
105 )
106 args.load_weights = str(latest_model_path)
107
108 # Update Hyper Parameter Configs
109 hyper_parameters = config_manager.get_hyparams_config(override_param)
110
111 # Get classes for Task, ConfigurableParameters and Dataset.
112 template = config_manager.template
113 if any(args.load_weights.endswith(x) for x in (".bin", ".xml", ".zip")):
114 task_class = get_impl_class(template.entrypoints.openvino)
115 elif args.load_weights.endswith(".pth"):
116 if is_checkpoint_nncf(args.load_weights):
117 task_class = get_impl_class(template.entrypoints.nncf)
118 else:
119 task_class = get_impl_class(template.entrypoints.base)
120 else:
121 raise ValueError(f"Unsupported file: {args.load_weights}")
122
123 # Auto-Configuration for Dataset configuration
124 config_manager.configure_data_config(update_data_yaml=config_manager.check_workspace())
125 dataset_config = config_manager.get_dataset_config(subsets=["test"])
126 dataset_adapter = get_dataset_adapter(**dataset_config)
127 dataset, label_schema = dataset_adapter.get_otx_dataset(), dataset_adapter.get_label_schema()
128
129 environment = TaskEnvironment(
130 model=None,
131 hyper_parameters=hyper_parameters,
132 label_schema=label_schema,
133 model_template=template,
134 )
135
136 environment.model = read_model(environment.get_model_configuration(), args.load_weights, None)
137
138 task = task_class(task_environment=environment)
139
140 validation_dataset = dataset.get_subset(Subset.TESTING)
141 predicted_validation_dataset = task.infer(
142 # temp (sungchul): remain annotation for visual prompting
143 validation_dataset
144 if getattr(task, "task_type", None) == TaskType.VISUAL_PROMPTING
145 else validation_dataset.with_empty_annotations(),
146 InferenceParameters(is_evaluation=False),
147 )
148
149 resultset = ResultSetEntity(
150 model=environment.model,
151 ground_truth_dataset=validation_dataset,
152 prediction_dataset=predicted_validation_dataset,
153 )
154 task.evaluate(resultset)
155 assert resultset.performance is not None
156 print(resultset.performance)
157
158 output_path = Path(args.output) if args.output else config_manager.output_path
159 performance = {resultset.performance.score.name: resultset.performance.score.value}
160 if hasattr(task, "avg_time_per_image"):
161 performance["avg_time_per_image"] = task.avg_time_per_image
162 with open(output_path / "performance.json", "w", encoding="UTF-8") as write_file:
163 json.dump(performance, write_file)
164
165 return dict(retcode=0, template=template.name)
166
167
168 if __name__ == "__main__":
169 main()
170
[end of src/otx/cli/tools/eval.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/src/otx/cli/tools/eval.py b/src/otx/cli/tools/eval.py
--- a/src/otx/cli/tools/eval.py
+++ b/src/otx/cli/tools/eval.py
@@ -49,8 +49,8 @@
)
parser.add_argument(
"--load-weights",
- help="Load model weights from previously saved checkpoint."
- "It could be a trained/optimized model (POT only) or exported model.",
+ help="Load model weights from previously saved checkpoint. "
+ "It could be a trained/optimized model (with PTQ only) or exported model.",
)
parser.add_argument(
"-o",
|
{"golden_diff": "diff --git a/src/otx/cli/tools/eval.py b/src/otx/cli/tools/eval.py\n--- a/src/otx/cli/tools/eval.py\n+++ b/src/otx/cli/tools/eval.py\n@@ -49,8 +49,8 @@\n )\n parser.add_argument(\n \"--load-weights\",\n- help=\"Load model weights from previously saved checkpoint.\"\n- \"It could be a trained/optimized model (POT only) or exported model.\",\n+ help=\"Load model weights from previously saved checkpoint. \"\n+ \"It could be a trained/optimized model (with PTQ only) or exported model.\",\n )\n parser.add_argument(\n \"-o\",\n", "issue": "POT -> PTQ transition in docs\nSteps to be done:\r\n- Rename POT -> PTQ\r\n- Replace POT docs links with NNCF.PTQ docs\r\n- Mention that the output model would be quantized to INT8\r\n\r\nRelated pages: \r\nhttps://openvinotoolkit.github.io/training_extensions/stable/guide/explanation/additional_features/models_optimization.html\r\nhttps://openvinotoolkit.github.io/training_extensions/stable/guide/tutorials/base/how_to_train/classification.html#optimization and other tasks\n", "before_files": [{"content": "\"\"\"Model quality evaluation tool.\"\"\"\n\n# Copyright (C) 2021 Intel Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions\n# and limitations under the License.\n\nimport json\nfrom pathlib import Path\n\n# Update environment variables for CLI use\nimport otx.cli # noqa: F401\nfrom otx.api.entities.inference_parameters import InferenceParameters\nfrom otx.api.entities.model_template import TaskType\nfrom otx.api.entities.resultset import ResultSetEntity\nfrom otx.api.entities.subset import Subset\nfrom otx.api.entities.task_environment import TaskEnvironment\nfrom otx.cli.manager import ConfigManager\nfrom otx.cli.utils.importing import get_impl_class\nfrom otx.cli.utils.io import read_model\nfrom otx.cli.utils.nncf import is_checkpoint_nncf\nfrom otx.cli.utils.parser import (\n add_hyper_parameters_sub_parser,\n get_override_param,\n get_parser_and_hprams_data,\n)\nfrom otx.core.data.adapter import get_dataset_adapter\nfrom otx.utils.logger import config_logger\n\n# pylint: disable=too-many-locals\n\n\ndef get_args():\n \"\"\"Parses command line arguments.\"\"\"\n parser, hyper_parameters, params = get_parser_and_hprams_data()\n\n parser.add_argument(\n \"--test-data-roots\",\n help=\"Comma-separated paths to test data folders.\",\n )\n parser.add_argument(\n \"--load-weights\",\n help=\"Load model weights from previously saved checkpoint.\"\n \"It could be a trained/optimized model (POT only) or exported model.\",\n )\n parser.add_argument(\n \"-o\",\n \"--output\",\n help=\"Location where the intermediate output of the task will be stored.\",\n )\n parser.add_argument(\n \"--workspace\",\n help=\"Path to the workspace where the command will run.\",\n default=None,\n )\n parser.add_argument(\n \"--data\",\n type=str,\n default=None,\n help=\"The data.yaml path want to use in train task.\",\n )\n\n add_hyper_parameters_sub_parser(parser, hyper_parameters, modes=(\"INFERENCE\",))\n override_param = get_override_param(params)\n\n return parser.parse_args(), 
override_param\n\n\ndef check_label_schemas(label_schema_a, label_schema_b):\n \"\"\"Checks that both passed label schemas have labels with the same names.\n\n If it is False that it raises RuntimeError.\n \"\"\"\n\n for model_label, snapshot_label in zip(label_schema_a.get_labels(False), label_schema_b.get_labels(False)):\n if model_label.name != snapshot_label.name:\n raise RuntimeError(\n \"Labels schemas from model and dataset are different: \" f\"\\n{label_schema_a} \\n\\tvs\\n{label_schema_b}\"\n )\n\n\ndef main():\n \"\"\"Main function that is used for model evaluation.\"\"\"\n\n # Dynamically create an argument parser based on override parameters.\n args, override_param = get_args()\n\n config_manager = ConfigManager(args, workspace_root=args.workspace, mode=\"eval\")\n config_logger(config_manager.output_path / \"otx.log\", \"INFO\")\n # Auto-Configuration for model template\n config_manager.configure_template()\n\n if not args.load_weights and config_manager.check_workspace():\n latest_model_path = (\n config_manager.workspace_root / \"outputs\" / \"latest_trained_model\" / \"models\" / \"weights.pth\"\n )\n args.load_weights = str(latest_model_path)\n\n # Update Hyper Parameter Configs\n hyper_parameters = config_manager.get_hyparams_config(override_param)\n\n # Get classes for Task, ConfigurableParameters and Dataset.\n template = config_manager.template\n if any(args.load_weights.endswith(x) for x in (\".bin\", \".xml\", \".zip\")):\n task_class = get_impl_class(template.entrypoints.openvino)\n elif args.load_weights.endswith(\".pth\"):\n if is_checkpoint_nncf(args.load_weights):\n task_class = get_impl_class(template.entrypoints.nncf)\n else:\n task_class = get_impl_class(template.entrypoints.base)\n else:\n raise ValueError(f\"Unsupported file: {args.load_weights}\")\n\n # Auto-Configuration for Dataset configuration\n config_manager.configure_data_config(update_data_yaml=config_manager.check_workspace())\n dataset_config = config_manager.get_dataset_config(subsets=[\"test\"])\n dataset_adapter = get_dataset_adapter(**dataset_config)\n dataset, label_schema = dataset_adapter.get_otx_dataset(), dataset_adapter.get_label_schema()\n\n environment = TaskEnvironment(\n model=None,\n hyper_parameters=hyper_parameters,\n label_schema=label_schema,\n model_template=template,\n )\n\n environment.model = read_model(environment.get_model_configuration(), args.load_weights, None)\n\n task = task_class(task_environment=environment)\n\n validation_dataset = dataset.get_subset(Subset.TESTING)\n predicted_validation_dataset = task.infer(\n # temp (sungchul): remain annotation for visual prompting\n validation_dataset\n if getattr(task, \"task_type\", None) == TaskType.VISUAL_PROMPTING\n else validation_dataset.with_empty_annotations(),\n InferenceParameters(is_evaluation=False),\n )\n\n resultset = ResultSetEntity(\n model=environment.model,\n ground_truth_dataset=validation_dataset,\n prediction_dataset=predicted_validation_dataset,\n )\n task.evaluate(resultset)\n assert resultset.performance is not None\n print(resultset.performance)\n\n output_path = Path(args.output) if args.output else config_manager.output_path\n performance = {resultset.performance.score.name: resultset.performance.score.value}\n if hasattr(task, \"avg_time_per_image\"):\n performance[\"avg_time_per_image\"] = task.avg_time_per_image\n with open(output_path / \"performance.json\", \"w\", encoding=\"UTF-8\") as write_file:\n json.dump(performance, write_file)\n\n return dict(retcode=0, template=template.name)\n\n\nif 
__name__ == \"__main__\":\n main()\n", "path": "src/otx/cli/tools/eval.py"}]}
| 2,403 | 150 |
| gh_patches_debug_5409 | rasdani/github-patches | git_diff | mirumee__ariadne-183 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Move documentation to separate repo and host it on gh-pages
Sphinx has served us well, but we feel it's too limiting for what we have planned for Ariadne.
We've decided to migrate the site to [Docusaurus](https://docusaurus.io) and keep it in a separate repo.
</issue>
<code>
[start of docs/conf.py]
1 # -*- coding: utf-8 -*-
2 #
3 # Configuration file for the Sphinx documentation builder.
4 #
5 # This file does only contain a selection of the most common options. For a
6 # full list see the documentation:
7 # http://www.sphinx-doc.org/en/master/config
8
9 # -- Path setup --------------------------------------------------------------
10
11 # If extensions (or modules to document with autodoc) are in another directory,
12 # add these directories to sys.path here. If the directory is relative to the
13 # documentation root, use os.path.abspath to make it absolute, like shown here.
14 #
15 # import os
16 # import sys
17 # sys.path.insert(0, os.path.abspath('.'))
18
19 from datetime import date
20
21 year = date.today().year
22
23
24 # -- Project information -----------------------------------------------------
25
26 project = "Ariadne"
27 copyright = "%s, Mirumee Software" % year
28 author = "Mirumee Software"
29
30 # The short X.Y version
31 version = "4"
32 # The full version, including alpha/beta/rc tags
33 release = "0.4"
34
35
36 # -- General configuration ---------------------------------------------------
37
38 # If your documentation needs a minimal Sphinx version, state it here.
39 #
40 # needs_sphinx = '1.0'
41
42 # Add any Sphinx extension module names here, as strings. They can be
43 # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
44 # ones.
45 extensions = ["sphinxcontrib.asyncio"]
46
47 # Add any paths that contain templates here, relative to this directory.
48 templates_path = ["_templates"]
49
50 # The suffix(es) of source filenames.
51 # You can specify multiple suffix as a list of string:
52 #
53 # source_suffix = ['.rst', '.md']
54 source_suffix = [".rst", ".md"]
55
56 # The master toctree document.
57 master_doc = "index"
58
59 # The language for content autogenerated by Sphinx. Refer to documentation
60 # for a list of supported languages.
61 #
62 # This is also used if you do content translation via gettext catalogs.
63 # Usually you set "language" from the command line for these cases.
64 language = None
65
66 # List of patterns, relative to source directory, that match files and
67 # directories to ignore when looking for source files.
68 # This pattern also affects html_static_path and html_extra_path.
69 exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
70
71 # The name of the Pygments (syntax highlighting) style to use.
72 pygments_style = None
73
74
75 # -- Options for HTML output -------------------------------------------------
76
77 # The theme to use for HTML and HTML Help pages. See the documentation for
78 # a list of builtin themes.
79 #
80 html_theme = "alabaster"
81
82 # Theme options are theme-specific and customize the look and feel of a theme
83 # further. For a list of options available for each theme, see the
84 # documentation.
85 #
86 html_theme_options = {
87 "logo": "logo-vertical.png",
88 "github_user": "mirumee",
89 "github_repo": "ariadne",
90 }
91
92 # Add any paths that contain custom static files (such as style sheets) here,
93 # relative to this directory. They are copied after the builtin static files,
94 # so a file named "default.css" will overwrite the builtin "default.css".
95 html_static_path = ["_static"]
96
97 # Custom sidebar templates, must be a dictionary that maps document names
98 # to template names.
99 #
100 # The default sidebars (for documents that don't match any pattern) are
101 # defined by theme itself. Builtin themes are using these templates by
102 # default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
103 # 'searchbox.html']``.
104 #
105 # html_sidebars = {}
106
107
108 # -- Options for HTMLHelp output ---------------------------------------------
109
110 # Output file base name for HTML help builder.
111 htmlhelp_basename = "Ariadnedoc"
112
113
114 # -- Options for LaTeX output ------------------------------------------------
115
116 latex_elements = {
117 # The paper size ('letterpaper' or 'a4paper').
118 #
119 # 'papersize': 'letterpaper',
120 # The font size ('10pt', '11pt' or '12pt').
121 #
122 # 'pointsize': '10pt',
123 # Additional stuff for the LaTeX preamble.
124 #
125 # 'preamble': '',
126 # Latex figure (float) alignment
127 #
128 # 'figure_align': 'htbp',
129 }
130
131 # Grouping the document tree into LaTeX files. List of tuples
132 # (source start file, target name, title,
133 # author, documentclass [howto, manual, or own class]).
134 latex_documents = [
135 (master_doc, "Ariadne.tex", "Ariadne Documentation", "Mirumee Software", "manual")
136 ]
137
138
139 # -- Options for manual page output ------------------------------------------
140
141 # One entry per manual page. List of tuples
142 # (source start file, name, description, authors, manual section).
143 man_pages = [(master_doc, "ariadne", "Ariadne Documentation", [author], 1)]
144
145
146 # -- Options for Texinfo output ----------------------------------------------
147
148 # Grouping the document tree into Texinfo files. List of tuples
149 # (source start file, target name, title, author,
150 # dir menu entry, description, category)
151 texinfo_documents = [
152 (
153 master_doc,
154 "Ariadne",
155 "Ariadne",
156 author,
157 "Ariadne",
158 "Ariadne is a Python library for implementing GraphQL servers, inspired by Apollo Server and built with GraphQL-core-next.",
159 "Miscellaneous",
160 )
161 ]
162
163
164 # -- Options for Epub output -------------------------------------------------
165
166 # Bibliographic Dublin Core info.
167 epub_title = project
168
169 # The unique identifier of the text. This can be a ISBN number
170 # or the project homepage.
171 #
172 # epub_identifier = ''
173
174 # A unique identification for the text.
175 #
176 # epub_uid = ''
177
178 # A list of files that should not be packed into the epub file.
179 epub_exclude_files = ["search.html"]
180
[end of docs/conf.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/docs/conf.py b/docs/conf.py
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -42,7 +42,7 @@
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
-extensions = ["sphinxcontrib.asyncio"]
+extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
|
{"golden_diff": "diff --git a/docs/conf.py b/docs/conf.py\n--- a/docs/conf.py\n+++ b/docs/conf.py\n@@ -42,7 +42,7 @@\n # Add any Sphinx extension module names here, as strings. They can be\n # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n # ones.\n-extensions = [\"sphinxcontrib.asyncio\"]\n+extensions = []\n \n # Add any paths that contain templates here, relative to this directory.\n templates_path = [\"_templates\"]\n", "issue": "Move documentation to separate repo and host it on gh-pages\nSphinx has served us well, but we fell its too limiting for what we have planned for Ariadne.\r\n\r\nWe've decided to migrate the site to the [Docusaurus](https://docusaurus.io) and keep it on separate repo.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Configuration file for the Sphinx documentation builder.\n#\n# This file does only contain a selection of the most common options. For a\n# full list see the documentation:\n# http://www.sphinx-doc.org/en/master/config\n\n# -- Path setup --------------------------------------------------------------\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#\n# import os\n# import sys\n# sys.path.insert(0, os.path.abspath('.'))\n\nfrom datetime import date\n\nyear = date.today().year\n\n\n# -- Project information -----------------------------------------------------\n\nproject = \"Ariadne\"\ncopyright = \"%s, Mirumee Software\" % year\nauthor = \"Mirumee Software\"\n\n# The short X.Y version\nversion = \"4\"\n# The full version, including alpha/beta/rc tags\nrelease = \"0.4\"\n\n\n# -- General configuration ---------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n#\n# needs_sphinx = '1.0'\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\"sphinxcontrib.asyncio\"]\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = [\"_templates\"]\n\n# The suffix(es) of source filenames.\n# You can specify multiple suffix as a list of string:\n#\n# source_suffix = ['.rst', '.md']\nsource_suffix = [\".rst\", \".md\"]\n\n# The master toctree document.\nmaster_doc = \"index\"\n\n# The language for content autogenerated by Sphinx. Refer to documentation\n# for a list of supported languages.\n#\n# This is also used if you do content translation via gettext catalogs.\n# Usually you set \"language\" from the command line for these cases.\nlanguage = None\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This pattern also affects html_static_path and html_extra_path.\nexclude_patterns = [\"_build\", \"Thumbs.db\", \".DS_Store\"]\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = None\n\n\n# -- Options for HTML output -------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\nhtml_theme = \"alabaster\"\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. 
For a list of options available for each theme, see the\n# documentation.\n#\nhtml_theme_options = {\n \"logo\": \"logo-vertical.png\",\n \"github_user\": \"mirumee\",\n \"github_repo\": \"ariadne\",\n}\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = [\"_static\"]\n\n# Custom sidebar templates, must be a dictionary that maps document names\n# to template names.\n#\n# The default sidebars (for documents that don't match any pattern) are\n# defined by theme itself. Builtin themes are using these templates by\n# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',\n# 'searchbox.html']``.\n#\n# html_sidebars = {}\n\n\n# -- Options for HTMLHelp output ---------------------------------------------\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = \"Ariadnedoc\"\n\n\n# -- Options for LaTeX output ------------------------------------------------\n\nlatex_elements = {\n # The paper size ('letterpaper' or 'a4paper').\n #\n # 'papersize': 'letterpaper',\n # The font size ('10pt', '11pt' or '12pt').\n #\n # 'pointsize': '10pt',\n # Additional stuff for the LaTeX preamble.\n #\n # 'preamble': '',\n # Latex figure (float) alignment\n #\n # 'figure_align': 'htbp',\n}\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title,\n# author, documentclass [howto, manual, or own class]).\nlatex_documents = [\n (master_doc, \"Ariadne.tex\", \"Ariadne Documentation\", \"Mirumee Software\", \"manual\")\n]\n\n\n# -- Options for manual page output ------------------------------------------\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [(master_doc, \"ariadne\", \"Ariadne Documentation\", [author], 1)]\n\n\n# -- Options for Texinfo output ----------------------------------------------\n\n# Grouping the document tree into Texinfo files. List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [\n (\n master_doc,\n \"Ariadne\",\n \"Ariadne\",\n author,\n \"Ariadne\",\n \"Ariadne is a Python library for implementing GraphQL servers, inspired by Apollo Server and built with GraphQL-core-next.\",\n \"Miscellaneous\",\n )\n]\n\n\n# -- Options for Epub output -------------------------------------------------\n\n# Bibliographic Dublin Core info.\nepub_title = project\n\n# The unique identifier of the text. This can be a ISBN number\n# or the project homepage.\n#\n# epub_identifier = ''\n\n# A unique identification for the text.\n#\n# epub_uid = ''\n\n# A list of files that should not be packed into the epub file.\nepub_exclude_files = [\"search.html\"]\n", "path": "docs/conf.py"}]}
| 2,293 | 108 |
| gh_patches_debug_27258 | rasdani/github-patches | git_diff | Kinto__kinto-1674 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
prompt=none not carried for silent authentication
The [documentation](https://auth0.com/docs/api-auth/tutorials/silent-authentication) is more complex than it needs to be. 
To authenticate you visit `/v1/openid/auth0/login?callback=http...&scopes=...` it redirects to the OIDC provider (e.g. `https://peterbecom.auth0.com/authorize?client_id=...&response_type=code&scope=openid+email+profile&redirect_uri=...&state=...`) which then redirects you back to the `callback` (if all goes well with the password check) with a code that kinto trades in for an access token which it then appends to the `callback` URL.
Now, if that access token expires (or is *about to* expire!) we need to authenticate again. The default expiration time for the access token (at least with Auth0) is 1 day. To do that, you can simply add `&prompt=none` to the URL. E.g. `https://peterbecom.auth0.com/authorize?client_id=...&response_type=code&scope=openid+email+profile&redirect_uri=...&state=...&prompt=none`. What the OIDC provider does is...
> "Silent authentication lets you perform an authentication flow where Auth0 will only reply with redirects, and never with a login page."
It's basically the same URL as authorization but with `&prompt=none` added.
</issue>
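For reference, a minimal sketch of the authorization URL construction the issue describes, written in plain Python rather than as Kinto code; the endpoint, client id, redirect URI, and state values below are placeholders, not real credentials:

```python
from urllib.parse import urlencode


def build_authorize_url(auth_endpoint, client_id, scope, redirect_uri, state, prompt=None):
    # Same querystring keys the login view already sends, plus the optional
    # prompt parameter that silent authentication relies on.
    params = {
        "client_id": client_id,
        "response_type": "code",
        "scope": scope,
        "redirect_uri": redirect_uri,
        "state": state,
    }
    if prompt:
        params["prompt"] = prompt  # e.g. "none" for a redirect-only flow
    return "{}?{}".format(auth_endpoint, urlencode(params))


# Placeholder values, for illustration only.
print(build_authorize_url(
    "https://peterbecom.auth0.com/authorize",
    client_id="<client-id>",
    scope="openid email profile",
    redirect_uri="https://server.example.com/v1/openid/auth0/token?",
    state="<random-state>",
    prompt="none",
))
```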
<code>
[start of kinto/plugins/openid/views.py]
1 import urllib.parse
2
3 import colander
4 import requests
5 from pyramid import httpexceptions
6
7 from cornice.validators import colander_validator
8 from kinto.core import Service
9 from kinto.core.errors import raise_invalid, ERRORS
10 from kinto.core.utils import random_bytes_hex
11 from kinto.core.resource.schema import ErrorResponseSchema
12 from kinto.core.schema import URL
13
14 from .utils import fetch_openid_config
15
16
17 DEFAULT_STATE_TTL_SECONDS = 3600
18 DEFAULT_STATE_LENGTH = 32
19
20
21 class RedirectHeadersSchema(colander.MappingSchema):
22 """Redirect response headers."""
23 location = colander.SchemaNode(colander.String(), name='Location')
24
25
26 class RedirectResponseSchema(colander.MappingSchema):
27 """Redirect response schema."""
28 headers = RedirectHeadersSchema()
29
30
31 response_schemas = {
32 '307': RedirectResponseSchema(description='Successful redirection.'),
33 '400': ErrorResponseSchema(description='The request is invalid.'),
34 }
35
36
37 def provider_validator(request, **kwargs):
38 """
39 This validator verifies that the validator in URL (eg. /openid/auth0/login)
40 is a configured OpenIDConnect policy.
41 """
42 provider = request.matchdict['provider']
43 used = request.registry.settings.get('multiauth.policy.%s.use' % provider, '')
44 if not used.endswith('OpenIDConnectPolicy'):
45 request.errors.add('path', 'provider', 'Unknow provider %r' % provider)
46
47
48 class LoginQuerystringSchema(colander.MappingSchema):
49 """
50 Querystring schema for the login endpoint.
51 """
52 callback = URL()
53 scope = colander.SchemaNode(colander.String())
54
55
56 class LoginSchema(colander.MappingSchema):
57 querystring = LoginQuerystringSchema()
58
59
60 login = Service(name='openid_login',
61 path='/openid/{provider}/login',
62 description='Initiate the OAuth2 login')
63
64
65 @login.get(schema=LoginSchema(),
66 validators=(colander_validator, provider_validator),
67 response_schemas=response_schemas)
68 def get_login(request):
69 """Initiates to login dance for the specified scopes and callback URI
70 using appropriate redirections."""
71
72 # Settings.
73 provider = request.matchdict['provider']
74 settings_prefix = 'multiauth.policy.%s.' % provider
75 issuer = request.registry.settings[settings_prefix + 'issuer']
76 client_id = request.registry.settings[settings_prefix + 'client_id']
77 userid_field = request.registry.settings.get(settings_prefix + 'userid_field')
78 state_ttl = int(request.registry.settings.get(settings_prefix + 'state_ttl_seconds',
79 DEFAULT_STATE_TTL_SECONDS))
80 state_length = int(request.registry.settings.get(settings_prefix + 'state_length',
81 DEFAULT_STATE_LENGTH))
82
83 # Read OpenID configuration (cached by issuer)
84 oid_config = fetch_openid_config(issuer)
85 auth_endpoint = oid_config['authorization_endpoint']
86
87 scope = request.GET['scope']
88 callback = request.GET['callback']
89
90 # Check that email scope is requested if userid field is configured as email.
91 if userid_field == 'email' and 'email' not in scope:
92 error_details = {
93 'name': 'scope',
94 'description': "Provider %s requires 'email' scope" % provider,
95 }
96 raise_invalid(request, **error_details)
97
98 # Generate a random string as state.
99 # And save it until code is traded.
100 state = random_bytes_hex(state_length)
101 request.registry.cache.set('openid:state:' + state, callback, ttl=state_ttl)
102
103 # Redirect the client to the Identity Provider that will eventually redirect
104 # to the OpenID token endpoint.
105 token_uri = request.route_url('openid_token', provider=provider) + '?'
106 params = dict(client_id=client_id, response_type='code', scope=scope,
107 redirect_uri=token_uri, state=state)
108 redirect = '{}?{}'.format(auth_endpoint, urllib.parse.urlencode(params))
109 raise httpexceptions.HTTPTemporaryRedirect(redirect)
110
111
112 class TokenQuerystringSchema(colander.MappingSchema):
113 """
114 Querystring schema for the token endpoint.
115 """
116 code = colander.SchemaNode(colander.String())
117 state = colander.SchemaNode(colander.String())
118
119
120 class TokenSchema(colander.MappingSchema):
121 querystring = TokenQuerystringSchema()
122
123
124 token = Service(name='openid_token',
125 path='/openid/{provider}/token',
126 description='')
127
128
129 @token.get(schema=TokenSchema(),
130 validators=(colander_validator, provider_validator))
131 def get_token(request):
132 """Trades the specified code and state against access and ID tokens.
133 The client is redirected to the original ``callback`` URI with the
134 result in querystring."""
135
136 # Settings.
137 provider = request.matchdict['provider']
138 settings_prefix = 'multiauth.policy.%s.' % provider
139 issuer = request.registry.settings[settings_prefix + 'issuer']
140 client_id = request.registry.settings[settings_prefix + 'client_id']
141 client_secret = request.registry.settings[settings_prefix + 'client_secret']
142
143 # Read OpenID configuration (cached by issuer)
144 oid_config = fetch_openid_config(issuer)
145 token_endpoint = oid_config['token_endpoint']
146
147 code = request.GET['code']
148 state = request.GET['state']
149
150 # State can be used only once.
151 callback = request.registry.cache.delete('openid:state:' + state)
152 if callback is None:
153 error_details = {
154 'name': 'state',
155 'description': 'Invalid state',
156 'errno': ERRORS.INVALID_AUTH_TOKEN.value,
157 }
158 raise_invalid(request, **error_details)
159
160 # Trade the code for tokens on the Identity Provider.
161 # Google Identity requires to specify again redirect_uri.
162 redirect_uri = request.route_url('openid_token', provider=provider) + '?'
163 data = {
164 'code': code,
165 'client_id': client_id,
166 'client_secret': client_secret,
167 'redirect_uri': redirect_uri,
168 'grant_type': 'authorization_code',
169 }
170 resp = requests.post(token_endpoint, data=data)
171
172 # The IdP response is forwarded to the client in the querystring/location hash.
173 # (eg. callback=`http://localhost:3000/#tokens=`)
174 redirect = callback + urllib.parse.quote(resp.text)
175 raise httpexceptions.HTTPTemporaryRedirect(redirect)
176
[end of kinto/plugins/openid/views.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/kinto/plugins/openid/views.py b/kinto/plugins/openid/views.py
--- a/kinto/plugins/openid/views.py
+++ b/kinto/plugins/openid/views.py
@@ -51,6 +51,9 @@
"""
callback = URL()
scope = colander.SchemaNode(colander.String())
+ prompt = colander.SchemaNode(colander.String(),
+ validator=colander.Regex("none"),
+ missing=colander.drop)
class LoginSchema(colander.MappingSchema):
@@ -86,6 +89,7 @@
scope = request.GET['scope']
callback = request.GET['callback']
+ prompt = request.GET.get('prompt')
# Check that email scope is requested if userid field is configured as email.
if userid_field == 'email' and 'email' not in scope:
@@ -105,6 +109,9 @@
token_uri = request.route_url('openid_token', provider=provider) + '?'
params = dict(client_id=client_id, response_type='code', scope=scope,
redirect_uri=token_uri, state=state)
+ if prompt:
+ # The 'prompt' parameter is optional.
+ params['prompt'] = prompt
redirect = '{}?{}'.format(auth_endpoint, urllib.parse.urlencode(params))
raise httpexceptions.HTTPTemporaryRedirect(redirect)
|
{"golden_diff": "diff --git a/kinto/plugins/openid/views.py b/kinto/plugins/openid/views.py\n--- a/kinto/plugins/openid/views.py\n+++ b/kinto/plugins/openid/views.py\n@@ -51,6 +51,9 @@\n \"\"\"\n callback = URL()\n scope = colander.SchemaNode(colander.String())\n+ prompt = colander.SchemaNode(colander.String(),\n+ validator=colander.Regex(\"none\"),\n+ missing=colander.drop)\n \n \n class LoginSchema(colander.MappingSchema):\n@@ -86,6 +89,7 @@\n \n scope = request.GET['scope']\n callback = request.GET['callback']\n+ prompt = request.GET.get('prompt')\n \n # Check that email scope is requested if userid field is configured as email.\n if userid_field == 'email' and 'email' not in scope:\n@@ -105,6 +109,9 @@\n token_uri = request.route_url('openid_token', provider=provider) + '?'\n params = dict(client_id=client_id, response_type='code', scope=scope,\n redirect_uri=token_uri, state=state)\n+ if prompt:\n+ # The 'prompt' parameter is optional.\n+ params['prompt'] = prompt\n redirect = '{}?{}'.format(auth_endpoint, urllib.parse.urlencode(params))\n raise httpexceptions.HTTPTemporaryRedirect(redirect)\n", "issue": "prompt=none not carried for silent authentication\nThe [documentation](https://auth0.com/docs/api-auth/tutorials/silent-authentication) is more complex that it needs to be. \r\n\r\nTo authenticate you visit `/v1/openid/auth0/login?callback=http...&scopes=...` it redirects to the OIDC provider (e.g. `https://peterbecom.auth0.com/authorize?client_id=...&response_type=code&scope=openid+email+profile&redirect_uri=...&state=...`) which then redirects you back to the `callback` (if all goes well with the password check) with a code that kinto trades in for an access token which it then appends to the `callback` URL.\r\n\r\nNow, if that access token expires (or is *about to* expire!) we need to authenticate again. The default expiration time for the access token (at least with Auth0) is 1 day. To do that, you can simply add `&prompt=none` to the URL. E.g. `https://peterbecom.auth0.com/authorize?client_id=...&response_type=code&scope=openid+email+profile&redirect_uri=...&state=...&prompt=none`. What the OIDC provider does is...\r\n\r\n> \"Silent authentication lets you perform an authentication flow where Auth0 will only reply with redirects, and never with a login page.\"\r\n\r\nIt's basically the same URL as authorization but with `&prompt=none` added. \nprompt=none not carried for silent authentication\nThe [documentation](https://auth0.com/docs/api-auth/tutorials/silent-authentication) is more complex that it needs to be. \r\n\r\nTo authenticate you visit `/v1/openid/auth0/login?callback=http...&scopes=...` it redirects to the OIDC provider (e.g. `https://peterbecom.auth0.com/authorize?client_id=...&response_type=code&scope=openid+email+profile&redirect_uri=...&state=...`) which then redirects you back to the `callback` (if all goes well with the password check) with a code that kinto trades in for an access token which it then appends to the `callback` URL.\r\n\r\nNow, if that access token expires (or is *about to* expire!) we need to authenticate again. The default expiration time for the access token (at least with Auth0) is 1 day. To do that, you can simply add `&prompt=none` to the URL. E.g. `https://peterbecom.auth0.com/authorize?client_id=...&response_type=code&scope=openid+email+profile&redirect_uri=...&state=...&prompt=none`. 
What the OIDC provider does is...\r\n\r\n> \"Silent authentication lets you perform an authentication flow where Auth0 will only reply with redirects, and never with a login page.\"\r\n\r\nIt's basically the same URL as authorization but with `&prompt=none` added. \n", "before_files": [{"content": "import urllib.parse\n\nimport colander\nimport requests\nfrom pyramid import httpexceptions\n\nfrom cornice.validators import colander_validator\nfrom kinto.core import Service\nfrom kinto.core.errors import raise_invalid, ERRORS\nfrom kinto.core.utils import random_bytes_hex\nfrom kinto.core.resource.schema import ErrorResponseSchema\nfrom kinto.core.schema import URL\n\nfrom .utils import fetch_openid_config\n\n\nDEFAULT_STATE_TTL_SECONDS = 3600\nDEFAULT_STATE_LENGTH = 32\n\n\nclass RedirectHeadersSchema(colander.MappingSchema):\n \"\"\"Redirect response headers.\"\"\"\n location = colander.SchemaNode(colander.String(), name='Location')\n\n\nclass RedirectResponseSchema(colander.MappingSchema):\n \"\"\"Redirect response schema.\"\"\"\n headers = RedirectHeadersSchema()\n\n\nresponse_schemas = {\n '307': RedirectResponseSchema(description='Successful redirection.'),\n '400': ErrorResponseSchema(description='The request is invalid.'),\n}\n\n\ndef provider_validator(request, **kwargs):\n \"\"\"\n This validator verifies that the validator in URL (eg. /openid/auth0/login)\n is a configured OpenIDConnect policy.\n \"\"\"\n provider = request.matchdict['provider']\n used = request.registry.settings.get('multiauth.policy.%s.use' % provider, '')\n if not used.endswith('OpenIDConnectPolicy'):\n request.errors.add('path', 'provider', 'Unknow provider %r' % provider)\n\n\nclass LoginQuerystringSchema(colander.MappingSchema):\n \"\"\"\n Querystring schema for the login endpoint.\n \"\"\"\n callback = URL()\n scope = colander.SchemaNode(colander.String())\n\n\nclass LoginSchema(colander.MappingSchema):\n querystring = LoginQuerystringSchema()\n\n\nlogin = Service(name='openid_login',\n path='/openid/{provider}/login',\n description='Initiate the OAuth2 login')\n\n\[email protected](schema=LoginSchema(),\n validators=(colander_validator, provider_validator),\n response_schemas=response_schemas)\ndef get_login(request):\n \"\"\"Initiates to login dance for the specified scopes and callback URI\n using appropriate redirections.\"\"\"\n\n # Settings.\n provider = request.matchdict['provider']\n settings_prefix = 'multiauth.policy.%s.' 
% provider\n issuer = request.registry.settings[settings_prefix + 'issuer']\n client_id = request.registry.settings[settings_prefix + 'client_id']\n userid_field = request.registry.settings.get(settings_prefix + 'userid_field')\n state_ttl = int(request.registry.settings.get(settings_prefix + 'state_ttl_seconds',\n DEFAULT_STATE_TTL_SECONDS))\n state_length = int(request.registry.settings.get(settings_prefix + 'state_length',\n DEFAULT_STATE_LENGTH))\n\n # Read OpenID configuration (cached by issuer)\n oid_config = fetch_openid_config(issuer)\n auth_endpoint = oid_config['authorization_endpoint']\n\n scope = request.GET['scope']\n callback = request.GET['callback']\n\n # Check that email scope is requested if userid field is configured as email.\n if userid_field == 'email' and 'email' not in scope:\n error_details = {\n 'name': 'scope',\n 'description': \"Provider %s requires 'email' scope\" % provider,\n }\n raise_invalid(request, **error_details)\n\n # Generate a random string as state.\n # And save it until code is traded.\n state = random_bytes_hex(state_length)\n request.registry.cache.set('openid:state:' + state, callback, ttl=state_ttl)\n\n # Redirect the client to the Identity Provider that will eventually redirect\n # to the OpenID token endpoint.\n token_uri = request.route_url('openid_token', provider=provider) + '?'\n params = dict(client_id=client_id, response_type='code', scope=scope,\n redirect_uri=token_uri, state=state)\n redirect = '{}?{}'.format(auth_endpoint, urllib.parse.urlencode(params))\n raise httpexceptions.HTTPTemporaryRedirect(redirect)\n\n\nclass TokenQuerystringSchema(colander.MappingSchema):\n \"\"\"\n Querystring schema for the token endpoint.\n \"\"\"\n code = colander.SchemaNode(colander.String())\n state = colander.SchemaNode(colander.String())\n\n\nclass TokenSchema(colander.MappingSchema):\n querystring = TokenQuerystringSchema()\n\n\ntoken = Service(name='openid_token',\n path='/openid/{provider}/token',\n description='')\n\n\[email protected](schema=TokenSchema(),\n validators=(colander_validator, provider_validator))\ndef get_token(request):\n \"\"\"Trades the specified code and state against access and ID tokens.\n The client is redirected to the original ``callback`` URI with the\n result in querystring.\"\"\"\n\n # Settings.\n provider = request.matchdict['provider']\n settings_prefix = 'multiauth.policy.%s.' 
% provider\n issuer = request.registry.settings[settings_prefix + 'issuer']\n client_id = request.registry.settings[settings_prefix + 'client_id']\n client_secret = request.registry.settings[settings_prefix + 'client_secret']\n\n # Read OpenID configuration (cached by issuer)\n oid_config = fetch_openid_config(issuer)\n token_endpoint = oid_config['token_endpoint']\n\n code = request.GET['code']\n state = request.GET['state']\n\n # State can be used only once.\n callback = request.registry.cache.delete('openid:state:' + state)\n if callback is None:\n error_details = {\n 'name': 'state',\n 'description': 'Invalid state',\n 'errno': ERRORS.INVALID_AUTH_TOKEN.value,\n }\n raise_invalid(request, **error_details)\n\n # Trade the code for tokens on the Identity Provider.\n # Google Identity requires to specify again redirect_uri.\n redirect_uri = request.route_url('openid_token', provider=provider) + '?'\n data = {\n 'code': code,\n 'client_id': client_id,\n 'client_secret': client_secret,\n 'redirect_uri': redirect_uri,\n 'grant_type': 'authorization_code',\n }\n resp = requests.post(token_endpoint, data=data)\n\n # The IdP response is forwarded to the client in the querystring/location hash.\n # (eg. callback=`http://localhost:3000/#tokens=`)\n redirect = callback + urllib.parse.quote(resp.text)\n raise httpexceptions.HTTPTemporaryRedirect(redirect)\n", "path": "kinto/plugins/openid/views.py"}]}
| 2,931 | 298 |
| gh_patches_debug_21805 | rasdani/github-patches | git_diff | psychopy__psychopy-4622 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
404 errors on some of the pages in the HELP menus
Version used
- psychopy: 2021.2.3
https://www.psychopy.org/builder/builder.html
https://www.psychopy.org/api/api.html
</issue>
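One quick way to reproduce the report is to probe every entry in the `urls` mapping and list the ones that return 404. This is a throwaway check script, not part of the fix; it assumes PsychoPy is importable and that network access is available:

```python
from urllib import error, request

from psychopy.app.urls import urls  # the mapping defined in psychopy/app/urls.py

for name, url in sorted(urls.items()):
    try:
        status = request.urlopen(url, timeout=10).status
    except error.HTTPError as exc:
        status = exc.code
    except error.URLError as exc:
        status = f"unreachable ({exc.reason})"
    if status == 404:
        print(f"{name}: {url} -> 404")
```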
<code>
[start of psychopy/app/urls.py]
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3
4 """A central location to store information about urls
5 """
6 urls = dict()
7
8 # links based on string names
9 urls['builder'] = "https://www.psychopy.org/builder/builder.html"
10 urls['builder.loops'] = "https://www.psychopy.org/builder/flow.html#loops"
11 # NB. builder components get their urls defined by the component code
12 # (so a custom component can have a url)
13
14 urls['downloads'] = "https://github.com/psychopy/psychopy/releases"
15 urls['changelog'] = "https://www.psychopy.org/changelog.html"
16
17 general = "https://www.psychopy.org/general/"
18 urls['prefs'] = general + "prefs.html"
19 urls['prefs.general'] = general + "prefs.html#general-settings"
20 urls['prefs.app'] = general + "prefs.html#application-settings"
21 urls['prefs.coder'] = general + "prefs.html#coder-settings"
22 urls['prefs.builder'] = general + "prefs.html#builder-settings"
23 urls['prefs.connections'] = general + "prefs.html#connection-settings"
24
25 # links keyed by wxIDs (e.g. menu item IDs)
26 urls['psychopyHome'] = "https://www.psychopy.org/"
27 urls['psychopyReference'] = "https://www.psychopy.org/api/api.html"
28 urls['coderTutorial'] = "https://www.psychopy.org/coder/tutorial1.html"
29 urls['builderHelp'] = urls['builder']
30 urls['builderDemos'] = "http://code.google.com/p/psychopy/downloads/list?can=2&q=demos"
31 urls['projsAbout'] = "https://www.psychopy.org/general/projects.html"
32
[end of psychopy/app/urls.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/psychopy/app/urls.py b/psychopy/app/urls.py
--- a/psychopy/app/urls.py
+++ b/psychopy/app/urls.py
@@ -6,7 +6,7 @@
urls = dict()
# links based on string names
-urls['builder'] = "https://www.psychopy.org/builder/builder.html"
+urls['builder'] = "https://www.psychopy.org/builder"
urls['builder.loops'] = "https://www.psychopy.org/builder/flow.html#loops"
# NB. builder components get their urls defined by the component code
# (so a custom component can have a url)
@@ -24,7 +24,7 @@
# links keyed by wxIDs (e.g. menu item IDs)
urls['psychopyHome'] = "https://www.psychopy.org/"
-urls['psychopyReference'] = "https://www.psychopy.org/api/api.html"
+urls['psychopyReference'] = "https://www.psychopy.org/api"
urls['coderTutorial'] = "https://www.psychopy.org/coder/tutorial1.html"
urls['builderHelp'] = urls['builder']
urls['builderDemos'] = "http://code.google.com/p/psychopy/downloads/list?can=2&q=demos"
|
{"golden_diff": "diff --git a/psychopy/app/urls.py b/psychopy/app/urls.py\n--- a/psychopy/app/urls.py\n+++ b/psychopy/app/urls.py\n@@ -6,7 +6,7 @@\n urls = dict()\n \n # links based on string names\n-urls['builder'] = \"https://www.psychopy.org/builder/builder.html\"\n+urls['builder'] = \"https://www.psychopy.org/builder\"\n urls['builder.loops'] = \"https://www.psychopy.org/builder/flow.html#loops\"\n # NB. builder components get their urls defined by the component code\n # (so a custom component can have a url)\n@@ -24,7 +24,7 @@\n \n # links keyed by wxIDs (e.g. menu item IDs)\n urls['psychopyHome'] = \"https://www.psychopy.org/\"\n-urls['psychopyReference'] = \"https://www.psychopy.org/api/api.html\"\n+urls['psychopyReference'] = \"https://www.psychopy.org/api\"\n urls['coderTutorial'] = \"https://www.psychopy.org/coder/tutorial1.html\"\n urls['builderHelp'] = urls['builder']\n urls['builderDemos'] = \"http://code.google.com/p/psychopy/downloads/list?can=2&q=demos\"\n", "issue": "404 errors on some of the page in the HELP menus\nVersion used\r\n- psychopy: 2021.2.3\r\n\r\nhttps://www.psychopy.org/builder/builder.html\r\nhttps://www.psychopy.org/api/api.html\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"A central location to store information about urls\n\"\"\"\nurls = dict()\n\n# links based on string names\nurls['builder'] = \"https://www.psychopy.org/builder/builder.html\"\nurls['builder.loops'] = \"https://www.psychopy.org/builder/flow.html#loops\"\n# NB. builder components get their urls defined by the component code\n# (so a custom component can have a url)\n\nurls['downloads'] = \"https://github.com/psychopy/psychopy/releases\"\nurls['changelog'] = \"https://www.psychopy.org/changelog.html\"\n\ngeneral = \"https://www.psychopy.org/general/\"\nurls['prefs'] = general + \"prefs.html\"\nurls['prefs.general'] = general + \"prefs.html#general-settings\"\nurls['prefs.app'] = general + \"prefs.html#application-settings\"\nurls['prefs.coder'] = general + \"prefs.html#coder-settings\"\nurls['prefs.builder'] = general + \"prefs.html#builder-settings\"\nurls['prefs.connections'] = general + \"prefs.html#connection-settings\"\n\n# links keyed by wxIDs (e.g. menu item IDs)\nurls['psychopyHome'] = \"https://www.psychopy.org/\"\nurls['psychopyReference'] = \"https://www.psychopy.org/api/api.html\"\nurls['coderTutorial'] = \"https://www.psychopy.org/coder/tutorial1.html\"\nurls['builderHelp'] = urls['builder']\nurls['builderDemos'] = \"http://code.google.com/p/psychopy/downloads/list?can=2&q=demos\"\nurls['projsAbout'] = \"https://www.psychopy.org/general/projects.html\"\n", "path": "psychopy/app/urls.py"}]}
| 1,016 | 285 |
| gh_patches_debug_543 | rasdani/github-patches | git_diff | edgedb__edgedb-3085 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Consider implementing ++ operator for JSON
Possible behaviour:
```
db> SELECT to_json('[1,2]') ++ to_json('[3]') = to_json('[1,2,3]');
db> SELECT to_json('{"a": 1}') ++ to_json('{"b":2}') = to_json('{"a":1, "b":2}');
db> SELECT to_json('"123"') ++ to_json('"456"') = to_json('"123456"');
```
</issue>
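The behaviour the issue asks for can also be stated outside EdgeQL. The sketch below is plain Python (not EdgeDB code) and the helper name is made up, but it mirrors the three cases listed above: array concatenation, object merge, and string concatenation.

```python
import json


def json_concat(a, b):
    """Hypothetical helper mirroring the proposed ++ semantics for JSON values."""
    left, right = json.loads(a), json.loads(b)
    if isinstance(left, list) and isinstance(right, list):
        return json.dumps(left + right)           # [1,2] ++ [3] -> [1,2,3]
    if isinstance(left, dict) and isinstance(right, dict):
        return json.dumps({**left, **right})      # {"a":1} ++ {"b":2} -> {"a":1,"b":2}
    if isinstance(left, str) and isinstance(right, str):
        return json.dumps(left + right)           # "123" ++ "456" -> "123456"
    raise TypeError("operator not defined for these JSON types")


assert json.loads(json_concat('[1,2]', '[3]')) == [1, 2, 3]
assert json.loads(json_concat('{"a": 1}', '{"b": 2}')) == {"a": 1, "b": 2}
assert json.loads(json_concat('"123"', '"456"')) == "123456"
```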
<code>
[start of edb/buildmeta.py]
1 #
2 # This source file is part of the EdgeDB open source project.
3 #
4 # Copyright 2016-present MagicStack Inc. and the EdgeDB authors.
5 #
6 # Licensed under the Apache License, Version 2.0 (the "License");
7 # you may not use this file except in compliance with the License.
8 # You may obtain a copy of the License at
9 #
10 # http://www.apache.org/licenses/LICENSE-2.0
11 #
12 # Unless required by applicable law or agreed to in writing, software
13 # distributed under the License is distributed on an "AS IS" BASIS,
14 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 # See the License for the specific language governing permissions and
16 # limitations under the License.
17 #
18
19
20 from __future__ import annotations
21 from typing import *
22
23 import hashlib
24 import importlib.util
25 import json
26 import logging
27 import os
28 import pathlib
29 import pickle
30 import re
31 import subprocess
32 import tempfile
33
34
35 from edb.common import debug
36 from edb.common import devmode
37 from edb.common import verutils
38
39
40 # Increment this whenever the database layout or stdlib changes.
41 EDGEDB_CATALOG_VERSION = 2021_10_19_00_00
42
43
44 class MetadataError(Exception):
45 pass
46
47
48 def get_build_metadata_value(prop: str) -> str:
49 env_val = os.environ.get(f'_EDGEDB_BUILDMETA_{prop}')
50 if env_val:
51 return env_val
52
53 try:
54 from . import _buildmeta # type: ignore
55 return getattr(_buildmeta, prop)
56 except (ImportError, AttributeError):
57 raise MetadataError(
58 f'could not find {prop} in EdgeDB distribution metadata') from None
59
60
61 def get_pg_config_path() -> pathlib.Path:
62 if devmode.is_in_dev_mode():
63 root = pathlib.Path(__file__).parent.parent
64 pg_config = (root / 'build' / 'postgres' /
65 'install' / 'bin' / 'pg_config').resolve()
66 if not pg_config.is_file():
67 try:
68 pg_config = pathlib.Path(
69 get_build_metadata_value('PG_CONFIG_PATH'))
70 except MetadataError:
71 pass
72
73 if not pg_config.is_file():
74 raise MetadataError('DEV mode: Could not find PostgreSQL build, '
75 'run `pip install -e .`')
76
77 else:
78 pg_config = pathlib.Path(
79 get_build_metadata_value('PG_CONFIG_PATH'))
80
81 if not pg_config.is_file():
82 raise MetadataError(
83 f'invalid pg_config path: {pg_config!r}: file does not exist '
84 f'or is not a regular file')
85
86 return pg_config
87
88
89 def get_runstate_path(data_dir: pathlib.Path) -> pathlib.Path:
90 if devmode.is_in_dev_mode():
91 return data_dir
92 else:
93 runstate_dir = get_build_metadata_value('RUNSTATE_DIR')
94 if runstate_dir is not None:
95 return pathlib.Path(runstate_dir)
96 else:
97 return data_dir
98
99
100 def get_shared_data_dir_path() -> pathlib.Path:
101 if devmode.is_in_dev_mode():
102 return devmode.get_dev_mode_cache_dir() # type: ignore[return-value]
103 else:
104 return pathlib.Path(get_build_metadata_value('SHARED_DATA_DIR'))
105
106
107 def hash_dirs(
108 dirs: Sequence[Tuple[str, str]],
109 *,
110 extra_files: Optional[Sequence[Union[str, pathlib.Path]]]=None
111 ) -> bytes:
112 def hash_dir(dirname, ext, paths):
113 with os.scandir(dirname) as it:
114 for entry in it:
115 if entry.is_file() and entry.name.endswith(ext):
116 paths.append(entry.path)
117 elif entry.is_dir():
118 hash_dir(entry.path, ext, paths)
119
120 paths: List[str] = []
121 for dirname, ext in dirs:
122 hash_dir(dirname, ext, paths)
123
124 if extra_files:
125 for extra_file in extra_files:
126 if isinstance(extra_file, pathlib.Path):
127 extra_file = str(extra_file.resolve())
128 paths.append(extra_file)
129
130 h = hashlib.sha1() # sha1 is the fastest one.
131 for path in sorted(paths):
132 with open(path, 'rb') as f:
133 h.update(f.read())
134
135 return h.digest()
136
137
138 def read_data_cache(
139 cache_key: bytes,
140 path: str,
141 *,
142 pickled: bool=True,
143 source_dir: Optional[pathlib.Path] = None,
144 ) -> Any:
145 if source_dir is None:
146 source_dir = get_shared_data_dir_path()
147 full_path = source_dir / path
148
149 if full_path.exists():
150 with open(full_path, 'rb') as f:
151 src_hash = f.read(len(cache_key))
152 if src_hash == cache_key or debug.flags.bootstrap_cache_yolo:
153 if pickled:
154 data = f.read()
155 try:
156 return pickle.loads(data)
157 except Exception:
158 logging.exception(f'could not unpickle {path}')
159 else:
160 return f.read()
161
162
163 def write_data_cache(
164 obj: Any,
165 cache_key: bytes,
166 path: str,
167 *,
168 pickled: bool = True,
169 target_dir: Optional[pathlib.Path] = None,
170 ):
171 if target_dir is None:
172 target_dir = get_shared_data_dir_path()
173 full_path = target_dir / path
174
175 try:
176 with tempfile.NamedTemporaryFile(
177 mode='wb', dir=full_path.parent, delete=False) as f:
178 f.write(cache_key)
179 if pickled:
180 pickle.dump(obj, file=f, protocol=pickle.HIGHEST_PROTOCOL)
181 else:
182 f.write(obj)
183 except Exception:
184 try:
185 os.unlink(f.name)
186 except OSError:
187 pass
188 finally:
189 raise
190 else:
191 os.rename(f.name, full_path)
192
193
194 def get_version() -> verutils.Version:
195 if devmode.is_in_dev_mode():
196 root = pathlib.Path(__file__).parent.parent.resolve()
197 version = verutils.parse_version(get_version_from_scm(root))
198 else:
199 vertuple: List[Any] = list(get_build_metadata_value('VERSION'))
200 vertuple[2] = verutils.VersionStage(vertuple[2])
201 version = verutils.Version(*vertuple)
202
203 return version
204
205
206 _version_dict: Optional[Mapping[str, Any]] = None
207
208
209 def get_version_dict() -> Mapping[str, Any]:
210 global _version_dict
211
212 if _version_dict is None:
213 ver = get_version()
214 _version_dict = {
215 'major': ver.major,
216 'minor': ver.minor,
217 'stage': ver.stage.name.lower(),
218 'stage_no': ver.stage_no,
219 'local': tuple(ver.local) if ver.local else (),
220 }
221
222 return _version_dict
223
224
225 _version_json: Optional[str] = None
226
227
228 def get_version_json() -> str:
229 global _version_json
230 if _version_json is None:
231 _version_json = json.dumps(get_version_dict())
232 return _version_json
233
234
235 def get_version_from_scm(root: pathlib.Path) -> str:
236 pretend = os.environ.get('SETUPTOOLS_SCM_PRETEND_VERSION')
237 if pretend:
238 return pretend
239
240 posint = r'(0|[1-9]\d*)'
241 pep440_version_re = re.compile(
242 rf"""
243 ^
244 (?P<major>{posint})
245 \.
246 (?P<minor>{posint})
247 (
248 \.
249 (?P<micro>{posint})
250 )?
251 (
252 (?P<prekind>a|b|rc)
253 (?P<preval>{posint})
254 )?
255 $
256 """,
257 re.X,
258 )
259
260 proc = subprocess.run(
261 ['git', 'tag', '--list', 'v*'],
262 stdout=subprocess.PIPE,
263 universal_newlines=True,
264 check=True,
265 cwd=root,
266 )
267 all_tags = {
268 v[1:]
269 for v in proc.stdout.strip().split('\n')
270 if pep440_version_re.match(v[1:])
271 }
272
273 proc = subprocess.run(
274 ['git', 'tag', '--points-at', 'HEAD'],
275 stdout=subprocess.PIPE,
276 universal_newlines=True,
277 check=True,
278 cwd=root,
279 )
280 head_tags = {
281 v[1:]
282 for v in proc.stdout.strip().split('\n')
283 if pep440_version_re.match(v[1:])
284 }
285
286 if all_tags & head_tags:
287 tag = max(head_tags)
288 else:
289 tag = max(all_tags)
290
291 m = pep440_version_re.match(tag)
292 assert m is not None
293 major = m.group('major')
294 minor = m.group('minor')
295 micro = m.group('micro') or ''
296 microkind = '.' if micro else ''
297 prekind = m.group('prekind') or ''
298 preval = m.group('preval') or ''
299
300 if os.environ.get("EDGEDB_BUILD_IS_RELEASE"):
301 # Release build.
302 ver = f'{major}.{minor}{microkind}{micro}{prekind}{preval}'
303 else:
304 # Dev/nightly build.
305 if prekind and preval:
306 preval = str(int(preval) + 1)
307 elif micro:
308 micro = str(int(micro) + 1)
309 else:
310 minor = str(int(minor) + 1)
311
312 incremented_ver = f'{major}.{minor}{microkind}{micro}{prekind}{preval}'
313
314 proc = subprocess.run(
315 ['git', 'rev-list', '--count', 'HEAD'],
316 stdout=subprocess.PIPE,
317 universal_newlines=True,
318 check=True,
319 cwd=root,
320 )
321 commits_on_branch = proc.stdout.strip()
322 ver = f'{incremented_ver}.dev{commits_on_branch}'
323
324 proc = subprocess.run(
325 ['git', 'rev-parse', '--verify', '--quiet', 'HEAD'],
326 stdout=subprocess.PIPE,
327 universal_newlines=True,
328 check=True,
329 cwd=root,
330 )
331 commitish = proc.stdout.strip()
332
333 env = dict(os.environ)
334 env['TZ'] = 'UTC'
335 proc = subprocess.run(
336 ['git', 'show', '-s', '--format=%cd',
337 '--date=format-local:%Y%m%d%H', commitish],
338 stdout=subprocess.PIPE,
339 universal_newlines=True,
340 check=True,
341 cwd=root,
342 env=env,
343 )
344 rev_date = proc.stdout.strip()
345
346 catver = EDGEDB_CATALOG_VERSION
347 return f'{ver}+d{rev_date}.g{commitish[:9]}.cv{catver}'
348
349
350 def get_cache_src_dirs():
351 find_spec = importlib.util.find_spec
352
353 edgeql = pathlib.Path(find_spec('edb.edgeql').origin).parent
354 return (
355 (pathlib.Path(find_spec('edb.schema').origin).parent, '.py'),
356 (edgeql / 'compiler', '.py'),
357 (edgeql / 'parser', '.py'),
358 (pathlib.Path(find_spec('edb.lib').origin).parent, '.edgeql'),
359 (pathlib.Path(find_spec('edb.pgsql.metaschema').origin).parent, '.py'),
360 )
361
362
363 def get_default_tenant_id() -> str:
364 catver = EDGEDB_CATALOG_VERSION
365 return f'V{catver:x}'
366
[end of edb/buildmeta.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/edb/buildmeta.py b/edb/buildmeta.py
--- a/edb/buildmeta.py
+++ b/edb/buildmeta.py
@@ -38,7 +38,7 @@
# Increment this whenever the database layout or stdlib changes.
-EDGEDB_CATALOG_VERSION = 2021_10_19_00_00
+EDGEDB_CATALOG_VERSION = 2021_10_24_00_00
class MetadataError(Exception):
|
{"golden_diff": "diff --git a/edb/buildmeta.py b/edb/buildmeta.py\n--- a/edb/buildmeta.py\n+++ b/edb/buildmeta.py\n@@ -38,7 +38,7 @@\n \n \n # Increment this whenever the database layout or stdlib changes.\n-EDGEDB_CATALOG_VERSION = 2021_10_19_00_00\n+EDGEDB_CATALOG_VERSION = 2021_10_24_00_00\n \n \n class MetadataError(Exception):\n", "issue": "Consider implementing ++ operator for JSON\nPossible behaviour:\r\n\r\n```\r\ndb> SELECT to_json('[1,2]') ++ to_json('[3]') = to_json('[1,2,3]');\r\ndb> SELECT to_json('{\"a\": 1}') ++ to_json('{\"b\":2}') = to_json('{\"a\":1, \"b\":2}');\r\ndb> SELECT to_json('\"123\"') ++ to_json('\"456\"') = to_json('\"123456\"');\r\n```\n", "before_files": [{"content": "#\n# This source file is part of the EdgeDB open source project.\n#\n# Copyright 2016-present MagicStack Inc. and the EdgeDB authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n\nfrom __future__ import annotations\nfrom typing import *\n\nimport hashlib\nimport importlib.util\nimport json\nimport logging\nimport os\nimport pathlib\nimport pickle\nimport re\nimport subprocess\nimport tempfile\n\n\nfrom edb.common import debug\nfrom edb.common import devmode\nfrom edb.common import verutils\n\n\n# Increment this whenever the database layout or stdlib changes.\nEDGEDB_CATALOG_VERSION = 2021_10_19_00_00\n\n\nclass MetadataError(Exception):\n pass\n\n\ndef get_build_metadata_value(prop: str) -> str:\n env_val = os.environ.get(f'_EDGEDB_BUILDMETA_{prop}')\n if env_val:\n return env_val\n\n try:\n from . 
import _buildmeta # type: ignore\n return getattr(_buildmeta, prop)\n except (ImportError, AttributeError):\n raise MetadataError(\n f'could not find {prop} in EdgeDB distribution metadata') from None\n\n\ndef get_pg_config_path() -> pathlib.Path:\n if devmode.is_in_dev_mode():\n root = pathlib.Path(__file__).parent.parent\n pg_config = (root / 'build' / 'postgres' /\n 'install' / 'bin' / 'pg_config').resolve()\n if not pg_config.is_file():\n try:\n pg_config = pathlib.Path(\n get_build_metadata_value('PG_CONFIG_PATH'))\n except MetadataError:\n pass\n\n if not pg_config.is_file():\n raise MetadataError('DEV mode: Could not find PostgreSQL build, '\n 'run `pip install -e .`')\n\n else:\n pg_config = pathlib.Path(\n get_build_metadata_value('PG_CONFIG_PATH'))\n\n if not pg_config.is_file():\n raise MetadataError(\n f'invalid pg_config path: {pg_config!r}: file does not exist '\n f'or is not a regular file')\n\n return pg_config\n\n\ndef get_runstate_path(data_dir: pathlib.Path) -> pathlib.Path:\n if devmode.is_in_dev_mode():\n return data_dir\n else:\n runstate_dir = get_build_metadata_value('RUNSTATE_DIR')\n if runstate_dir is not None:\n return pathlib.Path(runstate_dir)\n else:\n return data_dir\n\n\ndef get_shared_data_dir_path() -> pathlib.Path:\n if devmode.is_in_dev_mode():\n return devmode.get_dev_mode_cache_dir() # type: ignore[return-value]\n else:\n return pathlib.Path(get_build_metadata_value('SHARED_DATA_DIR'))\n\n\ndef hash_dirs(\n dirs: Sequence[Tuple[str, str]],\n *,\n extra_files: Optional[Sequence[Union[str, pathlib.Path]]]=None\n) -> bytes:\n def hash_dir(dirname, ext, paths):\n with os.scandir(dirname) as it:\n for entry in it:\n if entry.is_file() and entry.name.endswith(ext):\n paths.append(entry.path)\n elif entry.is_dir():\n hash_dir(entry.path, ext, paths)\n\n paths: List[str] = []\n for dirname, ext in dirs:\n hash_dir(dirname, ext, paths)\n\n if extra_files:\n for extra_file in extra_files:\n if isinstance(extra_file, pathlib.Path):\n extra_file = str(extra_file.resolve())\n paths.append(extra_file)\n\n h = hashlib.sha1() # sha1 is the fastest one.\n for path in sorted(paths):\n with open(path, 'rb') as f:\n h.update(f.read())\n\n return h.digest()\n\n\ndef read_data_cache(\n cache_key: bytes,\n path: str,\n *,\n pickled: bool=True,\n source_dir: Optional[pathlib.Path] = None,\n) -> Any:\n if source_dir is None:\n source_dir = get_shared_data_dir_path()\n full_path = source_dir / path\n\n if full_path.exists():\n with open(full_path, 'rb') as f:\n src_hash = f.read(len(cache_key))\n if src_hash == cache_key or debug.flags.bootstrap_cache_yolo:\n if pickled:\n data = f.read()\n try:\n return pickle.loads(data)\n except Exception:\n logging.exception(f'could not unpickle {path}')\n else:\n return f.read()\n\n\ndef write_data_cache(\n obj: Any,\n cache_key: bytes,\n path: str,\n *,\n pickled: bool = True,\n target_dir: Optional[pathlib.Path] = None,\n):\n if target_dir is None:\n target_dir = get_shared_data_dir_path()\n full_path = target_dir / path\n\n try:\n with tempfile.NamedTemporaryFile(\n mode='wb', dir=full_path.parent, delete=False) as f:\n f.write(cache_key)\n if pickled:\n pickle.dump(obj, file=f, protocol=pickle.HIGHEST_PROTOCOL)\n else:\n f.write(obj)\n except Exception:\n try:\n os.unlink(f.name)\n except OSError:\n pass\n finally:\n raise\n else:\n os.rename(f.name, full_path)\n\n\ndef get_version() -> verutils.Version:\n if devmode.is_in_dev_mode():\n root = pathlib.Path(__file__).parent.parent.resolve()\n version = 
verutils.parse_version(get_version_from_scm(root))\n else:\n vertuple: List[Any] = list(get_build_metadata_value('VERSION'))\n vertuple[2] = verutils.VersionStage(vertuple[2])\n version = verutils.Version(*vertuple)\n\n return version\n\n\n_version_dict: Optional[Mapping[str, Any]] = None\n\n\ndef get_version_dict() -> Mapping[str, Any]:\n global _version_dict\n\n if _version_dict is None:\n ver = get_version()\n _version_dict = {\n 'major': ver.major,\n 'minor': ver.minor,\n 'stage': ver.stage.name.lower(),\n 'stage_no': ver.stage_no,\n 'local': tuple(ver.local) if ver.local else (),\n }\n\n return _version_dict\n\n\n_version_json: Optional[str] = None\n\n\ndef get_version_json() -> str:\n global _version_json\n if _version_json is None:\n _version_json = json.dumps(get_version_dict())\n return _version_json\n\n\ndef get_version_from_scm(root: pathlib.Path) -> str:\n pretend = os.environ.get('SETUPTOOLS_SCM_PRETEND_VERSION')\n if pretend:\n return pretend\n\n posint = r'(0|[1-9]\\d*)'\n pep440_version_re = re.compile(\n rf\"\"\"\n ^\n (?P<major>{posint})\n \\.\n (?P<minor>{posint})\n (\n \\.\n (?P<micro>{posint})\n )?\n (\n (?P<prekind>a|b|rc)\n (?P<preval>{posint})\n )?\n $\n \"\"\",\n re.X,\n )\n\n proc = subprocess.run(\n ['git', 'tag', '--list', 'v*'],\n stdout=subprocess.PIPE,\n universal_newlines=True,\n check=True,\n cwd=root,\n )\n all_tags = {\n v[1:]\n for v in proc.stdout.strip().split('\\n')\n if pep440_version_re.match(v[1:])\n }\n\n proc = subprocess.run(\n ['git', 'tag', '--points-at', 'HEAD'],\n stdout=subprocess.PIPE,\n universal_newlines=True,\n check=True,\n cwd=root,\n )\n head_tags = {\n v[1:]\n for v in proc.stdout.strip().split('\\n')\n if pep440_version_re.match(v[1:])\n }\n\n if all_tags & head_tags:\n tag = max(head_tags)\n else:\n tag = max(all_tags)\n\n m = pep440_version_re.match(tag)\n assert m is not None\n major = m.group('major')\n minor = m.group('minor')\n micro = m.group('micro') or ''\n microkind = '.' 
if micro else ''\n prekind = m.group('prekind') or ''\n preval = m.group('preval') or ''\n\n if os.environ.get(\"EDGEDB_BUILD_IS_RELEASE\"):\n # Release build.\n ver = f'{major}.{minor}{microkind}{micro}{prekind}{preval}'\n else:\n # Dev/nightly build.\n if prekind and preval:\n preval = str(int(preval) + 1)\n elif micro:\n micro = str(int(micro) + 1)\n else:\n minor = str(int(minor) + 1)\n\n incremented_ver = f'{major}.{minor}{microkind}{micro}{prekind}{preval}'\n\n proc = subprocess.run(\n ['git', 'rev-list', '--count', 'HEAD'],\n stdout=subprocess.PIPE,\n universal_newlines=True,\n check=True,\n cwd=root,\n )\n commits_on_branch = proc.stdout.strip()\n ver = f'{incremented_ver}.dev{commits_on_branch}'\n\n proc = subprocess.run(\n ['git', 'rev-parse', '--verify', '--quiet', 'HEAD'],\n stdout=subprocess.PIPE,\n universal_newlines=True,\n check=True,\n cwd=root,\n )\n commitish = proc.stdout.strip()\n\n env = dict(os.environ)\n env['TZ'] = 'UTC'\n proc = subprocess.run(\n ['git', 'show', '-s', '--format=%cd',\n '--date=format-local:%Y%m%d%H', commitish],\n stdout=subprocess.PIPE,\n universal_newlines=True,\n check=True,\n cwd=root,\n env=env,\n )\n rev_date = proc.stdout.strip()\n\n catver = EDGEDB_CATALOG_VERSION\n return f'{ver}+d{rev_date}.g{commitish[:9]}.cv{catver}'\n\n\ndef get_cache_src_dirs():\n find_spec = importlib.util.find_spec\n\n edgeql = pathlib.Path(find_spec('edb.edgeql').origin).parent\n return (\n (pathlib.Path(find_spec('edb.schema').origin).parent, '.py'),\n (edgeql / 'compiler', '.py'),\n (edgeql / 'parser', '.py'),\n (pathlib.Path(find_spec('edb.lib').origin).parent, '.edgeql'),\n (pathlib.Path(find_spec('edb.pgsql.metaschema').origin).parent, '.py'),\n )\n\n\ndef get_default_tenant_id() -> str:\n catver = EDGEDB_CATALOG_VERSION\n return f'V{catver:x}'\n", "path": "edb/buildmeta.py"}]}
| 4,078 | 115 |
| gh_patches_debug_11681 | rasdani/github-patches | git_diff | joke2k__faker-494 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add a file path provider
In the file providers it would be nice to have a file_path provider that would return a path like ```/lorem/ipsum/lorem.pdf```.
</issue>
<code>
[start of faker/providers/file/__init__.py]
1 # coding=utf-8
2 from __future__ import unicode_literals
3 from collections import OrderedDict
4
5 from .. import BaseProvider
6 from ..lorem.la import Provider as WordProvider
7
8
9 class Provider(BaseProvider):
10 application_mime_types = (
11
12 "application/atom+xml", # Atom feeds
13 "application/ecmascript",
14 # ECMAScript/JavaScript; Defined in RFC 4329 (equivalent to application/javascript but with stricter processing rules)
15 "application/EDI-X12", # EDI X12 data; Defined in RFC 1767
16 "application/EDIFACT", # EDI EDIFACT data; Defined in RFC 1767
17 "application/json", # JavaScript Object Notation JSON; Defined in RFC 4627
18 "application/javascript", # ECMAScript/JavaScript; Defined in RFC 4329 (equivalent to application/ecmascript
19 # but with looser processing rules) It is not accepted in IE 8
20 # or earlier - text/javascript is accepted but it is defined as obsolete in RFC 4329.
21 # The "type" attribute of the <script> tag in HTML5 is optional and in practice
22 # omitting the media type of JavaScript programs is the most interoperable
23 # solution since all browsers have always assumed the correct
24 # default even before HTML5. "application/octet-stream", # Arbitrary binary data.[6] Generally speaking this type identifies files that are not associated with a specific application. Contrary to past assumptions by software packages such as Apache this is not a type that should be applied to unknown files. In such a case, a server or application should not indicate a content type, as it may be incorrect, but rather, should omit the type in order to allow the recipient to guess the type.[7]
25 "application/ogg", # Ogg, a multimedia bitstream container format; Defined in RFC 5334
26 "application/pdf", # Portable Document Format, PDF has been in use for document exchange
27 # on the Internet since 1993; Defined in RFC 3778
28 "application/postscript", # PostScript; Defined in RFC 2046
29 "application/rdf+xml", # Resource Description Framework; Defined by RFC 3870
30 "application/rss+xml", # RSS feeds
31 "application/soap+xml", # SOAP; Defined by RFC 3902
32 "application/font-woff", # Web Open Font Format; (candidate recommendation; use application/x-font-woff
33 # until standard is official)
34 "application/xhtml+xml", # XHTML; Defined by RFC 3236
35 "application/xml-dtd", # DTD files; Defined by RFC 3023
36 "application/xop+xml", # XOP
37 "application/zip", # ZIP archive files; Registered[8]
38 "application/gzip", # Gzip, Defined in RFC 6713
39 )
40
41 audio_mime_types = (
42 "audio/basic", # mulaw audio at 8 kHz, 1 channel; Defined in RFC 2046
43 "audio/L24", # 24bit Linear PCM audio at 8-48 kHz, 1-N channels; Defined in RFC 3190
44 "audio/mp4", # MP4 audio
45 "audio/mpeg", # MP3 or other MPEG audio; Defined in RFC 3003
46 "audio/ogg", # Ogg Vorbis, Speex, Flac and other audio; Defined in RFC 5334
47 "audio/vorbis", # Vorbis encoded audio; Defined in RFC 5215
48 "audio/vnd.rn-realaudio", # RealAudio; Documented in RealPlayer Help[9]
49 "audio/vnd.wave", # WAV audio; Defined in RFC 2361
50 "audio/webm", # WebM open media format
51 )
52
53 image_mime_types = (
54 "image/gif", # GIF image; Defined in RFC 2045 and RFC 2046
55 "image/jpeg", # JPEG JFIF image; Defined in RFC 2045 and RFC 2046
56 "image/pjpeg",
57 # JPEG JFIF image; Associated with Internet Explorer; Listed in ms775147(v=vs.85) - Progressive JPEG, initiated before global browser support for progressive JPEGs (Microsoft and Firefox).
58 "image/png", # Portable Network Graphics; Registered,[10] Defined in RFC 2083
59 "image/svg+xml", # SVG vector image; Defined in SVG Tiny 1.2 Specification Appendix M
60 "image/tiff", # Tag Image File Format (only for Baseline TIFF); Defined in RFC 3302
61 "image/vnd.microsoft.icon", # ICO image; Registered[11]
62 )
63
64 message_mime_types = (
65 "message/http", # Defined in RFC 2616
66 "message/imdn+xml", # IMDN Instant Message Disposition Notification; Defined in RFC 5438
67 "message/partial", # Email; Defined in RFC 2045 and RFC 2046
68 "message/rfc822", # Email; EML files, MIME files, MHT files, MHTML files; Defined in RFC 2045 and RFC 2046
69 )
70
71 model_mime_types = (
72 "model/example", # Defined in RFC 4735
73 "model/iges", # IGS files, IGES files; Defined in RFC 2077
74 "model/mesh", # MSH files, MESH files; Defined in RFC 2077, SILO files
75 "model/vrml", # WRL files, VRML files; Defined in RFC 2077
76 "model/x3d+binary", # X3D ISO standard for representing 3D computer graphics, X3DB binary files
77 "model/x3d+vrml", # X3D ISO standard for representing 3D computer graphics, X3DV VRML files
78 "model/x3d+xml", # X3D ISO standard for representing 3D computer graphics, X3D XML files
79 )
80
81 multipart_mime_types = (
82 "multipart/mixed", # MIME Email; Defined in RFC 2045 and RFC 2046
83 "multipart/alternative", # MIME Email; Defined in RFC 2045 and RFC 2046
84 "multipart/related", # MIME Email; Defined in RFC 2387 and used by MHTML (HTML mail)
85 "multipart/form-data", # MIME Webform; Defined in RFC 2388
86 "multipart/signed", # Defined in RFC 1847
87 "multipart/encrypted", # Defined in RFC 1847
88 )
89
90 text_mime_types = (
91 "text/cmd", # commands; subtype resident in Gecko browsers like Firefox 3.5
92 "text/css", # Cascading Style Sheets; Defined in RFC 2318
93 "text/csv", # Comma-separated values; Defined in RFC 4180
94 "text/html", # HTML; Defined in RFC 2854
95 "text/javascript",
96 # (Obsolete): JavaScript; Defined in and obsoleted by RFC 4329 in order to discourage its usage in favor of application/javascript. However, text/javascript is allowed in HTML 4 and 5 and, unlike application/javascript, has cross-browser support. The "type" attribute of the <script> tag in HTML5 is optional and there is no need to use it at all since all browsers have always assumed the correct default (even in HTML 4 where it was required by the specification).
97 "text/plain", # Textual data; Defined in RFC 2046 and RFC 3676
98 "text/vcard", # vCard (contact information); Defined in RFC 6350
99 "text/xml", # Extensible Markup Language; Defined in RFC 3023
100 )
101
102 video_mime_types = (
103 "video/mpeg", # MPEG-1 video with multiplexed audio; Defined in RFC 2045 and RFC 2046
104 "video/mp4", # MP4 video; Defined in RFC 4337
105 "video/ogg", # Ogg Theora or other video (with audio); Defined in RFC 5334
106 "video/quicktime", # QuickTime video; Registered[12]
107 "video/webm", # WebM Matroska-based open media format
108 "video/x-matroska", # Matroska open media format
109 "video/x-ms-wmv", # Windows Media Video; Documented in Microsoft KB 288102
110 "video/x-flv", # Flash video (FLV files)
111 )
112
113 mime_types = OrderedDict((
114 ('application', application_mime_types),
115 ('audio', audio_mime_types),
116 ('image', image_mime_types),
117 ('message', message_mime_types),
118 ('model', model_mime_types),
119 ('multipart', multipart_mime_types),
120 ('text', text_mime_types),
121 ('video', video_mime_types),
122 ))
123
124 audio_file_extensions = (
125 "flac",
126 "mp3",
127 "wav",
128 )
129
130 image_file_extensions = (
131 "bmp",
132 "gif",
133 "jpeg",
134 "jpg",
135 "png",
136 "tiff",
137 )
138
139 text_file_extensions = (
140 "css",
141 "csv",
142 "html",
143 "js",
144 "json",
145 "txt",
146 )
147
148 video_file_extensions = (
149 "mp4",
150 "avi",
151 "mov",
152 "webm",
153 )
154
155 office_file_extensions = (
156 "doc", # legacy MS Word
157 "docx", # MS Word
158 "xls", # legacy MS Excel
159 "xlsx", # MS Excel
160 "ppt", # legacy MS PowerPoint
161 "pptx", # MS PowerPoint
162 "odt", # LibreOffice document
163 "ods", # LibreOffice spreadsheet
164 "odp", # LibreOffice presentation
165 "pages", # Apple Pages
166 "numbers", # Apple Numbers
167 "key", # Apple Keynote
168 "pdf", # Portable Document Format
169 )
170
171 file_extensions = OrderedDict((
172 ("audio", audio_file_extensions),
173 ("image", image_file_extensions),
174 ("office", office_file_extensions),
175 ("text", text_file_extensions),
176 ("video", video_file_extensions),
177 ))
178
179 @classmethod
180 def mime_type(cls, category=None):
181 """
182 :param category: application|audio|image|message|model|multipart|text|video
183 """
184 category = category if category else cls.random_element(list(cls.mime_types.keys()))
185 return cls.random_element(cls.mime_types[category])
186
187 @classmethod
188 def file_name(cls, category=None, extension=None):
189 """
190 :param category: audio|image|office|text|video
191 :param extension: file extension
192 """
193 extension = extension if extension else cls.file_extension(category)
194 filename = WordProvider.word()
195 return '{0}.{1}'.format(filename, extension)
196
197 @classmethod
198 def file_extension(cls, category=None):
199 """
200 :param category: audio|image|office|text|video
201 """
202 category = category if category else cls.random_element(list(cls.file_extensions.keys()))
203 return cls.random_element(cls.file_extensions[category])
204
[end of faker/providers/file/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/faker/providers/file/__init__.py b/faker/providers/file/__init__.py
--- a/faker/providers/file/__init__.py
+++ b/faker/providers/file/__init__.py
@@ -201,3 +201,16 @@
"""
category = category if category else cls.random_element(list(cls.file_extensions.keys()))
return cls.random_element(cls.file_extensions[category])
+
+ @classmethod
+ def file_path(cls, depth=1, category=None, extension=None):
+ """
+ :param category: audio|image|office|text|video
+ :param extension: file extension
+ :param depth: depth of the file (depth >= 0)
+ """
+ file = Provider.file_name(category, extension)
+ path = "/{0}".format(file)
+ for d in range(0, depth):
+ path = "/{0}{1}".format(WordProvider.word(), path)
+ return path
|
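A quick sanity check of the patched provider might look like the following sketch; the `file_path` method and its parameters come from the golden diff above, while the `Faker()` factory usage is an assumed, ordinary way to call it and is not part of the record:

```python
# Hypothetical usage of the file_path() method added by the golden diff.
from faker import Faker

fake = Faker()

# depth controls how many intermediate directories are generated;
# category/extension behave as in file_name()/file_extension().
print(fake.file_path())                           # e.g. a single-directory path like '/lorem/ipsum.pdf'
print(fake.file_path(depth=3, category='image'))  # e.g. a deeper path like '/a/b/c/d.png'
```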
{"golden_diff": "diff --git a/faker/providers/file/__init__.py b/faker/providers/file/__init__.py\n--- a/faker/providers/file/__init__.py\n+++ b/faker/providers/file/__init__.py\n@@ -201,3 +201,16 @@\n \"\"\"\n category = category if category else cls.random_element(list(cls.file_extensions.keys()))\n return cls.random_element(cls.file_extensions[category])\n+\n+ @classmethod\n+ def file_path(cls, depth=1, category=None, extension=None):\n+ \"\"\"\n+ :param category: audio|image|office|text|video\n+ :param extension: file extension\n+ :param depth: depth of the file (depth >= 0)\n+ \"\"\"\n+ file = Provider.file_name(category, extension)\n+ path = \"/{0}\".format(file)\n+ for d in range(0, depth):\n+ path = \"/{0}{1}\".format(WordProvider.word(), path)\n+ return path\n", "issue": "Add a file path provider\nIn the file providers would be nice to have a file_path provider who would return a path like ```/lorem/ipsum/lorem.pdf```.\n", "before_files": [{"content": "# coding=utf-8\nfrom __future__ import unicode_literals\nfrom collections import OrderedDict\n\nfrom .. import BaseProvider\nfrom ..lorem.la import Provider as WordProvider\n\n\nclass Provider(BaseProvider):\n application_mime_types = (\n\n \"application/atom+xml\", # Atom feeds\n \"application/ecmascript\",\n # ECMAScript/JavaScript; Defined in RFC 4329 (equivalent to application/javascript but with stricter processing rules)\n \"application/EDI-X12\", # EDI X12 data; Defined in RFC 1767\n \"application/EDIFACT\", # EDI EDIFACT data; Defined in RFC 1767\n \"application/json\", # JavaScript Object Notation JSON; Defined in RFC 4627\n \"application/javascript\", # ECMAScript/JavaScript; Defined in RFC 4329 (equivalent to application/ecmascript\n # but with looser processing rules) It is not accepted in IE 8\n # or earlier - text/javascript is accepted but it is defined as obsolete in RFC 4329.\n # The \"type\" attribute of the <script> tag in HTML5 is optional and in practice\n # omitting the media type of JavaScript programs is the most interoperable\n # solution since all browsers have always assumed the correct\n # default even before HTML5. \"application/octet-stream\", # Arbitrary binary data.[6] Generally speaking this type identifies files that are not associated with a specific application. Contrary to past assumptions by software packages such as Apache this is not a type that should be applied to unknown files. 
In such a case, a server or application should not indicate a content type, as it may be incorrect, but rather, should omit the type in order to allow the recipient to guess the type.[7]\n \"application/ogg\", # Ogg, a multimedia bitstream container format; Defined in RFC 5334\n \"application/pdf\", # Portable Document Format, PDF has been in use for document exchange\n # on the Internet since 1993; Defined in RFC 3778\n \"application/postscript\", # PostScript; Defined in RFC 2046\n \"application/rdf+xml\", # Resource Description Framework; Defined by RFC 3870\n \"application/rss+xml\", # RSS feeds\n \"application/soap+xml\", # SOAP; Defined by RFC 3902\n \"application/font-woff\", # Web Open Font Format; (candidate recommendation; use application/x-font-woff\n # until standard is official)\n \"application/xhtml+xml\", # XHTML; Defined by RFC 3236\n \"application/xml-dtd\", # DTD files; Defined by RFC 3023\n \"application/xop+xml\", # XOP\n \"application/zip\", # ZIP archive files; Registered[8]\n \"application/gzip\", # Gzip, Defined in RFC 6713\n )\n\n audio_mime_types = (\n \"audio/basic\", # mulaw audio at 8 kHz, 1 channel; Defined in RFC 2046\n \"audio/L24\", # 24bit Linear PCM audio at 8-48 kHz, 1-N channels; Defined in RFC 3190\n \"audio/mp4\", # MP4 audio\n \"audio/mpeg\", # MP3 or other MPEG audio; Defined in RFC 3003\n \"audio/ogg\", # Ogg Vorbis, Speex, Flac and other audio; Defined in RFC 5334\n \"audio/vorbis\", # Vorbis encoded audio; Defined in RFC 5215\n \"audio/vnd.rn-realaudio\", # RealAudio; Documented in RealPlayer Help[9]\n \"audio/vnd.wave\", # WAV audio; Defined in RFC 2361\n \"audio/webm\", # WebM open media format\n )\n\n image_mime_types = (\n \"image/gif\", # GIF image; Defined in RFC 2045 and RFC 2046\n \"image/jpeg\", # JPEG JFIF image; Defined in RFC 2045 and RFC 2046\n \"image/pjpeg\",\n # JPEG JFIF image; Associated with Internet Explorer; Listed in ms775147(v=vs.85) - Progressive JPEG, initiated before global browser support for progressive JPEGs (Microsoft and Firefox).\n \"image/png\", # Portable Network Graphics; Registered,[10] Defined in RFC 2083\n \"image/svg+xml\", # SVG vector image; Defined in SVG Tiny 1.2 Specification Appendix M\n \"image/tiff\", # Tag Image File Format (only for Baseline TIFF); Defined in RFC 3302\n \"image/vnd.microsoft.icon\", # ICO image; Registered[11]\n )\n\n message_mime_types = (\n \"message/http\", # Defined in RFC 2616\n \"message/imdn+xml\", # IMDN Instant Message Disposition Notification; Defined in RFC 5438\n \"message/partial\", # Email; Defined in RFC 2045 and RFC 2046\n \"message/rfc822\", # Email; EML files, MIME files, MHT files, MHTML files; Defined in RFC 2045 and RFC 2046\n )\n\n model_mime_types = (\n \"model/example\", # Defined in RFC 4735\n \"model/iges\", # IGS files, IGES files; Defined in RFC 2077\n \"model/mesh\", # MSH files, MESH files; Defined in RFC 2077, SILO files\n \"model/vrml\", # WRL files, VRML files; Defined in RFC 2077\n \"model/x3d+binary\", # X3D ISO standard for representing 3D computer graphics, X3DB binary files\n \"model/x3d+vrml\", # X3D ISO standard for representing 3D computer graphics, X3DV VRML files\n \"model/x3d+xml\", # X3D ISO standard for representing 3D computer graphics, X3D XML files\n )\n\n multipart_mime_types = (\n \"multipart/mixed\", # MIME Email; Defined in RFC 2045 and RFC 2046\n \"multipart/alternative\", # MIME Email; Defined in RFC 2045 and RFC 2046\n \"multipart/related\", # MIME Email; Defined in RFC 2387 and used by MHTML (HTML mail)\n 
\"multipart/form-data\", # MIME Webform; Defined in RFC 2388\n \"multipart/signed\", # Defined in RFC 1847\n \"multipart/encrypted\", # Defined in RFC 1847\n )\n\n text_mime_types = (\n \"text/cmd\", # commands; subtype resident in Gecko browsers like Firefox 3.5\n \"text/css\", # Cascading Style Sheets; Defined in RFC 2318\n \"text/csv\", # Comma-separated values; Defined in RFC 4180\n \"text/html\", # HTML; Defined in RFC 2854\n \"text/javascript\",\n # (Obsolete): JavaScript; Defined in and obsoleted by RFC 4329 in order to discourage its usage in favor of application/javascript. However, text/javascript is allowed in HTML 4 and 5 and, unlike application/javascript, has cross-browser support. The \"type\" attribute of the <script> tag in HTML5 is optional and there is no need to use it at all since all browsers have always assumed the correct default (even in HTML 4 where it was required by the specification).\n \"text/plain\", # Textual data; Defined in RFC 2046 and RFC 3676\n \"text/vcard\", # vCard (contact information); Defined in RFC 6350\n \"text/xml\", # Extensible Markup Language; Defined in RFC 3023\n )\n\n video_mime_types = (\n \"video/mpeg\", # MPEG-1 video with multiplexed audio; Defined in RFC 2045 and RFC 2046\n \"video/mp4\", # MP4 video; Defined in RFC 4337\n \"video/ogg\", # Ogg Theora or other video (with audio); Defined in RFC 5334\n \"video/quicktime\", # QuickTime video; Registered[12]\n \"video/webm\", # WebM Matroska-based open media format\n \"video/x-matroska\", # Matroska open media format\n \"video/x-ms-wmv\", # Windows Media Video; Documented in Microsoft KB 288102\n \"video/x-flv\", # Flash video (FLV files)\n )\n\n mime_types = OrderedDict((\n ('application', application_mime_types),\n ('audio', audio_mime_types),\n ('image', image_mime_types),\n ('message', message_mime_types),\n ('model', model_mime_types),\n ('multipart', multipart_mime_types),\n ('text', text_mime_types),\n ('video', video_mime_types),\n ))\n\n audio_file_extensions = (\n \"flac\",\n \"mp3\",\n \"wav\",\n )\n\n image_file_extensions = (\n \"bmp\",\n \"gif\",\n \"jpeg\",\n \"jpg\",\n \"png\",\n \"tiff\",\n )\n\n text_file_extensions = (\n \"css\",\n \"csv\",\n \"html\",\n \"js\",\n \"json\",\n \"txt\",\n )\n\n video_file_extensions = (\n \"mp4\",\n \"avi\",\n \"mov\",\n \"webm\",\n )\n\n office_file_extensions = (\n \"doc\", # legacy MS Word\n \"docx\", # MS Word\n \"xls\", # legacy MS Excel\n \"xlsx\", # MS Excel\n \"ppt\", # legacy MS PowerPoint\n \"pptx\", # MS PowerPoint\n \"odt\", # LibreOffice document\n \"ods\", # LibreOffice spreadsheet\n \"odp\", # LibreOffice presentation\n \"pages\", # Apple Pages\n \"numbers\", # Apple Numbers\n \"key\", # Apple Keynote\n \"pdf\", # Portable Document Format\n )\n\n file_extensions = OrderedDict((\n (\"audio\", audio_file_extensions),\n (\"image\", image_file_extensions),\n (\"office\", office_file_extensions),\n (\"text\", text_file_extensions),\n (\"video\", video_file_extensions),\n ))\n\n @classmethod\n def mime_type(cls, category=None):\n \"\"\"\n :param category: application|audio|image|message|model|multipart|text|video\n \"\"\"\n category = category if category else cls.random_element(list(cls.mime_types.keys()))\n return cls.random_element(cls.mime_types[category])\n\n @classmethod\n def file_name(cls, category=None, extension=None):\n \"\"\"\n :param category: audio|image|office|text|video\n :param extension: file extension\n \"\"\"\n extension = extension if extension else cls.file_extension(category)\n filename = 
WordProvider.word()\n return '{0}.{1}'.format(filename, extension)\n\n @classmethod\n def file_extension(cls, category=None):\n \"\"\"\n :param category: audio|image|office|text|video\n \"\"\"\n category = category if category else cls.random_element(list(cls.file_extensions.keys()))\n return cls.random_element(cls.file_extensions[category])\n", "path": "faker/providers/file/__init__.py"}]}
| 3,634 | 214 |
| gh_patches_debug_30758 | rasdani/github-patches | git_diff | opendatacube__datacube-core-1061 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Example PickleWriterDriver does not support URIs
### Expected behaviour
Using the example `pickles` driver should make it possible to run, e.g., `integration_tests/test_end_to_end` successfully. This assumes the example dcio driver has been installed first.
### Actual behaviour
The test fails because a URI gets passed to `PickleWriterDriver.write_dataset_to_storage()` which still expects a filepath string. Also, this URI may contain intermediate directories that need creating.
### Steps to reproduce the behaviour
- Set `pickles` as driver name in, e.g., `datacube-core/docs/config_samples/ingester/ls5_nbar_albers.yaml(106)`
- Run `pytest integration_tests/test_end_to_end.py`
- First error: `AttributeError: 'PickleWriterDriver' object has no attribute 'mk_uri'` but subsequent errors happen due to missing intermediate directories.
### Environment information
- `Open Data Cube core, version 1.8.4.dev52+g07bc51a5.d20210222`
- Docker image: `opendatacube/datacube-tests:latest`
</issue>
<code>
[start of examples/io_plugin/dcio_example/pickles.py]
1 # This file is part of the Open Data Cube, see https://opendatacube.org for more information
2 #
3 # Copyright (c) 2015-2020 ODC Contributors
4 # SPDX-License-Identifier: Apache-2.0
5 """ Example reader plugin
6 """
7 from contextlib import contextmanager
8 import pickle
9
10
11 PROTOCOL = 'file'
12 FORMAT = 'pickle'
13
14
15 def uri_split(uri):
16 loc = uri.find('://')
17 if loc < 0:
18 return uri, PROTOCOL
19 return uri[loc+3:], uri[:loc]
20
21
22 class PickleDataSource(object):
23 class BandDataSource(object):
24 def __init__(self, da):
25 self._da = da
26 self.nodata = da.nodata
27
28 @property
29 def crs(self):
30 return self._da.crs
31
32 @property
33 def transform(self):
34 return self._da.affine
35
36 @property
37 def dtype(self):
38 return self._da.dtype
39
40 @property
41 def shape(self):
42 return self._da.shape
43
44 def read(self, window=None, out_shape=None):
45 if window is None:
46 data = self._da.values
47 else:
48 rows, cols = [slice(*w) for w in window]
49 data = self._da.values[rows, cols]
50
51 if out_shape is None or out_shape == data.shape:
52 return data
53
54 raise NotImplementedError('Native reading not supported for this data source')
55
56 def __init__(self, band):
57 self._band = band
58 uri = band.uri
59 self._filename, protocol = uri_split(uri)
60
61 if protocol not in [PROTOCOL, 'pickle']:
62 raise ValueError('Expected file:// or pickle:// url')
63
64 @contextmanager
65 def open(self):
66 with open(self._filename, 'rb') as f:
67 ds = pickle.load(f)
68
69 yield PickleDataSource.BandDataSource(ds[self._band.name].isel(time=0))
70
71
72 class PickleReaderDriver(object):
73 def __init__(self):
74 self.name = 'PickleReader'
75 self.protocols = [PROTOCOL, 'pickle']
76 self.formats = [FORMAT]
77
78 def supports(self, protocol, fmt):
79 return (protocol in self.protocols and
80 fmt in self.formats)
81
82 def new_datasource(self, band):
83 return PickleDataSource(band)
84
85
86 def rdr_driver_init():
87 return PickleReaderDriver()
88
89
90 class PickleWriterDriver(object):
91 def __init__(self):
92 pass
93
94 @property
95 def aliases(self):
96 return ['pickles']
97
98 @property
99 def format(self):
100 return FORMAT
101
102 @property
103 def uri_scheme(self):
104 return PROTOCOL
105
106 def write_dataset_to_storage(self, dataset, filename,
107 global_attributes=None,
108 variable_params=None,
109 storage_config=None,
110 **kwargs):
111 with open(filename, 'wb') as f:
112 pickle.dump(dataset, f)
113 return {}
114
115
116 def writer_driver_init():
117 return PickleWriterDriver()
118
[end of examples/io_plugin/dcio_example/pickles.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/examples/io_plugin/dcio_example/pickles.py b/examples/io_plugin/dcio_example/pickles.py
--- a/examples/io_plugin/dcio_example/pickles.py
+++ b/examples/io_plugin/dcio_example/pickles.py
@@ -4,9 +4,12 @@
# SPDX-License-Identifier: Apache-2.0
""" Example reader plugin
"""
-from contextlib import contextmanager
import pickle
+from contextlib import contextmanager
+from pathlib import Path
+from urllib.parse import urlsplit
+from datacube.utils.uris import normalise_path
PROTOCOL = 'file'
FORMAT = 'pickle'
@@ -103,12 +106,33 @@
def uri_scheme(self):
return PROTOCOL
- def write_dataset_to_storage(self, dataset, filename,
+ def mk_uri(self, file_path, storage_config):
+ """
+ Constructs a URI from the file_path and storage config.
+
+ A typical implementation should return f'{scheme}://{file_path}'
+
+ Example:
+ file_path = '/path/to/my_file.pickled'
+ storage_config = {'driver': 'pickles'}
+
+ mk_uri(file_path, storage_config) should return 'file:///path/to/my_file.pickled'
+
+ :param Path file_path: The file path of the file to be converted into a URI.
+ :param dict storage_config: The dict holding the storage config found in the ingest definition.
+ :return: file_path as a URI that the Driver understands.
+ :rtype: str
+ """
+ return normalise_path(file_path).as_uri()
+
+ def write_dataset_to_storage(self, dataset, file_uri,
global_attributes=None,
variable_params=None,
storage_config=None,
**kwargs):
- with open(filename, 'wb') as f:
+ filepath = Path(urlsplit(file_uri).path)
+ filepath.parent.mkdir(parents=True, exist_ok=True)
+ with filepath.open('wb') as f:
pickle.dump(dataset, f)
return {}
|
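The sketch below walks the patched writer driver end to end. The import path assumes the example plugin from `examples/io_plugin` is installed as `dcio_example`, and the pickled payload is a stand-in dict rather than a real ODC dataset:

```python
# Hypothetical round trip through the patched PickleWriterDriver.
import pickle
from dcio_example.pickles import PickleWriterDriver

driver = PickleWriterDriver()

# mk_uri() (added by the golden diff) turns a plain path into a file:// URI.
uri = driver.mk_uri('/tmp/odc_example/scene.pickled', {'driver': 'pickles'})
print(uri)  # on POSIX: file:///tmp/odc_example/scene.pickled

# write_dataset_to_storage() now takes that URI and creates missing parent dirs.
driver.write_dataset_to_storage({'demo': 'payload'}, uri)

with open('/tmp/odc_example/scene.pickled', 'rb') as f:
    print(pickle.load(f))  # {'demo': 'payload'}
```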
{"golden_diff": "diff --git a/examples/io_plugin/dcio_example/pickles.py b/examples/io_plugin/dcio_example/pickles.py\n--- a/examples/io_plugin/dcio_example/pickles.py\n+++ b/examples/io_plugin/dcio_example/pickles.py\n@@ -4,9 +4,12 @@\n # SPDX-License-Identifier: Apache-2.0\n \"\"\" Example reader plugin\n \"\"\"\n-from contextlib import contextmanager\n import pickle\n+from contextlib import contextmanager\n+from pathlib import Path\n+from urllib.parse import urlsplit\n \n+from datacube.utils.uris import normalise_path\n \n PROTOCOL = 'file'\n FORMAT = 'pickle'\n@@ -103,12 +106,33 @@\n def uri_scheme(self):\n return PROTOCOL\n \n- def write_dataset_to_storage(self, dataset, filename,\n+ def mk_uri(self, file_path, storage_config):\n+ \"\"\"\n+ Constructs a URI from the file_path and storage config.\n+\n+ A typical implementation should return f'{scheme}://{file_path}'\n+\n+ Example:\n+ file_path = '/path/to/my_file.pickled'\n+ storage_config = {'driver': 'pickles'}\n+\n+ mk_uri(file_path, storage_config) should return 'file:///path/to/my_file.pickled'\n+\n+ :param Path file_path: The file path of the file to be converted into a URI.\n+ :param dict storage_config: The dict holding the storage config found in the ingest definition.\n+ :return: file_path as a URI that the Driver understands.\n+ :rtype: str\n+ \"\"\"\n+ return normalise_path(file_path).as_uri()\n+\n+ def write_dataset_to_storage(self, dataset, file_uri,\n global_attributes=None,\n variable_params=None,\n storage_config=None,\n **kwargs):\n- with open(filename, 'wb') as f:\n+ filepath = Path(urlsplit(file_uri).path)\n+ filepath.parent.mkdir(parents=True, exist_ok=True)\n+ with filepath.open('wb') as f:\n pickle.dump(dataset, f)\n return {}\n", "issue": "Example PickleWriterDriver does not support URIs\n### Expected behaviour\r\nUsing the example `pickles` driver should allow to successfully run, e.g., `integration_tests/test_end_to_end`. This is assuming the example dcio driver has been installed first.\r\n\r\n### Actual behaviour\r\nThe test fails because a URI gets passed to `PickleWriterDriver.write_dataset_to_storage()` which still expects a filepath string. 
Also, this URI may contain intermediate directories that need creating.\r\n\r\n### Steps to reproduce the behaviour\r\n- Set `pickles` as driver name in, e.g., `datacube-core/docs/config_samples/ingester/ls5_nbar_albers.yaml(106)`\r\n- Run `pytest integration_tests/test_end_to_end.py`\r\n- First error: `AttributeError: 'PickleWriterDriver' object has no attribute 'mk_uri'` but subsequent errors happen due to missing intermediate directories.\r\n\r\n### Environment information\r\n- `Open Data Cube core, version 1.8.4.dev52+g07bc51a5.d20210222`\r\n- Docker image: `opendatacube/datacube-tests:latest`\n", "before_files": [{"content": "# This file is part of the Open Data Cube, see https://opendatacube.org for more information\n#\n# Copyright (c) 2015-2020 ODC Contributors\n# SPDX-License-Identifier: Apache-2.0\n\"\"\" Example reader plugin\n\"\"\"\nfrom contextlib import contextmanager\nimport pickle\n\n\nPROTOCOL = 'file'\nFORMAT = 'pickle'\n\n\ndef uri_split(uri):\n loc = uri.find('://')\n if loc < 0:\n return uri, PROTOCOL\n return uri[loc+3:], uri[:loc]\n\n\nclass PickleDataSource(object):\n class BandDataSource(object):\n def __init__(self, da):\n self._da = da\n self.nodata = da.nodata\n\n @property\n def crs(self):\n return self._da.crs\n\n @property\n def transform(self):\n return self._da.affine\n\n @property\n def dtype(self):\n return self._da.dtype\n\n @property\n def shape(self):\n return self._da.shape\n\n def read(self, window=None, out_shape=None):\n if window is None:\n data = self._da.values\n else:\n rows, cols = [slice(*w) for w in window]\n data = self._da.values[rows, cols]\n\n if out_shape is None or out_shape == data.shape:\n return data\n\n raise NotImplementedError('Native reading not supported for this data source')\n\n def __init__(self, band):\n self._band = band\n uri = band.uri\n self._filename, protocol = uri_split(uri)\n\n if protocol not in [PROTOCOL, 'pickle']:\n raise ValueError('Expected file:// or pickle:// url')\n\n @contextmanager\n def open(self):\n with open(self._filename, 'rb') as f:\n ds = pickle.load(f)\n\n yield PickleDataSource.BandDataSource(ds[self._band.name].isel(time=0))\n\n\nclass PickleReaderDriver(object):\n def __init__(self):\n self.name = 'PickleReader'\n self.protocols = [PROTOCOL, 'pickle']\n self.formats = [FORMAT]\n\n def supports(self, protocol, fmt):\n return (protocol in self.protocols and\n fmt in self.formats)\n\n def new_datasource(self, band):\n return PickleDataSource(band)\n\n\ndef rdr_driver_init():\n return PickleReaderDriver()\n\n\nclass PickleWriterDriver(object):\n def __init__(self):\n pass\n\n @property\n def aliases(self):\n return ['pickles']\n\n @property\n def format(self):\n return FORMAT\n\n @property\n def uri_scheme(self):\n return PROTOCOL\n\n def write_dataset_to_storage(self, dataset, filename,\n global_attributes=None,\n variable_params=None,\n storage_config=None,\n **kwargs):\n with open(filename, 'wb') as f:\n pickle.dump(dataset, f)\n return {}\n\n\ndef writer_driver_init():\n return PickleWriterDriver()\n", "path": "examples/io_plugin/dcio_example/pickles.py"}]}
| 1,700 | 444 |
gh_patches_debug_34429
|
rasdani/github-patches
|
git_diff
|
googleapis__google-cloud-python-4704
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Logging: Can't retrieve the writerIdentity for a logging sink
In #4578, support was added for creating a sink with a unique writerIdentity. It would appear that only the uniqueWriterIdentity boolean for logging.sinks.create was implemented, but retrieving the writerIdentity for an existing sink is not possible.
## API response (via gcloud)
```
$ gcloud logging sinks list --project xxxx --log-http
[...]
-- body start --
{
"sinks": [
{
"name": "xxxx",
"destination": "storage.googleapis.com/xxxx",
"filter": "xxxx",
"outputVersionFormt": "V2",
"writerIdentity": "serviceAccount:[email protected]"
}
]
}
-- body end --
```
## Simplified example code
```
from google.cloud import logging
logging_client = logging.Client(project_id)
sink = logging_client.sink(sink_name)
sink.reload()
```
## Resulting object
```
(Pdb) sink.
sink.__class__( sink.__hash__( sink.__setattr__( sink.destination
sink.__delattr__( sink.__init__( sink.__sizeof__( sink.exists(
sink.__dict__ sink.__le__( sink.__str__( sink.filter_
sink.__dir__( sink.__lt__( sink.__subclasshook__( sink.from_api_repr(
sink.__doc__ sink.__module__ sink.__weakref__ sink.full_name
sink.__eq__( sink.__ne__( sink._client sink.name
sink.__format__( sink.__new__( sink._require_client( sink.path
sink.__ge__( sink.__reduce__( sink.client sink.project
sink.__getattribute__( sink.__reduce_ex__( sink.create( sink.reload(
sink.__gt__( sink.__repr__( sink.delete( sink.update(
(Pdb) sink.__dict__
{'destination': 'storage.googleapis.com/logs-00006913-fake-customer', '_client': <google.cloud.logging.client.Client object at 0x7f1d2728e860>, 'name': 'asdf1234', 'filter_': '*'}
```
</issue>
<code>
[start of logging/google/cloud/logging/sink.py]
1 # Copyright 2016 Google LLC
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """Define Stackdriver Logging API Sinks."""
16
17 from google.cloud.exceptions import NotFound
18
19
20 class Sink(object):
21 """Sinks represent filtered exports for log entries.
22
23 See
24 https://cloud.google.com/logging/docs/reference/v2/rest/v2/projects.sinks
25
26 :type name: str
27 :param name: the name of the sink
28
29 :type filter_: str
30 :param filter_: (optional) the advanced logs filter expression defining
31 the entries exported by the sink.
32
33 :type destination: str
34 :param destination: destination URI for the entries exported by the sink.
35 If not passed, the instance should already exist, to
36 be refreshed via :meth:`reload`.
37
38 :type client: :class:`google.cloud.logging.client.Client`
39 :param client: A client which holds credentials and project configuration
40 for the sink (which requires a project).
41
42 :type unique_writer_identity: bool
43 :param unique_writer_identity: (Optional) determines the kind of
44 IAM identity returned as
45 writer_identity in the new sink.
46 """
47 def __init__(self, name, filter_=None, destination=None, client=None,
48 unique_writer_identity=False):
49 self.name = name
50 self.filter_ = filter_
51 self.destination = destination
52 self._client = client
53 self._unique_writer_identity = unique_writer_identity
54
55 @property
56 def client(self):
57 """Client bound to the sink."""
58 return self._client
59
60 @property
61 def project(self):
62 """Project bound to the sink."""
63 return self._client.project
64
65 @property
66 def full_name(self):
67 """Fully-qualified name used in sink APIs"""
68 return 'projects/%s/sinks/%s' % (self.project, self.name)
69
70 @property
71 def path(self):
72 """URL path for the sink's APIs"""
73 return '/%s' % (self.full_name)
74
75 @classmethod
76 def from_api_repr(cls, resource, client):
77 """Factory: construct a sink given its API representation
78
79 :type resource: dict
80 :param resource: sink resource representation returned from the API
81
82 :type client: :class:`google.cloud.logging.client.Client`
83 :param client: Client which holds credentials and project
84 configuration for the sink.
85
86 :rtype: :class:`google.cloud.logging.sink.Sink`
87 :returns: Sink parsed from ``resource``.
88 :raises: :class:`ValueError` if ``client`` is not ``None`` and the
89 project from the resource does not agree with the project
90 from the client.
91 """
92 sink_name = resource['name']
93 destination = resource['destination']
94 filter_ = resource.get('filter')
95 return cls(sink_name, filter_, destination, client=client)
96
97 def _require_client(self, client):
98 """Check client or verify over-ride.
99
100 :type client: :class:`~google.cloud.logging.client.Client` or
101 ``NoneType``
102 :param client: the client to use. If not passed, falls back to the
103 ``client`` stored on the current sink.
104
105 :rtype: :class:`google.cloud.logging.client.Client`
106 :returns: The client passed in or the currently bound client.
107 """
108 if client is None:
109 client = self._client
110 return client
111
112 def create(self, client=None):
113 """API call: create the sink via a PUT request
114
115 See
116 https://cloud.google.com/logging/docs/reference/v2/rest/v2/projects.sinks/create
117
118 :type client: :class:`~google.cloud.logging.client.Client` or
119 ``NoneType``
120 :param client: the client to use. If not passed, falls back to the
121 ``client`` stored on the current sink.
122 """
123 client = self._require_client(client)
124 client.sinks_api.sink_create(
125 self.project, self.name, self.filter_, self.destination,
126 unique_writer_identity=self._unique_writer_identity,
127 )
128
129 def exists(self, client=None):
130 """API call: test for the existence of the sink via a GET request
131
132 See
133 https://cloud.google.com/logging/docs/reference/v2/rest/v2/projects.sinks/get
134
135 :type client: :class:`~google.cloud.logging.client.Client` or
136 ``NoneType``
137 :param client: the client to use. If not passed, falls back to the
138 ``client`` stored on the current sink.
139
140 :rtype: bool
141 :returns: Boolean indicating existence of the sink.
142 """
143 client = self._require_client(client)
144
145 try:
146 client.sinks_api.sink_get(self.project, self.name)
147 except NotFound:
148 return False
149 else:
150 return True
151
152 def reload(self, client=None):
153 """API call: sync local sink configuration via a GET request
154
155 See
156 https://cloud.google.com/logging/docs/reference/v2/rest/v2/projects.sinks/get
157
158 :type client: :class:`~google.cloud.logging.client.Client` or
159 ``NoneType``
160 :param client: the client to use. If not passed, falls back to the
161 ``client`` stored on the current sink.
162 """
163 client = self._require_client(client)
164 data = client.sinks_api.sink_get(self.project, self.name)
165 self.destination = data['destination']
166 self.filter_ = data.get('filter')
167
168 def update(self, client=None):
169 """API call: update sink configuration via a PUT request
170
171 See
172 https://cloud.google.com/logging/docs/reference/v2/rest/v2/projects.sinks/update
173
174 :type client: :class:`~google.cloud.logging.client.Client` or
175 ``NoneType``
176 :param client: the client to use. If not passed, falls back to the
177 ``client`` stored on the current sink.
178 """
179 client = self._require_client(client)
180 client.sinks_api.sink_update(
181 self.project, self.name, self.filter_, self.destination)
182
183 def delete(self, client=None):
184 """API call: delete a sink via a DELETE request
185
186 See
187 https://cloud.google.com/logging/docs/reference/v2/rest/v2/projects.sinks/delete
188
189 :type client: :class:`~google.cloud.logging.client.Client` or
190 ``NoneType``
191 :param client: the client to use. If not passed, falls back to the
192 ``client`` stored on the current sink.
193 """
194 client = self._require_client(client)
195 client.sinks_api.sink_delete(self.project, self.name)
196
[end of logging/google/cloud/logging/sink.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/logging/google/cloud/logging/sink.py b/logging/google/cloud/logging/sink.py
--- a/logging/google/cloud/logging/sink.py
+++ b/logging/google/cloud/logging/sink.py
@@ -51,6 +51,7 @@
self.destination = destination
self._client = client
self._unique_writer_identity = unique_writer_identity
+ self._writer_identity = None
@property
def client(self):
@@ -72,6 +73,11 @@
"""URL path for the sink's APIs"""
return '/%s' % (self.full_name)
+ @property
+ def writer_identity(self):
+ """Identity used for exports via the sink"""
+ return self._writer_identity
+
@classmethod
def from_api_repr(cls, resource, client):
"""Factory: construct a sink given its API representation
@@ -92,7 +98,9 @@
sink_name = resource['name']
destination = resource['destination']
filter_ = resource.get('filter')
- return cls(sink_name, filter_, destination, client=client)
+ instance = cls(sink_name, filter_, destination, client=client)
+ instance._writer_identity = resource.get('writerIdentity')
+ return instance
def _require_client(self, client):
"""Check client or verify over-ride.
@@ -161,9 +169,10 @@
``client`` stored on the current sink.
"""
client = self._require_client(client)
- data = client.sinks_api.sink_get(self.project, self.name)
- self.destination = data['destination']
- self.filter_ = data.get('filter')
+ resource = client.sinks_api.sink_get(self.project, self.name)
+ self.destination = resource['destination']
+ self.filter_ = resource.get('filter')
+ self._writer_identity = resource.get('writerIdentity')
def update(self, client=None):
"""API call: update sink configuration via a PUT request
|
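With the patch applied, the reload-and-inspect flow from the issue report would expose the identity. A hedged sketch, where the project and sink names are placeholders:

```python
# Hypothetical usage after the fix: reload() now also stores writerIdentity.
from google.cloud import logging

client = logging.Client('my-project')
sink = client.sink('my-sink')   # the sink must already exist server-side
sink.reload()

print(sink.writer_identity)     # e.g. 'serviceAccount:...' as returned by the API
```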
{"golden_diff": "diff --git a/logging/google/cloud/logging/sink.py b/logging/google/cloud/logging/sink.py\n--- a/logging/google/cloud/logging/sink.py\n+++ b/logging/google/cloud/logging/sink.py\n@@ -51,6 +51,7 @@\n self.destination = destination\n self._client = client\n self._unique_writer_identity = unique_writer_identity\n+ self._writer_identity = None\n \n @property\n def client(self):\n@@ -72,6 +73,11 @@\n \"\"\"URL path for the sink's APIs\"\"\"\n return '/%s' % (self.full_name)\n \n+ @property\n+ def writer_identity(self):\n+ \"\"\"Identity used for exports via the sink\"\"\"\n+ return self._writer_identity\n+\n @classmethod\n def from_api_repr(cls, resource, client):\n \"\"\"Factory: construct a sink given its API representation\n@@ -92,7 +98,9 @@\n sink_name = resource['name']\n destination = resource['destination']\n filter_ = resource.get('filter')\n- return cls(sink_name, filter_, destination, client=client)\n+ instance = cls(sink_name, filter_, destination, client=client)\n+ instance._writer_identity = resource.get('writerIdentity')\n+ return instance\n \n def _require_client(self, client):\n \"\"\"Check client or verify over-ride.\n@@ -161,9 +169,10 @@\n ``client`` stored on the current sink.\n \"\"\"\n client = self._require_client(client)\n- data = client.sinks_api.sink_get(self.project, self.name)\n- self.destination = data['destination']\n- self.filter_ = data.get('filter')\n+ resource = client.sinks_api.sink_get(self.project, self.name)\n+ self.destination = resource['destination']\n+ self.filter_ = resource.get('filter')\n+ self._writer_identity = resource.get('writerIdentity')\n \n def update(self, client=None):\n \"\"\"API call: update sink configuration via a PUT request\n", "issue": "Logging: Can't retrieve the writerIdentity for a logging sink\nIn #4578 , support was added for creating a sink with a unique writerIdentity. 
It would appear that only the uniqueWriterIdentity boolean for logging.sinks.create was implemented; but retrieving the writerIdentity for an existing sink is not possible.\r\n\r\n## API response (via gcloud)\r\n```\r\n$ gcloud logging sinks list --project xxxx --log-http\r\n[...]\r\n-- body start --\r\n{\r\n \"sinks\": [\r\n {\r\n \"name\": \"xxxx\",\r\n \"destination\": \"storage.googleapis.com/xxxx\",\r\n \"filter\": \"xxxx\",\r\n \"outputVersionFormt\": \"V2\",\r\n \"writerIdentity\": \"serviceAccount:[email protected]\"\r\n }\r\n ]\r\n}\r\n\r\n-- body end --\r\n```\r\n\r\n## Simplified example code\r\n```\r\nfrom google.cloud import logging\r\n\r\nlogging_client = logging.Client(project_id)\r\nsink = logging_client.sink(sink_name)\r\nsink.reload()\r\n```\r\n\r\n## Resulting object\r\n```\r\n(Pdb) sink.\r\nsink.__class__( sink.__hash__( sink.__setattr__( sink.destination\r\nsink.__delattr__( sink.__init__( sink.__sizeof__( sink.exists(\r\nsink.__dict__ sink.__le__( sink.__str__( sink.filter_\r\nsink.__dir__( sink.__lt__( sink.__subclasshook__( sink.from_api_repr(\r\nsink.__doc__ sink.__module__ sink.__weakref__ sink.full_name\r\nsink.__eq__( sink.__ne__( sink._client sink.name\r\nsink.__format__( sink.__new__( sink._require_client( sink.path\r\nsink.__ge__( sink.__reduce__( sink.client sink.project\r\nsink.__getattribute__( sink.__reduce_ex__( sink.create( sink.reload(\r\nsink.__gt__( sink.__repr__( sink.delete( sink.update(\r\n\r\n(Pdb) sink.__dict__\r\n{'destination': 'storage.googleapis.com/logs-00006913-fake-customer', '_client': <google.cloud.logging.client.Client object at 0x7f1d2728e860>, 'name': 'asdf1234', 'filter_': '*'}\r\n```\r\n\r\n\n", "before_files": [{"content": "# Copyright 2016 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Define Stackdriver Logging API Sinks.\"\"\"\n\nfrom google.cloud.exceptions import NotFound\n\n\nclass Sink(object):\n \"\"\"Sinks represent filtered exports for log entries.\n\n See\n https://cloud.google.com/logging/docs/reference/v2/rest/v2/projects.sinks\n\n :type name: str\n :param name: the name of the sink\n\n :type filter_: str\n :param filter_: (optional) the advanced logs filter expression defining\n the entries exported by the sink.\n\n :type destination: str\n :param destination: destination URI for the entries exported by the sink.\n If not passed, the instance should already exist, to\n be refreshed via :meth:`reload`.\n\n :type client: :class:`google.cloud.logging.client.Client`\n :param client: A client which holds credentials and project configuration\n for the sink (which requires a project).\n\n :type unique_writer_identity: bool\n :param unique_writer_identity: (Optional) determines the kind of\n IAM identity returned as\n writer_identity in the new sink.\n \"\"\"\n def __init__(self, name, filter_=None, destination=None, client=None,\n unique_writer_identity=False):\n self.name = name\n self.filter_ = filter_\n self.destination = destination\n self._client = client\n self._unique_writer_identity = 
unique_writer_identity\n\n @property\n def client(self):\n \"\"\"Client bound to the sink.\"\"\"\n return self._client\n\n @property\n def project(self):\n \"\"\"Project bound to the sink.\"\"\"\n return self._client.project\n\n @property\n def full_name(self):\n \"\"\"Fully-qualified name used in sink APIs\"\"\"\n return 'projects/%s/sinks/%s' % (self.project, self.name)\n\n @property\n def path(self):\n \"\"\"URL path for the sink's APIs\"\"\"\n return '/%s' % (self.full_name)\n\n @classmethod\n def from_api_repr(cls, resource, client):\n \"\"\"Factory: construct a sink given its API representation\n\n :type resource: dict\n :param resource: sink resource representation returned from the API\n\n :type client: :class:`google.cloud.logging.client.Client`\n :param client: Client which holds credentials and project\n configuration for the sink.\n\n :rtype: :class:`google.cloud.logging.sink.Sink`\n :returns: Sink parsed from ``resource``.\n :raises: :class:`ValueError` if ``client`` is not ``None`` and the\n project from the resource does not agree with the project\n from the client.\n \"\"\"\n sink_name = resource['name']\n destination = resource['destination']\n filter_ = resource.get('filter')\n return cls(sink_name, filter_, destination, client=client)\n\n def _require_client(self, client):\n \"\"\"Check client or verify over-ride.\n\n :type client: :class:`~google.cloud.logging.client.Client` or\n ``NoneType``\n :param client: the client to use. If not passed, falls back to the\n ``client`` stored on the current sink.\n\n :rtype: :class:`google.cloud.logging.client.Client`\n :returns: The client passed in or the currently bound client.\n \"\"\"\n if client is None:\n client = self._client\n return client\n\n def create(self, client=None):\n \"\"\"API call: create the sink via a PUT request\n\n See\n https://cloud.google.com/logging/docs/reference/v2/rest/v2/projects.sinks/create\n\n :type client: :class:`~google.cloud.logging.client.Client` or\n ``NoneType``\n :param client: the client to use. If not passed, falls back to the\n ``client`` stored on the current sink.\n \"\"\"\n client = self._require_client(client)\n client.sinks_api.sink_create(\n self.project, self.name, self.filter_, self.destination,\n unique_writer_identity=self._unique_writer_identity,\n )\n\n def exists(self, client=None):\n \"\"\"API call: test for the existence of the sink via a GET request\n\n See\n https://cloud.google.com/logging/docs/reference/v2/rest/v2/projects.sinks/get\n\n :type client: :class:`~google.cloud.logging.client.Client` or\n ``NoneType``\n :param client: the client to use. If not passed, falls back to the\n ``client`` stored on the current sink.\n\n :rtype: bool\n :returns: Boolean indicating existence of the sink.\n \"\"\"\n client = self._require_client(client)\n\n try:\n client.sinks_api.sink_get(self.project, self.name)\n except NotFound:\n return False\n else:\n return True\n\n def reload(self, client=None):\n \"\"\"API call: sync local sink configuration via a GET request\n\n See\n https://cloud.google.com/logging/docs/reference/v2/rest/v2/projects.sinks/get\n\n :type client: :class:`~google.cloud.logging.client.Client` or\n ``NoneType``\n :param client: the client to use. 
If not passed, falls back to the\n ``client`` stored on the current sink.\n \"\"\"\n client = self._require_client(client)\n data = client.sinks_api.sink_get(self.project, self.name)\n self.destination = data['destination']\n self.filter_ = data.get('filter')\n\n def update(self, client=None):\n \"\"\"API call: update sink configuration via a PUT request\n\n See\n https://cloud.google.com/logging/docs/reference/v2/rest/v2/projects.sinks/update\n\n :type client: :class:`~google.cloud.logging.client.Client` or\n ``NoneType``\n :param client: the client to use. If not passed, falls back to the\n ``client`` stored on the current sink.\n \"\"\"\n client = self._require_client(client)\n client.sinks_api.sink_update(\n self.project, self.name, self.filter_, self.destination)\n\n def delete(self, client=None):\n \"\"\"API call: delete a sink via a DELETE request\n\n See\n https://cloud.google.com/logging/docs/reference/v2/rest/v2/projects.sinks/delete\n\n :type client: :class:`~google.cloud.logging.client.Client` or\n ``NoneType``\n :param client: the client to use. If not passed, falls back to the\n ``client`` stored on the current sink.\n \"\"\"\n client = self._require_client(client)\n client.sinks_api.sink_delete(self.project, self.name)\n", "path": "logging/google/cloud/logging/sink.py"}]}
| 3,062 | 442 |
gh_patches_debug_244
|
rasdani/github-patches
|
git_diff
|
wagtail__wagtail-8940
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
__str__ method doesn't return a string.
This code sample:
>>> from wagtail.contrib.forms.models import FormSubmission
>>> FormSubmission.objects.count()
1
>>> FormSubmission.objects.first()
Traceback (most recent call last):
File "<console>", line 1, in <module>
File "lib64/python3.10/site-packages/django/db/models/base.py", line 580, in __repr__
return "<%s: %s>" % (self.__class__.__name__, self)
TypeError: __str__ returned non-string (type dict)
This method:
https://github.com/wagtail/wagtail/blob/18ad15a18f8e533b858ccde7d060b9d4e85dcfd4/wagtail/contrib/forms/models.py#L61-L62
should be:
def __str__(self):
return f"{self.form_data}"
</issue>
<code>
[start of wagtail/contrib/forms/models.py]
1 import datetime
2 import os
3
4 from django.conf import settings
5 from django.core.serializers.json import DjangoJSONEncoder
6 from django.core.validators import validate_email
7 from django.db import models
8 from django.template.response import TemplateResponse
9 from django.utils.formats import date_format
10 from django.utils.translation import gettext_lazy as _
11
12 from wagtail.admin.mail import send_mail
13 from wagtail.admin.panels import FieldPanel
14 from wagtail.contrib.forms.utils import get_field_clean_name
15 from wagtail.models import Orderable, Page
16
17 from .forms import FormBuilder, WagtailAdminFormPageForm
18
19 FORM_FIELD_CHOICES = (
20 ("singleline", _("Single line text")),
21 ("multiline", _("Multi-line text")),
22 ("email", _("Email")),
23 ("number", _("Number")),
24 ("url", _("URL")),
25 ("checkbox", _("Checkbox")),
26 ("checkboxes", _("Checkboxes")),
27 ("dropdown", _("Drop down")),
28 ("multiselect", _("Multiple select")),
29 ("radio", _("Radio buttons")),
30 ("date", _("Date")),
31 ("datetime", _("Date/time")),
32 ("hidden", _("Hidden field")),
33 )
34
35
36 class AbstractFormSubmission(models.Model):
37 """
38 Data for a form submission.
39
40 You can create custom submission model based on this abstract model.
41 For example, if you need to save additional data or a reference to a user.
42 """
43
44 form_data = models.JSONField(encoder=DjangoJSONEncoder)
45 page = models.ForeignKey(Page, on_delete=models.CASCADE)
46
47 submit_time = models.DateTimeField(verbose_name=_("submit time"), auto_now_add=True)
48
49 def get_data(self):
50 """
51 Returns dict with form data.
52
53 You can override this method to add additional data.
54 """
55
56 return {
57 **self.form_data,
58 "submit_time": self.submit_time,
59 }
60
61 def __str__(self):
62 return self.form_data
63
64 class Meta:
65 abstract = True
66 verbose_name = _("form submission")
67 verbose_name_plural = _("form submissions")
68
69
70 class FormSubmission(AbstractFormSubmission):
71 """Data for a Form submission."""
72
73
74 class AbstractFormField(Orderable):
75 """
76 Database Fields required for building a Django Form field.
77 """
78
79 clean_name = models.CharField(
80 verbose_name=_("name"),
81 max_length=255,
82 blank=True,
83 default="",
84 help_text=_(
85 "Safe name of the form field, the label converted to ascii_snake_case"
86 ),
87 )
88 label = models.CharField(
89 verbose_name=_("label"),
90 max_length=255,
91 help_text=_("The label of the form field"),
92 )
93 field_type = models.CharField(
94 verbose_name=_("field type"), max_length=16, choices=FORM_FIELD_CHOICES
95 )
96 required = models.BooleanField(verbose_name=_("required"), default=True)
97 choices = models.TextField(
98 verbose_name=_("choices"),
99 blank=True,
100 help_text=_(
101 "Comma or new line separated list of choices. Only applicable in checkboxes, radio and dropdown."
102 ),
103 )
104 default_value = models.TextField(
105 verbose_name=_("default value"),
106 blank=True,
107 help_text=_(
108 "Default value. Comma or new line separated values supported for checkboxes."
109 ),
110 )
111 help_text = models.CharField(
112 verbose_name=_("help text"), max_length=255, blank=True
113 )
114
115 panels = [
116 FieldPanel("label"),
117 FieldPanel("help_text"),
118 FieldPanel("required"),
119 FieldPanel("field_type", classname="formbuilder-type"),
120 FieldPanel("choices", classname="formbuilder-choices"),
121 FieldPanel("default_value", classname="formbuilder-default"),
122 ]
123
124 def get_field_clean_name(self):
125 """
126 Prepare an ascii safe lower_snake_case variant of the field name to use as the field key.
127 This key is used to reference the field responses in the JSON store and as the field name in forms.
128 Called for new field creation, validation of duplicate labels and form previews.
129 When called, does not have access to the Page, nor its own id as the record is not yet created.
130 """
131
132 return get_field_clean_name(self.label)
133
134 def save(self, *args, **kwargs):
135 """
136 When new fields are created, generate a template safe ascii name to use as the
137 JSON storage reference for this field. Previously created fields will be updated
138 to use the legacy unidecode method via checks & _migrate_legacy_clean_name.
139 We do not want to update the clean name on any subsequent changes to the label
140 as this would invalidate any previously submitted data.
141 """
142
143 is_new = self.pk is None
144 if is_new:
145 clean_name = self.get_field_clean_name()
146 self.clean_name = clean_name
147
148 super().save(*args, **kwargs)
149
150 class Meta:
151 abstract = True
152 ordering = ["sort_order"]
153
154
155 class AbstractForm(Page):
156 """
157 A Form Page. Pages implementing a form should inherit from it
158 """
159
160 base_form_class = WagtailAdminFormPageForm
161
162 form_builder = FormBuilder
163
164 submissions_list_view_class = None
165
166 def __init__(self, *args, **kwargs):
167 super().__init__(*args, **kwargs)
168 if not hasattr(self, "landing_page_template"):
169 name, ext = os.path.splitext(self.template)
170 self.landing_page_template = name + "_landing" + ext
171
172 class Meta:
173 abstract = True
174
175 def get_form_fields(self):
176 """
177 Form page expects `form_fields` to be declared.
178 If you want to change backwards relation name,
179 you need to override this method.
180 """
181
182 return self.form_fields.all()
183
184 def get_data_fields(self):
185 """
186 Returns a list of tuples with (field_name, field_label).
187 """
188
189 data_fields = [
190 ("submit_time", _("Submission date")),
191 ]
192 data_fields += [
193 (field.clean_name, field.label) for field in self.get_form_fields()
194 ]
195
196 return data_fields
197
198 def get_form_class(self):
199 fb = self.form_builder(self.get_form_fields())
200 return fb.get_form_class()
201
202 def get_form_parameters(self):
203 return {}
204
205 def get_form(self, *args, **kwargs):
206 form_class = self.get_form_class()
207 form_params = self.get_form_parameters()
208 form_params.update(kwargs)
209
210 return form_class(*args, **form_params)
211
212 def get_landing_page_template(self, request, *args, **kwargs):
213 return self.landing_page_template
214
215 def get_submission_class(self):
216 """
217 Returns submission class.
218
219 You can override this method to provide custom submission class.
220 Your class must be inherited from AbstractFormSubmission.
221 """
222
223 return FormSubmission
224
225 def get_submissions_list_view_class(self):
226 from .views import SubmissionsListView
227
228 return self.submissions_list_view_class or SubmissionsListView
229
230 def process_form_submission(self, form):
231 """
232 Accepts form instance with submitted data, user and page.
233 Creates submission instance.
234
235 You can override this method if you want to have custom creation logic.
236 For example, if you want to save reference to a user.
237 """
238
239 return self.get_submission_class().objects.create(
240 form_data=form.cleaned_data,
241 page=self,
242 )
243
244 def render_landing_page(self, request, form_submission=None, *args, **kwargs):
245 """
246 Renders the landing page.
247
248 You can override this method to return a different HttpResponse as
249 landing page. E.g. you could return a redirect to a separate page.
250 """
251 context = self.get_context(request)
252 context["form_submission"] = form_submission
253 return TemplateResponse(
254 request, self.get_landing_page_template(request), context
255 )
256
257 def serve_submissions_list_view(self, request, *args, **kwargs):
258 """
259 Returns list submissions view for admin.
260
261 `list_submissions_view_class` can bse set to provide custom view class.
262 Your class must be inherited from SubmissionsListView.
263 """
264 view = self.get_submissions_list_view_class().as_view()
265 return view(request, form_page=self, *args, **kwargs)
266
267 def serve(self, request, *args, **kwargs):
268 if request.method == "POST":
269 form = self.get_form(
270 request.POST, request.FILES, page=self, user=request.user
271 )
272
273 if form.is_valid():
274 form_submission = self.process_form_submission(form)
275 return self.render_landing_page(
276 request, form_submission, *args, **kwargs
277 )
278 else:
279 form = self.get_form(page=self, user=request.user)
280
281 context = self.get_context(request)
282 context["form"] = form
283 return TemplateResponse(request, self.get_template(request), context)
284
285 preview_modes = [
286 ("form", _("Form")),
287 ("landing", _("Landing page")),
288 ]
289
290 def serve_preview(self, request, mode_name):
291 if mode_name == "landing":
292 return self.render_landing_page(request)
293 else:
294 return super().serve_preview(request, mode_name)
295
296
297 def validate_to_address(value):
298 for address in value.split(","):
299 validate_email(address.strip())
300
301
302 class AbstractEmailForm(AbstractForm):
303 """
304 A Form Page that sends email. Pages implementing a form to be send to an email should inherit from it
305 """
306
307 to_address = models.CharField(
308 verbose_name=_("to address"),
309 max_length=255,
310 blank=True,
311 help_text=_(
312 "Optional - form submissions will be emailed to these addresses. Separate multiple addresses by comma."
313 ),
314 validators=[validate_to_address],
315 )
316 from_address = models.EmailField(
317 verbose_name=_("from address"), max_length=255, blank=True
318 )
319 subject = models.CharField(verbose_name=_("subject"), max_length=255, blank=True)
320
321 def process_form_submission(self, form):
322 submission = super().process_form_submission(form)
323 if self.to_address:
324 self.send_mail(form)
325 return submission
326
327 def send_mail(self, form):
328 addresses = [x.strip() for x in self.to_address.split(",")]
329 send_mail(
330 self.subject,
331 self.render_email(form),
332 addresses,
333 self.from_address,
334 )
335
336 def render_email(self, form):
337 content = []
338
339 cleaned_data = form.cleaned_data
340 for field in form:
341 if field.name not in cleaned_data:
342 continue
343
344 value = cleaned_data.get(field.name)
345
346 if isinstance(value, list):
347 value = ", ".join(value)
348
349 # Format dates and datetimes with SHORT_DATE(TIME)_FORMAT
350 if isinstance(value, datetime.datetime):
351 value = date_format(value, settings.SHORT_DATETIME_FORMAT)
352 elif isinstance(value, datetime.date):
353 value = date_format(value, settings.SHORT_DATE_FORMAT)
354
355 content.append("{}: {}".format(field.label, value))
356
357 return "\n".join(content)
358
359 class Meta:
360 abstract = True
361
[end of wagtail/contrib/forms/models.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/wagtail/contrib/forms/models.py b/wagtail/contrib/forms/models.py
--- a/wagtail/contrib/forms/models.py
+++ b/wagtail/contrib/forms/models.py
@@ -59,7 +59,7 @@
}
def __str__(self):
- return self.form_data
+ return f"{self.form_data}"
class Meta:
abstract = True
|
{"golden_diff": "diff --git a/wagtail/contrib/forms/models.py b/wagtail/contrib/forms/models.py\n--- a/wagtail/contrib/forms/models.py\n+++ b/wagtail/contrib/forms/models.py\n@@ -59,7 +59,7 @@\n }\n \n def __str__(self):\n- return self.form_data\n+ return f\"{self.form_data}\"\n \n class Meta:\n abstract = True\n", "issue": "__str__ method doesn't return a string.\nThis code sample:\r\n\r\n >>> from wagtail.contrib.forms.models import FormSubmission\r\n\r\n >>> FormSubmission.objects.count()\r\n 1\r\n >>> FormSubmission.objects.first()\r\n Traceback (most recent call last):\r\n File \"<console>\", line 1, in <module>\r\n File \"lib64/python3.10/site-packages/django/db/models/base.py\", line 580, in __repr__\r\n return \"<%s: %s>\" % (self.__class__.__name__, self)\r\n TypeError: __str__ returned non-string (type dict)\r\n\r\nThis method:\r\n\r\nhttps://github.com/wagtail/wagtail/blob/18ad15a18f8e533b858ccde7d060b9d4e85dcfd4/wagtail/contrib/forms/models.py#L61-L62\r\n\r\nshould be:\r\n\r\n def __str__(self):\r\n return f\"{self.form_data}\"\r\n\r\n \n", "before_files": [{"content": "import datetime\nimport os\n\nfrom django.conf import settings\nfrom django.core.serializers.json import DjangoJSONEncoder\nfrom django.core.validators import validate_email\nfrom django.db import models\nfrom django.template.response import TemplateResponse\nfrom django.utils.formats import date_format\nfrom django.utils.translation import gettext_lazy as _\n\nfrom wagtail.admin.mail import send_mail\nfrom wagtail.admin.panels import FieldPanel\nfrom wagtail.contrib.forms.utils import get_field_clean_name\nfrom wagtail.models import Orderable, Page\n\nfrom .forms import FormBuilder, WagtailAdminFormPageForm\n\nFORM_FIELD_CHOICES = (\n (\"singleline\", _(\"Single line text\")),\n (\"multiline\", _(\"Multi-line text\")),\n (\"email\", _(\"Email\")),\n (\"number\", _(\"Number\")),\n (\"url\", _(\"URL\")),\n (\"checkbox\", _(\"Checkbox\")),\n (\"checkboxes\", _(\"Checkboxes\")),\n (\"dropdown\", _(\"Drop down\")),\n (\"multiselect\", _(\"Multiple select\")),\n (\"radio\", _(\"Radio buttons\")),\n (\"date\", _(\"Date\")),\n (\"datetime\", _(\"Date/time\")),\n (\"hidden\", _(\"Hidden field\")),\n)\n\n\nclass AbstractFormSubmission(models.Model):\n \"\"\"\n Data for a form submission.\n\n You can create custom submission model based on this abstract model.\n For example, if you need to save additional data or a reference to a user.\n \"\"\"\n\n form_data = models.JSONField(encoder=DjangoJSONEncoder)\n page = models.ForeignKey(Page, on_delete=models.CASCADE)\n\n submit_time = models.DateTimeField(verbose_name=_(\"submit time\"), auto_now_add=True)\n\n def get_data(self):\n \"\"\"\n Returns dict with form data.\n\n You can override this method to add additional data.\n \"\"\"\n\n return {\n **self.form_data,\n \"submit_time\": self.submit_time,\n }\n\n def __str__(self):\n return self.form_data\n\n class Meta:\n abstract = True\n verbose_name = _(\"form submission\")\n verbose_name_plural = _(\"form submissions\")\n\n\nclass FormSubmission(AbstractFormSubmission):\n \"\"\"Data for a Form submission.\"\"\"\n\n\nclass AbstractFormField(Orderable):\n \"\"\"\n Database Fields required for building a Django Form field.\n \"\"\"\n\n clean_name = models.CharField(\n verbose_name=_(\"name\"),\n max_length=255,\n blank=True,\n default=\"\",\n help_text=_(\n \"Safe name of the form field, the label converted to ascii_snake_case\"\n ),\n )\n label = models.CharField(\n verbose_name=_(\"label\"),\n max_length=255,\n 
help_text=_(\"The label of the form field\"),\n )\n field_type = models.CharField(\n verbose_name=_(\"field type\"), max_length=16, choices=FORM_FIELD_CHOICES\n )\n required = models.BooleanField(verbose_name=_(\"required\"), default=True)\n choices = models.TextField(\n verbose_name=_(\"choices\"),\n blank=True,\n help_text=_(\n \"Comma or new line separated list of choices. Only applicable in checkboxes, radio and dropdown.\"\n ),\n )\n default_value = models.TextField(\n verbose_name=_(\"default value\"),\n blank=True,\n help_text=_(\n \"Default value. Comma or new line separated values supported for checkboxes.\"\n ),\n )\n help_text = models.CharField(\n verbose_name=_(\"help text\"), max_length=255, blank=True\n )\n\n panels = [\n FieldPanel(\"label\"),\n FieldPanel(\"help_text\"),\n FieldPanel(\"required\"),\n FieldPanel(\"field_type\", classname=\"formbuilder-type\"),\n FieldPanel(\"choices\", classname=\"formbuilder-choices\"),\n FieldPanel(\"default_value\", classname=\"formbuilder-default\"),\n ]\n\n def get_field_clean_name(self):\n \"\"\"\n Prepare an ascii safe lower_snake_case variant of the field name to use as the field key.\n This key is used to reference the field responses in the JSON store and as the field name in forms.\n Called for new field creation, validation of duplicate labels and form previews.\n When called, does not have access to the Page, nor its own id as the record is not yet created.\n \"\"\"\n\n return get_field_clean_name(self.label)\n\n def save(self, *args, **kwargs):\n \"\"\"\n When new fields are created, generate a template safe ascii name to use as the\n JSON storage reference for this field. Previously created fields will be updated\n to use the legacy unidecode method via checks & _migrate_legacy_clean_name.\n We do not want to update the clean name on any subsequent changes to the label\n as this would invalidate any previously submitted data.\n \"\"\"\n\n is_new = self.pk is None\n if is_new:\n clean_name = self.get_field_clean_name()\n self.clean_name = clean_name\n\n super().save(*args, **kwargs)\n\n class Meta:\n abstract = True\n ordering = [\"sort_order\"]\n\n\nclass AbstractForm(Page):\n \"\"\"\n A Form Page. 
Pages implementing a form should inherit from it\n \"\"\"\n\n base_form_class = WagtailAdminFormPageForm\n\n form_builder = FormBuilder\n\n submissions_list_view_class = None\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n if not hasattr(self, \"landing_page_template\"):\n name, ext = os.path.splitext(self.template)\n self.landing_page_template = name + \"_landing\" + ext\n\n class Meta:\n abstract = True\n\n def get_form_fields(self):\n \"\"\"\n Form page expects `form_fields` to be declared.\n If you want to change backwards relation name,\n you need to override this method.\n \"\"\"\n\n return self.form_fields.all()\n\n def get_data_fields(self):\n \"\"\"\n Returns a list of tuples with (field_name, field_label).\n \"\"\"\n\n data_fields = [\n (\"submit_time\", _(\"Submission date\")),\n ]\n data_fields += [\n (field.clean_name, field.label) for field in self.get_form_fields()\n ]\n\n return data_fields\n\n def get_form_class(self):\n fb = self.form_builder(self.get_form_fields())\n return fb.get_form_class()\n\n def get_form_parameters(self):\n return {}\n\n def get_form(self, *args, **kwargs):\n form_class = self.get_form_class()\n form_params = self.get_form_parameters()\n form_params.update(kwargs)\n\n return form_class(*args, **form_params)\n\n def get_landing_page_template(self, request, *args, **kwargs):\n return self.landing_page_template\n\n def get_submission_class(self):\n \"\"\"\n Returns submission class.\n\n You can override this method to provide custom submission class.\n Your class must be inherited from AbstractFormSubmission.\n \"\"\"\n\n return FormSubmission\n\n def get_submissions_list_view_class(self):\n from .views import SubmissionsListView\n\n return self.submissions_list_view_class or SubmissionsListView\n\n def process_form_submission(self, form):\n \"\"\"\n Accepts form instance with submitted data, user and page.\n Creates submission instance.\n\n You can override this method if you want to have custom creation logic.\n For example, if you want to save reference to a user.\n \"\"\"\n\n return self.get_submission_class().objects.create(\n form_data=form.cleaned_data,\n page=self,\n )\n\n def render_landing_page(self, request, form_submission=None, *args, **kwargs):\n \"\"\"\n Renders the landing page.\n\n You can override this method to return a different HttpResponse as\n landing page. E.g. 
you could return a redirect to a separate page.\n \"\"\"\n context = self.get_context(request)\n context[\"form_submission\"] = form_submission\n return TemplateResponse(\n request, self.get_landing_page_template(request), context\n )\n\n def serve_submissions_list_view(self, request, *args, **kwargs):\n \"\"\"\n Returns list submissions view for admin.\n\n `list_submissions_view_class` can bse set to provide custom view class.\n Your class must be inherited from SubmissionsListView.\n \"\"\"\n view = self.get_submissions_list_view_class().as_view()\n return view(request, form_page=self, *args, **kwargs)\n\n def serve(self, request, *args, **kwargs):\n if request.method == \"POST\":\n form = self.get_form(\n request.POST, request.FILES, page=self, user=request.user\n )\n\n if form.is_valid():\n form_submission = self.process_form_submission(form)\n return self.render_landing_page(\n request, form_submission, *args, **kwargs\n )\n else:\n form = self.get_form(page=self, user=request.user)\n\n context = self.get_context(request)\n context[\"form\"] = form\n return TemplateResponse(request, self.get_template(request), context)\n\n preview_modes = [\n (\"form\", _(\"Form\")),\n (\"landing\", _(\"Landing page\")),\n ]\n\n def serve_preview(self, request, mode_name):\n if mode_name == \"landing\":\n return self.render_landing_page(request)\n else:\n return super().serve_preview(request, mode_name)\n\n\ndef validate_to_address(value):\n for address in value.split(\",\"):\n validate_email(address.strip())\n\n\nclass AbstractEmailForm(AbstractForm):\n \"\"\"\n A Form Page that sends email. Pages implementing a form to be send to an email should inherit from it\n \"\"\"\n\n to_address = models.CharField(\n verbose_name=_(\"to address\"),\n max_length=255,\n blank=True,\n help_text=_(\n \"Optional - form submissions will be emailed to these addresses. Separate multiple addresses by comma.\"\n ),\n validators=[validate_to_address],\n )\n from_address = models.EmailField(\n verbose_name=_(\"from address\"), max_length=255, blank=True\n )\n subject = models.CharField(verbose_name=_(\"subject\"), max_length=255, blank=True)\n\n def process_form_submission(self, form):\n submission = super().process_form_submission(form)\n if self.to_address:\n self.send_mail(form)\n return submission\n\n def send_mail(self, form):\n addresses = [x.strip() for x in self.to_address.split(\",\")]\n send_mail(\n self.subject,\n self.render_email(form),\n addresses,\n self.from_address,\n )\n\n def render_email(self, form):\n content = []\n\n cleaned_data = form.cleaned_data\n for field in form:\n if field.name not in cleaned_data:\n continue\n\n value = cleaned_data.get(field.name)\n\n if isinstance(value, list):\n value = \", \".join(value)\n\n # Format dates and datetimes with SHORT_DATE(TIME)_FORMAT\n if isinstance(value, datetime.datetime):\n value = date_format(value, settings.SHORT_DATETIME_FORMAT)\n elif isinstance(value, datetime.date):\n value = date_format(value, settings.SHORT_DATE_FORMAT)\n\n content.append(\"{}: {}\".format(field.label, value))\n\n return \"\\n\".join(content)\n\n class Meta:\n abstract = True\n", "path": "wagtail/contrib/forms/models.py"}]}
| 4,082 | 92 |
gh_patches_debug_16915
|
rasdani/github-patches
|
git_diff
|
fossasia__open-event-server-5322
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add unit test for Event helpers
**Is your feature request related to a problem? Please describe.**
<!-- A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] -->
We should add unit tests for event helpers.
**Additional context**
<!-- Add any other context or screenshots about the feature request here. -->
Parent: #5320
</issue>
<code>
[start of app/api/helpers/errors.py]
1 import json
2
3 from flask import make_response
4 from flask_rest_jsonapi.errors import jsonapi_errors
5
6
7 class ErrorResponse:
8 """
9 Parent ErrorResponse class for handling json-api compliant errors.
10 Inspired by the JsonApiException class of `flask-rest-jsonapi` itself
11 """
12 title = 'Unknown error'
13 status = 500
14 headers = {'Content-Type': 'application/vnd.api+json'}
15
16 def __init__(self, source, detail, title=None, status=None):
17 """Initialize a jsonapi ErrorResponse Object
18
19 :param dict source: the source of the error
20 :param str detail: the detail of the error
21 """
22 self.source = source
23 self.detail = detail
24 if title is not None:
25 self.title = title
26 if status is not None:
27 self.status = status
28
29 def respond(self):
30 """
31 :return: a jsonapi compliant response object
32 """
33 dict_ = self.to_dict()
34 return make_response(json.dumps(jsonapi_errors([dict_])), self.status, self.headers)
35
36 def to_dict(self):
37 """
38 :return: Dict from details of the object
39 """
40 return {'status': self.status,
41 'source': self.source,
42 'title': self.title,
43 'detail': self.detail}
44
45
46 class ForbiddenError(ErrorResponse):
47 """
48 Default class for 403 Error
49 """
50 title = 'Access Forbidden'
51 status = 403
52
53
54 class NotFoundError(ErrorResponse):
55 """
56 Default class for 404 Error
57 """
58 title = 'Not Found'
59 status = 404
60
61
62 class ServerError(ErrorResponse):
63 status = 500
64 title = 'Internal Server Error'
65
66
67 class UnprocessableEntityError(ErrorResponse):
68 status = 422
69 title = 'Unprocessable Entity'
70
71
72 class BadRequestError(ErrorResponse):
73 status = 400
74 title = 'Bad Request'
75
[end of app/api/helpers/errors.py]
[start of app/api/helpers/exceptions.py]
1 from flask_rest_jsonapi.exceptions import JsonApiException
2
3
4 class UnprocessableEntity(JsonApiException):
5 title = "Unprocessable Entity"
6 status = 422
7
8
9 class ConflictException(JsonApiException):
10 title = "Conflict"
11 status = 409
12
13
14 class ForbiddenException(JsonApiException):
15 """
16 Default class for 403 Error
17 """
18 title = 'Access Forbidden'
19 status = 403
20
21
22 class MethodNotAllowed(JsonApiException):
23 """
24 Default Class to throw HTTP 405 Exception
25 """
26 title = "Method Not Allowed"
27 status = 405
28
[end of app/api/helpers/exceptions.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/app/api/helpers/errors.py b/app/api/helpers/errors.py
--- a/app/api/helpers/errors.py
+++ b/app/api/helpers/errors.py
@@ -65,10 +65,16 @@
class UnprocessableEntityError(ErrorResponse):
+ """
+ Default class for 422 Error
+ """
status = 422
title = 'Unprocessable Entity'
class BadRequestError(ErrorResponse):
+ """
+ Default class for 400 Error
+ """
status = 400
title = 'Bad Request'
diff --git a/app/api/helpers/exceptions.py b/app/api/helpers/exceptions.py
--- a/app/api/helpers/exceptions.py
+++ b/app/api/helpers/exceptions.py
@@ -2,11 +2,17 @@
class UnprocessableEntity(JsonApiException):
+ """
+ Default class for 422 Error
+ """
title = "Unprocessable Entity"
status = 422
class ConflictException(JsonApiException):
+ """
+ Default class for 409 Error
+ """
title = "Conflict"
status = 409
|
{"golden_diff": "diff --git a/app/api/helpers/errors.py b/app/api/helpers/errors.py\n--- a/app/api/helpers/errors.py\n+++ b/app/api/helpers/errors.py\n@@ -65,10 +65,16 @@\n \n \n class UnprocessableEntityError(ErrorResponse):\n+ \"\"\"\n+ Default class for 422 Error\n+ \"\"\"\n status = 422\n title = 'Unprocessable Entity'\n \n \n class BadRequestError(ErrorResponse):\n+ \"\"\"\n+ Default class for 400 Error\n+ \"\"\"\n status = 400\n title = 'Bad Request'\ndiff --git a/app/api/helpers/exceptions.py b/app/api/helpers/exceptions.py\n--- a/app/api/helpers/exceptions.py\n+++ b/app/api/helpers/exceptions.py\n@@ -2,11 +2,17 @@\n \n \n class UnprocessableEntity(JsonApiException):\n+ \"\"\"\n+ Default class for 422 Error\n+ \"\"\"\n title = \"Unprocessable Entity\"\n status = 422\n \n \n class ConflictException(JsonApiException):\n+ \"\"\"\n+ Default class for 409 Error\n+ \"\"\"\n title = \"Conflict\"\n status = 409\n", "issue": "Add unit test for Event helpers\n**Is your feature request related to a problem? Please describe.**\r\n<!-- A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] -->\r\nWe should add unit tests for event helpers.\r\n\r\n**Additional context**\r\n<!-- Add any other context or screenshots about the feature request here. -->\r\nParent: #5320 \n", "before_files": [{"content": "import json\n\nfrom flask import make_response\nfrom flask_rest_jsonapi.errors import jsonapi_errors\n\n\nclass ErrorResponse:\n \"\"\"\n Parent ErrorResponse class for handling json-api compliant errors.\n Inspired by the JsonApiException class of `flask-rest-jsonapi` itself\n \"\"\"\n title = 'Unknown error'\n status = 500\n headers = {'Content-Type': 'application/vnd.api+json'}\n\n def __init__(self, source, detail, title=None, status=None):\n \"\"\"Initialize a jsonapi ErrorResponse Object\n\n :param dict source: the source of the error\n :param str detail: the detail of the error\n \"\"\"\n self.source = source\n self.detail = detail\n if title is not None:\n self.title = title\n if status is not None:\n self.status = status\n\n def respond(self):\n \"\"\"\n :return: a jsonapi compliant response object\n \"\"\"\n dict_ = self.to_dict()\n return make_response(json.dumps(jsonapi_errors([dict_])), self.status, self.headers)\n\n def to_dict(self):\n \"\"\"\n :return: Dict from details of the object\n \"\"\"\n return {'status': self.status,\n 'source': self.source,\n 'title': self.title,\n 'detail': self.detail}\n\n\nclass ForbiddenError(ErrorResponse):\n \"\"\"\n Default class for 403 Error\n \"\"\"\n title = 'Access Forbidden'\n status = 403\n\n\nclass NotFoundError(ErrorResponse):\n \"\"\"\n Default class for 404 Error\n \"\"\"\n title = 'Not Found'\n status = 404\n\n\nclass ServerError(ErrorResponse):\n status = 500\n title = 'Internal Server Error'\n\n\nclass UnprocessableEntityError(ErrorResponse):\n status = 422\n title = 'Unprocessable Entity'\n\n\nclass BadRequestError(ErrorResponse):\n status = 400\n title = 'Bad Request'\n", "path": "app/api/helpers/errors.py"}, {"content": "from flask_rest_jsonapi.exceptions import JsonApiException\n\n\nclass UnprocessableEntity(JsonApiException):\n title = \"Unprocessable Entity\"\n status = 422\n\n\nclass ConflictException(JsonApiException):\n title = \"Conflict\"\n status = 409\n\n\nclass ForbiddenException(JsonApiException):\n \"\"\"\n Default class for 403 Error\n \"\"\"\n title = 'Access Forbidden'\n status = 403\n\n\nclass MethodNotAllowed(JsonApiException):\n \"\"\"\n Default Class to throw HTTP 405 
Exception\n \"\"\"\n title = \"Method Not Allowed\"\n status = 405\n", "path": "app/api/helpers/exceptions.py"}]}
| 1,384 | 256 |
gh_patches_debug_36249
|
rasdani/github-patches
|
git_diff
|
microsoft__AzureTRE-965
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Rename custom_parameters.json to template_schema.json (inc in python code)
</issue>
<code>
[start of api_app/models/schemas/user_resource_template.py]
1 from pydantic import Field
2
3 from models.domain.resource import ResourceType
4 from models.domain.resource_template import Property
5 from models.domain.user_resource_template import UserResourceTemplate
6 from models.schemas.resource_template import ResourceTemplateInCreate, ResourceTemplateInResponse
7
8
9 def get_sample_user_resource_template_object(template_name: str = "guacamole-vm") -> UserResourceTemplate:
10 return UserResourceTemplate(
11 id="a7a7a7bd-7f4e-4a4e-b970-dc86a6b31dfb",
12 name=template_name,
13 parentWorkspaceService="guacamole",
14 description="user resource bundle",
15 version="0.1.0",
16 resourceType=ResourceType.UserResource,
17 current=True,
18 type="object",
19 required=["display_name", "description"],
20 properties={
21 "display_name": Property(type="string"),
22 "description": Property(type="string")
23 }
24 )
25
26
27 def get_sample_user_resource_template() -> dict:
28 return get_sample_user_resource_template_object().dict()
29
30
31 def get_sample_user_resource_template_in_response() -> dict:
32 workspace_template = get_sample_user_resource_template()
33 return workspace_template
34
35
36 class UserResourceTemplateInCreate(ResourceTemplateInCreate):
37
38 class Config:
39 schema_extra = {
40 "example": {
41 "name": "my-tre-user-resource",
42 "version": "0.0.1",
43 "current": "true",
44 "json_schema": {
45 "$schema": "http://json-schema.org/draft-07/schema",
46 "$id": "https://github.com/microsoft/AzureTRE/templates/workspaces/myworkspace/user_resource.json",
47 "type": "object",
48 "title": "My User Resource Template Custom Parameters",
49 "description": "These parameters are specific to my user resource template",
50 "required": [],
51 "properties": {}
52 }
53 }
54 }
55
56
57 class UserResourceTemplateInResponse(ResourceTemplateInResponse):
58 parentWorkspaceService: str = Field(title="Workspace type", description="Bundle name")
59
60 class Config:
61 schema_extra = {
62 "example": get_sample_user_resource_template_in_response()
63 }
64
[end of api_app/models/schemas/user_resource_template.py]
[start of api_app/models/schemas/workspace_template.py]
1 from models.domain.resource import ResourceType
2 from models.domain.resource_template import ResourceTemplate, Property
3 from models.schemas.resource_template import ResourceTemplateInCreate, ResourceTemplateInResponse
4
5
6 def get_sample_workspace_template_object(template_name: str = "tre-workspace-base") -> ResourceTemplate:
7 return ResourceTemplate(
8 id="a7a7a7bd-7f4e-4a4e-b970-dc86a6b31dfb",
9 name=template_name,
10 description="base workspace bundle",
11 version="0.1.0",
12 resourceType=ResourceType.Workspace,
13 current=True,
14 type="object",
15 required=["display_name", "description", "app_id"],
16 properties={
17 "display_name": Property(type="string"),
18 "description": Property(type="string"),
19 "app_id": Property(type="string"),
20 "address_space": Property(type="string", default="10.2.1.0/24", description="VNet address space for the workspace services")
21 }
22 )
23
24
25 def get_sample_workspace_template_in_response() -> dict:
26 workspace_template = get_sample_workspace_template_object().dict()
27 workspace_template["system_properties"] = {
28 "tre_id": Property(type="string"),
29 "workspace_id": Property(type="string"),
30 "azure_location": Property(type="string"),
31 }
32 return workspace_template
33
34
35 class WorkspaceTemplateInCreate(ResourceTemplateInCreate):
36
37 class Config:
38 schema_extra = {
39 "example": {
40 "name": "my-tre-workspace",
41 "version": "0.0.1",
42 "current": "true",
43 "json_schema": {
44 "$schema": "http://json-schema.org/draft-07/schema",
45 "$id": "https://github.com/microsoft/AzureTRE/templates/workspaces/myworkspace/workspace.json",
46 "type": "object",
47 "title": "My Workspace Template Custom Parameters",
48 "description": "These parameters are specific to my workspace template",
49 "required": [
50 "vm_size",
51 "no_of_vms"
52 ],
53 "properties": {
54 "vm_size": {
55 "$id": "#/properties/vm_size",
56 "type": "string",
57 "title": "VM size",
58 "description": "Size of the VMs in my workspace",
59 "default": "Standard_A1",
60 "enum": [
61 "Standard_A1",
62 "Standard_A2",
63 "Standard_A3"
64 ]
65 },
66 "no_of_vms": {
67 "$id": "#/properties/no_of_vms",
68 "type": "integer",
69 "title": "Number of VMs",
70 "description": "Number of virtual machines to be deployed in the workspace",
71 "default": 0
72 }
73 }
74 }
75 }
76 }
77
78
79 class WorkspaceTemplateInResponse(ResourceTemplateInResponse):
80
81 class Config:
82 schema_extra = {
83 "example": get_sample_workspace_template_in_response()
84 }
85
[end of api_app/models/schemas/workspace_template.py]
[start of api_app/models/schemas/workspace_service_template.py]
1 from models.domain.resource import ResourceType
2 from models.domain.resource_template import ResourceTemplate, Property
3 from models.schemas.resource_template import ResourceTemplateInCreate, ResourceTemplateInResponse
4
5
6 def get_sample_workspace_service_template_object(template_name: str = "tre-workspace-service") -> ResourceTemplate:
7 return ResourceTemplate(
8 id="a7a7a7bd-7f4e-4a4e-b970-dc86a6b31dfb",
9 name=template_name,
10 description="workspace service bundle",
11 version="0.1.0",
12 resourceType=ResourceType.WorkspaceService,
13 current=True,
14 type="object",
15 required=["display_name", "description"],
16 properties={
17 "display_name": Property(type="string"),
18 "description": Property(type="string")
19 }
20 )
21
22
23 def get_sample_workspace_service_template() -> dict:
24 return get_sample_workspace_service_template_object().dict()
25
26
27 def get_sample_workspace_service_template_in_response() -> dict:
28 workspace_template = get_sample_workspace_service_template()
29 workspace_template["system_properties"] = {
30 "tre_id": Property(type="string"),
31 "workspace_id": Property(type="string"),
32 "azure_location": Property(type="string"),
33 }
34 return workspace_template
35
36
37 class WorkspaceServiceTemplateInCreate(ResourceTemplateInCreate):
38
39 class Config:
40 schema_extra = {
41 "example": {
42 "name": "my-tre-workspace-service",
43 "version": "0.0.1",
44 "current": "true",
45 "json_schema": {
46 "$schema": "http://json-schema.org/draft-07/schema",
47 "$id": "https://github.com/microsoft/AzureTRE/templates/workspaces/myworkspace/workspace_service.json",
48 "type": "object",
49 "title": "My Workspace Service Template Custom Parameters",
50 "description": "These parameters are specific to my workspace service template",
51 "required": [],
52 "properties": {}
53 }
54 }
55 }
56
57
58 class WorkspaceServiceTemplateInResponse(ResourceTemplateInResponse):
59
60 class Config:
61 schema_extra = {
62 "example": get_sample_workspace_service_template_in_response()
63 }
64
[end of api_app/models/schemas/workspace_service_template.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/api_app/models/schemas/user_resource_template.py b/api_app/models/schemas/user_resource_template.py
--- a/api_app/models/schemas/user_resource_template.py
+++ b/api_app/models/schemas/user_resource_template.py
@@ -45,8 +45,8 @@
"$schema": "http://json-schema.org/draft-07/schema",
"$id": "https://github.com/microsoft/AzureTRE/templates/workspaces/myworkspace/user_resource.json",
"type": "object",
- "title": "My User Resource Template Custom Parameters",
- "description": "These parameters are specific to my user resource template",
+ "title": "My User Resource Template",
+ "description": "These is a test user resource template schema",
"required": [],
"properties": {}
}
diff --git a/api_app/models/schemas/workspace_service_template.py b/api_app/models/schemas/workspace_service_template.py
--- a/api_app/models/schemas/workspace_service_template.py
+++ b/api_app/models/schemas/workspace_service_template.py
@@ -46,8 +46,8 @@
"$schema": "http://json-schema.org/draft-07/schema",
"$id": "https://github.com/microsoft/AzureTRE/templates/workspaces/myworkspace/workspace_service.json",
"type": "object",
- "title": "My Workspace Service Template Custom Parameters",
- "description": "These parameters are specific to my workspace service template",
+ "title": "My Workspace Service Template",
+ "description": "These is a test workspace service resource template schema",
"required": [],
"properties": {}
}
diff --git a/api_app/models/schemas/workspace_template.py b/api_app/models/schemas/workspace_template.py
--- a/api_app/models/schemas/workspace_template.py
+++ b/api_app/models/schemas/workspace_template.py
@@ -44,8 +44,8 @@
"$schema": "http://json-schema.org/draft-07/schema",
"$id": "https://github.com/microsoft/AzureTRE/templates/workspaces/myworkspace/workspace.json",
"type": "object",
- "title": "My Workspace Template Custom Parameters",
- "description": "These parameters are specific to my workspace template",
+ "title": "My Workspace Template",
+ "description": "This is a test workspace template schema",
"required": [
"vm_size",
"no_of_vms"
|
{"golden_diff": "diff --git a/api_app/models/schemas/user_resource_template.py b/api_app/models/schemas/user_resource_template.py\n--- a/api_app/models/schemas/user_resource_template.py\n+++ b/api_app/models/schemas/user_resource_template.py\n@@ -45,8 +45,8 @@\n \"$schema\": \"http://json-schema.org/draft-07/schema\",\n \"$id\": \"https://github.com/microsoft/AzureTRE/templates/workspaces/myworkspace/user_resource.json\",\n \"type\": \"object\",\n- \"title\": \"My User Resource Template Custom Parameters\",\n- \"description\": \"These parameters are specific to my user resource template\",\n+ \"title\": \"My User Resource Template\",\n+ \"description\": \"These is a test user resource template schema\",\n \"required\": [],\n \"properties\": {}\n }\ndiff --git a/api_app/models/schemas/workspace_service_template.py b/api_app/models/schemas/workspace_service_template.py\n--- a/api_app/models/schemas/workspace_service_template.py\n+++ b/api_app/models/schemas/workspace_service_template.py\n@@ -46,8 +46,8 @@\n \"$schema\": \"http://json-schema.org/draft-07/schema\",\n \"$id\": \"https://github.com/microsoft/AzureTRE/templates/workspaces/myworkspace/workspace_service.json\",\n \"type\": \"object\",\n- \"title\": \"My Workspace Service Template Custom Parameters\",\n- \"description\": \"These parameters are specific to my workspace service template\",\n+ \"title\": \"My Workspace Service Template\",\n+ \"description\": \"These is a test workspace service resource template schema\",\n \"required\": [],\n \"properties\": {}\n }\ndiff --git a/api_app/models/schemas/workspace_template.py b/api_app/models/schemas/workspace_template.py\n--- a/api_app/models/schemas/workspace_template.py\n+++ b/api_app/models/schemas/workspace_template.py\n@@ -44,8 +44,8 @@\n \"$schema\": \"http://json-schema.org/draft-07/schema\",\n \"$id\": \"https://github.com/microsoft/AzureTRE/templates/workspaces/myworkspace/workspace.json\",\n \"type\": \"object\",\n- \"title\": \"My Workspace Template Custom Parameters\",\n- \"description\": \"These parameters are specific to my workspace template\",\n+ \"title\": \"My Workspace Template\",\n+ \"description\": \"This is a test workspace template schema\",\n \"required\": [\n \"vm_size\",\n \"no_of_vms\"\n", "issue": "Rename custom_parameters.json to template_schema.json (inc in python code)\n\n", "before_files": [{"content": "from pydantic import Field\n\nfrom models.domain.resource import ResourceType\nfrom models.domain.resource_template import Property\nfrom models.domain.user_resource_template import UserResourceTemplate\nfrom models.schemas.resource_template import ResourceTemplateInCreate, ResourceTemplateInResponse\n\n\ndef get_sample_user_resource_template_object(template_name: str = \"guacamole-vm\") -> UserResourceTemplate:\n return UserResourceTemplate(\n id=\"a7a7a7bd-7f4e-4a4e-b970-dc86a6b31dfb\",\n name=template_name,\n parentWorkspaceService=\"guacamole\",\n description=\"user resource bundle\",\n version=\"0.1.0\",\n resourceType=ResourceType.UserResource,\n current=True,\n type=\"object\",\n required=[\"display_name\", \"description\"],\n properties={\n \"display_name\": Property(type=\"string\"),\n \"description\": Property(type=\"string\")\n }\n )\n\n\ndef get_sample_user_resource_template() -> dict:\n return get_sample_user_resource_template_object().dict()\n\n\ndef get_sample_user_resource_template_in_response() -> dict:\n workspace_template = get_sample_user_resource_template()\n return workspace_template\n\n\nclass 
UserResourceTemplateInCreate(ResourceTemplateInCreate):\n\n class Config:\n schema_extra = {\n \"example\": {\n \"name\": \"my-tre-user-resource\",\n \"version\": \"0.0.1\",\n \"current\": \"true\",\n \"json_schema\": {\n \"$schema\": \"http://json-schema.org/draft-07/schema\",\n \"$id\": \"https://github.com/microsoft/AzureTRE/templates/workspaces/myworkspace/user_resource.json\",\n \"type\": \"object\",\n \"title\": \"My User Resource Template Custom Parameters\",\n \"description\": \"These parameters are specific to my user resource template\",\n \"required\": [],\n \"properties\": {}\n }\n }\n }\n\n\nclass UserResourceTemplateInResponse(ResourceTemplateInResponse):\n parentWorkspaceService: str = Field(title=\"Workspace type\", description=\"Bundle name\")\n\n class Config:\n schema_extra = {\n \"example\": get_sample_user_resource_template_in_response()\n }\n", "path": "api_app/models/schemas/user_resource_template.py"}, {"content": "from models.domain.resource import ResourceType\nfrom models.domain.resource_template import ResourceTemplate, Property\nfrom models.schemas.resource_template import ResourceTemplateInCreate, ResourceTemplateInResponse\n\n\ndef get_sample_workspace_template_object(template_name: str = \"tre-workspace-base\") -> ResourceTemplate:\n return ResourceTemplate(\n id=\"a7a7a7bd-7f4e-4a4e-b970-dc86a6b31dfb\",\n name=template_name,\n description=\"base workspace bundle\",\n version=\"0.1.0\",\n resourceType=ResourceType.Workspace,\n current=True,\n type=\"object\",\n required=[\"display_name\", \"description\", \"app_id\"],\n properties={\n \"display_name\": Property(type=\"string\"),\n \"description\": Property(type=\"string\"),\n \"app_id\": Property(type=\"string\"),\n \"address_space\": Property(type=\"string\", default=\"10.2.1.0/24\", description=\"VNet address space for the workspace services\")\n }\n )\n\n\ndef get_sample_workspace_template_in_response() -> dict:\n workspace_template = get_sample_workspace_template_object().dict()\n workspace_template[\"system_properties\"] = {\n \"tre_id\": Property(type=\"string\"),\n \"workspace_id\": Property(type=\"string\"),\n \"azure_location\": Property(type=\"string\"),\n }\n return workspace_template\n\n\nclass WorkspaceTemplateInCreate(ResourceTemplateInCreate):\n\n class Config:\n schema_extra = {\n \"example\": {\n \"name\": \"my-tre-workspace\",\n \"version\": \"0.0.1\",\n \"current\": \"true\",\n \"json_schema\": {\n \"$schema\": \"http://json-schema.org/draft-07/schema\",\n \"$id\": \"https://github.com/microsoft/AzureTRE/templates/workspaces/myworkspace/workspace.json\",\n \"type\": \"object\",\n \"title\": \"My Workspace Template Custom Parameters\",\n \"description\": \"These parameters are specific to my workspace template\",\n \"required\": [\n \"vm_size\",\n \"no_of_vms\"\n ],\n \"properties\": {\n \"vm_size\": {\n \"$id\": \"#/properties/vm_size\",\n \"type\": \"string\",\n \"title\": \"VM size\",\n \"description\": \"Size of the VMs in my workspace\",\n \"default\": \"Standard_A1\",\n \"enum\": [\n \"Standard_A1\",\n \"Standard_A2\",\n \"Standard_A3\"\n ]\n },\n \"no_of_vms\": {\n \"$id\": \"#/properties/no_of_vms\",\n \"type\": \"integer\",\n \"title\": \"Number of VMs\",\n \"description\": \"Number of virtual machines to be deployed in the workspace\",\n \"default\": 0\n }\n }\n }\n }\n }\n\n\nclass WorkspaceTemplateInResponse(ResourceTemplateInResponse):\n\n class Config:\n schema_extra = {\n \"example\": get_sample_workspace_template_in_response()\n }\n", "path": 
"api_app/models/schemas/workspace_template.py"}, {"content": "from models.domain.resource import ResourceType\nfrom models.domain.resource_template import ResourceTemplate, Property\nfrom models.schemas.resource_template import ResourceTemplateInCreate, ResourceTemplateInResponse\n\n\ndef get_sample_workspace_service_template_object(template_name: str = \"tre-workspace-service\") -> ResourceTemplate:\n return ResourceTemplate(\n id=\"a7a7a7bd-7f4e-4a4e-b970-dc86a6b31dfb\",\n name=template_name,\n description=\"workspace service bundle\",\n version=\"0.1.0\",\n resourceType=ResourceType.WorkspaceService,\n current=True,\n type=\"object\",\n required=[\"display_name\", \"description\"],\n properties={\n \"display_name\": Property(type=\"string\"),\n \"description\": Property(type=\"string\")\n }\n )\n\n\ndef get_sample_workspace_service_template() -> dict:\n return get_sample_workspace_service_template_object().dict()\n\n\ndef get_sample_workspace_service_template_in_response() -> dict:\n workspace_template = get_sample_workspace_service_template()\n workspace_template[\"system_properties\"] = {\n \"tre_id\": Property(type=\"string\"),\n \"workspace_id\": Property(type=\"string\"),\n \"azure_location\": Property(type=\"string\"),\n }\n return workspace_template\n\n\nclass WorkspaceServiceTemplateInCreate(ResourceTemplateInCreate):\n\n class Config:\n schema_extra = {\n \"example\": {\n \"name\": \"my-tre-workspace-service\",\n \"version\": \"0.0.1\",\n \"current\": \"true\",\n \"json_schema\": {\n \"$schema\": \"http://json-schema.org/draft-07/schema\",\n \"$id\": \"https://github.com/microsoft/AzureTRE/templates/workspaces/myworkspace/workspace_service.json\",\n \"type\": \"object\",\n \"title\": \"My Workspace Service Template Custom Parameters\",\n \"description\": \"These parameters are specific to my workspace service template\",\n \"required\": [],\n \"properties\": {}\n }\n }\n }\n\n\nclass WorkspaceServiceTemplateInResponse(ResourceTemplateInResponse):\n\n class Config:\n schema_extra = {\n \"example\": get_sample_workspace_service_template_in_response()\n }\n", "path": "api_app/models/schemas/workspace_service_template.py"}]}
| 2,568 | 522 |
gh_patches_debug_15280
|
rasdani/github-patches
|
git_diff
|
PlasmaPy__PlasmaPy-891
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Change name of chat room from Matrix/Riot to Element
The name of the chat platform we are using has [changed from Matrix/Riot to Element](https://element.io/blog/the-world-is-changing/). We should change the name of our chat room accordingly, including in our README file and possibly some places in our documentation.
</issue>
<code>
[start of plasmapy/particles/isotopes.py]
1 """
2 Module for loading isotope data from :file:`plasmapy/particles/data/isotopes.json`.
3
4 .. attention::
5 This module only contains non-public functionality. To learn more about the
6 package functionality, then examine the code itself.
7 """
8 __all__ = []
9
10 import astropy.units as u
11 import json
12 import pkgutil
13
14 # this code was used to create the JSON file as per vn-ki on Riot:
15 # https://matrix.to/#/!hkWCiyhQyxiYJlUtKF:matrix.org/
16 # $1554667515670438wIKlP:matrix.org?via=matrix.org&via=cadair.com
17 #
18 # def _isotope_default(obj):
19 # if isinstance(obj, u.Quantity):
20 # return {
21 # "unit": obj.unit.name,
22 # "value": obj.value,
23 # }
24 # with open("isotopes.json", "w") as f:
25 # json.dump(_Isotopes, f, default=plasma_default, indent=2)
26
27
28 def _isotope_obj_hook(obj):
29 """An `object_hook` designed for `json.load` and `json.loads`."""
30 if "unit" in obj:
31 return obj["value"] * u.Unit(obj["unit"])
32 return obj
33
34
35 #: Dictionary of isotope data.
36 _Isotopes = json.loads(
37 pkgutil.get_data("plasmapy", "particles/data/isotopes.json"),
38 object_hook=_isotope_obj_hook,
39 )
40
[end of plasmapy/particles/isotopes.py]
[start of plasmapy/particles/elements.py]
1 """
2 Module for loading atomic data for elements from
3 :file:`plasmapy/particles/data/elements.json`.
4
5 The periodic tabla data is from: http://periodic.lanl.gov/index.shtml
6
7 .. attention::
8 This module only contains non-public functionality. To learn more about the
9 package functionality, examine the code itself.
10 """
11 __all__ = []
12
13 import astropy.units as u
14 import collections
15 import json
16 import pkgutil
17
18 _PeriodicTable = collections.namedtuple(
19 "periodic_table", ["group", "category", "block", "period"]
20 )
21
22
23 def _element_obj_hook(obj):
24 if "unit" in obj:
25 return obj["value"] * u.Unit(obj["unit"])
26 return obj
27
28
29 # this code was used to create the JSON file as per vn-ki on Riot:
30 # https://matrix.to/#/!hkWCiyhQyxiYJlUtKF:matrix.org/
31 # $1554667515670438wIKlP:matrix.org?via=matrix.org&via=cadair.com
32 #
33 # def plasma_default(obj):
34 # if isinstance(obj, u.Quantity):
35 # return {
36 # "unit": obj.unit.name,
37 # "value": obj.value,
38 # }
39 #
40 # with open("elements.json", "w") as f:
41 # json.dump(_Elements, f, default=plasma_default, indent=2)
42
43
44 _Elements = json.loads(
45 pkgutil.get_data("plasmapy", "particles/data/elements.json"),
46 object_hook=_element_obj_hook,
47 )
48
49
50 _atomic_numbers_to_symbols = {
51 elemdict["atomic number"]: symb for (symb, elemdict) in _Elements.items()
52 }
53
54 _element_names_to_symbols = {
55 elemdict["element name"]: symb for (symb, elemdict) in _Elements.items()
56 }
57
[end of plasmapy/particles/elements.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/plasmapy/particles/elements.py b/plasmapy/particles/elements.py
--- a/plasmapy/particles/elements.py
+++ b/plasmapy/particles/elements.py
@@ -26,7 +26,7 @@
return obj
-# this code was used to create the JSON file as per vn-ki on Riot:
+# this code was used to create the JSON file as per vn-ki on Matrix:
# https://matrix.to/#/!hkWCiyhQyxiYJlUtKF:matrix.org/
# $1554667515670438wIKlP:matrix.org?via=matrix.org&via=cadair.com
#
diff --git a/plasmapy/particles/isotopes.py b/plasmapy/particles/isotopes.py
--- a/plasmapy/particles/isotopes.py
+++ b/plasmapy/particles/isotopes.py
@@ -11,7 +11,7 @@
import json
import pkgutil
-# this code was used to create the JSON file as per vn-ki on Riot:
+# this code was used to create the JSON file as per vn-ki on Matrix:
# https://matrix.to/#/!hkWCiyhQyxiYJlUtKF:matrix.org/
# $1554667515670438wIKlP:matrix.org?via=matrix.org&via=cadair.com
#
|
{"golden_diff": "diff --git a/plasmapy/particles/elements.py b/plasmapy/particles/elements.py\n--- a/plasmapy/particles/elements.py\n+++ b/plasmapy/particles/elements.py\n@@ -26,7 +26,7 @@\n return obj\n \n \n-# this code was used to create the JSON file as per vn-ki on Riot:\n+# this code was used to create the JSON file as per vn-ki on Matrix:\n # https://matrix.to/#/!hkWCiyhQyxiYJlUtKF:matrix.org/\n # $1554667515670438wIKlP:matrix.org?via=matrix.org&via=cadair.com\n #\ndiff --git a/plasmapy/particles/isotopes.py b/plasmapy/particles/isotopes.py\n--- a/plasmapy/particles/isotopes.py\n+++ b/plasmapy/particles/isotopes.py\n@@ -11,7 +11,7 @@\n import json\n import pkgutil\n \n-# this code was used to create the JSON file as per vn-ki on Riot:\n+# this code was used to create the JSON file as per vn-ki on Matrix:\n # https://matrix.to/#/!hkWCiyhQyxiYJlUtKF:matrix.org/\n # $1554667515670438wIKlP:matrix.org?via=matrix.org&via=cadair.com\n #\n", "issue": "Change name of chat room from Matrix/Riot to Element\nThe name of the chat platform we are using has [changed from Matrix/Riot to Element](https://element.io/blog/the-world-is-changing/). We should change the name of our chat room accordingly, including in our README file and possibly some places in our documentation.\n", "before_files": [{"content": "\"\"\"\nModule for loading isotope data from :file:`plasmapy/particles/data/isotopes.json`.\n\n.. attention::\n This module only contains non-public functionality. To learn more about the\n package functionality, then examine the code itself.\n\"\"\"\n__all__ = []\n\nimport astropy.units as u\nimport json\nimport pkgutil\n\n# this code was used to create the JSON file as per vn-ki on Riot:\n# https://matrix.to/#/!hkWCiyhQyxiYJlUtKF:matrix.org/\n# $1554667515670438wIKlP:matrix.org?via=matrix.org&via=cadair.com\n#\n# def _isotope_default(obj):\n# if isinstance(obj, u.Quantity):\n# return {\n# \"unit\": obj.unit.name,\n# \"value\": obj.value,\n# }\n# with open(\"isotopes.json\", \"w\") as f:\n# json.dump(_Isotopes, f, default=plasma_default, indent=2)\n\n\ndef _isotope_obj_hook(obj):\n \"\"\"An `object_hook` designed for `json.load` and `json.loads`.\"\"\"\n if \"unit\" in obj:\n return obj[\"value\"] * u.Unit(obj[\"unit\"])\n return obj\n\n\n#: Dictionary of isotope data.\n_Isotopes = json.loads(\n pkgutil.get_data(\"plasmapy\", \"particles/data/isotopes.json\"),\n object_hook=_isotope_obj_hook,\n)\n", "path": "plasmapy/particles/isotopes.py"}, {"content": "\"\"\"\nModule for loading atomic data for elements from\n:file:`plasmapy/particles/data/elements.json`.\n\nThe periodic tabla data is from: http://periodic.lanl.gov/index.shtml\n\n.. attention::\n This module only contains non-public functionality. 
To learn more about the\n package functionality, examine the code itself.\n\"\"\"\n__all__ = []\n\nimport astropy.units as u\nimport collections\nimport json\nimport pkgutil\n\n_PeriodicTable = collections.namedtuple(\n \"periodic_table\", [\"group\", \"category\", \"block\", \"period\"]\n)\n\n\ndef _element_obj_hook(obj):\n if \"unit\" in obj:\n return obj[\"value\"] * u.Unit(obj[\"unit\"])\n return obj\n\n\n# this code was used to create the JSON file as per vn-ki on Riot:\n# https://matrix.to/#/!hkWCiyhQyxiYJlUtKF:matrix.org/\n# $1554667515670438wIKlP:matrix.org?via=matrix.org&via=cadair.com\n#\n# def plasma_default(obj):\n# if isinstance(obj, u.Quantity):\n# return {\n# \"unit\": obj.unit.name,\n# \"value\": obj.value,\n# }\n#\n# with open(\"elements.json\", \"w\") as f:\n# json.dump(_Elements, f, default=plasma_default, indent=2)\n\n\n_Elements = json.loads(\n pkgutil.get_data(\"plasmapy\", \"particles/data/elements.json\"),\n object_hook=_element_obj_hook,\n)\n\n\n_atomic_numbers_to_symbols = {\n elemdict[\"atomic number\"]: symb for (symb, elemdict) in _Elements.items()\n}\n\n_element_names_to_symbols = {\n elemdict[\"element name\"]: symb for (symb, elemdict) in _Elements.items()\n}\n", "path": "plasmapy/particles/elements.py"}]}
| 1,560 | 336 |
gh_patches_debug_2500
|
rasdani/github-patches
|
git_diff
|
rlworkgroup__garage-692
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add Intel-optimized version of the package
</issue>
<code>
[start of setup.py]
1 from setuptools import find_packages
2 from setuptools import setup
3
4 # Required dependencies
5 required = [
6 # Please keep alphabetized
7 'akro',
8 'boto3',
9 'cached_property',
10 'click',
11 'cloudpickle',
12 'cma==1.1.06',
13 # dm_control throws an error during install about not being able to
14 # find a build dependency (absl-py). Later pip executes the `install`
15 # command again and the install succeeds because absl-py has been
16 # installed. This is stupid, but harmless.
17 'dm_control @ https://api.github.com/repos/deepmind/dm_control/tarball/92f9913013face0468442cd0964d5973ea2089ea', # noqa: E501
18 'dowel==0.0.2',
19 'gym[all]==0.10.11',
20 'joblib<0.13,>=0.12',
21 'matplotlib',
22 'mujoco-py<1.50.2,>=1.50.1',
23 'numpy==1.14.5',
24 'psutil',
25 'pyprind',
26 'python-dateutil',
27 'scikit-image',
28 'scipy',
29 'tensorflow<1.13,>=1.12.0',
30 'tensorflow-probability<0.6.0,>=0.5.0', # for tensorflow 1.12
31 ]
32
33 # Dependencies for optional features
34 extras = {}
35 extras['all'] = list(set(sum(extras.values(), [])))
36
37 # Development dependencies (*not* included in "all")
38 extras['dev'] = [
39 # Please keep alphabetized
40 'baselines @ https://api.github.com/repos/openai/baselines/tarball/f2729693253c0ef4d4086231d36e0a4307ec1cb3', # noqa: E501
41 'coverage',
42 'flake8',
43 'flake8-docstrings==1.3.0',
44 'flake8-import-order',
45 'nose2',
46 'pandas',
47 'pep8-naming==0.7.0',
48 'pre-commit',
49 'pylint==1.9.2',
50 'sphinx',
51 'sphinx_rtd_theme',
52 'yapf',
53 ]
54
55 with open('README.md') as f:
56 readme = f.read()
57
58 # Get the package version dynamically
59 with open('VERSION') as v:
60 version = v.read().strip()
61
62 setup(
63 name='garage',
64 version=version,
65 author='Reinforcement Learning Working Group',
66 description='A framework for reproducible reinforcement learning research',
67 url='https://github.com/rlworkgroup/garage',
68 packages=find_packages(where='src'),
69 package_dir={'': 'src'},
70 scripts=['scripts/garage'],
71 install_requires=required,
72 extras_require=extras,
73 license='MIT',
74 long_description=readme,
75 long_description_content_type='text/markdown',
76 classifiers=[
77 'Development Status :: 4 - Beta',
78 'Intended Audience :: Developers',
79 'Intended Audience :: Education',
80 'Intended Audience :: Science/Research',
81 'License :: OSI Approved :: MIT License',
82 'Programming Language :: Python :: 3.5',
83 'Programming Language :: Python :: 3.6',
84 'Programming Language :: Python :: 3.7',
85 'Programming Language :: Python :: 3 :: Only',
86 'Topic :: Scientific/Engineering :: Artificial Intelligence',
87 'Topic :: Scientific/Engineering :: Mathematics',
88 'Topic :: Software Development :: Libraries',
89 ],
90 )
91
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -34,6 +34,9 @@
extras = {}
extras['all'] = list(set(sum(extras.values(), [])))
+# Intel dependencies not included in all
+extras['intel'] = ['intel-tensorflow<1.13,>=1.12.0']
+
# Development dependencies (*not* included in "all")
extras['dev'] = [
# Please keep alphabetized
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -34,6 +34,9 @@\n extras = {}\n extras['all'] = list(set(sum(extras.values(), [])))\n \n+# Intel dependencies not included in all\n+extras['intel'] = ['intel-tensorflow<1.13,>=1.12.0']\n+\n # Development dependencies (*not* included in \"all\")\n extras['dev'] = [\n # Please keep alphabetized\n", "issue": "Add Intel-optimized version of the package\n\n", "before_files": [{"content": "from setuptools import find_packages\nfrom setuptools import setup\n\n# Required dependencies\nrequired = [\n # Please keep alphabetized\n 'akro',\n 'boto3',\n 'cached_property',\n 'click',\n 'cloudpickle',\n 'cma==1.1.06',\n # dm_control throws an error during install about not being able to\n # find a build dependency (absl-py). Later pip executes the `install`\n # command again and the install succeeds because absl-py has been\n # installed. This is stupid, but harmless.\n 'dm_control @ https://api.github.com/repos/deepmind/dm_control/tarball/92f9913013face0468442cd0964d5973ea2089ea', # noqa: E501\n 'dowel==0.0.2',\n 'gym[all]==0.10.11',\n 'joblib<0.13,>=0.12',\n 'matplotlib',\n 'mujoco-py<1.50.2,>=1.50.1',\n 'numpy==1.14.5',\n 'psutil',\n 'pyprind',\n 'python-dateutil',\n 'scikit-image',\n 'scipy',\n 'tensorflow<1.13,>=1.12.0',\n 'tensorflow-probability<0.6.0,>=0.5.0', # for tensorflow 1.12\n]\n\n# Dependencies for optional features\nextras = {}\nextras['all'] = list(set(sum(extras.values(), [])))\n\n# Development dependencies (*not* included in \"all\")\nextras['dev'] = [\n # Please keep alphabetized\n 'baselines @ https://api.github.com/repos/openai/baselines/tarball/f2729693253c0ef4d4086231d36e0a4307ec1cb3', # noqa: E501\n 'coverage',\n 'flake8',\n 'flake8-docstrings==1.3.0',\n 'flake8-import-order',\n 'nose2',\n 'pandas',\n 'pep8-naming==0.7.0',\n 'pre-commit',\n 'pylint==1.9.2',\n 'sphinx',\n 'sphinx_rtd_theme',\n 'yapf',\n]\n\nwith open('README.md') as f:\n readme = f.read()\n\n# Get the package version dynamically\nwith open('VERSION') as v:\n version = v.read().strip()\n\nsetup(\n name='garage',\n version=version,\n author='Reinforcement Learning Working Group',\n description='A framework for reproducible reinforcement learning research',\n url='https://github.com/rlworkgroup/garage',\n packages=find_packages(where='src'),\n package_dir={'': 'src'},\n scripts=['scripts/garage'],\n install_requires=required,\n extras_require=extras,\n license='MIT',\n long_description=readme,\n long_description_content_type='text/markdown',\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Intended Audience :: Developers',\n 'Intended Audience :: Education',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3 :: Only',\n 'Topic :: Scientific/Engineering :: Artificial Intelligence',\n 'Topic :: Scientific/Engineering :: Mathematics',\n 'Topic :: Software Development :: Libraries',\n ],\n)\n", "path": "setup.py"}]}
| 1,538 | 108 |
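A note on the fix above: rather than folding Intel TensorFlow into the default dependency list, the golden diff declares an opt-in `intel` extra that is deliberately kept out of `extras['all']`. A minimal sketch of that pattern, assuming standard setuptools `extras_require` semantics (the package pin comes from the diff; everything else here is illustrative):

```python
from setuptools import setup

extras = {}
extras['all'] = list(set(sum(extras.values(), [])))

# Intel dependencies not included in "all": the default install keeps stock
# TensorFlow, and users opt in with `pip install "garage[intel]"`.
extras['intel'] = ['intel-tensorflow<1.13,>=1.12.0']

setup(
    name='garage',
    extras_require=extras,
)
```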
gh_patches_debug_16985
|
rasdani/github-patches
|
git_diff
|
searxng__searxng-2369
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Double bangs without query redirect to search URL instead of main page
<!-- PLEASE FILL THESE FIELDS, IT REALLY HELPS THE MAINTAINERS OF SearXNG -->
**Version of SearXNG, commit number if you are using on master branch and stipulate if you forked SearXNG**
`2023.4.21+98387e29`
**How did you install SearXNG?**
`searxng-docker`
**What happened?**
Double bangs without query redirect to search URL instead of main page
**How To Reproduce**
Use external bang without any search items e.g. `!!gh` and we land on `https://github.com/search?utf8=%E2%9C%93&q=`
**Expected behavior**
`!!gh` should redirect to `https://github.com` if no search items
</issue>
<code>
[start of searx/external_bang.py]
1 # SPDX-License-Identifier: AGPL-3.0-or-later
2
3 from urllib.parse import quote_plus
4 from searx.data import EXTERNAL_BANGS
5
6 LEAF_KEY = chr(16)
7
8
9 def get_node(external_bangs_db, bang):
10 node = external_bangs_db['trie']
11 after = ''
12 before = ''
13 for bang_letter in bang:
14 after += bang_letter
15 if after in node and isinstance(node, dict):
16 node = node[after]
17 before += after
18 after = ''
19 return node, before, after
20
21
22 def get_bang_definition_and_ac(external_bangs_db, bang):
23 node, before, after = get_node(external_bangs_db, bang)
24
25 bang_definition = None
26 bang_ac_list = []
27 if after != '':
28 for k in node:
29 if k.startswith(after):
30 bang_ac_list.append(before + k)
31 elif isinstance(node, dict):
32 bang_definition = node.get(LEAF_KEY)
33 bang_ac_list = [before + k for k in node.keys() if k != LEAF_KEY]
34 elif isinstance(node, str):
35 bang_definition = node
36 bang_ac_list = []
37
38 return bang_definition, bang_ac_list
39
40
41 def resolve_bang_definition(bang_definition, query):
42 url, rank = bang_definition.split(chr(1))
43 url = url.replace(chr(2), quote_plus(query))
44 if url.startswith('//'):
45 url = 'https:' + url
46 rank = int(rank) if len(rank) > 0 else 0
47 return (url, rank)
48
49
50 def get_bang_definition_and_autocomplete(bang, external_bangs_db=None):
51 if external_bangs_db is None:
52 external_bangs_db = EXTERNAL_BANGS
53
54 bang_definition, bang_ac_list = get_bang_definition_and_ac(external_bangs_db, bang)
55
56 new_autocomplete = []
57 current = [*bang_ac_list]
58 done = set()
59 while len(current) > 0:
60 bang_ac = current.pop(0)
61 done.add(bang_ac)
62
63 current_bang_definition, current_bang_ac_list = get_bang_definition_and_ac(external_bangs_db, bang_ac)
64 if current_bang_definition:
65 _, order = resolve_bang_definition(current_bang_definition, '')
66 new_autocomplete.append((bang_ac, order))
67 for new_bang in current_bang_ac_list:
68 if new_bang not in done and new_bang not in current:
69 current.append(new_bang)
70
71 new_autocomplete.sort(key=lambda t: (-t[1], t[0]))
72 new_autocomplete = list(map(lambda t: t[0], new_autocomplete))
73
74 return bang_definition, new_autocomplete
75
76
77 def get_bang_url(search_query, external_bangs_db=None):
78 """
79 Redirects if the user supplied a correct bang search.
80 :param search_query: This is a search_query object which contains preferences and the submitted queries.
81 :return: None if the bang was invalid, else a string of the redirect url.
82 """
83 ret_val = None
84
85 if external_bangs_db is None:
86 external_bangs_db = EXTERNAL_BANGS
87
88 if search_query.external_bang:
89 bang_definition, _ = get_bang_definition_and_ac(external_bangs_db, search_query.external_bang)
90 if bang_definition and isinstance(bang_definition, str):
91 ret_val = resolve_bang_definition(bang_definition, search_query.query)[0]
92
93 return ret_val
94
[end of searx/external_bang.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/searx/external_bang.py b/searx/external_bang.py
--- a/searx/external_bang.py
+++ b/searx/external_bang.py
@@ -1,6 +1,6 @@
# SPDX-License-Identifier: AGPL-3.0-or-later
-from urllib.parse import quote_plus
+from urllib.parse import quote_plus, urlparse
from searx.data import EXTERNAL_BANGS
LEAF_KEY = chr(16)
@@ -40,9 +40,15 @@
def resolve_bang_definition(bang_definition, query):
url, rank = bang_definition.split(chr(1))
- url = url.replace(chr(2), quote_plus(query))
if url.startswith('//'):
url = 'https:' + url
+ if query:
+ url = url.replace(chr(2), quote_plus(query))
+ else:
+ # go to main instead of search page
+ o = urlparse(url)
+ url = o.scheme + '://' + o.netloc
+
rank = int(rank) if len(rank) > 0 else 0
return (url, rank)
|
{"golden_diff": "diff --git a/searx/external_bang.py b/searx/external_bang.py\n--- a/searx/external_bang.py\n+++ b/searx/external_bang.py\n@@ -1,6 +1,6 @@\n # SPDX-License-Identifier: AGPL-3.0-or-later\n \n-from urllib.parse import quote_plus\n+from urllib.parse import quote_plus, urlparse\n from searx.data import EXTERNAL_BANGS\n \n LEAF_KEY = chr(16)\n@@ -40,9 +40,15 @@\n \n def resolve_bang_definition(bang_definition, query):\n url, rank = bang_definition.split(chr(1))\n- url = url.replace(chr(2), quote_plus(query))\n if url.startswith('//'):\n url = 'https:' + url\n+ if query:\n+ url = url.replace(chr(2), quote_plus(query))\n+ else:\n+ # go to main instead of search page\n+ o = urlparse(url)\n+ url = o.scheme + '://' + o.netloc\n+\n rank = int(rank) if len(rank) > 0 else 0\n return (url, rank)\n", "issue": "Double bangs without query redirect to search URL instead of main page\n<!-- PLEASE FILL THESE FIELDS, IT REALLY HELPS THE MAINTAINERS OF SearXNG -->\r\n\r\n**Version of SearXNG, commit number if you are using on master branch and stipulate if you forked SearXNG**\r\n`2023.4.21+98387e29`\r\n\r\n**How did you install SearXNG?**\r\n`searxng-docker`\r\n\r\n**What happened?**\r\nDouble bangs without query redirect to search URL instead of main page\r\n\r\n**How To Reproduce**\r\nUse external bang without any search items e.g. `!!gh` and we land on `https://github.com/search?utf8=%E2%9C%93&q=` \r\n\r\n**Expected behavior**\r\n`!!gh` should redirect to `https://github.com` if no search items\n", "before_files": [{"content": "# SPDX-License-Identifier: AGPL-3.0-or-later\n\nfrom urllib.parse import quote_plus\nfrom searx.data import EXTERNAL_BANGS\n\nLEAF_KEY = chr(16)\n\n\ndef get_node(external_bangs_db, bang):\n node = external_bangs_db['trie']\n after = ''\n before = ''\n for bang_letter in bang:\n after += bang_letter\n if after in node and isinstance(node, dict):\n node = node[after]\n before += after\n after = ''\n return node, before, after\n\n\ndef get_bang_definition_and_ac(external_bangs_db, bang):\n node, before, after = get_node(external_bangs_db, bang)\n\n bang_definition = None\n bang_ac_list = []\n if after != '':\n for k in node:\n if k.startswith(after):\n bang_ac_list.append(before + k)\n elif isinstance(node, dict):\n bang_definition = node.get(LEAF_KEY)\n bang_ac_list = [before + k for k in node.keys() if k != LEAF_KEY]\n elif isinstance(node, str):\n bang_definition = node\n bang_ac_list = []\n\n return bang_definition, bang_ac_list\n\n\ndef resolve_bang_definition(bang_definition, query):\n url, rank = bang_definition.split(chr(1))\n url = url.replace(chr(2), quote_plus(query))\n if url.startswith('//'):\n url = 'https:' + url\n rank = int(rank) if len(rank) > 0 else 0\n return (url, rank)\n\n\ndef get_bang_definition_and_autocomplete(bang, external_bangs_db=None):\n if external_bangs_db is None:\n external_bangs_db = EXTERNAL_BANGS\n\n bang_definition, bang_ac_list = get_bang_definition_and_ac(external_bangs_db, bang)\n\n new_autocomplete = []\n current = [*bang_ac_list]\n done = set()\n while len(current) > 0:\n bang_ac = current.pop(0)\n done.add(bang_ac)\n\n current_bang_definition, current_bang_ac_list = get_bang_definition_and_ac(external_bangs_db, bang_ac)\n if current_bang_definition:\n _, order = resolve_bang_definition(current_bang_definition, '')\n new_autocomplete.append((bang_ac, order))\n for new_bang in current_bang_ac_list:\n if new_bang not in done and new_bang not in current:\n current.append(new_bang)\n\n new_autocomplete.sort(key=lambda t: (-t[1], 
t[0]))\n new_autocomplete = list(map(lambda t: t[0], new_autocomplete))\n\n return bang_definition, new_autocomplete\n\n\ndef get_bang_url(search_query, external_bangs_db=None):\n \"\"\"\n Redirects if the user supplied a correct bang search.\n :param search_query: This is a search_query object which contains preferences and the submitted queries.\n :return: None if the bang was invalid, else a string of the redirect url.\n \"\"\"\n ret_val = None\n\n if external_bangs_db is None:\n external_bangs_db = EXTERNAL_BANGS\n\n if search_query.external_bang:\n bang_definition, _ = get_bang_definition_and_ac(external_bangs_db, search_query.external_bang)\n if bang_definition and isinstance(bang_definition, str):\n ret_val = resolve_bang_definition(bang_definition, search_query.query)[0]\n\n return ret_val\n", "path": "searx/external_bang.py"}]}
| 1,674 | 253 |
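The golden diff above makes `resolve_bang_definition` fall back to the scheme and host of the bang URL when the query is empty, instead of filling the placeholder with nothing. A self-contained sketch of the patched behaviour using only the standard library (the sample bang definition is illustrative, not the real database entry):

```python
from urllib.parse import quote_plus, urlparse

def resolve_bang_definition(bang_definition, query):
    # chr(1) separates the URL from the rank; chr(2) marks the query slot.
    url, rank = bang_definition.split(chr(1))
    if url.startswith('//'):
        url = 'https:' + url
    if query:
        url = url.replace(chr(2), quote_plus(query))
    else:
        # Empty query: redirect to the site's main page, not an empty search.
        parsed = urlparse(url)
        url = parsed.scheme + '://' + parsed.netloc
    return url, int(rank) if rank else 0

gh = '//github.com/search?utf8=%E2%9C%93&q=' + chr(2) + chr(1) + '0'
assert resolve_bang_definition(gh, 'streamlink')[0].endswith('q=streamlink')
assert resolve_bang_definition(gh, '')[0] == 'https://github.com'
```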
gh_patches_debug_2243
|
rasdani/github-patches
|
git_diff
|
streamlink__streamlink-5023
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
plugins.vtvgo: '403 Client Error: Forbidden for url: ...'
### Checklist
- [X] This is a plugin issue and not a different kind of issue
- [X] [I have read the contribution guidelines](https://github.com/streamlink/streamlink/blob/master/CONTRIBUTING.md#contributing-to-streamlink)
- [X] [I have checked the list of open and recently closed plugin issues](https://github.com/streamlink/streamlink/issues?q=is%3Aissue+label%3A%22plugin+issue%22)
- [X] [I have checked the commit log of the master branch](https://github.com/streamlink/streamlink/commits/master)
### Streamlink version
Latest build from the master branch
### Description
Last month VtvGo added cookie requirements for the stream playlist, and now it seems that they added another security layer. The request to the website returns error 403.
### Debug log
```text
streamlink https://vtvgo.vn/xem-truc-tuyen-kenh-vtv3-3.html --loglevel=debug
[cli][debug] OS: Linux-5.15.0-53-generic-x86_64-with-glibc2.35
[cli][debug] Python: 3.10.6
[cli][debug] Streamlink: 5.1.2+4.g68dad105
[cli][debug] Dependencies:
[cli][debug] certifi: 2022.9.24
[cli][debug] isodate: 0.6.1
[cli][debug] lxml: 4.9.1
[cli][debug] pycountry: 22.3.5
[cli][debug] pycryptodome: 3.15.0
[cli][debug] PySocks: 1.7.1
[cli][debug] requests: 2.28.1
[cli][debug] urllib3: 1.26.12
[cli][debug] websocket-client: 1.4.1
[cli][debug] importlib-metadata: 4.6.4
[cli][debug] Arguments:
[cli][debug] url=https://vtvgo.vn/xem-truc-tuyen-kenh-vtv3-3.html
[cli][debug] --loglevel=debug
[cli][info] Found matching plugin vtvgo for URL https://vtvgo.vn/xem-truc-tuyen-kenh-vtv3-3.html
error: Unable to open URL: https://vtvgo.vn/ajax-get-stream (403 Client Error: Forbidden for url: https://vtvgo.vn/ajax-get-stream)
```
</issue>
<code>
[start of src/streamlink/plugins/vtvgo.py]
1 """
2 $description Live TV channels from VTV, a Vietnamese public, state-owned broadcaster.
3 $url vtvgo.vn
4 $type live
5 """
6
7 import logging
8 import re
9
10 from streamlink.plugin import Plugin, pluginmatcher
11 from streamlink.plugin.api import validate
12 from streamlink.stream.hls import HLSStream
13
14 log = logging.getLogger(__name__)
15
16
17 @pluginmatcher(re.compile(
18 r"https?://vtvgo\.vn/xem-truc-tuyen-kenh-"
19 ))
20 class VTVgo(Plugin):
21 AJAX_URL = "https://vtvgo.vn/ajax-get-stream"
22
23 def _get_streams(self):
24 # get cookies
25 self.session.http.get("https://vtvgo.vn/")
26
27 self.session.http.headers.update({
28 "Origin": "https://vtvgo.vn",
29 "Referer": self.url,
30 "X-Requested-With": "XMLHttpRequest",
31 })
32
33 params = self.session.http.get(self.url, schema=validate.Schema(
34 validate.parse_html(),
35 validate.xml_xpath_string(".//script[contains(text(),'setplayer(')][1]/text()"),
36 validate.none_or_all(
37 validate.regex(
38 re.compile(r"""var\s+(?P<key>(?:type_)?id|time|token)\s*=\s*["']?(?P<value>[^"']+)["']?;"""),
39 method="findall",
40 ),
41 [
42 ("id", int),
43 ("type_id", str),
44 ("time", str),
45 ("token", str),
46 ],
47 ),
48 ))
49 if not params:
50 return
51
52 log.trace(f"{params!r}")
53 hls_url = self.session.http.post(
54 self.AJAX_URL,
55 data=dict(params),
56 schema=validate.Schema(
57 validate.parse_json(),
58 {"stream_url": [validate.url()]},
59 validate.get(("stream_url", 0)),
60 ),
61 )
62
63 return HLSStream.parse_variant_playlist(self.session, hls_url)
64
65
66 __plugin__ = VTVgo
67
[end of src/streamlink/plugins/vtvgo.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/src/streamlink/plugins/vtvgo.py b/src/streamlink/plugins/vtvgo.py
--- a/src/streamlink/plugins/vtvgo.py
+++ b/src/streamlink/plugins/vtvgo.py
@@ -27,6 +27,7 @@
self.session.http.headers.update({
"Origin": "https://vtvgo.vn",
"Referer": self.url,
+ "Sec-Fetch-Site": "same-origin",
"X-Requested-With": "XMLHttpRequest",
})
|
{"golden_diff": "diff --git a/src/streamlink/plugins/vtvgo.py b/src/streamlink/plugins/vtvgo.py\n--- a/src/streamlink/plugins/vtvgo.py\n+++ b/src/streamlink/plugins/vtvgo.py\n@@ -27,6 +27,7 @@\n self.session.http.headers.update({\n \"Origin\": \"https://vtvgo.vn\",\n \"Referer\": self.url,\n+ \"Sec-Fetch-Site\": \"same-origin\",\n \"X-Requested-With\": \"XMLHttpRequest\",\n })\n", "issue": "plugins.vtvgo: '403 Client Error: Forbidden for url: ...'\n### Checklist\n\n- [X] This is a plugin issue and not a different kind of issue\n- [X] [I have read the contribution guidelines](https://github.com/streamlink/streamlink/blob/master/CONTRIBUTING.md#contributing-to-streamlink)\n- [X] [I have checked the list of open and recently closed plugin issues](https://github.com/streamlink/streamlink/issues?q=is%3Aissue+label%3A%22plugin+issue%22)\n- [X] [I have checked the commit log of the master branch](https://github.com/streamlink/streamlink/commits/master)\n\n### Streamlink version\n\nLatest build from the master branch\n\n### Description\n\nLast month VtvGo added cookie requirements for the stream playlist, and now it seems that they added another security layer. The request to the website returns error 403.\n\n### Debug log\n\n```text\nstreamlink https://vtvgo.vn/xem-truc-tuyen-kenh-vtv3-3.html --loglevel=debug\r\n[cli][debug] OS: Linux-5.15.0-53-generic-x86_64-with-glibc2.35\r\n[cli][debug] Python: 3.10.6\r\n[cli][debug] Streamlink: 5.1.2+4.g68dad105\r\n[cli][debug] Dependencies:\r\n[cli][debug] certifi: 2022.9.24\r\n[cli][debug] isodate: 0.6.1\r\n[cli][debug] lxml: 4.9.1\r\n[cli][debug] pycountry: 22.3.5\r\n[cli][debug] pycryptodome: 3.15.0\r\n[cli][debug] PySocks: 1.7.1\r\n[cli][debug] requests: 2.28.1\r\n[cli][debug] urllib3: 1.26.12\r\n[cli][debug] websocket-client: 1.4.1\r\n[cli][debug] importlib-metadata: 4.6.4\r\n[cli][debug] Arguments:\r\n[cli][debug] url=https://vtvgo.vn/xem-truc-tuyen-kenh-vtv3-3.html\r\n[cli][debug] --loglevel=debug\r\n[cli][info] Found matching plugin vtvgo for URL https://vtvgo.vn/xem-truc-tuyen-kenh-vtv3-3.html\r\nerror: Unable to open URL: https://vtvgo.vn/ajax-get-stream (403 Client Error: Forbidden for url: https://vtvgo.vn/ajax-get-stream)\n```\n\n", "before_files": [{"content": "\"\"\"\n$description Live TV channels from VTV, a Vietnamese public, state-owned broadcaster.\n$url vtvgo.vn\n$type live\n\"\"\"\n\nimport logging\nimport re\n\nfrom streamlink.plugin import Plugin, pluginmatcher\nfrom streamlink.plugin.api import validate\nfrom streamlink.stream.hls import HLSStream\n\nlog = logging.getLogger(__name__)\n\n\n@pluginmatcher(re.compile(\n r\"https?://vtvgo\\.vn/xem-truc-tuyen-kenh-\"\n))\nclass VTVgo(Plugin):\n AJAX_URL = \"https://vtvgo.vn/ajax-get-stream\"\n\n def _get_streams(self):\n # get cookies\n self.session.http.get(\"https://vtvgo.vn/\")\n\n self.session.http.headers.update({\n \"Origin\": \"https://vtvgo.vn\",\n \"Referer\": self.url,\n \"X-Requested-With\": \"XMLHttpRequest\",\n })\n\n params = self.session.http.get(self.url, schema=validate.Schema(\n validate.parse_html(),\n validate.xml_xpath_string(\".//script[contains(text(),'setplayer(')][1]/text()\"),\n validate.none_or_all(\n validate.regex(\n re.compile(r\"\"\"var\\s+(?P<key>(?:type_)?id|time|token)\\s*=\\s*[\"']?(?P<value>[^\"']+)[\"']?;\"\"\"),\n method=\"findall\",\n ),\n [\n (\"id\", int),\n (\"type_id\", str),\n (\"time\", str),\n (\"token\", str),\n ],\n ),\n ))\n if not params:\n return\n\n log.trace(f\"{params!r}\")\n hls_url = self.session.http.post(\n self.AJAX_URL,\n 
data=dict(params),\n schema=validate.Schema(\n validate.parse_json(),\n {\"stream_url\": [validate.url()]},\n validate.get((\"stream_url\", 0)),\n ),\n )\n\n return HLSStream.parse_variant_playlist(self.session, hls_url)\n\n\n__plugin__ = VTVgo\n", "path": "src/streamlink/plugins/vtvgo.py"}]}
| 1,708 | 108 |
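The one-line fix above adds `Sec-Fetch-Site: same-origin` to the headers sent with the `ajax-get-stream` request, presumably because the site started validating the Sec-Fetch-* headers on top of the cookie requirement it introduced earlier. A minimal reproduction of the patched request flow outside Streamlink, assuming plain `requests` (the POST payload normally carries the `id`/`type_id`/`time`/`token` values scraped from the channel page, omitted here):

```python
import requests

session = requests.Session()
session.get("https://vtvgo.vn/")  # pick up the cookies the site expects
session.headers.update({
    "Origin": "https://vtvgo.vn",
    "Referer": "https://vtvgo.vn/xem-truc-tuyen-kenh-vtv3-3.html",
    "Sec-Fetch-Site": "same-origin",   # header added by the golden diff
    "X-Requested-With": "XMLHttpRequest",
})
# Without Sec-Fetch-Site this POST is answered with 403 Forbidden.
response = session.post("https://vtvgo.vn/ajax-get-stream", data={})
print(response.status_code)
```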
gh_patches_debug_7931
|
rasdani/github-patches
|
git_diff
|
conan-io__conan-center-index-14099
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[service] msbuild package template has import incompatible with Conan 2.0
At https://github.com/conan-io/conan-center-index/blob/53ef0688c436c6697d8312d47fcdef68fe7e6955/docs/package_templates/msbuild_package/all/conanfile.py#L4, `vs_layout` is imported from `conan.tools.layout`. However, when using this import in [my PR](https://github.com/conan-io/conan-center-index/pull/13646), the CI informed me that the recipe was not ready for Conan v2, citing this import error:
```
ERROR: Error loading conanfile at '/home/conan/w/prod_cci_PR-13646/recipes/lzham/all/conanfile.py': Unable to load conanfile in /home/conan/w/prod_cci_PR-13646/recipes/lzham/all/conanfile.py
File "<frozen importlib._bootstrap_external>", line 728, in exec_module
File "<frozen importlib._bootstrap>", line 219, in _call_with_frames_removed
File "/home/conan/w/prod_cci_PR-13646/recipes/lzham/all/conanfile.py", line 8, in <module>
from conan.tools.layout import vs_layout
ImportError: cannot import name 'vs_layout' from 'conan.tools.layout' (/opt/pyenv/versions/3.7.13/lib/python3.7/site-packages/conan/tools/layout/__init__.py)
```
Therefore, it seems the msbuild template should import `vs_layout` from `conan.tools.microsoft` instead.
</issue>
<code>
[start of docs/package_templates/msbuild_package/all/conanfile.py]
1 from conan import ConanFile
2 from conan.errors import ConanInvalidConfiguration
3 from conan.tools.microsoft import is_msvc, MSBuildDeps, MSBuildToolchain, MSBuild, VCVars
4 from conan.tools.layout import vs_layout
5 from conan.tools.files import apply_conandata_patches, export_conandata_patches, get, copy, rm, replace_in_file
6 import os
7
8
9 required_conan_version = ">=1.53.0"
10
11
12 class PackageConan(ConanFile):
13 name = "package"
14 description = "short description"
15 # Use short name only, conform to SPDX License List: https://spdx.org/licenses/
16 # In case not listed there, use "LicenseRef-<license-file-name>"
17 license = ""
18 url = "https://github.com/conan-io/conan-center-index"
19 homepage = "https://github.com/project/package"
20 # no "conan" and project name in topics. Use topics from the upstream listed on GH
21 topics = ("topic1", "topic2", "topic3")
22 settings = "os", "arch", "compiler", "build_type"
23 options = {
24 "shared": [True, False],
25 "fPIC": [True, False],
26 }
27 default_options = {
28 "shared": False,
29 "fPIC": True,
30 }
31
32 # no exports_sources attribute, but export_sources(self) method instead
33 # this allows finer grain exportation of patches per version
34 def export_sources(self):
35 export_conandata_patches(self)
36
37 def config_options(self):
38 if self.settings.os == "Windows":
39 del self.options.fPIC
40
41 def configure(self):
42 if self.options.shared:
43 self.options.rm_safe("fPIC")
44 # for plain C projects only
45 self.settings.rm_safe("compiler.libcxx")
46 self.settings.rm_safe("compiler.cppstd")
47
48 def layout(self):
49 vs_layout(self)
50
51 def requirements(self):
52 # prefer self.requires method instead of requires attribute
53 self.requires("dependency/0.8.1")
54
55 def validate(self):
56 # in case it does not work in another configuration, it should validated here too
57 if not is_msvc(self):
58 raise ConanInvalidConfiguration(f"{self.ref} can be built only by Visual Studio and msvc.")
59
60 # if another tool than the compiler or CMake is required to build the project (pkgconf, bison, flex etc)
61 def build_requirements(self):
62 self.tool_requires("tool/x.y.z")
63
64 def source(self):
65 get(self, **self.conan_data["sources"][self.version], destination=self.source_folder, strip_root=True)
66
67 def generate(self):
68 tc = MSBuildToolchain(self)
69 tc.generate()
70 tc = MSBuildDeps(self)
71 tc.generate()
72 tc = VCVars(self)
73 tc.generate()
74
75 def _patch_sources(self):
76 apply_conandata_patches(self)
77 # remove bundled xxhash
78 rm(self, "whateer.*", os.path.join(self.source_folder, "lib"))
79 replace_in_file(self, os.path.join(self.source_folder, "CMakeLists.txt"), "...", "")
80
81 def build(self):
82 self._patch_sources() # It can be apply_conandata_patches(self) only in case no more patches are needed
83 msbuild = MSBuild(self)
84 # customize to Release when RelWithDebInfo
85 msbuild.build_type = "Debug" if self.settings.build_type == "Debug" else "Release"
86 # use Win32 instead of the default value when building x86
87 msbuild.platform = "Win32" if self.settings.arch == "x86" else msbuild.platform
88 # customize according the solution file and compiler version
89 msbuild.build(sln="project_2017.sln")
90
91 def package(self):
92 copy(self, pattern="LICENSE", dst=os.path.join(self.package_folder, "licenses"), src=self.source_folder)
93 copy(
94 self, pattern="*.lib", dst=os.path.join(self.package_folder, "lib"), src=self.build_folder, keep_path=False
95 )
96 copy(
97 self, pattern="*.dll", dst=os.path.join(self.package_folder, "bin"), src=self.build_folder, keep_path=False
98 )
99 copy(
100 self,
101 pattern="*.h",
102 dst=os.path.join(self.package_folder, "include"),
103 src=os.path.join(self.source_folder, "include"),
104 )
105
106 def package_info(self):
107 self.cpp_info.libs = ["package_lib"]
108
[end of docs/package_templates/msbuild_package/all/conanfile.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/docs/package_templates/msbuild_package/all/conanfile.py b/docs/package_templates/msbuild_package/all/conanfile.py
--- a/docs/package_templates/msbuild_package/all/conanfile.py
+++ b/docs/package_templates/msbuild_package/all/conanfile.py
@@ -1,7 +1,6 @@
from conan import ConanFile
from conan.errors import ConanInvalidConfiguration
-from conan.tools.microsoft import is_msvc, MSBuildDeps, MSBuildToolchain, MSBuild, VCVars
-from conan.tools.layout import vs_layout
+from conan.tools.microsoft import is_msvc, vs_layout, MSBuildDeps, MSBuildToolchain, MSBuild, VCVars
from conan.tools.files import apply_conandata_patches, export_conandata_patches, get, copy, rm, replace_in_file
import os
|
{"golden_diff": "diff --git a/docs/package_templates/msbuild_package/all/conanfile.py b/docs/package_templates/msbuild_package/all/conanfile.py\n--- a/docs/package_templates/msbuild_package/all/conanfile.py\n+++ b/docs/package_templates/msbuild_package/all/conanfile.py\n@@ -1,7 +1,6 @@\n from conan import ConanFile\n from conan.errors import ConanInvalidConfiguration\n-from conan.tools.microsoft import is_msvc, MSBuildDeps, MSBuildToolchain, MSBuild, VCVars\n-from conan.tools.layout import vs_layout\n+from conan.tools.microsoft import is_msvc, vs_layout, MSBuildDeps, MSBuildToolchain, MSBuild, VCVars\n from conan.tools.files import apply_conandata_patches, export_conandata_patches, get, copy, rm, replace_in_file\n import os\n", "issue": "[service] msbuild package template has import incompatible with Conan 2.0\nAt https://github.com/conan-io/conan-center-index/blob/53ef0688c436c6697d8312d47fcdef68fe7e6955/docs/package_templates/msbuild_package/all/conanfile.py#L4, `vs_layout` is imported from `conan.tools.layout`. However, when using this import in [my PR](https://github.com/conan-io/conan-center-index/pull/13646), the CI informed me that the recipe was not ready for Conan v2, citing this import error:\r\n\r\n```\r\nERROR: Error loading conanfile at '/home/conan/w/prod_cci_PR-13646/recipes/lzham/all/conanfile.py': Unable to load conanfile in /home/conan/w/prod_cci_PR-13646/recipes/lzham/all/conanfile.py\r\n File \"<frozen importlib._bootstrap_external>\", line 728, in exec_module\r\n File \"<frozen importlib._bootstrap>\", line 219, in _call_with_frames_removed\r\n File \"/home/conan/w/prod_cci_PR-13646/recipes/lzham/all/conanfile.py\", line 8, in <module>\r\n from conan.tools.layout import vs_layout\r\nImportError: cannot import name 'vs_layout' from 'conan.tools.layout' (/opt/pyenv/versions/3.7.13/lib/python3.7/site-packages/conan/tools/layout/__init__.py)\r\n```\r\nTherefore, it seems the msbuild template should import `vs_layout` from `conan.tools.microsoft` instead.\r\n\n", "before_files": [{"content": "from conan import ConanFile\nfrom conan.errors import ConanInvalidConfiguration\nfrom conan.tools.microsoft import is_msvc, MSBuildDeps, MSBuildToolchain, MSBuild, VCVars\nfrom conan.tools.layout import vs_layout\nfrom conan.tools.files import apply_conandata_patches, export_conandata_patches, get, copy, rm, replace_in_file\nimport os\n\n\nrequired_conan_version = \">=1.53.0\"\n\n\nclass PackageConan(ConanFile):\n name = \"package\"\n description = \"short description\"\n # Use short name only, conform to SPDX License List: https://spdx.org/licenses/\n # In case not listed there, use \"LicenseRef-<license-file-name>\"\n license = \"\"\n url = \"https://github.com/conan-io/conan-center-index\"\n homepage = \"https://github.com/project/package\"\n # no \"conan\" and project name in topics. 
Use topics from the upstream listed on GH\n topics = (\"topic1\", \"topic2\", \"topic3\")\n settings = \"os\", \"arch\", \"compiler\", \"build_type\"\n options = {\n \"shared\": [True, False],\n \"fPIC\": [True, False],\n }\n default_options = {\n \"shared\": False,\n \"fPIC\": True,\n }\n\n # no exports_sources attribute, but export_sources(self) method instead\n # this allows finer grain exportation of patches per version\n def export_sources(self):\n export_conandata_patches(self)\n\n def config_options(self):\n if self.settings.os == \"Windows\":\n del self.options.fPIC\n\n def configure(self):\n if self.options.shared:\n self.options.rm_safe(\"fPIC\")\n # for plain C projects only\n self.settings.rm_safe(\"compiler.libcxx\")\n self.settings.rm_safe(\"compiler.cppstd\")\n\n def layout(self):\n vs_layout(self)\n\n def requirements(self):\n # prefer self.requires method instead of requires attribute\n self.requires(\"dependency/0.8.1\")\n\n def validate(self):\n # in case it does not work in another configuration, it should validated here too\n if not is_msvc(self):\n raise ConanInvalidConfiguration(f\"{self.ref} can be built only by Visual Studio and msvc.\")\n\n # if another tool than the compiler or CMake is required to build the project (pkgconf, bison, flex etc)\n def build_requirements(self):\n self.tool_requires(\"tool/x.y.z\")\n\n def source(self):\n get(self, **self.conan_data[\"sources\"][self.version], destination=self.source_folder, strip_root=True)\n\n def generate(self):\n tc = MSBuildToolchain(self)\n tc.generate()\n tc = MSBuildDeps(self)\n tc.generate()\n tc = VCVars(self)\n tc.generate()\n\n def _patch_sources(self):\n apply_conandata_patches(self)\n # remove bundled xxhash\n rm(self, \"whateer.*\", os.path.join(self.source_folder, \"lib\"))\n replace_in_file(self, os.path.join(self.source_folder, \"CMakeLists.txt\"), \"...\", \"\")\n\n def build(self):\n self._patch_sources() # It can be apply_conandata_patches(self) only in case no more patches are needed\n msbuild = MSBuild(self)\n # customize to Release when RelWithDebInfo\n msbuild.build_type = \"Debug\" if self.settings.build_type == \"Debug\" else \"Release\"\n # use Win32 instead of the default value when building x86\n msbuild.platform = \"Win32\" if self.settings.arch == \"x86\" else msbuild.platform\n # customize according the solution file and compiler version\n msbuild.build(sln=\"project_2017.sln\")\n\n def package(self):\n copy(self, pattern=\"LICENSE\", dst=os.path.join(self.package_folder, \"licenses\"), src=self.source_folder)\n copy(\n self, pattern=\"*.lib\", dst=os.path.join(self.package_folder, \"lib\"), src=self.build_folder, keep_path=False\n )\n copy(\n self, pattern=\"*.dll\", dst=os.path.join(self.package_folder, \"bin\"), src=self.build_folder, keep_path=False\n )\n copy(\n self,\n pattern=\"*.h\",\n dst=os.path.join(self.package_folder, \"include\"),\n src=os.path.join(self.source_folder, \"include\"),\n )\n\n def package_info(self):\n self.cpp_info.libs = [\"package_lib\"]\n", "path": "docs/package_templates/msbuild_package/all/conanfile.py"}]}
| 2,119 | 180 |
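The fix above is purely an import move: under Conan 2.0, `vs_layout` is exported from `conan.tools.microsoft`, not `conan.tools.layout`. A pared-down recipe skeleton showing the corrected import and the one place it is used (the class body is trimmed to the relevant method):

```python
from conan import ConanFile
from conan.tools.microsoft import vs_layout  # was: from conan.tools.layout import vs_layout


class PackageConan(ConanFile):
    settings = "os", "arch", "compiler", "build_type"

    def layout(self):
        vs_layout(self)  # call is unchanged; only the import path moves
```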
gh_patches_debug_29824
|
rasdani/github-patches
|
git_diff
|
microsoft__Qcodes-531
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Keithley2600 driver broken after commit 460c93c71506213102e56d3ea0c518723666d56a
### Steps to reproduce
1. Connect Keithley 2600
2. ask it for output status or mode
3. doh
### Expected behaviour
tell me what i asked for
### Actual behaviour
error because parsing fails
issue introduced in commit 460c93c71506213102e56d3ea0c518723666d56a (@alexcjohnson)
### System
**operating system**
W10
**qcodes branch**
master
**qcodes commit**
177f8201b3778c0071c60d4e4c658449284e1a6c
</issue>
<code>
[start of qcodes/instrument_drivers/tektronix/Keithley_2600.py]
1 from qcodes import VisaInstrument
2
3
4 class Keithley_2600(VisaInstrument):
5 """
6 channel: use channel 'a' or 'b'
7
8 This is the qcodes driver for the Keithley_2600 Source-Meter series,
9 tested with Keithley_2614B
10
11 Status: beta-version.
12 TODO:
13 - Add all parameters that are in the manual
14 - range and limit should be set according to mode
15 - add ramping and such stuff
16
17 """
18 def __init__(self, name, address, channel, **kwargs):
19 super().__init__(name, address, terminator='\n', **kwargs)
20 self._channel = channel
21
22 self.add_parameter('volt', get_cmd='measure.v()',
23 get_parser=float, set_cmd='source.levelv={:.12f}',
24 label='Voltage',
25 unit='V')
26 self.add_parameter('curr', get_cmd='measure.i()',
27 get_parser=float, set_cmd='source.leveli={:.12f}',
28 label='Current',
29 unit='A')
30 self.add_parameter('mode',
31 get_cmd='source.func',
32 set_cmd='source.func={:d}',
33 val_mapping={'current': 0, 'voltage': 1})
34 self.add_parameter('output',
35 get_cmd='source.output',
36 set_cmd='source.output={:d}',
37 val_mapping={'on': 1, 'off': 0})
38 # Source range
39 # needs get after set
40 self.add_parameter('rangev',
41 get_cmd='source.rangev',
42 get_parser=float,
43 set_cmd='source.rangev={:.4f}',
44 unit='V')
45 # Measure range
46 # needs get after set
47 self.add_parameter('rangei',
48 get_cmd='source.rangei',
49 get_parser=float,
50 set_cmd='source.rangei={:.4f}',
51 unit='A')
52 # Compliance limit
53 self.add_parameter('limitv',
54 get_cmd='source.limitv',
55 get_parser=float,
56 set_cmd='source.limitv={:.4f}',
57 unit='V')
58 # Compliance limit
59 self.add_parameter('limiti',
60 get_cmd='source.limiti',
61 get_parser=float,
62 set_cmd='source.limiti={:.4f}',
63 unit='A')
64
65 self.connect_message()
66
67 def get_idn(self):
68 IDN = self.ask_raw('*IDN?')
69 vendor, model, serial, firmware = map(str.strip, IDN.split(','))
70 model = model[6:]
71
72 IDN = {'vendor': vendor, 'model': model,
73 'serial': serial, 'firmware': firmware}
74 return IDN
75
76 def reset(self):
77 self.write('reset()')
78
79 def ask(self, cmd):
80 return super().ask('print(smu{:s}.{:s})'.format(self._channel, cmd))
81
82 def write(self, cmd):
83 super().write('smu{:s}.{:s}'.format(self._channel, cmd))
84
[end of qcodes/instrument_drivers/tektronix/Keithley_2600.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/qcodes/instrument_drivers/tektronix/Keithley_2600.py b/qcodes/instrument_drivers/tektronix/Keithley_2600.py
--- a/qcodes/instrument_drivers/tektronix/Keithley_2600.py
+++ b/qcodes/instrument_drivers/tektronix/Keithley_2600.py
@@ -19,20 +19,26 @@
super().__init__(name, address, terminator='\n', **kwargs)
self._channel = channel
- self.add_parameter('volt', get_cmd='measure.v()',
- get_parser=float, set_cmd='source.levelv={:.12f}',
+ self.add_parameter('volt',
+ get_cmd='measure.v()',
+ get_parser=float,
+ set_cmd='source.levelv={:.12f}',
label='Voltage',
unit='V')
- self.add_parameter('curr', get_cmd='measure.i()',
- get_parser=float, set_cmd='source.leveli={:.12f}',
+ self.add_parameter('curr',
+ get_cmd='measure.i()',
+ get_parser=float,
+ set_cmd='source.leveli={:.12f}',
label='Current',
unit='A')
self.add_parameter('mode',
get_cmd='source.func',
+ get_parser=float,
set_cmd='source.func={:d}',
val_mapping={'current': 0, 'voltage': 1})
self.add_parameter('output',
get_cmd='source.output',
+ get_parser=float,
set_cmd='source.output={:d}',
val_mapping={'on': 1, 'off': 0})
# Source range
|
{"golden_diff": "diff --git a/qcodes/instrument_drivers/tektronix/Keithley_2600.py b/qcodes/instrument_drivers/tektronix/Keithley_2600.py\n--- a/qcodes/instrument_drivers/tektronix/Keithley_2600.py\n+++ b/qcodes/instrument_drivers/tektronix/Keithley_2600.py\n@@ -19,20 +19,26 @@\n super().__init__(name, address, terminator='\\n', **kwargs)\n self._channel = channel\n \n- self.add_parameter('volt', get_cmd='measure.v()',\n- get_parser=float, set_cmd='source.levelv={:.12f}',\n+ self.add_parameter('volt',\n+ get_cmd='measure.v()',\n+ get_parser=float,\n+ set_cmd='source.levelv={:.12f}',\n label='Voltage',\n unit='V')\n- self.add_parameter('curr', get_cmd='measure.i()',\n- get_parser=float, set_cmd='source.leveli={:.12f}',\n+ self.add_parameter('curr',\n+ get_cmd='measure.i()',\n+ get_parser=float,\n+ set_cmd='source.leveli={:.12f}',\n label='Current',\n unit='A')\n self.add_parameter('mode',\n get_cmd='source.func',\n+ get_parser=float,\n set_cmd='source.func={:d}',\n val_mapping={'current': 0, 'voltage': 1})\n self.add_parameter('output',\n get_cmd='source.output',\n+ get_parser=float,\n set_cmd='source.output={:d}',\n val_mapping={'on': 1, 'off': 0})\n # Source range\n", "issue": "Keithley2600 driver broken after commit 460c93c71506213102e56d3ea0c518723666d56a\n\r\n### Steps to reproduce\r\n1. Connect Keithley 2600\r\n2. ask it for output status or mode\r\n3. doh\r\n\r\n### Expected behaviour\r\ntell me what i asked for\r\n\r\n### Actual behaviour\r\nerror because parsing fails\r\nissue introduced in commit 460c93c71506213102e56d3ea0c518723666d56a (@alexcjohnson)\r\n\r\n\r\n### System\r\n**operating system**\r\nW10\r\n**qcodes branch**\r\nmaster\r\n**qcodes commit**\r\n177f8201b3778c0071c60d4e4c658449284e1a6c \n", "before_files": [{"content": "from qcodes import VisaInstrument\n\n\nclass Keithley_2600(VisaInstrument):\n \"\"\"\n channel: use channel 'a' or 'b'\n\n This is the qcodes driver for the Keithley_2600 Source-Meter series,\n tested with Keithley_2614B\n\n Status: beta-version.\n TODO:\n - Add all parameters that are in the manual\n - range and limit should be set according to mode\n - add ramping and such stuff\n\n \"\"\"\n def __init__(self, name, address, channel, **kwargs):\n super().__init__(name, address, terminator='\\n', **kwargs)\n self._channel = channel\n\n self.add_parameter('volt', get_cmd='measure.v()',\n get_parser=float, set_cmd='source.levelv={:.12f}',\n label='Voltage',\n unit='V')\n self.add_parameter('curr', get_cmd='measure.i()',\n get_parser=float, set_cmd='source.leveli={:.12f}',\n label='Current',\n unit='A')\n self.add_parameter('mode',\n get_cmd='source.func',\n set_cmd='source.func={:d}',\n val_mapping={'current': 0, 'voltage': 1})\n self.add_parameter('output',\n get_cmd='source.output',\n set_cmd='source.output={:d}',\n val_mapping={'on': 1, 'off': 0})\n # Source range\n # needs get after set\n self.add_parameter('rangev',\n get_cmd='source.rangev',\n get_parser=float,\n set_cmd='source.rangev={:.4f}',\n unit='V')\n # Measure range\n # needs get after set\n self.add_parameter('rangei',\n get_cmd='source.rangei',\n get_parser=float,\n set_cmd='source.rangei={:.4f}',\n unit='A')\n # Compliance limit\n self.add_parameter('limitv',\n get_cmd='source.limitv',\n get_parser=float,\n set_cmd='source.limitv={:.4f}',\n unit='V')\n # Compliance limit\n self.add_parameter('limiti',\n get_cmd='source.limiti',\n get_parser=float,\n set_cmd='source.limiti={:.4f}',\n unit='A')\n\n self.connect_message()\n\n def get_idn(self):\n IDN = self.ask_raw('*IDN?')\n vendor, 
model, serial, firmware = map(str.strip, IDN.split(','))\n model = model[6:]\n\n IDN = {'vendor': vendor, 'model': model,\n 'serial': serial, 'firmware': firmware}\n return IDN\n\n def reset(self):\n self.write('reset()')\n\n def ask(self, cmd):\n return super().ask('print(smu{:s}.{:s})'.format(self._channel, cmd))\n\n def write(self, cmd):\n super().write('smu{:s}.{:s}'.format(self._channel, cmd))\n", "path": "qcodes/instrument_drivers/tektronix/Keithley_2600.py"}]}
| 1,599 | 382 |
gh_patches_debug_26054
|
rasdani/github-patches
|
git_diff
|
encode__uvicorn-636
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
--proxy-headers with supervisor get client ip failed
#### uvicorn version
`Running uvicorn 0.11.3 with CPython 3.8.2 on Linux`
#### supervisor config
```
[fcgi-program:uvicorn]
socket=tcp://0.0.0.0:5000
directory=/var/www/webapp/
command=/usr/local/python3/bin/uvicorn --fd 0 --proxy-headers main:app
numprocs=2
process_name=uvicorn-%(process_num)d
stdout_logfile_maxbytes=0
stdout_logfile=/var/log/uvicorn.log
```
### fastapi
version 0.52.0
```
# cat /var/www/webapp/main.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from fastapi import FastAPI, Request
app = FastAPI(redoc_url=None)
@app.get('/')
def index(request: Request):
return 'hello {}!'.format(request.client.host)
```
### run on supervisor
supervisord version 4.1.0
```
supervisord -n
curl http://127.0.0.1:5000
```
Why is client.host None?
</issue>
<code>
[start of uvicorn/protocols/utils.py]
1 import socket
2
3
4 def get_remote_addr(transport):
5 socket_info = transport.get_extra_info("socket")
6 if socket_info is not None:
7 try:
8 info = socket_info.getpeername()
9 except OSError:
10 # This case appears to inconsistently occur with uvloop
11 # bound to a unix domain socket.
12 family = None
13 info = None
14 else:
15 family = socket_info.family
16
17 if family in (socket.AF_INET, socket.AF_INET6):
18 return (str(info[0]), int(info[1]))
19 return None
20 info = transport.get_extra_info("peername")
21 if info is not None and isinstance(info, (list, tuple)) and len(info) == 2:
22 return (str(info[0]), int(info[1]))
23 return None
24
25
26 def get_local_addr(transport):
27 socket_info = transport.get_extra_info("socket")
28 if socket_info is not None:
29 info = socket_info.getsockname()
30 family = socket_info.family
31 if family in (socket.AF_INET, socket.AF_INET6):
32 return (str(info[0]), int(info[1]))
33 return None
34 info = transport.get_extra_info("sockname")
35 if info is not None and isinstance(info, (list, tuple)) and len(info) == 2:
36 return (str(info[0]), int(info[1]))
37 return None
38
39
40 def is_ssl(transport):
41 return bool(transport.get_extra_info("sslcontext"))
42
43
44 def get_client_addr(scope):
45 client = scope.get("client")
46 if not client:
47 return ""
48 return "%s:%d" % client
49
50
51 def get_path_with_query_string(scope):
52 path_with_query_string = scope.get("root_path", "") + scope["path"]
53 if scope["query_string"]:
54 path_with_query_string = "{}?{}".format(
55 path_with_query_string, scope["query_string"].decode("ascii")
56 )
57 return path_with_query_string
58
[end of uvicorn/protocols/utils.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/uvicorn/protocols/utils.py b/uvicorn/protocols/utils.py
--- a/uvicorn/protocols/utils.py
+++ b/uvicorn/protocols/utils.py
@@ -1,5 +1,10 @@
import socket
+if hasattr(socket, "AF_UNIX"):
+ SUPPORTED_SOCKET_FAMILIES = (socket.AF_INET, socket.AF_INET6, socket.AF_UNIX)
+else:
+ SUPPORTED_SOCKET_FAMILIES = (socket.AF_INET, socket.AF_INET6)
+
def get_remote_addr(transport):
socket_info = transport.get_extra_info("socket")
@@ -14,8 +19,9 @@
else:
family = socket_info.family
- if family in (socket.AF_INET, socket.AF_INET6):
+ if family in SUPPORTED_SOCKET_FAMILIES:
return (str(info[0]), int(info[1]))
+
return None
info = transport.get_extra_info("peername")
if info is not None and isinstance(info, (list, tuple)) and len(info) == 2:
@@ -28,7 +34,7 @@
if socket_info is not None:
info = socket_info.getsockname()
family = socket_info.family
- if family in (socket.AF_INET, socket.AF_INET6):
+ if family in SUPPORTED_SOCKET_FAMILIES:
return (str(info[0]), int(info[1]))
return None
info = transport.get_extra_info("sockname")
|
{"golden_diff": "diff --git a/uvicorn/protocols/utils.py b/uvicorn/protocols/utils.py\n--- a/uvicorn/protocols/utils.py\n+++ b/uvicorn/protocols/utils.py\n@@ -1,5 +1,10 @@\n import socket\n \n+if hasattr(socket, \"AF_UNIX\"):\n+ SUPPORTED_SOCKET_FAMILIES = (socket.AF_INET, socket.AF_INET6, socket.AF_UNIX)\n+else:\n+ SUPPORTED_SOCKET_FAMILIES = (socket.AF_INET, socket.AF_INET6)\n+\n \n def get_remote_addr(transport):\n socket_info = transport.get_extra_info(\"socket\")\n@@ -14,8 +19,9 @@\n else:\n family = socket_info.family\n \n- if family in (socket.AF_INET, socket.AF_INET6):\n+ if family in SUPPORTED_SOCKET_FAMILIES:\n return (str(info[0]), int(info[1]))\n+\n return None\n info = transport.get_extra_info(\"peername\")\n if info is not None and isinstance(info, (list, tuple)) and len(info) == 2:\n@@ -28,7 +34,7 @@\n if socket_info is not None:\n info = socket_info.getsockname()\n family = socket_info.family\n- if family in (socket.AF_INET, socket.AF_INET6):\n+ if family in SUPPORTED_SOCKET_FAMILIES:\n return (str(info[0]), int(info[1]))\n return None\n info = transport.get_extra_info(\"sockname\")\n", "issue": "--proxy-headers with supervisor get client ip failed\n#### uvicorn version\r\n`Running uvicorn 0.11.3 with CPython 3.8.2 on Linux` \r\n#### supervisor config\r\n```\r\n[fcgi-program:uvicorn]\r\nsocket=tcp://0.0.0.0:5000 \r\ndirectory=/var/www/webapp/ \r\ncommand=/usr/local/python3/bin/uvicorn --fd 0 --proxy-headers main:app \r\nnumprocs=2 \r\nprocess_name=uvicorn-%(process_num)d \r\nstdout_logfile_maxbytes=0 \r\nstdout_logfile=/var/log/uvicorn.log \r\n```\r\n\r\n### fastapi \r\nversion 0.52.0\r\n\r\n```\r\n# cat /var/www/webapp/main.py\r\n\r\n#!/usr/bin/env python\r\n# -*- coding: utf-8 -*-\r\nfrom fastapi import FastAPI, Request\r\n\r\napp = FastAPI(redoc_url=None)\r\n\r\n\r\[email protected]('/')\r\ndef index(request: Request):\r\n return 'hello {}!'.format(request.client.host)\r\n\r\n```\r\n\r\n### run on supervisor\r\nsupervisord version 4.1.0\r\n```\r\nsupervisord -n\r\ncurl http://127.0.0.1:5000\r\n\r\n```\r\nwhy client.host is None?\r\n\n", "before_files": [{"content": "import socket\n\n\ndef get_remote_addr(transport):\n socket_info = transport.get_extra_info(\"socket\")\n if socket_info is not None:\n try:\n info = socket_info.getpeername()\n except OSError:\n # This case appears to inconsistently occur with uvloop\n # bound to a unix domain socket.\n family = None\n info = None\n else:\n family = socket_info.family\n\n if family in (socket.AF_INET, socket.AF_INET6):\n return (str(info[0]), int(info[1]))\n return None\n info = transport.get_extra_info(\"peername\")\n if info is not None and isinstance(info, (list, tuple)) and len(info) == 2:\n return (str(info[0]), int(info[1]))\n return None\n\n\ndef get_local_addr(transport):\n socket_info = transport.get_extra_info(\"socket\")\n if socket_info is not None:\n info = socket_info.getsockname()\n family = socket_info.family\n if family in (socket.AF_INET, socket.AF_INET6):\n return (str(info[0]), int(info[1]))\n return None\n info = transport.get_extra_info(\"sockname\")\n if info is not None and isinstance(info, (list, tuple)) and len(info) == 2:\n return (str(info[0]), int(info[1]))\n return None\n\n\ndef is_ssl(transport):\n return bool(transport.get_extra_info(\"sslcontext\"))\n\n\ndef get_client_addr(scope):\n client = scope.get(\"client\")\n if not client:\n return \"\"\n return \"%s:%d\" % client\n\n\ndef get_path_with_query_string(scope):\n path_with_query_string = scope.get(\"root_path\", \"\") + scope[\"path\"]\n 
if scope[\"query_string\"]:\n path_with_query_string = \"{}?{}\".format(\n path_with_query_string, scope[\"query_string\"].decode(\"ascii\")\n )\n return path_with_query_string\n", "path": "uvicorn/protocols/utils.py"}]}
| 1,340 | 320 |
gh_patches_debug_38005
|
rasdani/github-patches
|
git_diff
|
beeware__toga-1610
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
toga-winforms v0.3.0.dev38 gives Error on Windows app
Toga-winforms v0.3.0.dev38 gives AttributeError: 'TextInput' object has no attribute '_on_change'
**To Reproduce**
1. Create an app that uses toga.TextInput(),
2. Set toga-winforms==0.3.0.dev38 under Windows requirements in pyproject.toml file.
3. Update dependencies: `update -d`
4. Build and run app on Windows
**Screenshots**

**Environment:**
- Operating System: Windows 10
- Python version: 3.8.10
- Software versions:
- Briefcase: 0.3.9
- Toga: 0.3.0.dev38
</issue>
<code>
[start of examples/textinput/textinput/app.py]
1 from string import ascii_lowercase, ascii_uppercase, digits
2
3 import toga
4 from toga.constants import COLUMN
5 from toga.style import Pack
6 from toga import validators
7
8 EMPTY_PASSWORD = 'Empty password'
9
10
11 class TextInputApp(toga.App):
12 # Button callback functions
13 def do_extract_values(self, widget, **kwargs):
14 # Disable all the text inputs
15 self.text_input.enabled = False
16 self.password_input.enabled = False
17 self.number_input.enabled = False
18
19 # Update the labels with the extracted values
20 self.text_label.text = "Text content: {}".format(
21 self.text_input.value
22 )
23
24 self.password_label.text = "Your password is {}: {}".format(
25 "valid" if self.password_input.is_valid else "invalid",
26 self.password_input.value
27 )
28
29 number = self.number_input.value
30 if number:
31 self.number_label.text = "Double the number is: {}".format(number * 2)
32 else:
33 self.number_label.text = "You didn't enter a number"
34
35 # Wait 5 seconds
36 for i in range(5, 0, -1):
37 self.label.text = 'Counting down from {}...'.format(i)
38 yield 1
39 self.label.text = 'Enter some values and press extract.'
40
41 # Renable the inputs again.
42 self.text_input.enabled = True
43 self.password_input.enabled = True
44 self.number_input.enabled = True
45
46 def startup(self):
47 # Set up main window
48 self.main_window = toga.MainWindow(title=self.name)
49
50 # Labels to show responses.
51 self.label = toga.Label(
52 'Enter some values and press extract.', style=Pack(padding=10)
53 )
54 self.text_label = toga.Label('Ready.', style=Pack(padding=10))
55 self.password_label = toga.Label('Ready.', style=Pack(padding=10))
56 self.password_content_label = toga.Label(
57 EMPTY_PASSWORD, style=Pack(padding_bottom=10, font_size=9)
58 )
59 self.number_label = toga.Label('Ready.', style=Pack(padding=10))
60
61 # Text inputs and a button
62 self.text_input = toga.TextInput(
63 placeholder='Type something...', style=Pack(padding=10)
64 )
65 self.password_input = toga.PasswordInput(
66 placeholder='Password...',
67 style=Pack(padding=10),
68 on_change=self.on_password_change,
69 validators=[
70 validators.MinLength(10),
71 validators.ContainsUppercase(),
72 validators.ContainsLowercase(),
73 validators.ContainsSpecial(),
74 validators.ContainsDigit()
75 ]
76 )
77 self.email_input = toga.TextInput(
78 placeholder='Email...',
79 style=Pack(padding=10),
80 validators=[validators.Email()]
81 )
82 self.number_input = toga.NumberInput(style=Pack(padding=10))
83 btn_extract = toga.Button(
84 'Extract values',
85 on_press=self.do_extract_values,
86 style=Pack(flex=1),
87 )
88
89 # Outermost box
90 box = toga.Box(
91 children=[
92 self.label,
93 self.text_input,
94 self.password_input,
95 self.password_content_label,
96 self.email_input,
97 self.number_input,
98 self.text_label,
99 self.password_label,
100 self.number_label,
101 btn_extract,
102 ],
103 style=Pack(
104 flex=1,
105 direction=COLUMN,
106 padding=10,
107 )
108 )
109
110 # Add the content on the main window
111 self.main_window.content = box
112
113 # Show the main window
114 self.main_window.show()
115
116 def on_password_change(self, widget):
117 content = widget.value
118 self.password_content_label.text = self.get_password_content_label(content)
119
120 def get_password_content_label(self, content):
121 if content.strip() == "":
122 return EMPTY_PASSWORD
123 contains = set()
124 for letter in content:
125 if letter in ascii_uppercase:
126 contains.add("uppercase letters")
127 elif letter in ascii_lowercase:
128 contains.add("lowercase letters")
129 elif letter in digits:
130 contains.add("digits")
131 else:
132 contains.add("special characters")
133 return "Password contains: {}".format(', '.join(contains))
134
135
136 def main():
137 return TextInputApp('TextInput', 'org.beeware.widgets.textinput')
138
139
140 if __name__ == '__main__':
141 app = main()
142 app.main_loop()
143
[end of examples/textinput/textinput/app.py]
[start of src/core/toga/widgets/textinput.py]
1 import warnings
2
3 from toga.handlers import wrapped_handler
4
5 from .base import Widget
6
7
8 class TextInput(Widget):
9 """ A widget get user input.
10
11 Args:
12 id (str): An identifier for this widget.
13 style (:obj:`Style`): An optional style object. If no style is provided then
14 a new one will be created for the widget.
15 factory (:obj:`module`): A python module that is capable to return a
16 implementation of this class with the same name. (optional & normally not needed)
17 value (str): The initial text for the input.
18 placeholder (str): If no input is present this text is shown.
19 readonly (bool): Whether a user can write into the text input, defaults to `False`.
20 on_change (Callable): Method to be called when text is changed in text box
21 validators (list): list of validators to run on the value of the text box. Should
22 return None is value is valid and an error message if not.
23 on_change (``callable``): The handler to invoke when the text changes.
24 on_gain_focus (:obj:`callable`): Function to execute when get focused.
25 on_lose_focus (:obj:`callable`): Function to execute when lose focus.
26 """
27 MIN_WIDTH = 100
28
29 def __init__(
30 self,
31 id=None,
32 style=None,
33 factory=None,
34 value=None,
35 placeholder=None,
36 readonly=False,
37 on_change=None,
38 on_gain_focus=None,
39 on_lose_focus=None,
40 validators=None,
41 initial=None, # DEPRECATED!
42 ):
43 super().__init__(id=id, style=style, factory=factory)
44
45 # Create a platform specific implementation of the widget
46 self._create()
47
48 ##################################################################
49 # 2022-07: Backwards compatibility
50 ##################################################################
51
52 # initial replaced with value
53 if initial is not None:
54 if value is not None:
55 raise ValueError(
56 "Cannot specify both `initial` and `value`; "
57 "`initial` has been deprecated, use `value`"
58 )
59 else:
60 warnings.warn(
61 "`initial` has been renamed `value`", DeprecationWarning
62 )
63 value = initial
64
65 ##################################################################
66 # End backwards compatibility.
67 ##################################################################
68
69 self.placeholder = placeholder
70 self.readonly = readonly
71
72 # Set the actual value before on_change, because we do not want on_change triggered by it
73 self.value = value
74 self.on_change = on_change
75 self.validators = validators
76 self.on_lose_focus = on_lose_focus
77 self.on_gain_focus = on_gain_focus
78
79 def _create(self):
80 self._impl = self.factory.TextInput(interface=self)
81
82 @property
83 def readonly(self):
84 """ Whether a user can write into the text input
85
86 Returns:
87 ``True`` if only read is possible.
88 ``False`` if read and write is possible.
89 """
90 return self._readonly
91
92 @readonly.setter
93 def readonly(self, value):
94 self._readonly = value
95 self._impl.set_readonly(value)
96
97 @property
98 def placeholder(self):
99 """ The placeholder text.
100
101 Returns:
102 The placeholder text as a ``str``.
103 """
104 return self._placeholder
105
106 @placeholder.setter
107 def placeholder(self, value):
108 if value is None:
109 self._placeholder = ''
110 else:
111 self._placeholder = str(value)
112 self._impl.set_placeholder(value)
113
114 @property
115 def value(self):
116 """ The value of the text input field
117
118 Returns:
119 The current text of the widget as a ``str``.
120 """
121 return self._impl.get_value()
122
123 @value.setter
124 def value(self, value):
125 if value is None:
126 v = ''
127 else:
128 v = str(value)
129 self._impl.set_value(v)
130
131 @property
132 def is_valid(self):
133 return self._impl.is_valid()
134
135 def clear(self):
136 """ Clears the text of the widget """
137 self.value = ''
138
139 @property
140 def on_change(self):
141 """The handler to invoke when the value changes
142
143 Returns:
144 The function ``callable`` that is called on a content change.
145 """
146 return self._on_change
147
148 @on_change.setter
149 def on_change(self, handler):
150 """Set the handler to invoke when the value is changed.
151
152 Args:
153 handler (:obj:`callable`): The handler to invoke when the value is changed.
154 """
155 self._on_change = wrapped_handler(self, handler)
156 self._impl.set_on_change(self._on_change)
157
158 @property
159 def validators(self):
160 return self._validators
161
162 @validators.setter
163 def validators(self, validators):
164 if validators is None:
165 self._validators = []
166 else:
167 self._validators = validators
168 self.validate()
169
170 @property
171 def on_gain_focus(self):
172 """The handler to invoke when the widget get focus.
173
174 Returns:
175 The function ``callable`` that is called on widget focus gain.
176 """
177 return self._on_gain_focus
178
179 @on_gain_focus.setter
180 def on_gain_focus(self, handler):
181 self._on_gain_focus = wrapped_handler(self, handler)
182 self._impl.set_on_gain_focus(self._on_gain_focus)
183
184 @property
185 def on_lose_focus(self):
186 """The handler to invoke when the widget lose focus.
187
188 Returns:
189 The function ``callable`` that is called on widget focus loss.
190 """
191 return self._on_lose_focus
192
193 @on_lose_focus.setter
194 def on_lose_focus(self, handler):
195 self._on_lose_focus = wrapped_handler(self, handler)
196 self._impl.set_on_lose_focus(self._on_lose_focus)
197
198 def validate(self):
199 error_message = None
200 for validator in self.validators:
201 if error_message is None:
202 error_message = validator(self.value)
203
204 if error_message is None:
205 self._impl.clear_error()
206 return True
207 else:
208 self._impl.set_error(error_message)
209 return False
210
[end of src/core/toga/widgets/textinput.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/examples/textinput/textinput/app.py b/examples/textinput/textinput/app.py
--- a/examples/textinput/textinput/app.py
+++ b/examples/textinput/textinput/app.py
@@ -13,12 +13,14 @@
def do_extract_values(self, widget, **kwargs):
# Disable all the text inputs
self.text_input.enabled = False
+ self.text_input_placeholder.enabled = False
self.password_input.enabled = False
self.number_input.enabled = False
# Update the labels with the extracted values
- self.text_label.text = "Text content: {}".format(
- self.text_input.value
+ self.text_label.text = "Text content: {}; {}".format(
+ self.text_input.value,
+ self.text_input_placeholder.value,
)
self.password_label.text = "Your password is {}: {}".format(
@@ -40,6 +42,7 @@
# Renable the inputs again.
self.text_input.enabled = True
+ self.text_input_placeholder.enabled = True
self.password_input.enabled = True
self.number_input.enabled = True
@@ -60,6 +63,10 @@
# Text inputs and a button
self.text_input = toga.TextInput(
+ value='Initial value',
+ placeholder='Type something...', style=Pack(padding=10)
+ )
+ self.text_input_placeholder = toga.TextInput(
placeholder='Type something...', style=Pack(padding=10)
)
self.password_input = toga.PasswordInput(
@@ -91,6 +98,7 @@
children=[
self.label,
self.text_input,
+ self.text_input_placeholder,
self.password_input,
self.password_content_label,
self.email_input,
diff --git a/src/core/toga/widgets/textinput.py b/src/core/toga/widgets/textinput.py
--- a/src/core/toga/widgets/textinput.py
+++ b/src/core/toga/widgets/textinput.py
@@ -70,7 +70,10 @@
self.readonly = readonly
# Set the actual value before on_change, because we do not want on_change triggered by it
+ # However, we need to prime the handler property in case it is accessed.
+ self._on_change = None
self.value = value
+
self.on_change = on_change
self.validators = validators
self.on_lose_focus = on_lose_focus
|
{"golden_diff": "diff --git a/examples/textinput/textinput/app.py b/examples/textinput/textinput/app.py\n--- a/examples/textinput/textinput/app.py\n+++ b/examples/textinput/textinput/app.py\n@@ -13,12 +13,14 @@\n def do_extract_values(self, widget, **kwargs):\n # Disable all the text inputs\n self.text_input.enabled = False\n+ self.text_input_placeholder.enabled = False\n self.password_input.enabled = False\n self.number_input.enabled = False\n \n # Update the labels with the extracted values\n- self.text_label.text = \"Text content: {}\".format(\n- self.text_input.value\n+ self.text_label.text = \"Text content: {}; {}\".format(\n+ self.text_input.value,\n+ self.text_input_placeholder.value,\n )\n \n self.password_label.text = \"Your password is {}: {}\".format(\n@@ -40,6 +42,7 @@\n \n # Renable the inputs again.\n self.text_input.enabled = True\n+ self.text_input_placeholder.enabled = True\n self.password_input.enabled = True\n self.number_input.enabled = True\n \n@@ -60,6 +63,10 @@\n \n # Text inputs and a button\n self.text_input = toga.TextInput(\n+ value='Initial value',\n+ placeholder='Type something...', style=Pack(padding=10)\n+ )\n+ self.text_input_placeholder = toga.TextInput(\n placeholder='Type something...', style=Pack(padding=10)\n )\n self.password_input = toga.PasswordInput(\n@@ -91,6 +98,7 @@\n children=[\n self.label,\n self.text_input,\n+ self.text_input_placeholder,\n self.password_input,\n self.password_content_label,\n self.email_input,\ndiff --git a/src/core/toga/widgets/textinput.py b/src/core/toga/widgets/textinput.py\n--- a/src/core/toga/widgets/textinput.py\n+++ b/src/core/toga/widgets/textinput.py\n@@ -70,7 +70,10 @@\n self.readonly = readonly\n \n # Set the actual value before on_change, because we do not want on_change triggered by it\n+ # However, we need to prime the handler property in case it is accessed.\n+ self._on_change = None\n self.value = value\n+\n self.on_change = on_change\n self.validators = validators\n self.on_lose_focus = on_lose_focus\n", "issue": "toga-winforms v0.3.0.dev38 gives Error on Windows app\nToga-winforms v0.3.0.dev38 gives AttributeError: 'TextInput' object has not attribute '_on_change'\r\n\r\n**To Reproduce**\r\n1. Create an app that uses toga.TextInput(),\r\n2. Set toga-winforms==0.3.0.dev38 under Windows requirements in pyproject.toml file.\r\n3. Update dependencies: `update -d`\r\n4. 
Build and run app on Windows\r\n\r\n**Screenshots**\r\n\r\n\r\n**Environment:**\r\n - Operating System: Windows 10\r\n - Python version: 3.8.10\r\n - Software versions:\r\n - Briefcase: 0.3.9\r\n - Toga: 0.3.0.dev38\r\n\n", "before_files": [{"content": "from string import ascii_lowercase, ascii_uppercase, digits\n\nimport toga\nfrom toga.constants import COLUMN\nfrom toga.style import Pack\nfrom toga import validators\n\nEMPTY_PASSWORD = 'Empty password'\n\n\nclass TextInputApp(toga.App):\n # Button callback functions\n def do_extract_values(self, widget, **kwargs):\n # Disable all the text inputs\n self.text_input.enabled = False\n self.password_input.enabled = False\n self.number_input.enabled = False\n\n # Update the labels with the extracted values\n self.text_label.text = \"Text content: {}\".format(\n self.text_input.value\n )\n\n self.password_label.text = \"Your password is {}: {}\".format(\n \"valid\" if self.password_input.is_valid else \"invalid\",\n self.password_input.value\n )\n\n number = self.number_input.value\n if number:\n self.number_label.text = \"Double the number is: {}\".format(number * 2)\n else:\n self.number_label.text = \"You didn't enter a number\"\n\n # Wait 5 seconds\n for i in range(5, 0, -1):\n self.label.text = 'Counting down from {}...'.format(i)\n yield 1\n self.label.text = 'Enter some values and press extract.'\n\n # Renable the inputs again.\n self.text_input.enabled = True\n self.password_input.enabled = True\n self.number_input.enabled = True\n\n def startup(self):\n # Set up main window\n self.main_window = toga.MainWindow(title=self.name)\n\n # Labels to show responses.\n self.label = toga.Label(\n 'Enter some values and press extract.', style=Pack(padding=10)\n )\n self.text_label = toga.Label('Ready.', style=Pack(padding=10))\n self.password_label = toga.Label('Ready.', style=Pack(padding=10))\n self.password_content_label = toga.Label(\n EMPTY_PASSWORD, style=Pack(padding_bottom=10, font_size=9)\n )\n self.number_label = toga.Label('Ready.', style=Pack(padding=10))\n\n # Text inputs and a button\n self.text_input = toga.TextInput(\n placeholder='Type something...', style=Pack(padding=10)\n )\n self.password_input = toga.PasswordInput(\n placeholder='Password...',\n style=Pack(padding=10),\n on_change=self.on_password_change,\n validators=[\n validators.MinLength(10),\n validators.ContainsUppercase(),\n validators.ContainsLowercase(),\n validators.ContainsSpecial(),\n validators.ContainsDigit()\n ]\n )\n self.email_input = toga.TextInput(\n placeholder='Email...',\n style=Pack(padding=10),\n validators=[validators.Email()]\n )\n self.number_input = toga.NumberInput(style=Pack(padding=10))\n btn_extract = toga.Button(\n 'Extract values',\n on_press=self.do_extract_values,\n style=Pack(flex=1),\n )\n\n # Outermost box\n box = toga.Box(\n children=[\n self.label,\n self.text_input,\n self.password_input,\n self.password_content_label,\n self.email_input,\n self.number_input,\n self.text_label,\n self.password_label,\n self.number_label,\n btn_extract,\n ],\n style=Pack(\n flex=1,\n direction=COLUMN,\n padding=10,\n )\n )\n\n # Add the content on the main window\n self.main_window.content = box\n\n # Show the main window\n self.main_window.show()\n\n def on_password_change(self, widget):\n content = widget.value\n self.password_content_label.text = self.get_password_content_label(content)\n\n def get_password_content_label(self, content):\n if content.strip() == \"\":\n return EMPTY_PASSWORD\n contains = set()\n for letter in content:\n if letter in 
ascii_uppercase:\n contains.add(\"uppercase letters\")\n elif letter in ascii_lowercase:\n contains.add(\"lowercase letters\")\n elif letter in digits:\n contains.add(\"digits\")\n else:\n contains.add(\"special characters\")\n return \"Password contains: {}\".format(', '.join(contains))\n\n\ndef main():\n return TextInputApp('TextInput', 'org.beeware.widgets.textinput')\n\n\nif __name__ == '__main__':\n app = main()\n app.main_loop()\n", "path": "examples/textinput/textinput/app.py"}, {"content": "import warnings\n\nfrom toga.handlers import wrapped_handler\n\nfrom .base import Widget\n\n\nclass TextInput(Widget):\n \"\"\" A widget get user input.\n\n Args:\n id (str): An identifier for this widget.\n style (:obj:`Style`): An optional style object. If no style is provided then\n a new one will be created for the widget.\n factory (:obj:`module`): A python module that is capable to return a\n implementation of this class with the same name. (optional & normally not needed)\n value (str): The initial text for the input.\n placeholder (str): If no input is present this text is shown.\n readonly (bool): Whether a user can write into the text input, defaults to `False`.\n on_change (Callable): Method to be called when text is changed in text box\n validators (list): list of validators to run on the value of the text box. Should\n return None is value is valid and an error message if not.\n on_change (``callable``): The handler to invoke when the text changes.\n on_gain_focus (:obj:`callable`): Function to execute when get focused.\n on_lose_focus (:obj:`callable`): Function to execute when lose focus.\n \"\"\"\n MIN_WIDTH = 100\n\n def __init__(\n self,\n id=None,\n style=None,\n factory=None,\n value=None,\n placeholder=None,\n readonly=False,\n on_change=None,\n on_gain_focus=None,\n on_lose_focus=None,\n validators=None,\n initial=None, # DEPRECATED!\n ):\n super().__init__(id=id, style=style, factory=factory)\n\n # Create a platform specific implementation of the widget\n self._create()\n\n ##################################################################\n # 2022-07: Backwards compatibility\n ##################################################################\n\n # initial replaced with value\n if initial is not None:\n if value is not None:\n raise ValueError(\n \"Cannot specify both `initial` and `value`; \"\n \"`initial` has been deprecated, use `value`\"\n )\n else:\n warnings.warn(\n \"`initial` has been renamed `value`\", DeprecationWarning\n )\n value = initial\n\n ##################################################################\n # End backwards compatibility.\n ##################################################################\n\n self.placeholder = placeholder\n self.readonly = readonly\n\n # Set the actual value before on_change, because we do not want on_change triggered by it\n self.value = value\n self.on_change = on_change\n self.validators = validators\n self.on_lose_focus = on_lose_focus\n self.on_gain_focus = on_gain_focus\n\n def _create(self):\n self._impl = self.factory.TextInput(interface=self)\n\n @property\n def readonly(self):\n \"\"\" Whether a user can write into the text input\n\n Returns:\n ``True`` if only read is possible.\n ``False`` if read and write is possible.\n \"\"\"\n return self._readonly\n\n @readonly.setter\n def readonly(self, value):\n self._readonly = value\n self._impl.set_readonly(value)\n\n @property\n def placeholder(self):\n \"\"\" The placeholder text.\n\n Returns:\n The placeholder text as a ``str``.\n \"\"\"\n return 
self._placeholder\n\n @placeholder.setter\n def placeholder(self, value):\n if value is None:\n self._placeholder = ''\n else:\n self._placeholder = str(value)\n self._impl.set_placeholder(value)\n\n @property\n def value(self):\n \"\"\" The value of the text input field\n\n Returns:\n The current text of the widget as a ``str``.\n \"\"\"\n return self._impl.get_value()\n\n @value.setter\n def value(self, value):\n if value is None:\n v = ''\n else:\n v = str(value)\n self._impl.set_value(v)\n\n @property\n def is_valid(self):\n return self._impl.is_valid()\n\n def clear(self):\n \"\"\" Clears the text of the widget \"\"\"\n self.value = ''\n\n @property\n def on_change(self):\n \"\"\"The handler to invoke when the value changes\n\n Returns:\n The function ``callable`` that is called on a content change.\n \"\"\"\n return self._on_change\n\n @on_change.setter\n def on_change(self, handler):\n \"\"\"Set the handler to invoke when the value is changed.\n\n Args:\n handler (:obj:`callable`): The handler to invoke when the value is changed.\n \"\"\"\n self._on_change = wrapped_handler(self, handler)\n self._impl.set_on_change(self._on_change)\n\n @property\n def validators(self):\n return self._validators\n\n @validators.setter\n def validators(self, validators):\n if validators is None:\n self._validators = []\n else:\n self._validators = validators\n self.validate()\n\n @property\n def on_gain_focus(self):\n \"\"\"The handler to invoke when the widget get focus.\n\n Returns:\n The function ``callable`` that is called on widget focus gain.\n \"\"\"\n return self._on_gain_focus\n\n @on_gain_focus.setter\n def on_gain_focus(self, handler):\n self._on_gain_focus = wrapped_handler(self, handler)\n self._impl.set_on_gain_focus(self._on_gain_focus)\n\n @property\n def on_lose_focus(self):\n \"\"\"The handler to invoke when the widget lose focus.\n\n Returns:\n The function ``callable`` that is called on widget focus loss.\n \"\"\"\n return self._on_lose_focus\n\n @on_lose_focus.setter\n def on_lose_focus(self, handler):\n self._on_lose_focus = wrapped_handler(self, handler)\n self._impl.set_on_lose_focus(self._on_lose_focus)\n\n def validate(self):\n error_message = None\n for validator in self.validators:\n if error_message is None:\n error_message = validator(self.value)\n\n if error_message is None:\n self._impl.clear_error()\n return True\n else:\n self._impl.set_error(error_message)\n return False\n", "path": "src/core/toga/widgets/textinput.py"}]}
| 3,907 | 526 |
gh_patches_debug_13697
|
rasdani/github-patches
|
git_diff
|
litestar-org__litestar-1857
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
StaticFilesConfig and virtual directories
I'm trying to write a ``FileSystemProtocol`` to load files from the package data using [importlib_resources](https://importlib-resources.readthedocs.io/en/latest/using.html#). But because ``directories`` is defined as ``DirectoryPath``, pydantic checks if the given directories exist in the local filesystem.
This is not generally true, especially in any kind of virtual filesystem (e.g. a zipped package). I think this condition should be relaxed to support virtual filesystems.
https://github.com/starlite-api/starlite/blob/9bb6dcd57c10a591377cf8e3a537e9292566d5b9/starlite/config/static_files.py#L32
</issue>
<code>
[start of litestar/middleware/exceptions/middleware.py]
1 from __future__ import annotations
2
3 import pdb # noqa: T100
4 from dataclasses import asdict, dataclass, field
5 from inspect import getmro
6 from sys import exc_info
7 from traceback import format_exception
8 from typing import TYPE_CHECKING, Any, Type, cast
9
10 from litestar.connection import Request
11 from litestar.datastructures import Headers
12 from litestar.enums import MediaType, ScopeType
13 from litestar.exceptions import WebSocketException
14 from litestar.middleware.cors import CORSMiddleware
15 from litestar.middleware.exceptions._debug_response import create_debug_response
16 from litestar.status_codes import HTTP_500_INTERNAL_SERVER_ERROR
17
18 __all__ = ("ExceptionHandlerMiddleware", "ExceptionResponseContent", "create_exception_response")
19
20
21 if TYPE_CHECKING:
22 from litestar import Response
23 from litestar.app import Litestar
24 from litestar.logging import BaseLoggingConfig
25 from litestar.types import (
26 ASGIApp,
27 ExceptionHandler,
28 ExceptionHandlersMap,
29 Logger,
30 Receive,
31 Scope,
32 Send,
33 )
34 from litestar.types.asgi_types import WebSocketCloseEvent
35
36
37 def get_exception_handler(exception_handlers: ExceptionHandlersMap, exc: Exception) -> ExceptionHandler | None:
38 """Given a dictionary that maps exceptions and status codes to handler functions, and an exception, returns the
39 appropriate handler if existing.
40
41 Status codes are given preference over exception type.
42
43 If no status code match exists, each class in the MRO of the exception type is checked and
44 the first matching handler is returned.
45
46 Finally, if a ``500`` handler is registered, it will be returned for any exception that isn't a
47 subclass of :class:`HTTPException <litestar.exceptions.HTTPException>`.
48
49 Args:
50 exception_handlers: Mapping of status codes and exception types to handlers.
51 exc: Exception Instance to be resolved to a handler.
52
53 Returns:
54 Optional exception handler callable.
55 """
56 if not exception_handlers:
57 return None
58 status_code: int | None = getattr(exc, "status_code", None)
59 if status_code and (exception_handler := exception_handlers.get(status_code)):
60 return exception_handler
61 for cls in getmro(type(exc)):
62 if cls in exception_handlers:
63 return exception_handlers[cast("Type[Exception]", cls)]
64 if not hasattr(exc, "status_code") and HTTP_500_INTERNAL_SERVER_ERROR in exception_handlers:
65 return exception_handlers[HTTP_500_INTERNAL_SERVER_ERROR]
66 return None
67
68
69 @dataclass
70 class ExceptionResponseContent:
71 """Represent the contents of an exception-response."""
72
73 status_code: int
74 """Exception status code."""
75 detail: str
76 """Exception details or message."""
77 headers: dict[str, str] | None = field(default=None)
78 """Headers to attach to the response."""
79 extra: dict[str, Any] | list[Any] | None = field(default=None)
80 """An extra mapping to attach to the exception."""
81
82 def to_response(self) -> Response:
83 """Create a response from the model attributes.
84
85 Returns:
86 A response instance.
87 """
88 from litestar.response import Response
89
90 return Response(
91 content={k: v for k, v in asdict(self).items() if k != "headers" and v is not None},
92 headers=self.headers,
93 status_code=self.status_code,
94 media_type=MediaType.JSON,
95 )
96
97
98 def create_exception_response(exc: Exception) -> Response:
99 """Construct a response from an exception.
100
101 Notes:
102 - For instances of :class:`HTTPException <litestar.exceptions.HTTPException>` or other exception classes that have a
103 ``status_code`` attribute (e.g. Starlette exceptions), the status code is drawn from the exception, otherwise
104 response status is ``HTTP_500_INTERNAL_SERVER_ERROR``.
105
106 Args:
107 exc: An exception.
108
109 Returns:
110 Response: HTTP response constructed from exception details.
111 """
112 content = ExceptionResponseContent(
113 status_code=getattr(exc, "status_code", HTTP_500_INTERNAL_SERVER_ERROR),
114 detail=getattr(exc, "detail", repr(exc)),
115 headers=getattr(exc, "headers", None),
116 extra=getattr(exc, "extra", None),
117 )
118 return content.to_response()
119
120
121 class ExceptionHandlerMiddleware:
122 """Middleware used to wrap an ASGIApp inside a try catch block and handle any exceptions raised.
123
124 This used in multiple layers of Litestar.
125 """
126
127 def __init__(self, app: ASGIApp, debug: bool, exception_handlers: ExceptionHandlersMap) -> None:
128 """Initialize ``ExceptionHandlerMiddleware``.
129
130 Args:
131 app: The ``next`` ASGI app to call.
132 debug: Whether ``debug`` mode is enabled
133 exception_handlers: A dictionary mapping status codes and/or exception types to handler functions.
134 """
135 self.app = app
136 self.exception_handlers = exception_handlers
137 self.debug = debug
138
139 async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None:
140 """ASGI-callable.
141
142 Args:
143 scope: The ASGI connection scope.
144 receive: The ASGI receive function.
145 send: The ASGI send function.
146
147 Returns:
148 None
149 """
150 try:
151 await self.app(scope, receive, send)
152 except Exception as e: # noqa: BLE001
153 litestar_app = scope["app"]
154
155 if litestar_app.logging_config and (logger := litestar_app.logger):
156 self.handle_exception_logging(logger=logger, logging_config=litestar_app.logging_config, scope=scope)
157
158 for hook in litestar_app.after_exception:
159 await hook(e, scope)
160
161 if litestar_app.pdb_on_exception:
162 pdb.post_mortem()
163
164 if scope["type"] == ScopeType.HTTP:
165 await self.handle_request_exception(
166 litestar_app=litestar_app, scope=scope, receive=receive, send=send, exc=e
167 )
168 else:
169 await self.handle_websocket_exception(send=send, exc=e)
170
171 async def handle_request_exception(
172 self, litestar_app: Litestar, scope: Scope, receive: Receive, send: Send, exc: Exception
173 ) -> None:
174 """Handle exception raised inside 'http' scope routes.
175
176 Args:
177 litestar_app: The litestar app instance.
178 scope: The ASGI connection scope.
179 receive: The ASGI receive function.
180 send: The ASGI send function.
181 exc: The caught exception.
182
183 Returns:
184 None.
185 """
186
187 headers = Headers.from_scope(scope=scope)
188 if litestar_app.cors_config and (origin := headers.get("origin")):
189 cors_middleware = CORSMiddleware(app=self.app, config=litestar_app.cors_config)
190 send = cors_middleware.send_wrapper(send=send, origin=origin, has_cookie="cookie" in headers)
191
192 exception_handler = get_exception_handler(self.exception_handlers, exc) or self.default_http_exception_handler
193 request = Request[Any, Any, Any](scope=scope, receive=receive, send=send)
194 response = exception_handler(request, exc)
195 await response.to_asgi_response(app=litestar_app, request=request)(scope=scope, receive=receive, send=send)
196
197 @staticmethod
198 async def handle_websocket_exception(send: Send, exc: Exception) -> None:
199 """Handle exception raised inside 'websocket' scope routes.
200
201 Args:
202 send: The ASGI send function.
203 exc: The caught exception.
204
205 Returns:
206 None.
207 """
208 if isinstance(exc, WebSocketException):
209 code = exc.code
210 reason = exc.detail
211 else:
212 code = 4000 + getattr(exc, "status_code", HTTP_500_INTERNAL_SERVER_ERROR)
213 reason = getattr(exc, "detail", repr(exc))
214 event: WebSocketCloseEvent = {"type": "websocket.close", "code": code, "reason": reason}
215 await send(event)
216
217 def default_http_exception_handler(self, request: Request, exc: Exception) -> Response[Any]:
218 """Handle an HTTP exception by returning the appropriate response.
219
220 Args:
221 request: An HTTP Request instance.
222 exc: The caught exception.
223
224 Returns:
225 An HTTP response.
226 """
227 status_code = getattr(exc, "status_code", HTTP_500_INTERNAL_SERVER_ERROR)
228 if status_code == HTTP_500_INTERNAL_SERVER_ERROR and self.debug:
229 return create_debug_response(request=request, exc=exc)
230 return create_exception_response(exc)
231
232 def handle_exception_logging(self, logger: Logger, logging_config: BaseLoggingConfig, scope: Scope) -> None:
233 """Handle logging - if the litestar app has a logging config in place.
234
235 Args:
236 logger: A logger instance.
237 logging_config: Logging Config instance.
238 scope: The ASGI connection scope.
239
240 Returns:
241 None
242 """
243 if (
244 logging_config.log_exceptions == "always" or (logging_config.log_exceptions == "debug" and self.debug)
245 ) and logging_config.exception_logging_handler:
246 logging_config.exception_logging_handler(logger, scope, format_exception(*exc_info()))
247
[end of litestar/middleware/exceptions/middleware.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/litestar/middleware/exceptions/middleware.py b/litestar/middleware/exceptions/middleware.py
--- a/litestar/middleware/exceptions/middleware.py
+++ b/litestar/middleware/exceptions/middleware.py
@@ -109,9 +109,15 @@
Returns:
Response: HTTP response constructed from exception details.
"""
+ status_code = getattr(exc, "status_code", HTTP_500_INTERNAL_SERVER_ERROR)
+ if status_code == HTTP_500_INTERNAL_SERVER_ERROR:
+ detail = "Internal Server Error"
+ else:
+ detail = getattr(exc, "detail", repr(exc))
+
content = ExceptionResponseContent(
- status_code=getattr(exc, "status_code", HTTP_500_INTERNAL_SERVER_ERROR),
- detail=getattr(exc, "detail", repr(exc)),
+ status_code=status_code,
+ detail=detail,
headers=getattr(exc, "headers", None),
extra=getattr(exc, "extra", None),
)
|
{"golden_diff": "diff --git a/litestar/middleware/exceptions/middleware.py b/litestar/middleware/exceptions/middleware.py\n--- a/litestar/middleware/exceptions/middleware.py\n+++ b/litestar/middleware/exceptions/middleware.py\n@@ -109,9 +109,15 @@\n Returns:\n Response: HTTP response constructed from exception details.\n \"\"\"\n+ status_code = getattr(exc, \"status_code\", HTTP_500_INTERNAL_SERVER_ERROR)\n+ if status_code == HTTP_500_INTERNAL_SERVER_ERROR:\n+ detail = \"Internal Server Error\"\n+ else:\n+ detail = getattr(exc, \"detail\", repr(exc))\n+\n content = ExceptionResponseContent(\n- status_code=getattr(exc, \"status_code\", HTTP_500_INTERNAL_SERVER_ERROR),\n- detail=getattr(exc, \"detail\", repr(exc)),\n+ status_code=status_code,\n+ detail=detail,\n headers=getattr(exc, \"headers\", None),\n extra=getattr(exc, \"extra\", None),\n )\n", "issue": "StaticFilesConfig and virtual directories\nI'm trying to write a ``FileSystemProtocol`` to load files from the package data using [importlib_resources](https://importlib-resources.readthedocs.io/en/latest/using.html#). But because ``directories`` is defined as ``DirectoryPath``, pydantic checks if the given directories exist in the local filesystem. \r\n\r\nThis is not generally true, especially in any kind of virtual filesystem (e.g. a zipped package). I think this condition should be relaxed to support virtual filesystems.\r\n\r\nhttps://github.com/starlite-api/starlite/blob/9bb6dcd57c10a591377cf8e3a537e9292566d5b9/starlite/config/static_files.py#L32\n", "before_files": [{"content": "from __future__ import annotations\n\nimport pdb # noqa: T100\nfrom dataclasses import asdict, dataclass, field\nfrom inspect import getmro\nfrom sys import exc_info\nfrom traceback import format_exception\nfrom typing import TYPE_CHECKING, Any, Type, cast\n\nfrom litestar.connection import Request\nfrom litestar.datastructures import Headers\nfrom litestar.enums import MediaType, ScopeType\nfrom litestar.exceptions import WebSocketException\nfrom litestar.middleware.cors import CORSMiddleware\nfrom litestar.middleware.exceptions._debug_response import create_debug_response\nfrom litestar.status_codes import HTTP_500_INTERNAL_SERVER_ERROR\n\n__all__ = (\"ExceptionHandlerMiddleware\", \"ExceptionResponseContent\", \"create_exception_response\")\n\n\nif TYPE_CHECKING:\n from litestar import Response\n from litestar.app import Litestar\n from litestar.logging import BaseLoggingConfig\n from litestar.types import (\n ASGIApp,\n ExceptionHandler,\n ExceptionHandlersMap,\n Logger,\n Receive,\n Scope,\n Send,\n )\n from litestar.types.asgi_types import WebSocketCloseEvent\n\n\ndef get_exception_handler(exception_handlers: ExceptionHandlersMap, exc: Exception) -> ExceptionHandler | None:\n \"\"\"Given a dictionary that maps exceptions and status codes to handler functions, and an exception, returns the\n appropriate handler if existing.\n\n Status codes are given preference over exception type.\n\n If no status code match exists, each class in the MRO of the exception type is checked and\n the first matching handler is returned.\n\n Finally, if a ``500`` handler is registered, it will be returned for any exception that isn't a\n subclass of :class:`HTTPException <litestar.exceptions.HTTPException>`.\n\n Args:\n exception_handlers: Mapping of status codes and exception types to handlers.\n exc: Exception Instance to be resolved to a handler.\n\n Returns:\n Optional exception handler callable.\n \"\"\"\n if not exception_handlers:\n return None\n status_code: int | 
None = getattr(exc, \"status_code\", None)\n if status_code and (exception_handler := exception_handlers.get(status_code)):\n return exception_handler\n for cls in getmro(type(exc)):\n if cls in exception_handlers:\n return exception_handlers[cast(\"Type[Exception]\", cls)]\n if not hasattr(exc, \"status_code\") and HTTP_500_INTERNAL_SERVER_ERROR in exception_handlers:\n return exception_handlers[HTTP_500_INTERNAL_SERVER_ERROR]\n return None\n\n\n@dataclass\nclass ExceptionResponseContent:\n \"\"\"Represent the contents of an exception-response.\"\"\"\n\n status_code: int\n \"\"\"Exception status code.\"\"\"\n detail: str\n \"\"\"Exception details or message.\"\"\"\n headers: dict[str, str] | None = field(default=None)\n \"\"\"Headers to attach to the response.\"\"\"\n extra: dict[str, Any] | list[Any] | None = field(default=None)\n \"\"\"An extra mapping to attach to the exception.\"\"\"\n\n def to_response(self) -> Response:\n \"\"\"Create a response from the model attributes.\n\n Returns:\n A response instance.\n \"\"\"\n from litestar.response import Response\n\n return Response(\n content={k: v for k, v in asdict(self).items() if k != \"headers\" and v is not None},\n headers=self.headers,\n status_code=self.status_code,\n media_type=MediaType.JSON,\n )\n\n\ndef create_exception_response(exc: Exception) -> Response:\n \"\"\"Construct a response from an exception.\n\n Notes:\n - For instances of :class:`HTTPException <litestar.exceptions.HTTPException>` or other exception classes that have a\n ``status_code`` attribute (e.g. Starlette exceptions), the status code is drawn from the exception, otherwise\n response status is ``HTTP_500_INTERNAL_SERVER_ERROR``.\n\n Args:\n exc: An exception.\n\n Returns:\n Response: HTTP response constructed from exception details.\n \"\"\"\n content = ExceptionResponseContent(\n status_code=getattr(exc, \"status_code\", HTTP_500_INTERNAL_SERVER_ERROR),\n detail=getattr(exc, \"detail\", repr(exc)),\n headers=getattr(exc, \"headers\", None),\n extra=getattr(exc, \"extra\", None),\n )\n return content.to_response()\n\n\nclass ExceptionHandlerMiddleware:\n \"\"\"Middleware used to wrap an ASGIApp inside a try catch block and handle any exceptions raised.\n\n This used in multiple layers of Litestar.\n \"\"\"\n\n def __init__(self, app: ASGIApp, debug: bool, exception_handlers: ExceptionHandlersMap) -> None:\n \"\"\"Initialize ``ExceptionHandlerMiddleware``.\n\n Args:\n app: The ``next`` ASGI app to call.\n debug: Whether ``debug`` mode is enabled\n exception_handlers: A dictionary mapping status codes and/or exception types to handler functions.\n \"\"\"\n self.app = app\n self.exception_handlers = exception_handlers\n self.debug = debug\n\n async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None:\n \"\"\"ASGI-callable.\n\n Args:\n scope: The ASGI connection scope.\n receive: The ASGI receive function.\n send: The ASGI send function.\n\n Returns:\n None\n \"\"\"\n try:\n await self.app(scope, receive, send)\n except Exception as e: # noqa: BLE001\n litestar_app = scope[\"app\"]\n\n if litestar_app.logging_config and (logger := litestar_app.logger):\n self.handle_exception_logging(logger=logger, logging_config=litestar_app.logging_config, scope=scope)\n\n for hook in litestar_app.after_exception:\n await hook(e, scope)\n\n if litestar_app.pdb_on_exception:\n pdb.post_mortem()\n\n if scope[\"type\"] == ScopeType.HTTP:\n await self.handle_request_exception(\n litestar_app=litestar_app, scope=scope, receive=receive, send=send, exc=e\n )\n 
else:\n await self.handle_websocket_exception(send=send, exc=e)\n\n async def handle_request_exception(\n self, litestar_app: Litestar, scope: Scope, receive: Receive, send: Send, exc: Exception\n ) -> None:\n \"\"\"Handle exception raised inside 'http' scope routes.\n\n Args:\n litestar_app: The litestar app instance.\n scope: The ASGI connection scope.\n receive: The ASGI receive function.\n send: The ASGI send function.\n exc: The caught exception.\n\n Returns:\n None.\n \"\"\"\n\n headers = Headers.from_scope(scope=scope)\n if litestar_app.cors_config and (origin := headers.get(\"origin\")):\n cors_middleware = CORSMiddleware(app=self.app, config=litestar_app.cors_config)\n send = cors_middleware.send_wrapper(send=send, origin=origin, has_cookie=\"cookie\" in headers)\n\n exception_handler = get_exception_handler(self.exception_handlers, exc) or self.default_http_exception_handler\n request = Request[Any, Any, Any](scope=scope, receive=receive, send=send)\n response = exception_handler(request, exc)\n await response.to_asgi_response(app=litestar_app, request=request)(scope=scope, receive=receive, send=send)\n\n @staticmethod\n async def handle_websocket_exception(send: Send, exc: Exception) -> None:\n \"\"\"Handle exception raised inside 'websocket' scope routes.\n\n Args:\n send: The ASGI send function.\n exc: The caught exception.\n\n Returns:\n None.\n \"\"\"\n if isinstance(exc, WebSocketException):\n code = exc.code\n reason = exc.detail\n else:\n code = 4000 + getattr(exc, \"status_code\", HTTP_500_INTERNAL_SERVER_ERROR)\n reason = getattr(exc, \"detail\", repr(exc))\n event: WebSocketCloseEvent = {\"type\": \"websocket.close\", \"code\": code, \"reason\": reason}\n await send(event)\n\n def default_http_exception_handler(self, request: Request, exc: Exception) -> Response[Any]:\n \"\"\"Handle an HTTP exception by returning the appropriate response.\n\n Args:\n request: An HTTP Request instance.\n exc: The caught exception.\n\n Returns:\n An HTTP response.\n \"\"\"\n status_code = getattr(exc, \"status_code\", HTTP_500_INTERNAL_SERVER_ERROR)\n if status_code == HTTP_500_INTERNAL_SERVER_ERROR and self.debug:\n return create_debug_response(request=request, exc=exc)\n return create_exception_response(exc)\n\n def handle_exception_logging(self, logger: Logger, logging_config: BaseLoggingConfig, scope: Scope) -> None:\n \"\"\"Handle logging - if the litestar app has a logging config in place.\n\n Args:\n logger: A logger instance.\n logging_config: Logging Config instance.\n scope: The ASGI connection scope.\n\n Returns:\n None\n \"\"\"\n if (\n logging_config.log_exceptions == \"always\" or (logging_config.log_exceptions == \"debug\" and self.debug)\n ) and logging_config.exception_logging_handler:\n logging_config.exception_logging_handler(logger, scope, format_exception(*exc_info()))\n", "path": "litestar/middleware/exceptions/middleware.py"}]}
| 3,310 | 223 |
gh_patches_debug_3256
|
rasdani/github-patches
|
git_diff
|
e-valuation__EvaP-1467
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
OpenID login button is broken in latest firefox
Behavior:
When clicking the "login" button at `https://evap.hpi.de`, a request is sent to `https://evap.hpi.de/oidc/authenticate/`, which returns a 302 and redirects to `https://evap.hpi.de/?next=/oidc/authenticate/`, which has no visible difference from `https://evap.hpi.de`. After clicking the login button again, I'm redirected to `https://evap.hpi.de/?next=/oidc/authenticate/%3Fnext%3D/oidc/authenticate/`
Expected Behavior:
Display the openID page and allow login
Steps to reproduce
Go to https://evap.hpi.de and click the "login" button. Browser is an up-to-date firefox on linux
</issue>
<code>
[start of evap/middleware.py]
1 from django.contrib.auth.views import redirect_to_login
2
3
4 class RequireLoginMiddleware:
5 def __init__(self, get_response):
6 self.get_response = get_response
7
8 def __call__(self, request):
9 return self.get_response(request)
10
11 @staticmethod
12 def process_view(request, view_func, _view_args, _view_kwargs):
13 # Returning None tells django to pass the request on
14 if request.user.is_authenticated:
15 return None
16
17 if "no_login_required" in view_func.__dict__ and view_func.no_login_required:
18 return None
19
20 return redirect_to_login(request.get_full_path())
21
22
23 def no_login_required(func):
24 func.no_login_required = True
25 return func
26
[end of evap/middleware.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/evap/middleware.py b/evap/middleware.py
--- a/evap/middleware.py
+++ b/evap/middleware.py
@@ -17,6 +17,9 @@
if "no_login_required" in view_func.__dict__ and view_func.no_login_required:
return None
+ if view_func.__name__ in ["OIDCAuthenticationRequestView", "OIDCAuthenticationCallbackView"]:
+ return None
+
return redirect_to_login(request.get_full_path())
|
{"golden_diff": "diff --git a/evap/middleware.py b/evap/middleware.py\n--- a/evap/middleware.py\n+++ b/evap/middleware.py\n@@ -17,6 +17,9 @@\n if \"no_login_required\" in view_func.__dict__ and view_func.no_login_required:\n return None\n \n+ if view_func.__name__ in [\"OIDCAuthenticationRequestView\", \"OIDCAuthenticationCallbackView\"]:\n+ return None\n+\n return redirect_to_login(request.get_full_path())\n", "issue": "OpenID login button is broken in latest firefox\nBehavior:\r\nWhen clicking the \"login\" button at `https://evap.hpi.de` a request is sent to `https://evap.hpi.de/oidc/authenticate/`, which returns a 302 and redirects to `https://evap.hpi.de/?next=/oidc/authenticate/`, which has no visible difference with `https://evap.hpi.de`. After clicking the login button again I'm redirected to `https://evap.hpi.de/?next=/oidc/authenticate/%3Fnext%3D/oidc/authenticate/`\r\n\r\nExpected Behavior:\r\nDisplay the openID page and allow login\r\n\r\nSteps to reproduce\r\nGo to https://evap.hpi.de and click the \"login\" button. Browser is an up-to-date firefox on linux\n", "before_files": [{"content": "from django.contrib.auth.views import redirect_to_login\n\n\nclass RequireLoginMiddleware:\n def __init__(self, get_response):\n self.get_response = get_response\n\n def __call__(self, request):\n return self.get_response(request)\n\n @staticmethod\n def process_view(request, view_func, _view_args, _view_kwargs):\n # Returning None tells django to pass the request on\n if request.user.is_authenticated:\n return None\n\n if \"no_login_required\" in view_func.__dict__ and view_func.no_login_required:\n return None\n\n return redirect_to_login(request.get_full_path())\n\n\ndef no_login_required(func):\n func.no_login_required = True\n return func\n", "path": "evap/middleware.py"}]}
| 905 | 113 |
gh_patches_debug_22241
|
rasdani/github-patches
|
git_diff
|
readthedocs__readthedocs.org-9639
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Telemetry: track `extensions` and `html_theme` Sphinx variables
While working on https://github.com/sphinx-doc/sphinx/issues/10070, @AA-Turner ask for the content of `extensions` and `html_theme` and I think it could be useful to store in the Telemetry database somehow. Considering these values may be dynamically populated, we should find a way to execute this code and grab the final value of them.
</issue>
<code>
[start of readthedocs/telemetry/collectors.py]
1 """Data collectors."""
2
3 import json
4
5 import dparse
6 import structlog
7
8 from readthedocs.config.models import PythonInstallRequirements
9
10 log = structlog.get_logger(__name__)
11
12
13 class BuildDataCollector:
14
15 """
16 Build data collector.
17
18 Collect data from a runnig build.
19 """
20
21 def __init__(self, environment):
22 self.environment = environment
23 self.build = self.environment.build
24 self.project = self.environment.project
25 self.version = self.environment.version
26 self.config = self.environment.config
27 self.checkout_path = self.project.checkout_path(self.version.slug)
28
29 log.bind(
30 build_id=self.build["id"],
31 project_slug=self.project.slug,
32 version_slug=self.version.slug,
33 )
34
35 @staticmethod
36 def _safe_json_loads(content, default=None):
37 def lowercase(d): # pylint: disable=invalid-name
38 """Convert all dictionary keys to lowercase."""
39 return {k.lower(): i for k, i in d.items()}
40
41 # pylint: disable=broad-except
42 try:
43 # Use ``object_hook`` parameter to lowercase all the keys of the dictionary.
44 # This helps us to have our data normalized and improve queries.
45 return json.loads(content, object_hook=lowercase)
46 except Exception:
47 log.info(
48 "Error while loading JSON content.",
49 exc_info=True,
50 )
51 return default
52
53 def run(self, *args, **kwargs):
54 build_cmd = self.environment.run(*args, record=False, demux=True, **kwargs)
55 return build_cmd.exit_code, build_cmd.output, build_cmd.error
56
57 def collect(self):
58 """
59 Collect all relevant data from the runnig build.
60
61 Data that can be extracted from the database (project/organization)
62 isn't collected here.
63 """
64
65 # NOTE: we could run each command inside a try/except block to have a
66 # more granular protection and be able to save data from those commands
67 # that didn't fail. Otherwise, if one command fails, all the data for
68 # this Build is lost.
69
70 data = {}
71 data["config"] = {"user": self.config.source_config}
72 data["os"] = self._get_operating_system()
73 data["python"] = self._get_python_version()
74
75 user_apt_packages, all_apt_packages = self._get_apt_packages()
76 conda_packages = (
77 self._get_all_conda_packages() if self.config.is_using_conda else {}
78 )
79 data["packages"] = {
80 "pip": {
81 "user": self._get_user_pip_packages(),
82 "all": self._get_all_pip_packages(),
83 },
84 "conda": {
85 "all": conda_packages,
86 },
87 "apt": {
88 "user": user_apt_packages,
89 "all": all_apt_packages,
90 },
91 }
92 return data
93
94 def _get_all_conda_packages(self):
95 """
96 Get all the packages installed by the user using conda.
97
98 This includes top level and transitive dependencies.
99 The output of ``conda list`` is in the form of::
100
101 [
102 {
103 "base_url": "https://conda.anaconda.org/conda-forge",
104 "build_number": 0,
105 "build_string": "py_0",
106 "channel": "conda-forge",
107 "dist_name": "alabaster-0.7.12-py_0",
108 "name": "alabaster",
109 "platform": "noarch",
110 "version": "0.7.12"
111 },
112 {
113 "base_url": "https://conda.anaconda.org/conda-forge",
114 "build_number": 0,
115 "build_string": "pyh9f0ad1d_0",
116 "channel": "conda-forge",
117 "dist_name": "asn1crypto-1.4.0-pyh9f0ad1d_0",
118 "name": "asn1crypto",
119 "platform": "noarch",
120 "version": "1.4.0"
121 }
122 ]
123 """
124 code, stdout, _ = self.run(
125 "conda", "list", "--json", "--name", self.version.slug
126 )
127 if code == 0 and stdout:
128 packages = self._safe_json_loads(stdout, [])
129 packages = [
130 {
131 "name": package["name"],
132 "channel": package["channel"],
133 "version": package["version"],
134 }
135 for package in packages
136 ]
137 return packages
138 return []
139
140 def _get_user_pip_packages(self):
141 """
142 Get all the packages to be installed defined by the user.
143
144 It parses all the requirements files specified in the config file by
145 the user (python.install.requirements) using ``dparse`` --a 3rd party
146 package.
147
148 If the version of the package is explicit (==) it saves that particular
149 version. Otherwise, if it's not defined, it saves ``undefined`` and if
150 it's a non deterministic operation (like >=, <= or ~=) it saves
151 ``unknown`` in the version.
152
153 """
154 results = []
155 # pylint: disable=too-many-nested-blocks
156 for install in self.config.python.install:
157 if isinstance(install, PythonInstallRequirements):
158 if install.requirements:
159 cmd = ["cat", install.requirements]
160 _, stdout, _ = self.run(*cmd, cwd=self.checkout_path)
161 # pylint: disable=invalid-name
162 df = dparse.parse(
163 stdout, file_type=dparse.filetypes.requirements_txt
164 ).serialize()
165 dependencies = df.get("dependencies", [])
166 for requirement in dependencies:
167 name = requirement.get("name", "").lower()
168 if not name:
169 continue
170
171 # If the user defines a specific version in the
172 # requirements file, we save it Otherwise, we don't
173 # because we don't know which version will be
174 # installed.
175 version = "undefined"
176 specs = str(requirement.get("specs", ""))
177 if specs:
178 if specs.startswith("=="):
179 version = specs.replace("==", "", 1)
180 else:
181 version = "unknown"
182
183 results.append(
184 {
185 "name": name,
186 "version": version,
187 }
188 )
189 return results
190
191 def _get_all_pip_packages(self):
192 """
193 Get all the packages installed by pip.
194
195 This includes top level and transitive dependencies.
196 The output of ``pip list`` is in the form of::
197
198 [
199 {
200 "name": "requests-mock",
201 "version": "1.8.0"
202 },
203 {
204 "name": "requests-toolbelt",
205 "version": "0.9.1"
206 },
207 {
208 "name": "rstcheck",
209 "version": "3.3.1"
210 },
211 {
212 "name": "selectolax",
213 "version": "0.2.10"
214 },
215 {
216 "name": "slumber",
217 "version": "0.7.1"
218 }
219 ]
220 """
221 cmd = [
222 "python",
223 "-m",
224 "pip",
225 "list",
226 "--pre",
227 "--local",
228 "--format",
229 "json",
230 "--not-required",
231 ]
232 code, stdout, _ = self.run(*cmd)
233 if code == 0 and stdout:
234 return self._safe_json_loads(stdout, [])
235 return []
236
237 def _get_operating_system(self):
238 """
239 Get the current operating system.
240
241 The output of ``lsb_release --description`` is in the form of::
242
243 Description: Ubuntu 20.04.3 LTS
244 """
245 code, stdout, _ = self.run("lsb_release", "--description")
246 stdout = stdout.strip()
247 if code == 0 and stdout:
248 parts = stdout.split("\t")
249 if len(parts) == 2:
250 return parts[1]
251 return ""
252
253 def _get_apt_packages(self):
254 """
255 Get the list of installed apt packages (global and from the user).
256
257 The current source of user installed packages is the config file,
258 but we have only the name, so we take the version from the list of all
259 installed packages.
260 """
261 all_apt_packages = self._get_all_apt_packages()
262 all_apt_packages_dict = {
263 package["name"]: package["version"] for package in all_apt_packages
264 }
265 user_apt_packages = self._get_user_apt_packages()
266 for package in user_apt_packages:
267 package["version"] = all_apt_packages_dict.get(package["name"], "")
268 return user_apt_packages, all_apt_packages
269
270 def _get_all_apt_packages(self):
271 """
272 Get all installed apt packages and their versions.
273
274 The output of ``dpkg-query --show`` is the form of::
275
276 adduser 3.116ubuntu1
277 apt 1.6.14
278 base-files 10.1ubuntu2.11
279 base-passwd 3.5.44
280 bash 4.4.18-2ubuntu1.2
281 bsdutils 1:2.31.1-0.4ubuntu3.7
282 bzip2 1.0.6-8.1ubuntu0.2
283 coreutils 8.28-1ubuntu1
284 dash 0.5.8-2.10
285 debconf 1.5.66ubuntu1
286 debianutils 4.8.4
287 diffutils 1:3.6-1
288 dpkg 1.19.0.5ubuntu2.3
289 e2fsprogs 1.44.1-1ubuntu1.3
290 fdisk 2.31.1-0.4ubuntu3.7
291 findutils 4.6.0+git+20170828-2
292 gcc-8-base 8.4.0-1ubuntu1~18.04
293 gpgv 2.2.4-1ubuntu1.4
294 grep 3.1-2build1
295 gzip 1.6-5ubuntu1.2
296 hostname 3.20
297 """
298 code, stdout, _ = self.run(
299 "dpkg-query", "--showformat", "${package} ${version}\\n", "--show"
300 )
301 stdout = stdout.strip()
302 packages = []
303 if code != 0 or not stdout:
304 return packages
305
306 for line in stdout.split("\n"):
307 parts = line.split()
308 if len(parts) == 2:
309 package, version = parts
310 packages.append(
311 {
312 "name": package.lower(),
313 "version": version,
314 }
315 )
316
317 return packages
318
319 def _get_user_apt_packages(self):
320 return [
321 {"name": package.lower(), "version": ""}
322 for package in self.config.build.apt_packages
323 ]
324
325 def _get_python_version(self):
326 """
327 Get the python version currently used.
328
329 The output of ``python --version`` is in the form of::
330
331 Python 3.8.12
332 """
333 code, stdout, _ = self.run("python", "--version")
334 stdout = stdout.strip()
335 if code == 0 and stdout:
336 parts = stdout.split()
337 if len(parts) == 2:
338 return parts[1]
339 return ""
340
[end of readthedocs/telemetry/collectors.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/readthedocs/telemetry/collectors.py b/readthedocs/telemetry/collectors.py
--- a/readthedocs/telemetry/collectors.py
+++ b/readthedocs/telemetry/collectors.py
@@ -1,6 +1,7 @@
"""Data collectors."""
import json
+import os
import dparse
import structlog
@@ -89,6 +90,37 @@
"all": all_apt_packages,
},
}
+ data["doctool"] = self._get_doctool()
+ return data
+
+ def _get_doctool_name(self):
+ if self.version.is_sphinx_type:
+ return "sphinx"
+
+ if self.version.is_mkdocs_type:
+ return "mkdocs"
+
+ return "generic"
+
+ def _get_doctool(self):
+ data = {
+ "name": self._get_doctool_name(),
+ "extensions": [],
+ "html_theme": "",
+ }
+
+ if self._get_doctool_name() != "sphinx":
+ return data
+
+ conf_py_dir = os.path.join(
+ self.checkout_path,
+ os.path.dirname(self.config.sphinx.configuration),
+ )
+ filepath = os.path.join(conf_py_dir, "_build", "json", "telemetry.json")
+ if os.path.exists(filepath):
+ with open(filepath, "r") as json_file:
+ content = json_file.read()
+ data.update(self._safe_json_loads(content, {}))
return data
def _get_all_conda_packages(self):
|
{"golden_diff": "diff --git a/readthedocs/telemetry/collectors.py b/readthedocs/telemetry/collectors.py\n--- a/readthedocs/telemetry/collectors.py\n+++ b/readthedocs/telemetry/collectors.py\n@@ -1,6 +1,7 @@\n \"\"\"Data collectors.\"\"\"\n \n import json\n+import os\n \n import dparse\n import structlog\n@@ -89,6 +90,37 @@\n \"all\": all_apt_packages,\n },\n }\n+ data[\"doctool\"] = self._get_doctool()\n+ return data\n+\n+ def _get_doctool_name(self):\n+ if self.version.is_sphinx_type:\n+ return \"sphinx\"\n+\n+ if self.version.is_mkdocs_type:\n+ return \"mkdocs\"\n+\n+ return \"generic\"\n+\n+ def _get_doctool(self):\n+ data = {\n+ \"name\": self._get_doctool_name(),\n+ \"extensions\": [],\n+ \"html_theme\": \"\",\n+ }\n+\n+ if self._get_doctool_name() != \"sphinx\":\n+ return data\n+\n+ conf_py_dir = os.path.join(\n+ self.checkout_path,\n+ os.path.dirname(self.config.sphinx.configuration),\n+ )\n+ filepath = os.path.join(conf_py_dir, \"_build\", \"json\", \"telemetry.json\")\n+ if os.path.exists(filepath):\n+ with open(filepath, \"r\") as json_file:\n+ content = json_file.read()\n+ data.update(self._safe_json_loads(content, {}))\n return data\n \n def _get_all_conda_packages(self):\n", "issue": "Telemetry: track `extensions` and `html_theme` Sphinx variables\nWhile working on https://github.com/sphinx-doc/sphinx/issues/10070, @AA-Turner ask for the content of `extensions` and `html_theme` and I think it could be useful to store in the Telemetry database somehow. Considering these values may be dynamically populated, we should find a way to execute this code and grab the final value of them.\n", "before_files": [{"content": "\"\"\"Data collectors.\"\"\"\n\nimport json\n\nimport dparse\nimport structlog\n\nfrom readthedocs.config.models import PythonInstallRequirements\n\nlog = structlog.get_logger(__name__)\n\n\nclass BuildDataCollector:\n\n \"\"\"\n Build data collector.\n\n Collect data from a runnig build.\n \"\"\"\n\n def __init__(self, environment):\n self.environment = environment\n self.build = self.environment.build\n self.project = self.environment.project\n self.version = self.environment.version\n self.config = self.environment.config\n self.checkout_path = self.project.checkout_path(self.version.slug)\n\n log.bind(\n build_id=self.build[\"id\"],\n project_slug=self.project.slug,\n version_slug=self.version.slug,\n )\n\n @staticmethod\n def _safe_json_loads(content, default=None):\n def lowercase(d): # pylint: disable=invalid-name\n \"\"\"Convert all dictionary keys to lowercase.\"\"\"\n return {k.lower(): i for k, i in d.items()}\n\n # pylint: disable=broad-except\n try:\n # Use ``object_hook`` parameter to lowercase all the keys of the dictionary.\n # This helps us to have our data normalized and improve queries.\n return json.loads(content, object_hook=lowercase)\n except Exception:\n log.info(\n \"Error while loading JSON content.\",\n exc_info=True,\n )\n return default\n\n def run(self, *args, **kwargs):\n build_cmd = self.environment.run(*args, record=False, demux=True, **kwargs)\n return build_cmd.exit_code, build_cmd.output, build_cmd.error\n\n def collect(self):\n \"\"\"\n Collect all relevant data from the runnig build.\n\n Data that can be extracted from the database (project/organization)\n isn't collected here.\n \"\"\"\n\n # NOTE: we could run each command inside a try/except block to have a\n # more granular protection and be able to save data from those commands\n # that didn't fail. 
Otherwise, if one command fails, all the data for\n # this Build is lost.\n\n data = {}\n data[\"config\"] = {\"user\": self.config.source_config}\n data[\"os\"] = self._get_operating_system()\n data[\"python\"] = self._get_python_version()\n\n user_apt_packages, all_apt_packages = self._get_apt_packages()\n conda_packages = (\n self._get_all_conda_packages() if self.config.is_using_conda else {}\n )\n data[\"packages\"] = {\n \"pip\": {\n \"user\": self._get_user_pip_packages(),\n \"all\": self._get_all_pip_packages(),\n },\n \"conda\": {\n \"all\": conda_packages,\n },\n \"apt\": {\n \"user\": user_apt_packages,\n \"all\": all_apt_packages,\n },\n }\n return data\n\n def _get_all_conda_packages(self):\n \"\"\"\n Get all the packages installed by the user using conda.\n\n This includes top level and transitive dependencies.\n The output of ``conda list`` is in the form of::\n\n [\n {\n \"base_url\": \"https://conda.anaconda.org/conda-forge\",\n \"build_number\": 0,\n \"build_string\": \"py_0\",\n \"channel\": \"conda-forge\",\n \"dist_name\": \"alabaster-0.7.12-py_0\",\n \"name\": \"alabaster\",\n \"platform\": \"noarch\",\n \"version\": \"0.7.12\"\n },\n {\n \"base_url\": \"https://conda.anaconda.org/conda-forge\",\n \"build_number\": 0,\n \"build_string\": \"pyh9f0ad1d_0\",\n \"channel\": \"conda-forge\",\n \"dist_name\": \"asn1crypto-1.4.0-pyh9f0ad1d_0\",\n \"name\": \"asn1crypto\",\n \"platform\": \"noarch\",\n \"version\": \"1.4.0\"\n }\n ]\n \"\"\"\n code, stdout, _ = self.run(\n \"conda\", \"list\", \"--json\", \"--name\", self.version.slug\n )\n if code == 0 and stdout:\n packages = self._safe_json_loads(stdout, [])\n packages = [\n {\n \"name\": package[\"name\"],\n \"channel\": package[\"channel\"],\n \"version\": package[\"version\"],\n }\n for package in packages\n ]\n return packages\n return []\n\n def _get_user_pip_packages(self):\n \"\"\"\n Get all the packages to be installed defined by the user.\n\n It parses all the requirements files specified in the config file by\n the user (python.install.requirements) using ``dparse`` --a 3rd party\n package.\n\n If the version of the package is explicit (==) it saves that particular\n version. 
Otherwise, if it's not defined, it saves ``undefined`` and if\n it's a non deterministic operation (like >=, <= or ~=) it saves\n ``unknown`` in the version.\n\n \"\"\"\n results = []\n # pylint: disable=too-many-nested-blocks\n for install in self.config.python.install:\n if isinstance(install, PythonInstallRequirements):\n if install.requirements:\n cmd = [\"cat\", install.requirements]\n _, stdout, _ = self.run(*cmd, cwd=self.checkout_path)\n # pylint: disable=invalid-name\n df = dparse.parse(\n stdout, file_type=dparse.filetypes.requirements_txt\n ).serialize()\n dependencies = df.get(\"dependencies\", [])\n for requirement in dependencies:\n name = requirement.get(\"name\", \"\").lower()\n if not name:\n continue\n\n # If the user defines a specific version in the\n # requirements file, we save it Otherwise, we don't\n # because we don't know which version will be\n # installed.\n version = \"undefined\"\n specs = str(requirement.get(\"specs\", \"\"))\n if specs:\n if specs.startswith(\"==\"):\n version = specs.replace(\"==\", \"\", 1)\n else:\n version = \"unknown\"\n\n results.append(\n {\n \"name\": name,\n \"version\": version,\n }\n )\n return results\n\n def _get_all_pip_packages(self):\n \"\"\"\n Get all the packages installed by pip.\n\n This includes top level and transitive dependencies.\n The output of ``pip list`` is in the form of::\n\n [\n {\n \"name\": \"requests-mock\",\n \"version\": \"1.8.0\"\n },\n {\n \"name\": \"requests-toolbelt\",\n \"version\": \"0.9.1\"\n },\n {\n \"name\": \"rstcheck\",\n \"version\": \"3.3.1\"\n },\n {\n \"name\": \"selectolax\",\n \"version\": \"0.2.10\"\n },\n {\n \"name\": \"slumber\",\n \"version\": \"0.7.1\"\n }\n ]\n \"\"\"\n cmd = [\n \"python\",\n \"-m\",\n \"pip\",\n \"list\",\n \"--pre\",\n \"--local\",\n \"--format\",\n \"json\",\n \"--not-required\",\n ]\n code, stdout, _ = self.run(*cmd)\n if code == 0 and stdout:\n return self._safe_json_loads(stdout, [])\n return []\n\n def _get_operating_system(self):\n \"\"\"\n Get the current operating system.\n\n The output of ``lsb_release --description`` is in the form of::\n\n Description:\tUbuntu 20.04.3 LTS\n \"\"\"\n code, stdout, _ = self.run(\"lsb_release\", \"--description\")\n stdout = stdout.strip()\n if code == 0 and stdout:\n parts = stdout.split(\"\\t\")\n if len(parts) == 2:\n return parts[1]\n return \"\"\n\n def _get_apt_packages(self):\n \"\"\"\n Get the list of installed apt packages (global and from the user).\n\n The current source of user installed packages is the config file,\n but we have only the name, so we take the version from the list of all\n installed packages.\n \"\"\"\n all_apt_packages = self._get_all_apt_packages()\n all_apt_packages_dict = {\n package[\"name\"]: package[\"version\"] for package in all_apt_packages\n }\n user_apt_packages = self._get_user_apt_packages()\n for package in user_apt_packages:\n package[\"version\"] = all_apt_packages_dict.get(package[\"name\"], \"\")\n return user_apt_packages, all_apt_packages\n\n def _get_all_apt_packages(self):\n \"\"\"\n Get all installed apt packages and their versions.\n\n The output of ``dpkg-query --show`` is the form of::\n\n adduser 3.116ubuntu1\n apt 1.6.14\n base-files 10.1ubuntu2.11\n base-passwd 3.5.44\n bash 4.4.18-2ubuntu1.2\n bsdutils 1:2.31.1-0.4ubuntu3.7\n bzip2 1.0.6-8.1ubuntu0.2\n coreutils 8.28-1ubuntu1\n dash 0.5.8-2.10\n debconf 1.5.66ubuntu1\n debianutils 4.8.4\n diffutils 1:3.6-1\n dpkg 1.19.0.5ubuntu2.3\n e2fsprogs 1.44.1-1ubuntu1.3\n fdisk 2.31.1-0.4ubuntu3.7\n findutils 
4.6.0+git+20170828-2\n gcc-8-base 8.4.0-1ubuntu1~18.04\n gpgv 2.2.4-1ubuntu1.4\n grep 3.1-2build1\n gzip 1.6-5ubuntu1.2\n hostname 3.20\n \"\"\"\n code, stdout, _ = self.run(\n \"dpkg-query\", \"--showformat\", \"${package} ${version}\\\\n\", \"--show\"\n )\n stdout = stdout.strip()\n packages = []\n if code != 0 or not stdout:\n return packages\n\n for line in stdout.split(\"\\n\"):\n parts = line.split()\n if len(parts) == 2:\n package, version = parts\n packages.append(\n {\n \"name\": package.lower(),\n \"version\": version,\n }\n )\n\n return packages\n\n def _get_user_apt_packages(self):\n return [\n {\"name\": package.lower(), \"version\": \"\"}\n for package in self.config.build.apt_packages\n ]\n\n def _get_python_version(self):\n \"\"\"\n Get the python version currently used.\n\n The output of ``python --version`` is in the form of::\n\n Python 3.8.12\n \"\"\"\n code, stdout, _ = self.run(\"python\", \"--version\")\n stdout = stdout.strip()\n if code == 0 and stdout:\n parts = stdout.split()\n if len(parts) == 2:\n return parts[1]\n return \"\"\n", "path": "readthedocs/telemetry/collectors.py"}]}
| 4,065 | 361 |
gh_patches_debug_35959
|
rasdani/github-patches
|
git_diff
|
electricitymaps__electricitymaps-contrib-2088
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
US-BPA failing due to changes in data layout and timezone
```shell
File "/home/contrib/parsers/US_BPA.py", line 113, in fetch_production
raw_data = get_data(GENERATION_URL, session=session)
File "/home/contrib/parsers/US_BPA.py", line 25, in get_data
df = pd.read_table(StringIO(req.text), skiprows=5)
File "/usr/local/lib/python3.6/site-packages/pandas/io/parsers.py", line 685, in parser_f
return _read(filepath_or_buffer, kwds)
File "/usr/local/lib/python3.6/site-packages/pandas/io/parsers.py", line 463, in _read
data = parser.read(nrows)
File "/usr/local/lib/python3.6/site-packages/pandas/io/parsers.py", line 1154, in read
ret = self._engine.read(nrows)
File "/usr/local/lib/python3.6/site-packages/pandas/io/parsers.py", line 2059, in read
data = self._reader.read(nrows)
File "pandas/_libs/parsers.pyx", line 881, in pandas._libs.parsers.TextReader.read
File "pandas/_libs/parsers.pyx", line 896, in pandas._libs.parsers.TextReader._read_low_memory
File "pandas/_libs/parsers.pyx", line 950, in pandas._libs.parsers.TextReader._read_rows
File "pandas/_libs/parsers.pyx", line 937, in pandas._libs.parsers.TextReader._tokenize_rows
File "pandas/_libs/parsers.pyx", line 2132, in pandas._libs.parsers.raise_parser_error
pandas.errors.ParserError: Error tokenizing data. C error: Expected 1 fields in line 12, saw 6
```
</issue>
<code>
[start of parsers/US_BPA.py]
1 #!/usr/bin/env python3
2
3 """Parser for the Bonneville Power Administration area of the USA."""
4
5 from datetime import datetime, timedelta
6 from io import StringIO
7 import logging
8 import pandas as pd
9 import pytz
10 import requests
11
12
13 GENERATION_URL = 'https://transmission.bpa.gov/business/operations/Wind/baltwg.txt'
14
15 GENERATION_MAPPING = {'Wind': 'wind',
16 'Hydro': 'hydro',
17 'Fossil/Biomass': 'unknown',
18 'Nuclear': 'nuclear'}
19
20
21 def get_data(url, session=None):
22 """Returns a pandas dataframe."""
23 s=session or requests.Session()
24 req = s.get(url)
25 df = pd.read_table(StringIO(req.text), skiprows=5)
26
27 return df
28
29
30 def timestamp_converter(timestamp):
31 """Turns string representation of time into an aware datetime object."""
32
33 dt_naive = datetime.strptime(timestamp, '%m/%d/%Y %H:%M')
34 western = pytz.timezone('America/Los_Angeles')
35 dt_aware = western.localize(dt_naive)
36
37 return dt_aware
38
39
40 def data_processor(df, logger):
41 """
42 Takes a dataframe and drops all generation rows that are empty or more
43 than 1 day old. Turns each row into a dictionary and removes any generation
44 types that are unknown.
45 Returns a list of tuples in the form (datetime, production).
46 """
47
48 df= df.dropna(thresh=2)
49 df.columns = df.columns.str.strip()
50
51 # 5min data for the last 24 hours.
52 df = df.tail(288)
53 df['Date/Time'] = df['Date/Time'].map(timestamp_converter)
54
55 known_keys = GENERATION_MAPPING.keys() | {'Date/Time', 'Load'}
56 column_headers = set(df.columns)
57
58 unknown_keys = column_headers - known_keys
59
60 for k in unknown_keys:
61 logger.warning('New data {} seen in US-BPA data source'.format(k),
62 extra={'key': 'US-BPA'})
63
64 keys_to_remove = unknown_keys | {'Load'}
65
66 processed_data = []
67 for index, row in df.iterrows():
68 production = row.to_dict()
69
70 dt = production.pop('Date/Time')
71 dt = dt.to_pydatetime()
72 mapped_production = {GENERATION_MAPPING[k]:v for k,v in production.items()
73 if k not in keys_to_remove}
74
75 processed_data.append((dt, mapped_production))
76
77 return processed_data
78
79
80 def fetch_production(zone_key='US-BPA', session=None, target_datetime=None, logger=logging.getLogger(__name__)):
81 """
82 Requests the last known production mix (in MW) of a given zone
83 Arguments:
84 zone_key (optional) -- used in case a parser is able to fetch multiple zones
85 session (optional) -- request session passed in order to re-use an existing session
86 Return:
87 A dictionary in the form:
88 {
89 'zoneKey': 'FR',
90 'datetime': '2017-01-01T00:00:00Z',
91 'production': {
92 'biomass': 0.0,
93 'coal': 0.0,
94 'gas': 0.0,
95 'hydro': 0.0,
96 'nuclear': null,
97 'oil': 0.0,
98 'solar': 0.0,
99 'wind': 0.0,
100 'geothermal': 0.0,
101 'unknown': 0.0
102 },
103 'storage': {
104 'hydro': -10.0,
105 },
106 'source': 'mysource.com'
107 }
108 """
109
110 if target_datetime:
111 raise NotImplementedError('This parser is not yet able to parse past dates')
112
113 raw_data = get_data(GENERATION_URL, session=session)
114 processed_data = data_processor(raw_data, logger)
115
116 data = []
117 for item in processed_data:
118 datapoint = {'zoneKey': zone_key,
119 'datetime': item[0],
120 'production': item[1],
121 'storage': {},
122 'source': 'bpa.gov'}
123
124 data.append(datapoint)
125
126 return data
127
128
129 if __name__ == '__main__':
130 print('fetch_production() ->')
131 print(fetch_production())
132
[end of parsers/US_BPA.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/parsers/US_BPA.py b/parsers/US_BPA.py
--- a/parsers/US_BPA.py
+++ b/parsers/US_BPA.py
@@ -2,11 +2,11 @@
"""Parser for the Bonneville Power Administration area of the USA."""
-from datetime import datetime, timedelta
+
from io import StringIO
+import arrow
import logging
import pandas as pd
-import pytz
import requests
@@ -20,19 +20,18 @@
def get_data(url, session=None):
"""Returns a pandas dataframe."""
- s=session or requests.Session()
+ s = session or requests.Session()
req = s.get(url)
- df = pd.read_table(StringIO(req.text), skiprows=5)
+ df = pd.read_table(StringIO(req.text), skiprows=11)
return df
def timestamp_converter(timestamp):
- """Turns string representation of time into an aware datetime object."""
+ """Turns a timestamp str into an aware datetime object."""
- dt_naive = datetime.strptime(timestamp, '%m/%d/%Y %H:%M')
- western = pytz.timezone('America/Los_Angeles')
- dt_aware = western.localize(dt_naive)
+ arr_dt_naive = arrow.get(timestamp, 'MM/DD/YYYY HH:mm')
+ dt_aware = arr_dt_naive.replace(tzinfo='America/Los_Angeles').datetime
return dt_aware
@@ -45,7 +44,7 @@
Returns a list of tuples in the form (datetime, production).
"""
- df= df.dropna(thresh=2)
+ df = df.dropna(thresh=2)
df.columns = df.columns.str.strip()
# 5min data for the last 24 hours.
@@ -59,7 +58,7 @@
for k in unknown_keys:
logger.warning('New data {} seen in US-BPA data source'.format(k),
- extra={'key': 'US-BPA'})
+ extra={'key': 'US-BPA'})
keys_to_remove = unknown_keys | {'Load'}
@@ -69,7 +68,7 @@
dt = production.pop('Date/Time')
dt = dt.to_pydatetime()
- mapped_production = {GENERATION_MAPPING[k]:v for k,v in production.items()
+ mapped_production = {GENERATION_MAPPING[k]: v for k, v in production.items()
if k not in keys_to_remove}
processed_data.append((dt, mapped_production))
|
{"golden_diff": "diff --git a/parsers/US_BPA.py b/parsers/US_BPA.py\n--- a/parsers/US_BPA.py\n+++ b/parsers/US_BPA.py\n@@ -2,11 +2,11 @@\n \n \"\"\"Parser for the Bonneville Power Administration area of the USA.\"\"\"\n \n-from datetime import datetime, timedelta\n+\n from io import StringIO\n+import arrow\n import logging\n import pandas as pd\n-import pytz\n import requests\n \n \n@@ -20,19 +20,18 @@\n \n def get_data(url, session=None):\n \"\"\"Returns a pandas dataframe.\"\"\"\n- s=session or requests.Session()\n+ s = session or requests.Session()\n req = s.get(url)\n- df = pd.read_table(StringIO(req.text), skiprows=5)\n+ df = pd.read_table(StringIO(req.text), skiprows=11)\n \n return df\n \n \n def timestamp_converter(timestamp):\n- \"\"\"Turns string representation of time into an aware datetime object.\"\"\"\n+ \"\"\"Turns a timestamp str into an aware datetime object.\"\"\"\n \n- dt_naive = datetime.strptime(timestamp, '%m/%d/%Y %H:%M')\n- western = pytz.timezone('America/Los_Angeles')\n- dt_aware = western.localize(dt_naive)\n+ arr_dt_naive = arrow.get(timestamp, 'MM/DD/YYYY HH:mm')\n+ dt_aware = arr_dt_naive.replace(tzinfo='America/Los_Angeles').datetime\n \n return dt_aware\n \n@@ -45,7 +44,7 @@\n Returns a list of tuples in the form (datetime, production).\n \"\"\"\n \n- df= df.dropna(thresh=2)\n+ df = df.dropna(thresh=2)\n df.columns = df.columns.str.strip()\n \n # 5min data for the last 24 hours.\n@@ -59,7 +58,7 @@\n \n for k in unknown_keys:\n logger.warning('New data {} seen in US-BPA data source'.format(k),\n- extra={'key': 'US-BPA'})\n+ extra={'key': 'US-BPA'})\n \n keys_to_remove = unknown_keys | {'Load'}\n \n@@ -69,7 +68,7 @@\n \n dt = production.pop('Date/Time')\n dt = dt.to_pydatetime()\n- mapped_production = {GENERATION_MAPPING[k]:v for k,v in production.items()\n+ mapped_production = {GENERATION_MAPPING[k]: v for k, v in production.items()\n if k not in keys_to_remove}\n \n processed_data.append((dt, mapped_production))\n", "issue": "US-BPA failing due to changes in data layout and timezone \n```shell\r\n File \"/home/contrib/parsers/US_BPA.py\", line 113, in fetch_production\r\n raw_data = get_data(GENERATION_URL, session=session)\r\n File \"/home/contrib/parsers/US_BPA.py\", line 25, in get_data\r\n df = pd.read_table(StringIO(req.text), skiprows=5)\r\n File \"/usr/local/lib/python3.6/site-packages/pandas/io/parsers.py\", line 685, in parser_f\r\n return _read(filepath_or_buffer, kwds)\r\n File \"/usr/local/lib/python3.6/site-packages/pandas/io/parsers.py\", line 463, in _read\r\n data = parser.read(nrows)\r\n File \"/usr/local/lib/python3.6/site-packages/pandas/io/parsers.py\", line 1154, in read\r\n ret = self._engine.read(nrows)\r\n File \"/usr/local/lib/python3.6/site-packages/pandas/io/parsers.py\", line 2059, in read\r\n data = self._reader.read(nrows)\r\n File \"pandas/_libs/parsers.pyx\", line 881, in pandas._libs.parsers.TextReader.read\r\n File \"pandas/_libs/parsers.pyx\", line 896, in pandas._libs.parsers.TextReader._read_low_memory\r\n File \"pandas/_libs/parsers.pyx\", line 950, in pandas._libs.parsers.TextReader._read_rows\r\n File \"pandas/_libs/parsers.pyx\", line 937, in pandas._libs.parsers.TextReader._tokenize_rows\r\n File \"pandas/_libs/parsers.pyx\", line 2132, in pandas._libs.parsers.raise_parser_error\r\npandas.errors.ParserError: Error tokenizing data. 
C error: Expected 1 fields in line 12, saw 6\r\n```\n", "before_files": [{"content": "#!/usr/bin/env python3\n\n\"\"\"Parser for the Bonneville Power Administration area of the USA.\"\"\"\n\nfrom datetime import datetime, timedelta\nfrom io import StringIO\nimport logging\nimport pandas as pd\nimport pytz\nimport requests\n\n\nGENERATION_URL = 'https://transmission.bpa.gov/business/operations/Wind/baltwg.txt'\n\nGENERATION_MAPPING = {'Wind': 'wind',\n 'Hydro': 'hydro',\n 'Fossil/Biomass': 'unknown',\n 'Nuclear': 'nuclear'}\n\n\ndef get_data(url, session=None):\n \"\"\"Returns a pandas dataframe.\"\"\"\n s=session or requests.Session()\n req = s.get(url)\n df = pd.read_table(StringIO(req.text), skiprows=5)\n\n return df\n\n\ndef timestamp_converter(timestamp):\n \"\"\"Turns string representation of time into an aware datetime object.\"\"\"\n\n dt_naive = datetime.strptime(timestamp, '%m/%d/%Y %H:%M')\n western = pytz.timezone('America/Los_Angeles')\n dt_aware = western.localize(dt_naive)\n\n return dt_aware\n\n\ndef data_processor(df, logger):\n \"\"\"\n Takes a dataframe and drops all generation rows that are empty or more\n than 1 day old. Turns each row into a dictionary and removes any generation\n types that are unknown.\n Returns a list of tuples in the form (datetime, production).\n \"\"\"\n\n df= df.dropna(thresh=2)\n df.columns = df.columns.str.strip()\n\n # 5min data for the last 24 hours.\n df = df.tail(288)\n df['Date/Time'] = df['Date/Time'].map(timestamp_converter)\n\n known_keys = GENERATION_MAPPING.keys() | {'Date/Time', 'Load'}\n column_headers = set(df.columns)\n\n unknown_keys = column_headers - known_keys\n\n for k in unknown_keys:\n logger.warning('New data {} seen in US-BPA data source'.format(k),\n extra={'key': 'US-BPA'})\n\n keys_to_remove = unknown_keys | {'Load'}\n\n processed_data = []\n for index, row in df.iterrows():\n production = row.to_dict()\n\n dt = production.pop('Date/Time')\n dt = dt.to_pydatetime()\n mapped_production = {GENERATION_MAPPING[k]:v for k,v in production.items()\n if k not in keys_to_remove}\n\n processed_data.append((dt, mapped_production))\n\n return processed_data\n\n\ndef fetch_production(zone_key='US-BPA', session=None, target_datetime=None, logger=logging.getLogger(__name__)):\n \"\"\"\n Requests the last known production mix (in MW) of a given zone\n Arguments:\n zone_key (optional) -- used in case a parser is able to fetch multiple zones\n session (optional) -- request session passed in order to re-use an existing session\n Return:\n A dictionary in the form:\n {\n 'zoneKey': 'FR',\n 'datetime': '2017-01-01T00:00:00Z',\n 'production': {\n 'biomass': 0.0,\n 'coal': 0.0,\n 'gas': 0.0,\n 'hydro': 0.0,\n 'nuclear': null,\n 'oil': 0.0,\n 'solar': 0.0,\n 'wind': 0.0,\n 'geothermal': 0.0,\n 'unknown': 0.0\n },\n 'storage': {\n 'hydro': -10.0,\n },\n 'source': 'mysource.com'\n }\n \"\"\"\n\n if target_datetime:\n raise NotImplementedError('This parser is not yet able to parse past dates')\n\n raw_data = get_data(GENERATION_URL, session=session)\n processed_data = data_processor(raw_data, logger)\n\n data = []\n for item in processed_data:\n datapoint = {'zoneKey': zone_key,\n 'datetime': item[0],\n 'production': item[1],\n 'storage': {},\n 'source': 'bpa.gov'}\n\n data.append(datapoint)\n\n return data\n\n\nif __name__ == '__main__':\n print('fetch_production() ->')\n print(fetch_production())\n", "path": "parsers/US_BPA.py"}]}
| 2,185 | 568 |
gh_patches_debug_7925
|
rasdani/github-patches
|
git_diff
|
lra__mackup-1718
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
usability / doc issues (minor / feature request)
On ignoring Mackup since I installed it for multiple accounts, I'm now returning to working with it to bring some more machines into the group and reorganise. Here are some minor issues I ran into getting back up to speed:
* `mackup uninstall` says:
Every configuration file, setting and dotfile managed by Mackup will be unlinked and moved back to their original place, in your home folder.
Are you sure ? <Yes|No>yes
This is scary and actually incorrect. Please change 'moved' to 'copied' (obviously Mackup does the correct cp as mv would be an issue for all the other accounts).
* Would be nice to have a few lines in Bull*/backup on "What if I run `mackup backup` twice?" and/or maybe "How do I add another application later after the initial setup?" Again, obviously Mackup does the right thing, but my (pedantic?) reading of the documentation suggests it might not. The --dry-run option already helps with this.
* it would be **way cool** if `mackup list` also reported things like: 'configuration files detected' and 'configuration already in Mackup', and for these cases also 'mackup.cfg includes' (implicit or explicit with applications_to_sync) or 'mackup.cfg excludes'
Thanks!
</issue>
<code>
[start of mackup/main.py]
1 """Mackup.
2
3 Keep your application settings in sync.
4 Copyright (C) 2013-2021 Laurent Raufaste <http://glop.org/>
5
6 Usage:
7 mackup list
8 mackup [options] backup
9 mackup [options] restore
10 mackup show <application>
11 mackup [options] uninstall
12 mackup (-h | --help)
13 mackup --version
14
15 Options:
16 -h --help Show this screen.
17 -f --force Force every question asked to be answered with "Yes".
18 -r --root Allow mackup to be run as superuser.
19 -n --dry-run Show steps without executing.
20 -v --verbose Show additional details.
21 --version Show version.
22
23 Modes of action:
24 1. list: display a list of all supported applications.
25 2. backup: sync your conf files to your synced storage, use this the 1st time
26 you use Mackup.
27 3. restore: link the conf files already in your synced storage on your system,
28 use it on any new system you use.
29 4. uninstall: reset everything as it was before using Mackup.
30
31 By default, Mackup syncs all application data via
32 Dropbox, but may be configured to exclude applications or use a different
33 backend with a .mackup.cfg file.
34
35 See https://github.com/lra/mackup/tree/master/doc for more information.
36
37 """
38 from docopt import docopt
39 from .appsdb import ApplicationsDatabase
40 from .application import ApplicationProfile
41 from .constants import MACKUP_APP_NAME, VERSION
42 from .mackup import Mackup
43 from . import utils
44 import sys
45
46
47 class ColorFormatCodes:
48 BLUE = "\033[34m"
49 BOLD = "\033[1m"
50 NORMAL = "\033[0m"
51
52
53 def header(str):
54 return ColorFormatCodes.BLUE + str + ColorFormatCodes.NORMAL
55
56
57 def bold(str):
58 return ColorFormatCodes.BOLD + str + ColorFormatCodes.NORMAL
59
60
61 def main():
62 """Main function."""
63 # Get the command line arg
64 args = docopt(__doc__, version="Mackup {}".format(VERSION))
65
66 mckp = Mackup()
67 app_db = ApplicationsDatabase()
68
69 def printAppHeader(app_name):
70 if verbose:
71 print(("\n{0} {1} {0}").format(header("---"), bold(app_name)))
72
73 # If we want to answer mackup with "yes" for each question
74 if args["--force"]:
75 utils.FORCE_YES = True
76
77 # Allow mackup to be run as root
78 if args["--root"]:
79 utils.CAN_RUN_AS_ROOT = True
80
81 dry_run = args["--dry-run"]
82
83 verbose = args["--verbose"]
84
85 if args["backup"]:
86 # Check the env where the command is being run
87 mckp.check_for_usable_backup_env()
88
89 # Backup each application
90 for app_name in sorted(mckp.get_apps_to_backup()):
91 app = ApplicationProfile(mckp, app_db.get_files(app_name), dry_run, verbose)
92 printAppHeader(app_name)
93 app.backup()
94
95 elif args["restore"]:
96 # Check the env where the command is being run
97 mckp.check_for_usable_restore_env()
98
99 # Restore the Mackup config before any other config, as we might need
100 # it to know about custom settings
101 mackup_app = ApplicationProfile(
102 mckp, app_db.get_files(MACKUP_APP_NAME), dry_run, verbose
103 )
104 printAppHeader(MACKUP_APP_NAME)
105 mackup_app.restore()
106
107 # Initialize again the apps db, as the Mackup config might have changed
108 # it
109 mckp = Mackup()
110 app_db = ApplicationsDatabase()
111
112 # Restore the rest of the app configs, using the restored Mackup config
113 app_names = mckp.get_apps_to_backup()
114 # Mackup has already been done
115 app_names.discard(MACKUP_APP_NAME)
116
117 for app_name in sorted(app_names):
118 app = ApplicationProfile(mckp, app_db.get_files(app_name), dry_run, verbose)
119 printAppHeader(app_name)
120 app.restore()
121
122 elif args["uninstall"]:
123 # Check the env where the command is being run
124 mckp.check_for_usable_restore_env()
125
126 if dry_run or (
127 utils.confirm(
128 "You are going to uninstall Mackup.\n"
129 "Every configuration file, setting and dotfile"
130 " managed by Mackup will be unlinked and moved back"
131 " to their original place, in your home folder.\n"
132 "Are you sure?"
133 )
134 ):
135
136 # Uninstall the apps except Mackup, which we'll uninstall last, to
137 # keep the settings as long as possible
138 app_names = mckp.get_apps_to_backup()
139 app_names.discard(MACKUP_APP_NAME)
140
141 for app_name in sorted(app_names):
142 app = ApplicationProfile(
143 mckp, app_db.get_files(app_name), dry_run, verbose
144 )
145 printAppHeader(app_name)
146 app.uninstall()
147
148 # Restore the Mackup config before any other config, as we might
149 # need it to know about custom settings
150 mackup_app = ApplicationProfile(
151 mckp, app_db.get_files(MACKUP_APP_NAME), dry_run, verbose
152 )
153 mackup_app.uninstall()
154
155 # Delete the Mackup folder in Dropbox
156 # Don't delete this as there might be other Macs that aren't
157 # uninstalled yet
158 # delete(mckp.mackup_folder)
159
160 print(
161 "\n"
162 "All your files have been put back into place. You can now"
163 " safely uninstall Mackup.\n"
164 "\n"
165 "Thanks for using Mackup!"
166 )
167
168 elif args["list"]:
169 # Display the list of supported applications
170 mckp.check_for_usable_environment()
171 output = "Supported applications:\n"
172 for app_name in sorted(app_db.get_app_names()):
173 output += " - {}\n".format(app_name)
174 output += "\n"
175 output += "{} applications supported in Mackup v{}".format(
176 len(app_db.get_app_names()), VERSION
177 )
178 print(output)
179
180 elif args["show"]:
181 mckp.check_for_usable_environment()
182 app_name = args["<application>"]
183
184 # Make sure the app exists
185 if app_name not in app_db.get_app_names():
186 sys.exit("Unsupported application: {}".format(app_name))
187 print("Name: {}".format(app_db.get_name(app_name)))
188 print("Configuration files:")
189 for file in app_db.get_files(app_name):
190 print(" - {}".format(file))
191
192 # Delete the tmp folder
193 mckp.clean_temp_folder()
194
[end of mackup/main.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/mackup/main.py b/mackup/main.py
--- a/mackup/main.py
+++ b/mackup/main.py
@@ -127,7 +127,7 @@
utils.confirm(
"You are going to uninstall Mackup.\n"
"Every configuration file, setting and dotfile"
- " managed by Mackup will be unlinked and moved back"
+ " managed by Mackup will be unlinked and copied back"
" to their original place, in your home folder.\n"
"Are you sure?"
)
|
{"golden_diff": "diff --git a/mackup/main.py b/mackup/main.py\n--- a/mackup/main.py\n+++ b/mackup/main.py\n@@ -127,7 +127,7 @@\n utils.confirm(\n \"You are going to uninstall Mackup.\\n\"\n \"Every configuration file, setting and dotfile\"\n- \" managed by Mackup will be unlinked and moved back\"\n+ \" managed by Mackup will be unlinked and copied back\"\n \" to their original place, in your home folder.\\n\"\n \"Are you sure?\"\n )\n", "issue": "usability / doc issues (minor / feature request)\nOn ignoring Mackup since I installed it for multiple accounts, I'm now returning to working with it to bring some more machines into the group and reorganise. Here are some minor issues I ran into getting back up to speed:\r\n\r\n* `mackup uninstall` says: \r\n Every configuration file, setting and dotfile managed by Mackup will be unlinked and moved back to their original place, in your home folder.\r\n Are you sure ? <Yes|No>yes\r\n\r\nThis is scary and actually incorrect. Please change 'moved' to 'copied' (obviously Mackup does the correct cp as mv would be an issue for all the other accounts).\r\n\r\n* Would be nice to have a few lines in Bull*/backup on \"What if I run `mackup backup` twice?\" and/or maybe \"How do I add another application later after the initial setup?\" Again, obviously Mackup does the right thing, but my (pedantic?) reading of the documentation suggests it might not. The --dry-run option already helps with this.\r\n\r\n* it would be **way cool** if `mackup list` also reported things like: 'configuration files detected' and 'configuration already in Mackup', and for these cases also 'mackup.cfg includes' (implicit or explicit with applications_to_sync) or 'mackup.cfg excludes' \r\n\r\nThanks!\n", "before_files": [{"content": "\"\"\"Mackup.\n\nKeep your application settings in sync.\nCopyright (C) 2013-2021 Laurent Raufaste <http://glop.org/>\n\nUsage:\n mackup list\n mackup [options] backup\n mackup [options] restore\n mackup show <application>\n mackup [options] uninstall\n mackup (-h | --help)\n mackup --version\n\nOptions:\n -h --help Show this screen.\n -f --force Force every question asked to be answered with \"Yes\".\n -r --root Allow mackup to be run as superuser.\n -n --dry-run Show steps without executing.\n -v --verbose Show additional details.\n --version Show version.\n\nModes of action:\n 1. list: display a list of all supported applications.\n 2. backup: sync your conf files to your synced storage, use this the 1st time\n you use Mackup.\n 3. restore: link the conf files already in your synced storage on your system,\n use it on any new system you use.\n 4. uninstall: reset everything as it was before using Mackup.\n\nBy default, Mackup syncs all application data via\nDropbox, but may be configured to exclude applications or use a different\nbackend with a .mackup.cfg file.\n\nSee https://github.com/lra/mackup/tree/master/doc for more information.\n\n\"\"\"\nfrom docopt import docopt\nfrom .appsdb import ApplicationsDatabase\nfrom .application import ApplicationProfile\nfrom .constants import MACKUP_APP_NAME, VERSION\nfrom .mackup import Mackup\nfrom . 
import utils\nimport sys\n\n\nclass ColorFormatCodes:\n BLUE = \"\\033[34m\"\n BOLD = \"\\033[1m\"\n NORMAL = \"\\033[0m\"\n\n\ndef header(str):\n return ColorFormatCodes.BLUE + str + ColorFormatCodes.NORMAL\n\n\ndef bold(str):\n return ColorFormatCodes.BOLD + str + ColorFormatCodes.NORMAL\n\n\ndef main():\n \"\"\"Main function.\"\"\"\n # Get the command line arg\n args = docopt(__doc__, version=\"Mackup {}\".format(VERSION))\n\n mckp = Mackup()\n app_db = ApplicationsDatabase()\n\n def printAppHeader(app_name):\n if verbose:\n print((\"\\n{0} {1} {0}\").format(header(\"---\"), bold(app_name)))\n\n # If we want to answer mackup with \"yes\" for each question\n if args[\"--force\"]:\n utils.FORCE_YES = True\n\n # Allow mackup to be run as root\n if args[\"--root\"]:\n utils.CAN_RUN_AS_ROOT = True\n\n dry_run = args[\"--dry-run\"]\n\n verbose = args[\"--verbose\"]\n\n if args[\"backup\"]:\n # Check the env where the command is being run\n mckp.check_for_usable_backup_env()\n\n # Backup each application\n for app_name in sorted(mckp.get_apps_to_backup()):\n app = ApplicationProfile(mckp, app_db.get_files(app_name), dry_run, verbose)\n printAppHeader(app_name)\n app.backup()\n\n elif args[\"restore\"]:\n # Check the env where the command is being run\n mckp.check_for_usable_restore_env()\n\n # Restore the Mackup config before any other config, as we might need\n # it to know about custom settings\n mackup_app = ApplicationProfile(\n mckp, app_db.get_files(MACKUP_APP_NAME), dry_run, verbose\n )\n printAppHeader(MACKUP_APP_NAME)\n mackup_app.restore()\n\n # Initialize again the apps db, as the Mackup config might have changed\n # it\n mckp = Mackup()\n app_db = ApplicationsDatabase()\n\n # Restore the rest of the app configs, using the restored Mackup config\n app_names = mckp.get_apps_to_backup()\n # Mackup has already been done\n app_names.discard(MACKUP_APP_NAME)\n\n for app_name in sorted(app_names):\n app = ApplicationProfile(mckp, app_db.get_files(app_name), dry_run, verbose)\n printAppHeader(app_name)\n app.restore()\n\n elif args[\"uninstall\"]:\n # Check the env where the command is being run\n mckp.check_for_usable_restore_env()\n\n if dry_run or (\n utils.confirm(\n \"You are going to uninstall Mackup.\\n\"\n \"Every configuration file, setting and dotfile\"\n \" managed by Mackup will be unlinked and moved back\"\n \" to their original place, in your home folder.\\n\"\n \"Are you sure?\"\n )\n ):\n\n # Uninstall the apps except Mackup, which we'll uninstall last, to\n # keep the settings as long as possible\n app_names = mckp.get_apps_to_backup()\n app_names.discard(MACKUP_APP_NAME)\n\n for app_name in sorted(app_names):\n app = ApplicationProfile(\n mckp, app_db.get_files(app_name), dry_run, verbose\n )\n printAppHeader(app_name)\n app.uninstall()\n\n # Restore the Mackup config before any other config, as we might\n # need it to know about custom settings\n mackup_app = ApplicationProfile(\n mckp, app_db.get_files(MACKUP_APP_NAME), dry_run, verbose\n )\n mackup_app.uninstall()\n\n # Delete the Mackup folder in Dropbox\n # Don't delete this as there might be other Macs that aren't\n # uninstalled yet\n # delete(mckp.mackup_folder)\n\n print(\n \"\\n\"\n \"All your files have been put back into place. 
You can now\"\n \" safely uninstall Mackup.\\n\"\n \"\\n\"\n \"Thanks for using Mackup!\"\n )\n\n elif args[\"list\"]:\n # Display the list of supported applications\n mckp.check_for_usable_environment()\n output = \"Supported applications:\\n\"\n for app_name in sorted(app_db.get_app_names()):\n output += \" - {}\\n\".format(app_name)\n output += \"\\n\"\n output += \"{} applications supported in Mackup v{}\".format(\n len(app_db.get_app_names()), VERSION\n )\n print(output)\n\n elif args[\"show\"]:\n mckp.check_for_usable_environment()\n app_name = args[\"<application>\"]\n\n # Make sure the app exists\n if app_name not in app_db.get_app_names():\n sys.exit(\"Unsupported application: {}\".format(app_name))\n print(\"Name: {}\".format(app_db.get_name(app_name)))\n print(\"Configuration files:\")\n for file in app_db.get_files(app_name):\n print(\" - {}\".format(file))\n\n # Delete the tmp folder\n mckp.clean_temp_folder()\n", "path": "mackup/main.py"}]}
| 2,824 | 125 |
gh_patches_debug_3683
|
rasdani/github-patches
|
git_diff
|
bokeh__bokeh-5883
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Test bokehjs/examples and run them on travis ci
They are constantly broken due to not being continuously tested.
</issue>
<code>
[start of examples/plotting/file/airports_map.py]
1 from __future__ import print_function
2
3 from bokeh.layouts import row
4 from bokeh.models import (
5 Range1d, WMTSTileSource, ColumnDataSource, HoverTool,
6 )
7 from bokeh.plotting import figure, show, output_file
8 from bokeh.sampledata.airports import data as airports
9 from bokeh.tile_providers import CARTODBPOSITRON
10
11 points_source = ColumnDataSource(airports)
12 title = "US Airports: Field Elevation > 1500m"
13
14 def plot(tile_source, filename):
15 output_file(filename, title=title)
16
17 # set to roughly extent of points
18 x_range = Range1d(start=airports['x'].min() - 10000, end=airports['x'].max() + 10000, bounds=None)
19 y_range = Range1d(start=airports['y'].min() - 10000, end=airports['y'].max() + 10000, bounds=None)
20
21 # create plot and add tools
22 p = figure(tools='wheel_zoom,pan', x_range=x_range, y_range=y_range, title=title)
23 p.axis.visible = False
24 hover_tool = HoverTool(tooltips=[("Name", "@name"), ("Elevation", "@elevation (m)")])
25 p.add_tools(hover_tool)
26 p.add_tile(tile_source)
27
28 # create point glyphs
29 p.circle(x='x', y='y', size=9, fill_color="#60ACA1", line_color="#D2C4C1", line_width=1.5, source=points_source)
30 return p
31
32 # create a tile source
33 tile_options = {}
34 tile_options['url'] = 'http://otile2.mqcdn.com/tiles/1.0.0/sat/{Z}/{X}/{Y}.png'
35 mq_tile_source = WMTSTileSource(**tile_options)
36
37 carto = plot(CARTODBPOSITRON, 'airports_map_cartodb.html')
38 mq = plot(mq_tile_source, 'airports_map.html')
39
40 show(row([carto, mq]))
41
[end of examples/plotting/file/airports_map.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/examples/plotting/file/airports_map.py b/examples/plotting/file/airports_map.py
--- a/examples/plotting/file/airports_map.py
+++ b/examples/plotting/file/airports_map.py
@@ -31,7 +31,7 @@
# create a tile source
tile_options = {}
-tile_options['url'] = 'http://otile2.mqcdn.com/tiles/1.0.0/sat/{Z}/{X}/{Y}.png'
+# TODO tile_options['url'] = 'http://otile2.mqcdn.com/tiles/1.0.0/sat/{Z}/{X}/{Y}.png'
mq_tile_source = WMTSTileSource(**tile_options)
carto = plot(CARTODBPOSITRON, 'airports_map_cartodb.html')
|
{"golden_diff": "diff --git a/examples/plotting/file/airports_map.py b/examples/plotting/file/airports_map.py\n--- a/examples/plotting/file/airports_map.py\n+++ b/examples/plotting/file/airports_map.py\n@@ -31,7 +31,7 @@\n \n # create a tile source\n tile_options = {}\n-tile_options['url'] = 'http://otile2.mqcdn.com/tiles/1.0.0/sat/{Z}/{X}/{Y}.png'\n+# TODO tile_options['url'] = 'http://otile2.mqcdn.com/tiles/1.0.0/sat/{Z}/{X}/{Y}.png'\n mq_tile_source = WMTSTileSource(**tile_options)\n \n carto = plot(CARTODBPOSITRON, 'airports_map_cartodb.html')\n", "issue": "Test bokehjs/examples and run them on travis ci\nThey are constantly broken due to not being continuously tested.\n", "before_files": [{"content": "from __future__ import print_function\n\nfrom bokeh.layouts import row\nfrom bokeh.models import (\n Range1d, WMTSTileSource, ColumnDataSource, HoverTool,\n)\nfrom bokeh.plotting import figure, show, output_file\nfrom bokeh.sampledata.airports import data as airports\nfrom bokeh.tile_providers import CARTODBPOSITRON\n\npoints_source = ColumnDataSource(airports)\ntitle = \"US Airports: Field Elevation > 1500m\"\n\ndef plot(tile_source, filename):\n output_file(filename, title=title)\n\n # set to roughly extent of points\n x_range = Range1d(start=airports['x'].min() - 10000, end=airports['x'].max() + 10000, bounds=None)\n y_range = Range1d(start=airports['y'].min() - 10000, end=airports['y'].max() + 10000, bounds=None)\n\n # create plot and add tools\n p = figure(tools='wheel_zoom,pan', x_range=x_range, y_range=y_range, title=title)\n p.axis.visible = False\n hover_tool = HoverTool(tooltips=[(\"Name\", \"@name\"), (\"Elevation\", \"@elevation (m)\")])\n p.add_tools(hover_tool)\n p.add_tile(tile_source)\n\n # create point glyphs\n p.circle(x='x', y='y', size=9, fill_color=\"#60ACA1\", line_color=\"#D2C4C1\", line_width=1.5, source=points_source)\n return p\n\n# create a tile source\ntile_options = {}\ntile_options['url'] = 'http://otile2.mqcdn.com/tiles/1.0.0/sat/{Z}/{X}/{Y}.png'\nmq_tile_source = WMTSTileSource(**tile_options)\n\ncarto = plot(CARTODBPOSITRON, 'airports_map_cartodb.html')\nmq = plot(mq_tile_source, 'airports_map.html')\n\nshow(row([carto, mq]))\n", "path": "examples/plotting/file/airports_map.py"}]}
| 1,101 | 180 |
gh_patches_debug_22234
|
rasdani/github-patches
|
git_diff
|
certbot__certbot-4466
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Cannot find Apache control command apache2ctl on Arch Linux
I have this problem when using certbot for apache following this guide http://letsencrypt.readthedocs.io/en/latest/using.html#apache
```
$ sudo certbot --apache
Saving debug log to /var/log/letsencrypt/letsencrypt.log
Failed to find apache2ctl in expanded PATH: /usr/local/sbin:/usr/local/bin:/usr/bin:/usr/bin/site_perl:/usr/bin/vendor_perl:/usr/bin/core_perl:/usr/sbin
The apache plugin is not working; there may be problems with your existing configuration.
The error was: NoInstallationError('Cannot find Apache control command apache2ctl',)
```
I am on Arch Linux, and it seems that there is no apache2ctl on Arch, but certbot requires it?
I tried to install it, but no luck:
```
$ sudo pacman -S apache2
error: target not found: apache2
```
My Apache's version:
```
$ httpd -v
Server version: Apache/2.4.25 (Unix)
Server built: Dec 20 2016 13:02:2
```
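For reference, Arch's httpd package is simply `apache`, which ships `apachectl` and `httpd` but no `apache2ctl`; a minimal, illustrative check of what is actually on the PATH (a sketch, not certbot output):

```python
import shutil

# Hypothetical result on a stock Arch install: apache2ctl is absent, apachectl is present.
print(shutil.which("apache2ctl"))  # expected: None
print(shutil.which("apachectl"))   # expected: /usr/bin/apachectl
```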
</issue>
<code>
[start of certbot-apache/certbot_apache/constants.py]
1 """Apache plugin constants."""
2 import pkg_resources
3 from certbot import util
4
5 CLI_DEFAULTS_DEFAULT = dict(
6 server_root="/etc/apache2",
7 vhost_root="/etc/apache2/sites-available",
8 vhost_files="*",
9 logs_root="/var/log/apache2",
10 version_cmd=['apache2ctl', '-v'],
11 define_cmd=['apache2ctl', '-t', '-D', 'DUMP_RUN_CFG'],
12 restart_cmd=['apache2ctl', 'graceful'],
13 conftest_cmd=['apache2ctl', 'configtest'],
14 enmod=None,
15 dismod=None,
16 le_vhost_ext="-le-ssl.conf",
17 handle_mods=False,
18 handle_sites=False,
19 challenge_location="/etc/apache2",
20 MOD_SSL_CONF_SRC=pkg_resources.resource_filename(
21 "certbot_apache", "options-ssl-apache.conf")
22 )
23 CLI_DEFAULTS_DEBIAN = dict(
24 server_root="/etc/apache2",
25 vhost_root="/etc/apache2/sites-available",
26 vhost_files="*",
27 logs_root="/var/log/apache2",
28 version_cmd=['apache2ctl', '-v'],
29 define_cmd=['apache2ctl', '-t', '-D', 'DUMP_RUN_CFG'],
30 restart_cmd=['apache2ctl', 'graceful'],
31 conftest_cmd=['apache2ctl', 'configtest'],
32 enmod="a2enmod",
33 dismod="a2dismod",
34 le_vhost_ext="-le-ssl.conf",
35 handle_mods=True,
36 handle_sites=True,
37 challenge_location="/etc/apache2",
38 MOD_SSL_CONF_SRC=pkg_resources.resource_filename(
39 "certbot_apache", "options-ssl-apache.conf")
40 )
41 CLI_DEFAULTS_CENTOS = dict(
42 server_root="/etc/httpd",
43 vhost_root="/etc/httpd/conf.d",
44 vhost_files="*.conf",
45 logs_root="/var/log/httpd",
46 version_cmd=['apachectl', '-v'],
47 define_cmd=['apachectl', '-t', '-D', 'DUMP_RUN_CFG'],
48 restart_cmd=['apachectl', 'graceful'],
49 conftest_cmd=['apachectl', 'configtest'],
50 enmod=None,
51 dismod=None,
52 le_vhost_ext="-le-ssl.conf",
53 handle_mods=False,
54 handle_sites=False,
55 challenge_location="/etc/httpd/conf.d",
56 MOD_SSL_CONF_SRC=pkg_resources.resource_filename(
57 "certbot_apache", "centos-options-ssl-apache.conf")
58 )
59 CLI_DEFAULTS_GENTOO = dict(
60 server_root="/etc/apache2",
61 vhost_root="/etc/apache2/vhosts.d",
62 vhost_files="*.conf",
63 logs_root="/var/log/apache2",
64 version_cmd=['/usr/sbin/apache2', '-v'],
65 define_cmd=['apache2ctl', 'virtualhosts'],
66 restart_cmd=['apache2ctl', 'graceful'],
67 conftest_cmd=['apache2ctl', 'configtest'],
68 enmod=None,
69 dismod=None,
70 le_vhost_ext="-le-ssl.conf",
71 handle_mods=False,
72 handle_sites=False,
73 challenge_location="/etc/apache2/vhosts.d",
74 MOD_SSL_CONF_SRC=pkg_resources.resource_filename(
75 "certbot_apache", "options-ssl-apache.conf")
76 )
77 CLI_DEFAULTS_DARWIN = dict(
78 server_root="/etc/apache2",
79 vhost_root="/etc/apache2/other",
80 vhost_files="*.conf",
81 logs_root="/var/log/apache2",
82 version_cmd=['/usr/sbin/httpd', '-v'],
83 define_cmd=['/usr/sbin/httpd', '-t', '-D', 'DUMP_RUN_CFG'],
84 restart_cmd=['apachectl', 'graceful'],
85 conftest_cmd=['apachectl', 'configtest'],
86 enmod=None,
87 dismod=None,
88 le_vhost_ext="-le-ssl.conf",
89 handle_mods=False,
90 handle_sites=False,
91 challenge_location="/etc/apache2/other",
92 MOD_SSL_CONF_SRC=pkg_resources.resource_filename(
93 "certbot_apache", "options-ssl-apache.conf")
94 )
95 CLI_DEFAULTS_SUSE = dict(
96 server_root="/etc/apache2",
97 vhost_root="/etc/apache2/vhosts.d",
98 vhost_files="*.conf",
99 logs_root="/var/log/apache2",
100 version_cmd=['apache2ctl', '-v'],
101 define_cmd=['apache2ctl', '-t', '-D', 'DUMP_RUN_CFG'],
102 restart_cmd=['apache2ctl', 'graceful'],
103 conftest_cmd=['apache2ctl', 'configtest'],
104 enmod="a2enmod",
105 dismod="a2dismod",
106 le_vhost_ext="-le-ssl.conf",
107 handle_mods=False,
108 handle_sites=False,
109 challenge_location="/etc/apache2/vhosts.d",
110 MOD_SSL_CONF_SRC=pkg_resources.resource_filename(
111 "certbot_apache", "options-ssl-apache.conf")
112 )
113 CLI_DEFAULTS = {
114 "default": CLI_DEFAULTS_DEFAULT,
115 "debian": CLI_DEFAULTS_DEBIAN,
116 "ubuntu": CLI_DEFAULTS_DEBIAN,
117 "centos": CLI_DEFAULTS_CENTOS,
118 "centos linux": CLI_DEFAULTS_CENTOS,
119 "fedora": CLI_DEFAULTS_CENTOS,
120 "red hat enterprise linux server": CLI_DEFAULTS_CENTOS,
121 "rhel": CLI_DEFAULTS_CENTOS,
122 "amazon": CLI_DEFAULTS_CENTOS,
123 "gentoo": CLI_DEFAULTS_GENTOO,
124 "gentoo base system": CLI_DEFAULTS_GENTOO,
125 "darwin": CLI_DEFAULTS_DARWIN,
126 "opensuse": CLI_DEFAULTS_SUSE,
127 "suse": CLI_DEFAULTS_SUSE,
128 }
129 """CLI defaults."""
130
131 MOD_SSL_CONF_DEST = "options-ssl-apache.conf"
132 """Name of the mod_ssl config file as saved in `IConfig.config_dir`."""
133
134 AUGEAS_LENS_DIR = pkg_resources.resource_filename(
135 "certbot_apache", "augeas_lens")
136 """Path to the Augeas lens directory"""
137
138 REWRITE_HTTPS_ARGS = [
139 "^", "https://%{SERVER_NAME}%{REQUEST_URI}", "[L,NE,R=permanent]"]
140 """Apache version<2.3.9 rewrite rule arguments used for redirections to
141 https vhost"""
142
143 REWRITE_HTTPS_ARGS_WITH_END = [
144 "^", "https://%{SERVER_NAME}%{REQUEST_URI}", "[END,NE,R=permanent]"]
145 """Apache version >= 2.3.9 rewrite rule arguments used for redirections to
146 https vhost"""
147
148 OLD_REWRITE_HTTPS_ARGS = [
149 ["^", "https://%{SERVER_NAME}%{REQUEST_URI}", "[L,QSA,R=permanent]"],
150 ["^", "https://%{SERVER_NAME}%{REQUEST_URI}", "[END,QSA,R=permanent]"]]
151
152 HSTS_ARGS = ["always", "set", "Strict-Transport-Security",
153 "\"max-age=31536000\""]
154 """Apache header arguments for HSTS"""
155
156 UIR_ARGS = ["always", "set", "Content-Security-Policy",
157 "upgrade-insecure-requests"]
158
159 HEADER_ARGS = {"Strict-Transport-Security": HSTS_ARGS,
160 "Upgrade-Insecure-Requests": UIR_ARGS}
161
162
163 def os_constant(key):
164 """
165 Get a constant value for operating system
166
167 :param key: name of cli constant
168 :return: value of constant for active os
169 """
170
171 os_info = util.get_os_info()
172 try:
173 constants = CLI_DEFAULTS[os_info[0].lower()]
174 except KeyError:
175 constants = os_like_constants()
176 if not constants:
177 constants = CLI_DEFAULTS["default"]
178 return constants[key]
179
180
181 def os_like_constants():
182 """
183 Try to get constants for distribution with
184 similar layout and configuration, indicated by
185 /etc/os-release variable "LIKE"
186
187 :returns: Constants dictionary
188 :rtype: `dict`
189 """
190
191 os_like = util.get_systemd_os_like()
192 if os_like:
193 for os_name in os_like:
194 if os_name in CLI_DEFAULTS.keys():
195 return CLI_DEFAULTS[os_name]
196 return {}
197
[end of certbot-apache/certbot_apache/constants.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/certbot-apache/certbot_apache/constants.py b/certbot-apache/certbot_apache/constants.py
--- a/certbot-apache/certbot_apache/constants.py
+++ b/certbot-apache/certbot_apache/constants.py
@@ -110,6 +110,24 @@
MOD_SSL_CONF_SRC=pkg_resources.resource_filename(
"certbot_apache", "options-ssl-apache.conf")
)
+CLI_DEFAULTS_ARCH = dict(
+ server_root="/etc/httpd",
+ vhost_root="/etc/httpd/conf",
+ vhost_files="*.conf",
+ logs_root="/var/log/httpd",
+ version_cmd=['apachectl', '-v'],
+ define_cmd=['apachectl', '-t', '-D', 'DUMP_RUN_CFG'],
+ restart_cmd=['apachectl', 'graceful'],
+ conftest_cmd=['apachectl', 'configtest'],
+ enmod=None,
+ dismod=None,
+ le_vhost_ext="-le-ssl.conf",
+ handle_mods=False,
+ handle_sites=False,
+ challenge_location="/etc/httpd/conf",
+ MOD_SSL_CONF_SRC=pkg_resources.resource_filename(
+ "certbot_apache", "options-ssl-apache.conf")
+)
CLI_DEFAULTS = {
"default": CLI_DEFAULTS_DEFAULT,
"debian": CLI_DEFAULTS_DEBIAN,
@@ -125,6 +143,7 @@
"darwin": CLI_DEFAULTS_DARWIN,
"opensuse": CLI_DEFAULTS_SUSE,
"suse": CLI_DEFAULTS_SUSE,
+ "arch": CLI_DEFAULTS_ARCH,
}
"""CLI defaults."""
|
{"golden_diff": "diff --git a/certbot-apache/certbot_apache/constants.py b/certbot-apache/certbot_apache/constants.py\n--- a/certbot-apache/certbot_apache/constants.py\n+++ b/certbot-apache/certbot_apache/constants.py\n@@ -110,6 +110,24 @@\n MOD_SSL_CONF_SRC=pkg_resources.resource_filename(\n \"certbot_apache\", \"options-ssl-apache.conf\")\n )\n+CLI_DEFAULTS_ARCH = dict(\n+ server_root=\"/etc/httpd\",\n+ vhost_root=\"/etc/httpd/conf\",\n+ vhost_files=\"*.conf\",\n+ logs_root=\"/var/log/httpd\",\n+ version_cmd=['apachectl', '-v'],\n+ define_cmd=['apachectl', '-t', '-D', 'DUMP_RUN_CFG'],\n+ restart_cmd=['apachectl', 'graceful'],\n+ conftest_cmd=['apachectl', 'configtest'],\n+ enmod=None,\n+ dismod=None,\n+ le_vhost_ext=\"-le-ssl.conf\",\n+ handle_mods=False,\n+ handle_sites=False,\n+ challenge_location=\"/etc/httpd/conf\",\n+ MOD_SSL_CONF_SRC=pkg_resources.resource_filename(\n+ \"certbot_apache\", \"options-ssl-apache.conf\")\n+)\n CLI_DEFAULTS = {\n \"default\": CLI_DEFAULTS_DEFAULT,\n \"debian\": CLI_DEFAULTS_DEBIAN,\n@@ -125,6 +143,7 @@\n \"darwin\": CLI_DEFAULTS_DARWIN,\n \"opensuse\": CLI_DEFAULTS_SUSE,\n \"suse\": CLI_DEFAULTS_SUSE,\n+ \"arch\": CLI_DEFAULTS_ARCH,\n }\n \"\"\"CLI defaults.\"\"\"\n", "issue": "Cannot find Apache control command apache2ctl on Arch Linux\nI have this problem when using certbot for apache following this guide http://letsencrypt.readthedocs.io/en/latest/using.html#apache\r\n\r\n```\r\n$ sudo certbot --apache\r\nSaving debug log to /var/log/letsencrypt/letsencrypt.log\r\nFailed to find apache2ctl in expanded PATH: /usr/local/sbin:/usr/local/bin:/usr/bin:/usr/bin/site_perl:/usr/bin/vendor_perl:/usr/bin/core_perl:/usr/sbin\r\nThe apache plugin is not working; there may be problems with your existing configuration.\r\nThe error was: NoInstallationError('Cannot find Apache control command apache2ctl',)\r\n```\r\nI am on Arch Linux, and it seems that there is no apache2ctl for Arch but certbot requires it?\r\n\r\nI tried to installed it but no luck:\r\n\r\n```\r\n$ sudo pacman -S apache2\r\nerror: target not found: apache2\r\n```\r\n\r\nMy Apache's version:\r\n\r\n```\r\n$ httpd -v\r\nServer version: Apache/2.4.25 (Unix)\r\nServer built: Dec 20 2016 13:02:2\r\n```\r\n\n", "before_files": [{"content": "\"\"\"Apache plugin constants.\"\"\"\nimport pkg_resources\nfrom certbot import util\n\nCLI_DEFAULTS_DEFAULT = dict(\n server_root=\"/etc/apache2\",\n vhost_root=\"/etc/apache2/sites-available\",\n vhost_files=\"*\",\n logs_root=\"/var/log/apache2\",\n version_cmd=['apache2ctl', '-v'],\n define_cmd=['apache2ctl', '-t', '-D', 'DUMP_RUN_CFG'],\n restart_cmd=['apache2ctl', 'graceful'],\n conftest_cmd=['apache2ctl', 'configtest'],\n enmod=None,\n dismod=None,\n le_vhost_ext=\"-le-ssl.conf\",\n handle_mods=False,\n handle_sites=False,\n challenge_location=\"/etc/apache2\",\n MOD_SSL_CONF_SRC=pkg_resources.resource_filename(\n \"certbot_apache\", \"options-ssl-apache.conf\")\n)\nCLI_DEFAULTS_DEBIAN = dict(\n server_root=\"/etc/apache2\",\n vhost_root=\"/etc/apache2/sites-available\",\n vhost_files=\"*\",\n logs_root=\"/var/log/apache2\",\n version_cmd=['apache2ctl', '-v'],\n define_cmd=['apache2ctl', '-t', '-D', 'DUMP_RUN_CFG'],\n restart_cmd=['apache2ctl', 'graceful'],\n conftest_cmd=['apache2ctl', 'configtest'],\n enmod=\"a2enmod\",\n dismod=\"a2dismod\",\n le_vhost_ext=\"-le-ssl.conf\",\n handle_mods=True,\n handle_sites=True,\n challenge_location=\"/etc/apache2\",\n MOD_SSL_CONF_SRC=pkg_resources.resource_filename(\n \"certbot_apache\", 
\"options-ssl-apache.conf\")\n)\nCLI_DEFAULTS_CENTOS = dict(\n server_root=\"/etc/httpd\",\n vhost_root=\"/etc/httpd/conf.d\",\n vhost_files=\"*.conf\",\n logs_root=\"/var/log/httpd\",\n version_cmd=['apachectl', '-v'],\n define_cmd=['apachectl', '-t', '-D', 'DUMP_RUN_CFG'],\n restart_cmd=['apachectl', 'graceful'],\n conftest_cmd=['apachectl', 'configtest'],\n enmod=None,\n dismod=None,\n le_vhost_ext=\"-le-ssl.conf\",\n handle_mods=False,\n handle_sites=False,\n challenge_location=\"/etc/httpd/conf.d\",\n MOD_SSL_CONF_SRC=pkg_resources.resource_filename(\n \"certbot_apache\", \"centos-options-ssl-apache.conf\")\n)\nCLI_DEFAULTS_GENTOO = dict(\n server_root=\"/etc/apache2\",\n vhost_root=\"/etc/apache2/vhosts.d\",\n vhost_files=\"*.conf\",\n logs_root=\"/var/log/apache2\",\n version_cmd=['/usr/sbin/apache2', '-v'],\n define_cmd=['apache2ctl', 'virtualhosts'],\n restart_cmd=['apache2ctl', 'graceful'],\n conftest_cmd=['apache2ctl', 'configtest'],\n enmod=None,\n dismod=None,\n le_vhost_ext=\"-le-ssl.conf\",\n handle_mods=False,\n handle_sites=False,\n challenge_location=\"/etc/apache2/vhosts.d\",\n MOD_SSL_CONF_SRC=pkg_resources.resource_filename(\n \"certbot_apache\", \"options-ssl-apache.conf\")\n)\nCLI_DEFAULTS_DARWIN = dict(\n server_root=\"/etc/apache2\",\n vhost_root=\"/etc/apache2/other\",\n vhost_files=\"*.conf\",\n logs_root=\"/var/log/apache2\",\n version_cmd=['/usr/sbin/httpd', '-v'],\n define_cmd=['/usr/sbin/httpd', '-t', '-D', 'DUMP_RUN_CFG'],\n restart_cmd=['apachectl', 'graceful'],\n conftest_cmd=['apachectl', 'configtest'],\n enmod=None,\n dismod=None,\n le_vhost_ext=\"-le-ssl.conf\",\n handle_mods=False,\n handle_sites=False,\n challenge_location=\"/etc/apache2/other\",\n MOD_SSL_CONF_SRC=pkg_resources.resource_filename(\n \"certbot_apache\", \"options-ssl-apache.conf\")\n)\nCLI_DEFAULTS_SUSE = dict(\n server_root=\"/etc/apache2\",\n vhost_root=\"/etc/apache2/vhosts.d\",\n vhost_files=\"*.conf\",\n logs_root=\"/var/log/apache2\",\n version_cmd=['apache2ctl', '-v'],\n define_cmd=['apache2ctl', '-t', '-D', 'DUMP_RUN_CFG'],\n restart_cmd=['apache2ctl', 'graceful'],\n conftest_cmd=['apache2ctl', 'configtest'],\n enmod=\"a2enmod\",\n dismod=\"a2dismod\",\n le_vhost_ext=\"-le-ssl.conf\",\n handle_mods=False,\n handle_sites=False,\n challenge_location=\"/etc/apache2/vhosts.d\",\n MOD_SSL_CONF_SRC=pkg_resources.resource_filename(\n \"certbot_apache\", \"options-ssl-apache.conf\")\n)\nCLI_DEFAULTS = {\n \"default\": CLI_DEFAULTS_DEFAULT,\n \"debian\": CLI_DEFAULTS_DEBIAN,\n \"ubuntu\": CLI_DEFAULTS_DEBIAN,\n \"centos\": CLI_DEFAULTS_CENTOS,\n \"centos linux\": CLI_DEFAULTS_CENTOS,\n \"fedora\": CLI_DEFAULTS_CENTOS,\n \"red hat enterprise linux server\": CLI_DEFAULTS_CENTOS,\n \"rhel\": CLI_DEFAULTS_CENTOS,\n \"amazon\": CLI_DEFAULTS_CENTOS,\n \"gentoo\": CLI_DEFAULTS_GENTOO,\n \"gentoo base system\": CLI_DEFAULTS_GENTOO,\n \"darwin\": CLI_DEFAULTS_DARWIN,\n \"opensuse\": CLI_DEFAULTS_SUSE,\n \"suse\": CLI_DEFAULTS_SUSE,\n}\n\"\"\"CLI defaults.\"\"\"\n\nMOD_SSL_CONF_DEST = \"options-ssl-apache.conf\"\n\"\"\"Name of the mod_ssl config file as saved in `IConfig.config_dir`.\"\"\"\n\nAUGEAS_LENS_DIR = pkg_resources.resource_filename(\n \"certbot_apache\", \"augeas_lens\")\n\"\"\"Path to the Augeas lens directory\"\"\"\n\nREWRITE_HTTPS_ARGS = [\n \"^\", \"https://%{SERVER_NAME}%{REQUEST_URI}\", \"[L,NE,R=permanent]\"]\n\"\"\"Apache version<2.3.9 rewrite rule arguments used for redirections to\nhttps vhost\"\"\"\n\nREWRITE_HTTPS_ARGS_WITH_END = [\n \"^\", 
\"https://%{SERVER_NAME}%{REQUEST_URI}\", \"[END,NE,R=permanent]\"]\n\"\"\"Apache version >= 2.3.9 rewrite rule arguments used for redirections to\n https vhost\"\"\"\n\nOLD_REWRITE_HTTPS_ARGS = [\n [\"^\", \"https://%{SERVER_NAME}%{REQUEST_URI}\", \"[L,QSA,R=permanent]\"],\n [\"^\", \"https://%{SERVER_NAME}%{REQUEST_URI}\", \"[END,QSA,R=permanent]\"]]\n\nHSTS_ARGS = [\"always\", \"set\", \"Strict-Transport-Security\",\n \"\\\"max-age=31536000\\\"\"]\n\"\"\"Apache header arguments for HSTS\"\"\"\n\nUIR_ARGS = [\"always\", \"set\", \"Content-Security-Policy\",\n \"upgrade-insecure-requests\"]\n\nHEADER_ARGS = {\"Strict-Transport-Security\": HSTS_ARGS,\n \"Upgrade-Insecure-Requests\": UIR_ARGS}\n\n\ndef os_constant(key):\n \"\"\"\n Get a constant value for operating system\n\n :param key: name of cli constant\n :return: value of constant for active os\n \"\"\"\n\n os_info = util.get_os_info()\n try:\n constants = CLI_DEFAULTS[os_info[0].lower()]\n except KeyError:\n constants = os_like_constants()\n if not constants:\n constants = CLI_DEFAULTS[\"default\"]\n return constants[key]\n\n\ndef os_like_constants():\n \"\"\"\n Try to get constants for distribution with\n similar layout and configuration, indicated by\n /etc/os-release variable \"LIKE\"\n\n :returns: Constants dictionary\n :rtype: `dict`\n \"\"\"\n\n os_like = util.get_systemd_os_like()\n if os_like:\n for os_name in os_like:\n if os_name in CLI_DEFAULTS.keys():\n return CLI_DEFAULTS[os_name]\n return {}\n", "path": "certbot-apache/certbot_apache/constants.py"}]}
| 3,044 | 375 |
gh_patches_debug_4231
|
rasdani/github-patches
|
git_diff
|
mlflow__mlflow-9410
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[BUG] basic-auth alembic migration crash on remote database
### Issues Policy acknowledgement
- [X] I have read and agree to submit bug reports in accordance with the [issues policy](https://www.github.com/mlflow/mlflow/blob/master/ISSUE_POLICY.md)
### Willingness to contribute
Yes. I can contribute a fix for this bug independently.
### MLflow version
- Client: 1.5.0
- Tracking server: 1.5.0
### System information
- **Debian GNU/Linux 10 (buster)**:
- **Python 3.8.17**:
- **SQLAlchemy version: 2.0.19**
### Describe the problem
Alembic migration for the basic-auth app crashes because an obfuscated password is passed to the Alembic config.
This bug seems to be related to the SQLAlchemy version: migration works with SQLAlchemy 1.4.19 but not with 2.0.19; it seems they changed the behaviour of the string representation of URLs.
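A minimal illustration of the behaviour described above (a sketch; the exact rendering depends on the installed SQLAlchemy version):

```python
from sqlalchemy.engine import make_url

url = make_url("postgresql://database_user:database_password@host:5432/database_name")

# Under SQLAlchemy 2.0.19 (as reported here) the plain str() form masks the password,
# so handing it to Alembic sends 'database_user:***@host' and authentication fails.
print(str(url))
print(url.render_as_string(hide_password=False))  # keeps the real password
```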
### Tracking information
<!-- PLEASE KEEP BACKTICKS AND CHECK PREVIEW -->
```shell
REPLACE_ME
```
### Code to reproduce issue
<!-- PLEASE KEEP BACKTICKS AND CHECK PREVIEW -->
```
python -m mlflow.server.auth db upgrade --url 'postgresql://database_user:database_password@host:5432/database_name'
```
### Stack trace
<!-- PLEASE KEEP BACKTICKS AND CHECK PREVIEW -->
```
Traceback (most recent call last):
File "/home/turuania/mlflow_test/env/lib/python3.8/site-packages/sqlalchemy/engine/base.py", line 145, in __init__
self._dbapi_connection = engine.raw_connection()
File "/home/turuania/mlflow_test/env/lib/python3.8/site-packages/sqlalchemy/engine/base.py", line 3288, in raw_connection
return self.pool.connect()
File "/home/turuania/mlflow_test/env/lib/python3.8/site-packages/sqlalchemy/pool/base.py", line 452, in connect
return _ConnectionFairy._checkout(self)
File "/home/turuania/mlflow_test/env/lib/python3.8/site-packages/sqlalchemy/pool/base.py", line 1267, in _checkout
fairy = _ConnectionRecord.checkout(pool)
File "/home/turuania/mlflow_test/env/lib/python3.8/site-packages/sqlalchemy/pool/base.py", line 716, in checkout
rec = pool._do_get()
File "/home/turuania/mlflow_test/env/lib/python3.8/site-packages/sqlalchemy/pool/impl.py", line 284, in _do_get
return self._create_connection()
File "/home/turuania/mlflow_test/env/lib/python3.8/site-packages/sqlalchemy/pool/base.py", line 393, in _create_connection
return _ConnectionRecord(self)
File "/home/turuania/mlflow_test/env/lib/python3.8/site-packages/sqlalchemy/pool/base.py", line 678, in __init__
self.__connect()
File "/home/turuania/mlflow_test/env/lib/python3.8/site-packages/sqlalchemy/pool/base.py", line 903, in __connect
pool.logger.debug("Error on connect(): %s", e)
File "/home/turuania/mlflow_test/env/lib/python3.8/site-packages/sqlalchemy/util/langhelpers.py", line 147, in __exit__
raise exc_value.with_traceback(exc_tb)
File "/home/turuania/mlflow_test/env/lib/python3.8/site-packages/sqlalchemy/pool/base.py", line 898, in __connect
self.dbapi_connection = connection = pool._invoke_creator(self)
File "/home/turuania/mlflow_test/env/lib/python3.8/site-packages/sqlalchemy/engine/create.py", line 637, in connect
return dialect.connect(*cargs, **cparams)
File "/home/turuania/mlflow_test/env/lib/python3.8/site-packages/sqlalchemy/engine/default.py", line 615, in connect
return self.loaded_dbapi.connect(*cargs, **cparams)
File "/home/turuania/mlflow_test/env/lib/python3.8/site-packages/psycopg2/__init__.py", line 122, in connect
conn = _connect(dsn, connection_factory=connection_factory, **kwasync)
sqlalchemy.exc.OperationalError: (psycopg2.OperationalError) connection to server at "host" (host ip), port 5432 failed: FATAL: password authentication failed for user "db_user"
connection to server at "host" (host_ip), port 5432 failed: FATAL: password authentication failed for user "db_user"
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/usr/local/lib/python3.8/runpy.py", line 194, in _run_module_as_main
return _run_code(code, main_globals, None,
File "/usr/local/lib/python3.8/runpy.py", line 87, in _run_code
exec(code, run_globals)
File "/home/turuania/mlflow_test/env/lib/python3.8/site-packages/mlflow/server/auth/__main__.py", line 5, in <module>
commands()
File "/home/turuania/mlflow_test/env/lib/python3.8/site-packages/click/core.py", line 1157, in __call__
return self.main(*args, **kwargs)
File "/home/turuania/mlflow_test/env/lib/python3.8/site-packages/click/core.py", line 1078, in main
rv = self.invoke(ctx)
File "/home/turuania/mlflow_test/env/lib/python3.8/site-packages/click/core.py", line 1688, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/home/turuania/mlflow_test/env/lib/python3.8/site-packages/click/core.py", line 1688, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/home/turuania/mlflow_test/env/lib/python3.8/site-packages/click/core.py", line 1434, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/home/turuania/mlflow_test/env/lib/python3.8/site-packages/click/core.py", line 783, in invoke
return __callback(*args, **kwargs)
File "/home/turuania/mlflow_test/env/lib/python3.8/site-packages/mlflow/server/auth/db/cli.py", line 19, in upgrade
utils.migrate(engine, revision)
File "/home/turuania/mlflow_test/env/lib/python3.8/site-packages/mlflow/server/auth/db/utils.py", line 28, in migrate
upgrade(alembic_cfg, revision)
File "/home/turuania/mlflow_test/env/lib/python3.8/site-packages/alembic/command.py", line 385, in upgrade
script.run_env()
File "/home/turuania/mlflow_test/env/lib/python3.8/site-packages/alembic/script/base.py", line 578, in run_env
util.load_python_file(self.dir, "env.py")
File "/home/turuania/mlflow_test/env/lib/python3.8/site-packages/alembic/util/pyfiles.py", line 93, in load_python_file
module = load_module_py(module_id, path)
File "/home/turuania/mlflow_test/env/lib/python3.8/site-packages/alembic/util/pyfiles.py", line 109, in load_module_py
spec.loader.exec_module(module) # type: ignore
File "<frozen importlib._bootstrap_external>", line 843, in exec_module
File "<frozen importlib._bootstrap>", line 219, in _call_with_frames_removed
File "/home/turuania/mlflow_test/env/lib/python3.8/site-packages/mlflow/server/auth/db/migrations/env.py", line 79, in <module>
run_migrations_online()
File "/home/turuania/mlflow_test/env/lib/python3.8/site-packages/mlflow/server/auth/db/migrations/env.py", line 69, in run_migrations_online
with connectable.connect() as connection:
File "/home/turuania/mlflow_test/env/lib/python3.8/site-packages/sqlalchemy/engine/base.py", line 3264, in connect
return self._connection_cls(self)
File "/home/turuania/mlflow_test/env/lib/python3.8/site-packages/sqlalchemy/engine/base.py", line 147, in __init__
Connection._handle_dbapi_exception_noconnection(
File "/home/turuania/mlflow_test/env/lib/python3.8/site-packages/sqlalchemy/engine/base.py", line 2426, in _handle_dbapi_exception_noconnection
raise sqlalchemy_exception.with_traceback(exc_info[2]) from e
File "/home/turuania/mlflow_test/env/lib/python3.8/site-packages/sqlalchemy/engine/base.py", line 145, in __init__
self._dbapi_connection = engine.raw_connection()
File "/home/turuania/mlflow_test/env/lib/python3.8/site-packages/sqlalchemy/engine/base.py", line 3288, in raw_connection
return self.pool.connect()
File "/home/turuania/mlflow_test/env/lib/python3.8/site-packages/sqlalchemy/pool/base.py", line 452, in connect
return _ConnectionFairy._checkout(self)
File "/home/turuania/mlflow_test/env/lib/python3.8/site-packages/sqlalchemy/pool/base.py", line 1267, in _checkout
fairy = _ConnectionRecord.checkout(pool)
File "/home/turuania/mlflow_test/env/lib/python3.8/site-packages/sqlalchemy/pool/base.py", line 716, in checkout
rec = pool._do_get()
File "/home/turuania/mlflow_test/env/lib/python3.8/site-packages/sqlalchemy/pool/impl.py", line 284, in _do_get
return self._create_connection()
File "/home/turuania/mlflow_test/env/lib/python3.8/site-packages/sqlalchemy/pool/base.py", line 393, in _create_connection
return _ConnectionRecord(self)
File "/home/turuania/mlflow_test/env/lib/python3.8/site-packages/sqlalchemy/pool/base.py", line 678, in __init__
self.__connect()
File "/home/turuania/mlflow_test/env/lib/python3.8/site-packages/sqlalchemy/pool/base.py", line 903, in __connect
pool.logger.debug("Error on connect(): %s", e)
File "/home/turuania/mlflow_test/env/lib/python3.8/site-packages/sqlalchemy/util/langhelpers.py", line 147, in __exit__
raise exc_value.with_traceback(exc_tb)
File "/home/turuania/mlflow_test/env/lib/python3.8/site-packages/sqlalchemy/pool/base.py", line 898, in __connect
self.dbapi_connection = connection = pool._invoke_creator(self)
File "/home/turuania/mlflow_test/env/lib/python3.8/site-packages/sqlalchemy/engine/create.py", line 637, in connect
return dialect.connect(*cargs, **cparams)
File "/home/turuania/mlflow_test/env/lib/python3.8/site-packages/sqlalchemy/engine/default.py", line 615, in connect
return self.loaded_dbapi.connect(*cargs, **cparams)
File "/home/turuania/mlflow_test/env/lib/python3.8/site-packages/psycopg2/__init__.py", line 122, in connect
conn = _connect(dsn, connection_factory=connection_factory, **kwasync)
sqlalchemy.exc.OperationalError: (psycopg2.OperationalError) connection to server at "host" (host ip), port 5432 failed: FATAL: password authentication failed for user "db_user"
connection to server at "host" (host_ip), port 5432 failed: FATAL: password authentication failed for user "db_user"
(Background on this error at: https://sqlalche.me/e/20/e3q8)
```
### Other info / logs
<!-- PLEASE KEEP BACKTICKS AND CHECK PREVIEW -->
```
REPLACE_ME
```
### What component(s) does this bug affect?
- [ ] `area/artifacts`: Artifact stores and artifact logging
- [ ] `area/build`: Build and test infrastructure for MLflow
- [ ] `area/docs`: MLflow documentation pages
- [ ] `area/examples`: Example code
- [ ] `area/gateway`: AI Gateway service, Gateway client APIs, third-party Gateway integrations
- [ ] `area/model-registry`: Model Registry service, APIs, and the fluent client calls for Model Registry
- [ ] `area/models`: MLmodel format, model serialization/deserialization, flavors
- [ ] `area/recipes`: Recipes, Recipe APIs, Recipe configs, Recipe Templates
- [ ] `area/projects`: MLproject format, project running backends
- [ ] `area/scoring`: MLflow Model server, model deployment tools, Spark UDFs
- [X] `area/server-infra`: MLflow Tracking server backend
- [ ] `area/tracking`: Tracking Service, tracking client APIs, autologging
### What interface(s) does this bug affect?
- [ ] `area/uiux`: Front-end, user experience, plotting, JavaScript, JavaScript dev server
- [ ] `area/docker`: Docker use across MLflow's components, such as MLflow Projects and MLflow Models
- [X] `area/sqlalchemy`: Use of SQLAlchemy in the Tracking Service or Model Registry
- [ ] `area/windows`: Windows support
### What language(s) does this bug affect?
- [ ] `language/r`: R APIs and clients
- [ ] `language/java`: Java APIs and clients
- [ ] `language/new`: Proposals for new client languages
### What integration(s) does this bug affect?
- [ ] `integrations/azure`: Azure and Azure ML integrations
- [ ] `integrations/sagemaker`: SageMaker integrations
- [ ] `integrations/databricks`: Databricks integrations
</issue>
<code>
[start of mlflow/server/auth/db/utils.py]
1 from pathlib import Path
2
3 from sqlalchemy.engine.base import Engine
4 from alembic.command import upgrade
5 from alembic.config import Config
6 from alembic.script import ScriptDirectory
7 from alembic.migration import MigrationContext
8
9
10 def _get_alembic_dir() -> str:
11 return Path(__file__).parent / "migrations"
12
13
14 def _get_alembic_config(url: str) -> Config:
15 alembic_dir = _get_alembic_dir()
16 alembic_ini_path = alembic_dir / "alembic.ini"
17 alembic_cfg = Config(alembic_ini_path)
18 alembic_cfg.set_main_option("script_location", str(alembic_dir))
19 alembic_cfg.set_main_option("sqlalchemy.url", url)
20 return alembic_cfg
21
22
23 def migrate(engine: Engine, revision: str) -> None:
24 alembic_cfg = _get_alembic_config(str(engine.url))
25 with engine.begin() as conn:
26 alembic_cfg.attributes["connection"] = conn
27 upgrade(alembic_cfg, revision)
28
29
30 def migrate_if_needed(engine: Engine, revision: str) -> None:
31 alembic_cfg = _get_alembic_config(str(engine.url))
32 script_dir = ScriptDirectory.from_config(alembic_cfg)
33 with engine.begin() as conn:
34 context = MigrationContext.configure(conn)
35 if context.get_current_revision() != script_dir.get_current_head():
36 upgrade(alembic_cfg, revision)
37
[end of mlflow/server/auth/db/utils.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/mlflow/server/auth/db/utils.py b/mlflow/server/auth/db/utils.py
--- a/mlflow/server/auth/db/utils.py
+++ b/mlflow/server/auth/db/utils.py
@@ -21,7 +21,7 @@
def migrate(engine: Engine, revision: str) -> None:
- alembic_cfg = _get_alembic_config(str(engine.url))
+ alembic_cfg = _get_alembic_config(engine.url.render_as_string(hide_password=False))
with engine.begin() as conn:
alembic_cfg.attributes["connection"] = conn
upgrade(alembic_cfg, revision)
|
{"golden_diff": "diff --git a/mlflow/server/auth/db/utils.py b/mlflow/server/auth/db/utils.py\n--- a/mlflow/server/auth/db/utils.py\n+++ b/mlflow/server/auth/db/utils.py\n@@ -21,7 +21,7 @@\n \n \n def migrate(engine: Engine, revision: str) -> None:\n- alembic_cfg = _get_alembic_config(str(engine.url))\n+ alembic_cfg = _get_alembic_config(engine.url.render_as_string(hide_password=False))\n with engine.begin() as conn:\n alembic_cfg.attributes[\"connection\"] = conn\n upgrade(alembic_cfg, revision)\n", "issue": "[BUG] basic-auth alembic migration crash on remote database\n### Issues Policy acknowledgement\n\n- [X] I have read and agree to submit bug reports in accordance with the [issues policy](https://www.github.com/mlflow/mlflow/blob/master/ISSUE_POLICY.md)\n\n### Willingness to contribute\n\nYes. I can contribute a fix for this bug independently.\n\n### MLflow version\n\n- Client: 1.5.0\r\n- Tracking server: 1.5.0\r\n\n\n### System information\n\n- **Debian GNU/Linux 10 (buster)**:\r\n- **Python 3.8.17 **:\r\n\r\n- **SQLAlchemy version: 2.0.19**\n\n### Describe the problem\n\nAlembic migration for the basic-auth app crashes due to obfuscated password being passed to the alembic config\r\n\r\nThis bug seems to be related to the SQLAlchemy version, migration works with SQLAlchemy 1.4.19 but not with 2.0.19, seems like they change the behaviour of the str representation of url's.\n\n### Tracking information\n\n<!-- PLEASE KEEP BACKTICKS AND CHECK PREVIEW -->\r\n```shell\r\nREPLACE_ME\r\n```\r\n\n\n### Code to reproduce issue\n\n<!-- PLEASE KEEP BACKTICKS AND CHECK PREVIEW -->\r\n```\r\npython -m mlflow.server.auth db upgrade --url 'postgresql://database_user:database_password@host:5432/database_name'\r\n```\r\n\n\n### Stack trace\n\n<!-- PLEASE KEEP BACKTICKS AND CHECK PREVIEW -->\r\n```\r\nTraceback (most recent call last):\r\n File \"/home/turuania/mlflow_test/env/lib/python3.8/site-packages/sqlalchemy/engine/base.py\", line 145, in __init__\r\n self._dbapi_connection = engine.raw_connection()\r\n File \"/home/turuania/mlflow_test/env/lib/python3.8/site-packages/sqlalchemy/engine/base.py\", line 3288, in raw_connection\r\n return self.pool.connect()\r\n File \"/home/turuania/mlflow_test/env/lib/python3.8/site-packages/sqlalchemy/pool/base.py\", line 452, in connect\r\n return _ConnectionFairy._checkout(self)\r\n File \"/home/turuania/mlflow_test/env/lib/python3.8/site-packages/sqlalchemy/pool/base.py\", line 1267, in _checkout\r\n fairy = _ConnectionRecord.checkout(pool)\r\n File \"/home/turuania/mlflow_test/env/lib/python3.8/site-packages/sqlalchemy/pool/base.py\", line 716, in checkout\r\n rec = pool._do_get()\r\n File \"/home/turuania/mlflow_test/env/lib/python3.8/site-packages/sqlalchemy/pool/impl.py\", line 284, in _do_get\r\n return self._create_connection()\r\n File \"/home/turuania/mlflow_test/env/lib/python3.8/site-packages/sqlalchemy/pool/base.py\", line 393, in _create_connection\r\n return _ConnectionRecord(self)\r\n File \"/home/turuania/mlflow_test/env/lib/python3.8/site-packages/sqlalchemy/pool/base.py\", line 678, in __init__\r\n self.__connect()\r\n File \"/home/turuania/mlflow_test/env/lib/python3.8/site-packages/sqlalchemy/pool/base.py\", line 903, in __connect\r\n pool.logger.debug(\"Error on connect(): %s\", e)\r\n File \"/home/turuania/mlflow_test/env/lib/python3.8/site-packages/sqlalchemy/util/langhelpers.py\", line 147, in __exit__\r\n raise exc_value.with_traceback(exc_tb)\r\n File 
\"/home/turuania/mlflow_test/env/lib/python3.8/site-packages/sqlalchemy/pool/base.py\", line 898, in __connect\r\n self.dbapi_connection = connection = pool._invoke_creator(self)\r\n File \"/home/turuania/mlflow_test/env/lib/python3.8/site-packages/sqlalchemy/engine/create.py\", line 637, in connect\r\n return dialect.connect(*cargs, **cparams)\r\n File \"/home/turuania/mlflow_test/env/lib/python3.8/site-packages/sqlalchemy/engine/default.py\", line 615, in connect\r\n return self.loaded_dbapi.connect(*cargs, **cparams)\r\n File \"/home/turuania/mlflow_test/env/lib/python3.8/site-packages/psycopg2/__init__.py\", line 122, in connect\r\n conn = _connect(dsn, connection_factory=connection_factory, **kwasync)\r\nsqlalchemy.exc.OperationalError: (psycopg2.OperationalError) connection to server at \"host\" (host ip), port 5432 failed: FATAL: password authentication failed for user \"db_user\"\r\nconnection to server at \"host\" (host_ip), port 5432 failed: FATAL: password authentication failed for user \"db_user\"\r\n\r\n\r\nThe above exception was the direct cause of the following exception:\r\n\r\nTraceback (most recent call last):\r\n File \"/usr/local/lib/python3.8/runpy.py\", line 194, in _run_module_as_main\r\n return _run_code(code, main_globals, None,\r\n File \"/usr/local/lib/python3.8/runpy.py\", line 87, in _run_code\r\n exec(code, run_globals)\r\n File \"/home/turuania/mlflow_test/env/lib/python3.8/site-packages/mlflow/server/auth/__main__.py\", line 5, in <module>\r\n commands()\r\n File \"/home/turuania/mlflow_test/env/lib/python3.8/site-packages/click/core.py\", line 1157, in __call__\r\n return self.main(*args, **kwargs)\r\n File \"/home/turuania/mlflow_test/env/lib/python3.8/site-packages/click/core.py\", line 1078, in main\r\n rv = self.invoke(ctx)\r\n File \"/home/turuania/mlflow_test/env/lib/python3.8/site-packages/click/core.py\", line 1688, in invoke\r\n return _process_result(sub_ctx.command.invoke(sub_ctx))\r\n File \"/home/turuania/mlflow_test/env/lib/python3.8/site-packages/click/core.py\", line 1688, in invoke\r\n return _process_result(sub_ctx.command.invoke(sub_ctx))\r\n File \"/home/turuania/mlflow_test/env/lib/python3.8/site-packages/click/core.py\", line 1434, in invoke\r\n return ctx.invoke(self.callback, **ctx.params)\r\n File \"/home/turuania/mlflow_test/env/lib/python3.8/site-packages/click/core.py\", line 783, in invoke\r\n return __callback(*args, **kwargs)\r\n File \"/home/turuania/mlflow_test/env/lib/python3.8/site-packages/mlflow/server/auth/db/cli.py\", line 19, in upgrade\r\n utils.migrate(engine, revision)\r\n File \"/home/turuania/mlflow_test/env/lib/python3.8/site-packages/mlflow/server/auth/db/utils.py\", line 28, in migrate\r\n upgrade(alembic_cfg, revision)\r\n File \"/home/turuania/mlflow_test/env/lib/python3.8/site-packages/alembic/command.py\", line 385, in upgrade\r\n script.run_env()\r\n File \"/home/turuania/mlflow_test/env/lib/python3.8/site-packages/alembic/script/base.py\", line 578, in run_env\r\n util.load_python_file(self.dir, \"env.py\")\r\n File \"/home/turuania/mlflow_test/env/lib/python3.8/site-packages/alembic/util/pyfiles.py\", line 93, in load_python_file\r\n module = load_module_py(module_id, path)\r\n File \"/home/turuania/mlflow_test/env/lib/python3.8/site-packages/alembic/util/pyfiles.py\", line 109, in load_module_py\r\n spec.loader.exec_module(module) # type: ignore\r\n File \"<frozen importlib._bootstrap_external>\", line 843, in exec_module\r\n File \"<frozen importlib._bootstrap>\", line 219, in 
_call_with_frames_removed\r\n File \"/home/turuania/mlflow_test/env/lib/python3.8/site-packages/mlflow/server/auth/db/migrations/env.py\", line 79, in <module>\r\n run_migrations_online()\r\n File \"/home/turuania/mlflow_test/env/lib/python3.8/site-packages/mlflow/server/auth/db/migrations/env.py\", line 69, in run_migrations_online\r\n with connectable.connect() as connection:\r\n File \"/home/turuania/mlflow_test/env/lib/python3.8/site-packages/sqlalchemy/engine/base.py\", line 3264, in connect\r\n return self._connection_cls(self)\r\n File \"/home/turuania/mlflow_test/env/lib/python3.8/site-packages/sqlalchemy/engine/base.py\", line 147, in __init__\r\n Connection._handle_dbapi_exception_noconnection(\r\n File \"/home/turuania/mlflow_test/env/lib/python3.8/site-packages/sqlalchemy/engine/base.py\", line 2426, in _handle_dbapi_exception_noconnection\r\n raise sqlalchemy_exception.with_traceback(exc_info[2]) from e\r\n File \"/home/turuania/mlflow_test/env/lib/python3.8/site-packages/sqlalchemy/engine/base.py\", line 145, in __init__\r\n self._dbapi_connection = engine.raw_connection()\r\n File \"/home/turuania/mlflow_test/env/lib/python3.8/site-packages/sqlalchemy/engine/base.py\", line 3288, in raw_connection\r\n return self.pool.connect()\r\n File \"/home/turuania/mlflow_test/env/lib/python3.8/site-packages/sqlalchemy/pool/base.py\", line 452, in connect\r\n return _ConnectionFairy._checkout(self)\r\n File \"/home/turuania/mlflow_test/env/lib/python3.8/site-packages/sqlalchemy/pool/base.py\", line 1267, in _checkout\r\n fairy = _ConnectionRecord.checkout(pool)\r\n File \"/home/turuania/mlflow_test/env/lib/python3.8/site-packages/sqlalchemy/pool/base.py\", line 716, in checkout\r\n rec = pool._do_get()\r\n File \"/home/turuania/mlflow_test/env/lib/python3.8/site-packages/sqlalchemy/pool/impl.py\", line 284, in _do_get\r\n return self._create_connection()\r\n File \"/home/turuania/mlflow_test/env/lib/python3.8/site-packages/sqlalchemy/pool/base.py\", line 393, in _create_connection\r\n return _ConnectionRecord(self)\r\n File \"/home/turuania/mlflow_test/env/lib/python3.8/site-packages/sqlalchemy/pool/base.py\", line 678, in __init__\r\n self.__connect()\r\n File \"/home/turuania/mlflow_test/env/lib/python3.8/site-packages/sqlalchemy/pool/base.py\", line 903, in __connect\r\n pool.logger.debug(\"Error on connect(): %s\", e)\r\n File \"/home/turuania/mlflow_test/env/lib/python3.8/site-packages/sqlalchemy/util/langhelpers.py\", line 147, in __exit__\r\n raise exc_value.with_traceback(exc_tb)\r\n File \"/home/turuania/mlflow_test/env/lib/python3.8/site-packages/sqlalchemy/pool/base.py\", line 898, in __connect\r\n self.dbapi_connection = connection = pool._invoke_creator(self)\r\n File \"/home/turuania/mlflow_test/env/lib/python3.8/site-packages/sqlalchemy/engine/create.py\", line 637, in connect\r\n return dialect.connect(*cargs, **cparams)\r\n File \"/home/turuania/mlflow_test/env/lib/python3.8/site-packages/sqlalchemy/engine/default.py\", line 615, in connect\r\n return self.loaded_dbapi.connect(*cargs, **cparams)\r\n File \"/home/turuania/mlflow_test/env/lib/python3.8/site-packages/psycopg2/__init__.py\", line 122, in connect\r\n conn = _connect(dsn, connection_factory=connection_factory, **kwasync)\r\nsqlalchemy.exc.OperationalError: (psycopg2.OperationalError) connection to server at \"host\" (host ip), port 5432 failed: FATAL: password authentication failed for user \"db_user\"\r\nconnection to server at \"host\" (host_ip), port 5432 failed: FATAL: password authentication failed for 
user \"db_user\"\r\n\r\n(Background on this error at: https://sqlalche.me/e/20/e3q8)\r\n```\r\n\n\n### Other info / logs\n\n<!-- PLEASE KEEP BACKTICKS AND CHECK PREVIEW -->\r\n```\r\nREPLACE_ME\r\n```\r\n\n\n### What component(s) does this bug affect?\n\n- [ ] `area/artifacts`: Artifact stores and artifact logging\n- [ ] `area/build`: Build and test infrastructure for MLflow\n- [ ] `area/docs`: MLflow documentation pages\n- [ ] `area/examples`: Example code\n- [ ] `area/gateway`: AI Gateway service, Gateway client APIs, third-party Gateway integrations\n- [ ] `area/model-registry`: Model Registry service, APIs, and the fluent client calls for Model Registry\n- [ ] `area/models`: MLmodel format, model serialization/deserialization, flavors\n- [ ] `area/recipes`: Recipes, Recipe APIs, Recipe configs, Recipe Templates\n- [ ] `area/projects`: MLproject format, project running backends\n- [ ] `area/scoring`: MLflow Model server, model deployment tools, Spark UDFs\n- [X] `area/server-infra`: MLflow Tracking server backend\n- [ ] `area/tracking`: Tracking Service, tracking client APIs, autologging\n\n### What interface(s) does this bug affect?\n\n- [ ] `area/uiux`: Front-end, user experience, plotting, JavaScript, JavaScript dev server\n- [ ] `area/docker`: Docker use across MLflow's components, such as MLflow Projects and MLflow Models\n- [X] `area/sqlalchemy`: Use of SQLAlchemy in the Tracking Service or Model Registry\n- [ ] `area/windows`: Windows support\n\n### What language(s) does this bug affect?\n\n- [ ] `language/r`: R APIs and clients\n- [ ] `language/java`: Java APIs and clients\n- [ ] `language/new`: Proposals for new client languages\n\n### What integration(s) does this bug affect?\n\n- [ ] `integrations/azure`: Azure and Azure ML integrations\n- [ ] `integrations/sagemaker`: SageMaker integrations\n- [ ] `integrations/databricks`: Databricks integrations\n", "before_files": [{"content": "from pathlib import Path\n\nfrom sqlalchemy.engine.base import Engine\nfrom alembic.command import upgrade\nfrom alembic.config import Config\nfrom alembic.script import ScriptDirectory\nfrom alembic.migration import MigrationContext\n\n\ndef _get_alembic_dir() -> str:\n return Path(__file__).parent / \"migrations\"\n\n\ndef _get_alembic_config(url: str) -> Config:\n alembic_dir = _get_alembic_dir()\n alembic_ini_path = alembic_dir / \"alembic.ini\"\n alembic_cfg = Config(alembic_ini_path)\n alembic_cfg.set_main_option(\"script_location\", str(alembic_dir))\n alembic_cfg.set_main_option(\"sqlalchemy.url\", url)\n return alembic_cfg\n\n\ndef migrate(engine: Engine, revision: str) -> None:\n alembic_cfg = _get_alembic_config(str(engine.url))\n with engine.begin() as conn:\n alembic_cfg.attributes[\"connection\"] = conn\n upgrade(alembic_cfg, revision)\n\n\ndef migrate_if_needed(engine: Engine, revision: str) -> None:\n alembic_cfg = _get_alembic_config(str(engine.url))\n script_dir = ScriptDirectory.from_config(alembic_cfg)\n with engine.begin() as conn:\n context = MigrationContext.configure(conn)\n if context.get_current_revision() != script_dir.get_current_head():\n upgrade(alembic_cfg, revision)\n", "path": "mlflow/server/auth/db/utils.py"}]}
| 4,089 | 138 |
gh_patches_debug_29526
|
rasdani/github-patches
|
git_diff
|
plone__Products.CMFPlone-2829
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
exclude_from_nav and show_excluded_items are not handled correctly in portal_tabs_view
### What I did:
Create a folder on the top level and exclude it from navigation
### What I expect to happen:
the folder should disappear from global navigation **when I'm not inside its context**
### What actually happened:
the folder is always in the navigation
### What version of Plone/ Addons I am using:
plone 5.2rc2
### REASON:
I'm fixing a bug found here: https://github.com/plone/plone.app.layout/issues/190
which leads to the same result in `portal_tabs_view` here https://github.com/plone/Products.CMFPlone/blob/master/Products/CMFPlone/browser/navigation.py#L151-L153
if `show_excluded_items` is activated (which is the default setting), every item is shown regardless of its `exclude_from_nav` setting. But what the setting actually claims is to show an excluded item only if you're inside of it. A sketch of the intended check follows below.
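A minimal sketch of the check the setting seems to call for (assuming catalog brains with `getPath()` and the names used in `topLevelTabs` below):

```python
context_path = '/'.join(context.getPhysicalPath())
for item in rawresult:
    if item.exclude_from_nav:
        if not navigation_settings.show_excluded_items:
            continue  # excluded items are never shown
        if not (context_path + '/').startswith(item.getPath() + '/'):
            continue  # shown only while browsing inside the excluded item
    # ... build the tab entry as before ...
```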
PR is in preparation...
</issue>
<code>
[start of Products/CMFPlone/browser/navigation.py]
1 # -*- coding: utf-8 -*-
2 from Acquisition import aq_base
3 from Acquisition import aq_inner
4 from plone.app.layout.navigation.interfaces import INavtreeStrategy
5 from plone.app.layout.navigation.navtree import buildFolderTree
6 from plone.app.layout.navigation.root import getNavigationRoot
7 from plone.registry.interfaces import IRegistry
8 from Products.CMFCore.utils import getToolByName
9 from Products.CMFPlone import utils
10 from Products.CMFPlone.browser.interfaces import INavigationBreadcrumbs
11 from Products.CMFPlone.browser.interfaces import INavigationTabs
12 from Products.CMFPlone.browser.interfaces import ISiteMap
13 from Products.CMFPlone.browser.navtree import SitemapQueryBuilder
14 from Products.CMFPlone.interfaces import IHideFromBreadcrumbs
15 from Products.CMFPlone.interfaces import INavigationSchema
16 from Products.Five import BrowserView
17 from zope.component import getMultiAdapter
18 from zope.component import getUtility
19 from zope.interface import implementer
20
21
22 def get_url(item):
23 if not item:
24 return None
25 if hasattr(aq_base(item), 'getURL'):
26 # Looks like a brain
27 return item.getURL()
28 return item.absolute_url()
29
30
31 def get_id(item):
32 if not item:
33 return None
34 getId = getattr(item, 'getId')
35 if not utils.safe_callable(getId):
36 # Looks like a brain
37 return getId
38 return getId()
39
40
41 def get_view_url(context):
42 registry = getUtility(IRegistry)
43 view_action_types = registry.get(
44 'plone.types_use_view_action_in_listings', [])
45 item_url = get_url(context)
46 name = get_id(context)
47
48 if getattr(context, 'portal_type', {}) in view_action_types:
49 item_url += '/view'
50 name += '/view'
51
52 return name, item_url
53
54
55 @implementer(ISiteMap)
56 class CatalogSiteMap(BrowserView):
57
58 def siteMap(self):
59 context = aq_inner(self.context)
60
61 queryBuilder = SitemapQueryBuilder(context)
62 query = queryBuilder()
63 strategy = getMultiAdapter((context, self), INavtreeStrategy)
64
65 return buildFolderTree(
66 context, obj=context,
67 query=query, strategy=strategy
68 )
69
70
71 @implementer(INavigationTabs)
72 class CatalogNavigationTabs(BrowserView):
73
74 def _getNavQuery(self):
75 # check whether we only want actions
76 registry = getUtility(IRegistry)
77 navigation_settings = registry.forInterface(
78 INavigationSchema,
79 prefix="plone",
80 check=False
81 )
82 customQuery = getattr(self.context, 'getCustomNavQuery', False)
83 if customQuery is not None and utils.safe_callable(customQuery):
84 query = customQuery()
85 else:
86 query = {}
87
88 query['path'] = {
89 'query': getNavigationRoot(self.context),
90 'depth': 1
91 }
92 query['portal_type'] = [t for t in navigation_settings.displayed_types]
93 query['sort_on'] = navigation_settings.sort_tabs_on
94 if navigation_settings.sort_tabs_reversed:
95 query['sort_order'] = 'reverse'
96 else:
97 query['sort_order'] = 'ascending'
98
99 if navigation_settings.filter_on_workflow:
100 query['review_state'] = navigation_settings.workflow_states_to_show
101
102 query['is_default_page'] = False
103
104 if not navigation_settings.nonfolderish_tabs:
105 query['is_folderish'] = True
106
107 return query
108
109 def topLevelTabs(self, actions=None, category='portal_tabs'):
110 context = aq_inner(self.context)
111 registry = getUtility(IRegistry)
112 navigation_settings = registry.forInterface(
113 INavigationSchema,
114 prefix="plone",
115 check=False
116 )
117 mtool = getToolByName(context, 'portal_membership')
118 member = mtool.getAuthenticatedMember().id
119 catalog = getToolByName(context, 'portal_catalog')
120
121 if actions is None:
122 context_state = getMultiAdapter(
123 (context, self.request),
124 name=u'plone_context_state'
125 )
126 actions = context_state.actions(category)
127
128 # Build result dict
129 result = []
130 # first the actions
131 for actionInfo in actions:
132 data = actionInfo.copy()
133 data['name'] = data['title']
134 result.append(data)
135
136 # check whether we only want actions
137 if not navigation_settings.generate_tabs:
138 return result
139
140 query = self._getNavQuery()
141
142 rawresult = catalog.searchResults(query)
143
144 def _get_url(item):
145 if item.getRemoteUrl and not member == item.Creator:
146 return (get_id(item), item.getRemoteUrl)
147 return get_view_url(item)
148
149 # now add the content to results
150 for item in rawresult:
151 if not navigation_settings.show_excluded_items:
152 if item.exclude_from_nav:
153 continue
154 cid, item_url = _get_url(item)
155 data = {
156 'name': utils.pretty_title_or_id(context, item),
157 'id': item.getId,
158 'url': item_url,
159 'description': item.Description,
160 'review_state': item.review_state
161 }
162 result.append(data)
163
164 return result
165
166
167 @implementer(INavigationBreadcrumbs)
168 class CatalogNavigationBreadcrumbs(BrowserView):
169
170 def breadcrumbs(self):
171 context = aq_inner(self.context)
172 catalog = getToolByName(context, 'portal_catalog')
173 query = {}
174
175 # Check to see if the current page is a folder default view, if so
176 # get breadcrumbs from the parent folder
177 if utils.isDefaultPage(context, self.request):
178 currentPath = '/'.join(utils.parent(context).getPhysicalPath())
179 else:
180 currentPath = '/'.join(context.getPhysicalPath())
181 query['path'] = {'query': currentPath, 'navtree': 1, 'depth': 0}
182
183 rawresult = catalog(**query)
184
185 # Sort items on path length
186 dec_result = [(len(r.getPath()), r) for r in rawresult]
187 dec_result.sort()
188
189 rootPath = getNavigationRoot(context)
190
191 # Build result dict
192 result = []
193 for r_tuple in dec_result:
194 item = r_tuple[1]
195
196 # Don't include it if it would be above the navigation root
197 itemPath = item.getPath()
198 if rootPath.startswith(itemPath):
199 continue
200
201 cid, item_url = get_view_url(item)
202 data = {
203 'Title': utils.pretty_title_or_id(context, item),
204 'absolute_url': item_url
205 }
206 result.append(data)
207 return result
208
209
210 @implementer(INavigationBreadcrumbs)
211 class PhysicalNavigationBreadcrumbs(BrowserView):
212
213 def breadcrumbs(self):
214 context = aq_inner(self.context)
215 request = self.request
216 container = utils.parent(context)
217
218 name, item_url = get_view_url(context)
219
220 if container is None:
221 return ({
222 'absolute_url': item_url,
223 'Title': utils.pretty_title_or_id(context, context),
224 },)
225
226 view = getMultiAdapter((container, request), name='breadcrumbs_view')
227 base = tuple(view.breadcrumbs())
228
229 # Some things want to be hidden from the breadcrumbs
230 if IHideFromBreadcrumbs.providedBy(context):
231 return base
232
233 if base:
234 item_url = '%s/%s' % (base[-1]['absolute_url'], name)
235
236 rootPath = getNavigationRoot(context)
237 itemPath = '/'.join(context.getPhysicalPath())
238
239 # don't show default pages in breadcrumbs or pages above the navigation
240 # root
241 if not utils.isDefaultPage(context, request) \
242 and not rootPath.startswith(itemPath):
243 base += ({
244 'absolute_url': item_url,
245 'Title': utils.pretty_title_or_id(context, context),
246 },)
247 return base
248
249
250 @implementer(INavigationBreadcrumbs)
251 class RootPhysicalNavigationBreadcrumbs(BrowserView):
252
253 def breadcrumbs(self):
254 # XXX Root never gets included, it's hardcoded as 'Home' in
255 # the template. We will fix and remove the hardcoding and fix
256 # the tests.
257 return ()
258
[end of Products/CMFPlone/browser/navigation.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/Products/CMFPlone/browser/navigation.py b/Products/CMFPlone/browser/navigation.py
--- a/Products/CMFPlone/browser/navigation.py
+++ b/Products/CMFPlone/browser/navigation.py
@@ -72,7 +72,7 @@
class CatalogNavigationTabs(BrowserView):
def _getNavQuery(self):
- # check whether we only want actions
+ # check whether we only want actions
registry = getUtility(IRegistry)
navigation_settings = registry.forInterface(
INavigationSchema,
@@ -101,6 +101,9 @@
query['is_default_page'] = False
+ if not navigation_settings.show_excluded_items:
+ query['exclude_from_nav'] = False
+
if not navigation_settings.nonfolderish_tabs:
query['is_folderish'] = True
@@ -146,11 +149,13 @@
return (get_id(item), item.getRemoteUrl)
return get_view_url(item)
+ context_path = '/'.join(context.getPhysicalPath())
+
# now add the content to results
for item in rawresult:
- if not navigation_settings.show_excluded_items:
- if item.exclude_from_nav:
- continue
+ if item.exclude_from_nav and not context_path.startswith(item.getPath()): # noqa: E501
+ # skip excluded items if they're not in our context path
+ continue
cid, item_url = _get_url(item)
data = {
'name': utils.pretty_title_or_id(context, item),
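Besides the context-path check, the patch also adds a catalog-level filter in `_getNavQuery`, so excluded items never reach the Python loop when `show_excluded_items` is disabled. A rough sketch of that part of the query construction (names follow the method in the file):

```python
def apply_exclusion_filter(query, navigation_settings):
    # Sketch: with show_excluded_items off, let the catalog drop excluded items up front,
    # which is cheaper than filtering the brains afterwards in Python.
    if not navigation_settings.show_excluded_items:
        query['exclude_from_nav'] = False
    return query
```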
|
{"golden_diff": "diff --git a/Products/CMFPlone/browser/navigation.py b/Products/CMFPlone/browser/navigation.py\n--- a/Products/CMFPlone/browser/navigation.py\n+++ b/Products/CMFPlone/browser/navigation.py\n@@ -72,7 +72,7 @@\n class CatalogNavigationTabs(BrowserView):\n \n def _getNavQuery(self):\n- # check whether we only want actions\n+ # check whether we only want actions\n registry = getUtility(IRegistry)\n navigation_settings = registry.forInterface(\n INavigationSchema,\n@@ -101,6 +101,9 @@\n \n query['is_default_page'] = False\n \n+ if not navigation_settings.show_excluded_items:\n+ query['exclude_from_nav'] = False\n+\n if not navigation_settings.nonfolderish_tabs:\n query['is_folderish'] = True\n \n@@ -146,11 +149,13 @@\n return (get_id(item), item.getRemoteUrl)\n return get_view_url(item)\n \n+ context_path = '/'.join(context.getPhysicalPath())\n+\n # now add the content to results\n for item in rawresult:\n- if not navigation_settings.show_excluded_items:\n- if item.exclude_from_nav:\n- continue\n+ if item.exclude_from_nav and not context_path.startswith(item.getPath()): # noqa: E501\n+ # skip excluded items if they're not in our context path\n+ continue\n cid, item_url = _get_url(item)\n data = {\n 'name': utils.pretty_title_or_id(context, item),\n", "issue": "exclude_from_nav and show_excluded_items are not handled correctly in portal_tabs_view\n### What I did:\r\n\r\nCreate a folder on top level and exclude id from navigation\r\n\r\n### What I expect to happen:\r\n\r\nthe folder should disappear from global navigation **when I'm not inside it's context**\r\n\r\n### What actually happened:\r\n\r\nthe folder is always in the navigation\r\n\r\n### What version of Plone/ Addons I am using:\r\n\r\nplone 5.2rc2\r\n\r\n### REASON:\r\n\r\nI'm fixing a bug found here: https://github.com/plone/plone.app.layout/issues/190\r\nwhich leads to the same result in `portal_tabs_view` here https://github.com/plone/Products.CMFPlone/blob/master/Products/CMFPlone/browser/navigation.py#L151-L153\r\n\r\nif `show_excluded_items` is activated (which actually is the default setting) every item is shown regardless of `exclude_from_nav` setting. 
But what the setting claims is to show the excluded item only if you're inside of it.\r\n\r\nPR is in preparation...\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nfrom Acquisition import aq_base\nfrom Acquisition import aq_inner\nfrom plone.app.layout.navigation.interfaces import INavtreeStrategy\nfrom plone.app.layout.navigation.navtree import buildFolderTree\nfrom plone.app.layout.navigation.root import getNavigationRoot\nfrom plone.registry.interfaces import IRegistry\nfrom Products.CMFCore.utils import getToolByName\nfrom Products.CMFPlone import utils\nfrom Products.CMFPlone.browser.interfaces import INavigationBreadcrumbs\nfrom Products.CMFPlone.browser.interfaces import INavigationTabs\nfrom Products.CMFPlone.browser.interfaces import ISiteMap\nfrom Products.CMFPlone.browser.navtree import SitemapQueryBuilder\nfrom Products.CMFPlone.interfaces import IHideFromBreadcrumbs\nfrom Products.CMFPlone.interfaces import INavigationSchema\nfrom Products.Five import BrowserView\nfrom zope.component import getMultiAdapter\nfrom zope.component import getUtility\nfrom zope.interface import implementer\n\n\ndef get_url(item):\n if not item:\n return None\n if hasattr(aq_base(item), 'getURL'):\n # Looks like a brain\n return item.getURL()\n return item.absolute_url()\n\n\ndef get_id(item):\n if not item:\n return None\n getId = getattr(item, 'getId')\n if not utils.safe_callable(getId):\n # Looks like a brain\n return getId\n return getId()\n\n\ndef get_view_url(context):\n registry = getUtility(IRegistry)\n view_action_types = registry.get(\n 'plone.types_use_view_action_in_listings', [])\n item_url = get_url(context)\n name = get_id(context)\n\n if getattr(context, 'portal_type', {}) in view_action_types:\n item_url += '/view'\n name += '/view'\n\n return name, item_url\n\n\n@implementer(ISiteMap)\nclass CatalogSiteMap(BrowserView):\n\n def siteMap(self):\n context = aq_inner(self.context)\n\n queryBuilder = SitemapQueryBuilder(context)\n query = queryBuilder()\n strategy = getMultiAdapter((context, self), INavtreeStrategy)\n\n return buildFolderTree(\n context, obj=context,\n query=query, strategy=strategy\n )\n\n\n@implementer(INavigationTabs)\nclass CatalogNavigationTabs(BrowserView):\n\n def _getNavQuery(self):\n # check whether we only want actions\n registry = getUtility(IRegistry)\n navigation_settings = registry.forInterface(\n INavigationSchema,\n prefix=\"plone\",\n check=False\n )\n customQuery = getattr(self.context, 'getCustomNavQuery', False)\n if customQuery is not None and utils.safe_callable(customQuery):\n query = customQuery()\n else:\n query = {}\n\n query['path'] = {\n 'query': getNavigationRoot(self.context),\n 'depth': 1\n }\n query['portal_type'] = [t for t in navigation_settings.displayed_types]\n query['sort_on'] = navigation_settings.sort_tabs_on\n if navigation_settings.sort_tabs_reversed:\n query['sort_order'] = 'reverse'\n else:\n query['sort_order'] = 'ascending'\n\n if navigation_settings.filter_on_workflow:\n query['review_state'] = navigation_settings.workflow_states_to_show\n\n query['is_default_page'] = False\n\n if not navigation_settings.nonfolderish_tabs:\n query['is_folderish'] = True\n\n return query\n\n def topLevelTabs(self, actions=None, category='portal_tabs'):\n context = aq_inner(self.context)\n registry = getUtility(IRegistry)\n navigation_settings = registry.forInterface(\n INavigationSchema,\n prefix=\"plone\",\n check=False\n )\n mtool = getToolByName(context, 'portal_membership')\n member = mtool.getAuthenticatedMember().id\n 
catalog = getToolByName(context, 'portal_catalog')\n\n if actions is None:\n context_state = getMultiAdapter(\n (context, self.request),\n name=u'plone_context_state'\n )\n actions = context_state.actions(category)\n\n # Build result dict\n result = []\n # first the actions\n for actionInfo in actions:\n data = actionInfo.copy()\n data['name'] = data['title']\n result.append(data)\n\n # check whether we only want actions\n if not navigation_settings.generate_tabs:\n return result\n\n query = self._getNavQuery()\n\n rawresult = catalog.searchResults(query)\n\n def _get_url(item):\n if item.getRemoteUrl and not member == item.Creator:\n return (get_id(item), item.getRemoteUrl)\n return get_view_url(item)\n\n # now add the content to results\n for item in rawresult:\n if not navigation_settings.show_excluded_items:\n if item.exclude_from_nav:\n continue\n cid, item_url = _get_url(item)\n data = {\n 'name': utils.pretty_title_or_id(context, item),\n 'id': item.getId,\n 'url': item_url,\n 'description': item.Description,\n 'review_state': item.review_state\n }\n result.append(data)\n\n return result\n\n\n@implementer(INavigationBreadcrumbs)\nclass CatalogNavigationBreadcrumbs(BrowserView):\n\n def breadcrumbs(self):\n context = aq_inner(self.context)\n catalog = getToolByName(context, 'portal_catalog')\n query = {}\n\n # Check to see if the current page is a folder default view, if so\n # get breadcrumbs from the parent folder\n if utils.isDefaultPage(context, self.request):\n currentPath = '/'.join(utils.parent(context).getPhysicalPath())\n else:\n currentPath = '/'.join(context.getPhysicalPath())\n query['path'] = {'query': currentPath, 'navtree': 1, 'depth': 0}\n\n rawresult = catalog(**query)\n\n # Sort items on path length\n dec_result = [(len(r.getPath()), r) for r in rawresult]\n dec_result.sort()\n\n rootPath = getNavigationRoot(context)\n\n # Build result dict\n result = []\n for r_tuple in dec_result:\n item = r_tuple[1]\n\n # Don't include it if it would be above the navigation root\n itemPath = item.getPath()\n if rootPath.startswith(itemPath):\n continue\n\n cid, item_url = get_view_url(item)\n data = {\n 'Title': utils.pretty_title_or_id(context, item),\n 'absolute_url': item_url\n }\n result.append(data)\n return result\n\n\n@implementer(INavigationBreadcrumbs)\nclass PhysicalNavigationBreadcrumbs(BrowserView):\n\n def breadcrumbs(self):\n context = aq_inner(self.context)\n request = self.request\n container = utils.parent(context)\n\n name, item_url = get_view_url(context)\n\n if container is None:\n return ({\n 'absolute_url': item_url,\n 'Title': utils.pretty_title_or_id(context, context),\n },)\n\n view = getMultiAdapter((container, request), name='breadcrumbs_view')\n base = tuple(view.breadcrumbs())\n\n # Some things want to be hidden from the breadcrumbs\n if IHideFromBreadcrumbs.providedBy(context):\n return base\n\n if base:\n item_url = '%s/%s' % (base[-1]['absolute_url'], name)\n\n rootPath = getNavigationRoot(context)\n itemPath = '/'.join(context.getPhysicalPath())\n\n # don't show default pages in breadcrumbs or pages above the navigation\n # root\n if not utils.isDefaultPage(context, request) \\\n and not rootPath.startswith(itemPath):\n base += ({\n 'absolute_url': item_url,\n 'Title': utils.pretty_title_or_id(context, context),\n },)\n return base\n\n\n@implementer(INavigationBreadcrumbs)\nclass RootPhysicalNavigationBreadcrumbs(BrowserView):\n\n def breadcrumbs(self):\n # XXX Root never gets included, it's hardcoded as 'Home' in\n # the template. 
We will fix and remove the hardcoding and fix\n # the tests.\n return ()\n", "path": "Products/CMFPlone/browser/navigation.py"}]}
| 3,174 | 350 |
gh_patches_debug_31548
|
rasdani/github-patches
|
git_diff
|
archlinux__archinstall-1194
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
`select_disk` throws an error because BlockDevice does not contain label
Running this line from a Python script causes an error:
`archinstall.select_disk(archinstall.all_blockdevices())`
```
Traceback (most recent call last):
File "/root/test.py", line 3, in <module>
hdd = archinstall.select_disk(archinstall.all_blockdevices())
File "/usr/lib/python3.10/site-packages/archinstall/lib/user_interaction/disk_conf.py", line 68, in select_disk
f"{index}: {drive} ({dict_o_disks[drive]['size'], dict_o_disks[drive].device, dict_o_disks[drive]['label']})"
File "/usr/lib/python3.10/site-packages/archinstall/lib/disk/blockdevice.py", line 45, in __getitem__
raise KeyError(f'{self} does not contain information: "{key}"')
KeyError: 'BlockDevice(/dev/sr0, size=0.8GB, free_space=, bus_type=sata) does not contain information: "label"'
```
</issue>
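For orientation, the traceback originates in `BlockDevice.__getitem__`, which raises `KeyError` for fields a device simply does not expose; `/dev/sr0` is an optical drive and carries no filesystem label. A defensive lookup like the sketch below would avoid the crash on the caller side. It is only an illustration, not the fix the project adopted (the accepted patch further down removes the raw print-out altogether).

```python
def device_info(device, key, default=''):
    """Sketch: read a BlockDevice field, tolerating devices that lack it."""
    try:
        return device[key]
    except KeyError:
        return default

# label = device_info(dict_o_disks[drive], 'label')  # '' for /dev/sr0 instead of a KeyError
```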
<code>
[start of archinstall/lib/user_interaction/disk_conf.py]
1 from __future__ import annotations
2
3 from typing import Any, Dict, TYPE_CHECKING, Optional
4
5 from .partitioning_conf import manage_new_and_existing_partitions, get_default_partition_layout
6 from ..disk import BlockDevice
7 from ..exceptions import DiskError
8 from ..menu import Menu
9 from ..menu.menu import MenuSelectionType
10 from ..output import log
11
12 if TYPE_CHECKING:
13 _: Any
14
15
16 def ask_for_main_filesystem_format(advanced_options=False) -> str:
17 options = {'btrfs': 'btrfs', 'ext4': 'ext4', 'xfs': 'xfs', 'f2fs': 'f2fs'}
18
19 advanced = {'ntfs': 'ntfs'}
20
21 if advanced_options:
22 options.update(advanced)
23
24 prompt = _('Select which filesystem your main partition should use')
25 choice = Menu(prompt, options, skip=False).run()
26 return choice.value
27
28
29 def select_individual_blockdevice_usage(block_devices: list) -> Dict[str, Any]:
30 result = {}
31
32 for device in block_devices:
33 layout = manage_new_and_existing_partitions(device)
34 result[device.path] = layout
35
36 return result
37
38
39 def select_disk_layout(preset: Optional[Dict[str, Any]], block_devices: list, advanced_options=False) -> Optional[Dict[str, Any]]:
40 wipe_mode = str(_('Wipe all selected drives and use a best-effort default partition layout'))
41 custome_mode = str(_('Select what to do with each individual drive (followed by partition usage)'))
42 modes = [wipe_mode, custome_mode]
43
44 warning = str(_('Are you sure you want to reset this setting?'))
45
46 choice = Menu(
47 _('Select what you wish to do with the selected block devices'),
48 modes,
49 explode_on_interrupt=True,
50 explode_warning=warning
51 ).run()
52
53 match choice.type_:
54 case MenuSelectionType.Esc: return preset
55 case MenuSelectionType.Ctrl_c: return None
56 case MenuSelectionType.Selection:
57 if choice.value == wipe_mode:
58 return get_default_partition_layout(block_devices, advanced_options)
59 else:
60 return select_individual_blockdevice_usage(block_devices)
61
62
63 def select_disk(dict_o_disks: Dict[str, BlockDevice]) -> BlockDevice:
64 """
65 Asks the user to select a harddrive from the `dict_o_disks` selection.
66 Usually this is combined with :ref:`archinstall.list_drives`.
67
68 :param dict_o_disks: A `dict` where keys are the drive-name, value should be a dict containing drive information.
69 :type dict_o_disks: dict
70
71 :return: The name/path (the dictionary key) of the selected drive
72 :rtype: str
73 """
74 drives = sorted(list(dict_o_disks.keys()))
75 if len(drives) >= 1:
76 for index, drive in enumerate(drives):
77 print(
78 f"{index}: {drive} ({dict_o_disks[drive]['size'], dict_o_disks[drive].device, dict_o_disks[drive]['label']})"
79 )
80
81 log("You can skip selecting a drive and partitioning and use whatever drive-setup is mounted at /mnt (experimental)",
82 fg="yellow")
83
84 drive = Menu('Select one of the disks or skip and use "/mnt" as default"', drives).run()
85 if not drive:
86 return drive
87
88 drive = dict_o_disks[drive]
89 return drive
90
91 raise DiskError('select_disk() requires a non-empty dictionary of disks to select from.')
92
[end of archinstall/lib/user_interaction/disk_conf.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/archinstall/lib/user_interaction/disk_conf.py b/archinstall/lib/user_interaction/disk_conf.py
--- a/archinstall/lib/user_interaction/disk_conf.py
+++ b/archinstall/lib/user_interaction/disk_conf.py
@@ -7,7 +7,6 @@
from ..exceptions import DiskError
from ..menu import Menu
from ..menu.menu import MenuSelectionType
-from ..output import log
if TYPE_CHECKING:
_: Any
@@ -60,7 +59,7 @@
return select_individual_blockdevice_usage(block_devices)
-def select_disk(dict_o_disks: Dict[str, BlockDevice]) -> BlockDevice:
+def select_disk(dict_o_disks: Dict[str, BlockDevice]) -> Optional[BlockDevice]:
"""
Asks the user to select a harddrive from the `dict_o_disks` selection.
Usually this is combined with :ref:`archinstall.list_drives`.
@@ -73,19 +72,15 @@
"""
drives = sorted(list(dict_o_disks.keys()))
if len(drives) >= 1:
- for index, drive in enumerate(drives):
- print(
- f"{index}: {drive} ({dict_o_disks[drive]['size'], dict_o_disks[drive].device, dict_o_disks[drive]['label']})"
- )
+ title = str(_('You can skip selecting a drive and partitioning and use whatever drive-setup is mounted at /mnt (experimental)')) + '\n'
+ title += str(_('Select one of the disks or skip and use /mnt as default'))
- log("You can skip selecting a drive and partitioning and use whatever drive-setup is mounted at /mnt (experimental)",
- fg="yellow")
+ choice = Menu(title, drives).run()
- drive = Menu('Select one of the disks or skip and use "/mnt" as default"', drives).run()
- if not drive:
- return drive
+ if choice.type_ == MenuSelectionType.Esc:
+ return None
- drive = dict_o_disks[drive]
+ drive = dict_o_disks[choice.value]
return drive
raise DiskError('select_disk() requires a non-empty dictionary of disks to select from.')
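Because the patched `select_disk` is now typed `Optional[BlockDevice]` and returns `None` when the menu is escaped, a caller along the following lines is what the change anticipates. This is a hedged usage sketch, not code from the repository.

```python
import archinstall

drive = archinstall.select_disk(archinstall.all_blockdevices())
if drive is None:
    # selection skipped: fall back to whatever is already mounted at /mnt
    print("No drive selected; using /mnt as-is")
else:
    print(f"Installing to {drive}")
```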
|
{"golden_diff": "diff --git a/archinstall/lib/user_interaction/disk_conf.py b/archinstall/lib/user_interaction/disk_conf.py\n--- a/archinstall/lib/user_interaction/disk_conf.py\n+++ b/archinstall/lib/user_interaction/disk_conf.py\n@@ -7,7 +7,6 @@\n from ..exceptions import DiskError\n from ..menu import Menu\n from ..menu.menu import MenuSelectionType\n-from ..output import log\n \n if TYPE_CHECKING:\n \t_: Any\n@@ -60,7 +59,7 @@\n \t\t\t\treturn select_individual_blockdevice_usage(block_devices)\n \n \n-def select_disk(dict_o_disks: Dict[str, BlockDevice]) -> BlockDevice:\n+def select_disk(dict_o_disks: Dict[str, BlockDevice]) -> Optional[BlockDevice]:\n \t\"\"\"\n \tAsks the user to select a harddrive from the `dict_o_disks` selection.\n \tUsually this is combined with :ref:`archinstall.list_drives`.\n@@ -73,19 +72,15 @@\n \t\"\"\"\n \tdrives = sorted(list(dict_o_disks.keys()))\n \tif len(drives) >= 1:\n-\t\tfor index, drive in enumerate(drives):\n-\t\t\tprint(\n-\t\t\t\tf\"{index}: {drive} ({dict_o_disks[drive]['size'], dict_o_disks[drive].device, dict_o_disks[drive]['label']})\"\n-\t\t\t)\n+\t\ttitle = str(_('You can skip selecting a drive and partitioning and use whatever drive-setup is mounted at /mnt (experimental)')) + '\\n'\n+\t\ttitle += str(_('Select one of the disks or skip and use /mnt as default'))\n \n-\t\tlog(\"You can skip selecting a drive and partitioning and use whatever drive-setup is mounted at /mnt (experimental)\",\n-\t\t\tfg=\"yellow\")\n+\t\tchoice = Menu(title, drives).run()\n \n-\t\tdrive = Menu('Select one of the disks or skip and use \"/mnt\" as default\"', drives).run()\n-\t\tif not drive:\n-\t\t\treturn drive\n+\t\tif choice.type_ == MenuSelectionType.Esc:\n+\t\t\treturn None\n \n-\t\tdrive = dict_o_disks[drive]\n+\t\tdrive = dict_o_disks[choice.value]\n \t\treturn drive\n \n \traise DiskError('select_disk() requires a non-empty dictionary of disks to select from.')\n", "issue": "`select_disk` throw an error because BlockDevice does not contain label\nRunning this line from a python script causes an error: \r\n\r\n`archinstall.select_disk(archinstall.all_blockdevices())`\r\n\r\n\r\n```\r\nTraceback (most recent call last):\r\n File \"/root/test.py\", line 3, in <module>\r\n hdd = archinstall.select_disk(archinstall.all_blockdevices())\r\n File \"/usr/lib/python3.10/site-packages/archinstall/lib/user_interaction/disk_conf.py\", line 68, in select_disk\r\n f\"{index}: {drive} ({dict_o_disks[drive]['size'], dict_o_disks[drive].device, dict_o_disks[drive]['label']})\"\r\n File \"/usr/lib/python3.10/site-packages/archinstall/lib/disk/blockdevice.py\", line 45, in __getitem__\r\n raise KeyError(f'{self} does not contain information: \"{key}\"')\r\nKeyError: 'BlockDevice(/dev/sr0, size=0.8GB, free_space=, bus_type=sata) does not contain information: \"label\"'\r\n```\n", "before_files": [{"content": "from __future__ import annotations\n\nfrom typing import Any, Dict, TYPE_CHECKING, Optional\n\nfrom .partitioning_conf import manage_new_and_existing_partitions, get_default_partition_layout\nfrom ..disk import BlockDevice\nfrom ..exceptions import DiskError\nfrom ..menu import Menu\nfrom ..menu.menu import MenuSelectionType\nfrom ..output import log\n\nif TYPE_CHECKING:\n\t_: Any\n\n\ndef ask_for_main_filesystem_format(advanced_options=False) -> str:\n\toptions = {'btrfs': 'btrfs', 'ext4': 'ext4', 'xfs': 'xfs', 'f2fs': 'f2fs'}\n\n\tadvanced = {'ntfs': 'ntfs'}\n\n\tif advanced_options:\n\t\toptions.update(advanced)\n\n\tprompt = _('Select which filesystem your main 
partition should use')\n\tchoice = Menu(prompt, options, skip=False).run()\n\treturn choice.value\n\n\ndef select_individual_blockdevice_usage(block_devices: list) -> Dict[str, Any]:\n\tresult = {}\n\n\tfor device in block_devices:\n\t\tlayout = manage_new_and_existing_partitions(device)\n\t\tresult[device.path] = layout\n\n\treturn result\n\n\ndef select_disk_layout(preset: Optional[Dict[str, Any]], block_devices: list, advanced_options=False) -> Optional[Dict[str, Any]]:\n\twipe_mode = str(_('Wipe all selected drives and use a best-effort default partition layout'))\n\tcustome_mode = str(_('Select what to do with each individual drive (followed by partition usage)'))\n\tmodes = [wipe_mode, custome_mode]\n\n\twarning = str(_('Are you sure you want to reset this setting?'))\n\n\tchoice = Menu(\n\t\t_('Select what you wish to do with the selected block devices'),\n\t\tmodes,\n\t\texplode_on_interrupt=True,\n\t\texplode_warning=warning\n\t).run()\n\n\tmatch choice.type_:\n\t\tcase MenuSelectionType.Esc: return preset\n\t\tcase MenuSelectionType.Ctrl_c: return None\n\t\tcase MenuSelectionType.Selection:\n\t\t\tif choice.value == wipe_mode:\n\t\t\t\treturn get_default_partition_layout(block_devices, advanced_options)\n\t\t\telse:\n\t\t\t\treturn select_individual_blockdevice_usage(block_devices)\n\n\ndef select_disk(dict_o_disks: Dict[str, BlockDevice]) -> BlockDevice:\n\t\"\"\"\n\tAsks the user to select a harddrive from the `dict_o_disks` selection.\n\tUsually this is combined with :ref:`archinstall.list_drives`.\n\n\t:param dict_o_disks: A `dict` where keys are the drive-name, value should be a dict containing drive information.\n\t:type dict_o_disks: dict\n\n\t:return: The name/path (the dictionary key) of the selected drive\n\t:rtype: str\n\t\"\"\"\n\tdrives = sorted(list(dict_o_disks.keys()))\n\tif len(drives) >= 1:\n\t\tfor index, drive in enumerate(drives):\n\t\t\tprint(\n\t\t\t\tf\"{index}: {drive} ({dict_o_disks[drive]['size'], dict_o_disks[drive].device, dict_o_disks[drive]['label']})\"\n\t\t\t)\n\n\t\tlog(\"You can skip selecting a drive and partitioning and use whatever drive-setup is mounted at /mnt (experimental)\",\n\t\t\tfg=\"yellow\")\n\n\t\tdrive = Menu('Select one of the disks or skip and use \"/mnt\" as default\"', drives).run()\n\t\tif not drive:\n\t\t\treturn drive\n\n\t\tdrive = dict_o_disks[drive]\n\t\treturn drive\n\n\traise DiskError('select_disk() requires a non-empty dictionary of disks to select from.')\n", "path": "archinstall/lib/user_interaction/disk_conf.py"}]}
| 1,736 | 498 |
gh_patches_debug_25811
|
rasdani/github-patches
|
git_diff
|
pyro-ppl__numpyro-356
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
`plate` has unexpected behaviour when dist has batch_shape
I faced this issue while revising the baseball example to use `plate`. The repro code:
```
import jax.numpy as np
import numpyro
import numpyro.distributions as dist
from numpyro.handlers import trace, seed
def model():
with numpyro.plate('plate', 10):
numpyro.sample('x', dist.Normal(np.zeros(10), 1))
trace(seed(model, 0)).get_trace()['x']['value'].shape
```
should return `(10,)` but currently, it returns `(10, 10)`.
cc @neerajprad
</issue>
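The shape bookkeeping the fix needs can be stated without touching NumPyro internals: the plate dimension already exists in the distribution's `batch_shape`, so only the non-overlapping leading dimensions should be requested as extra sample shape. A small sketch of that rule (variable names here are illustrative, not NumPyro's):

```python
plate_shape = (10,)        # implied by numpyro.plate('plate', 10)
dist_batch_shape = (10,)   # batch shape of dist.Normal(np.zeros(10), 1)

overlap = len(plate_shape) - len(dist_batch_shape)
extra_sample_shape = plate_shape[:overlap]  # () -> the draw keeps shape (10,), not (10, 10)
assert extra_sample_shape == ()
```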
<code>
[start of numpyro/primitives.py]
1 from collections import namedtuple
2 import functools
3
4 import jax
5 from jax import lax
6
7 import numpyro
8 from numpyro.distributions.discrete import PRNGIdentity
9
10 _PYRO_STACK = []
11
12
13 CondIndepStackFrame = namedtuple('CondIndepStackFrame', ['name', 'dim', 'size'])
14
15
16 def apply_stack(msg):
17 pointer = 0
18 for pointer, handler in enumerate(reversed(_PYRO_STACK)):
19 handler.process_message(msg)
20 # When a Messenger sets the "stop" field of a message,
21 # it prevents any Messengers above it on the stack from being applied.
22 if msg.get("stop"):
23 break
24 if msg['value'] is None:
25 if msg['type'] == 'sample':
26 msg['value'], msg['intermediates'] = msg['fn'](*msg['args'],
27 sample_intermediates=True,
28 **msg['kwargs'])
29 else:
30 msg['value'] = msg['fn'](*msg['args'], **msg['kwargs'])
31
32 # A Messenger that sets msg["stop"] == True also prevents application
33 # of postprocess_message by Messengers above it on the stack
34 # via the pointer variable from the process_message loop
35 for handler in _PYRO_STACK[-pointer-1:]:
36 handler.postprocess_message(msg)
37 return msg
38
39
40 class Messenger(object):
41 def __init__(self, fn=None):
42 self.fn = fn
43 functools.update_wrapper(self, fn, updated=[])
44
45 def __enter__(self):
46 _PYRO_STACK.append(self)
47
48 def __exit__(self, *args, **kwargs):
49 assert _PYRO_STACK[-1] is self
50 _PYRO_STACK.pop()
51
52 def process_message(self, msg):
53 pass
54
55 def postprocess_message(self, msg):
56 pass
57
58 def __call__(self, *args, **kwargs):
59 with self:
60 return self.fn(*args, **kwargs)
61
62
63 def sample(name, fn, obs=None, random_state=None, sample_shape=()):
64 """
65 Returns a random sample from the stochastic function `fn`. This can have
66 additional side effects when wrapped inside effect handlers like
67 :class:`~numpyro.handlers.substitute`.
68
69 .. note::
70 By design, `sample` primitive is meant to be used inside a NumPyro model.
71 Then :class:`~numpyro.handlers.seed` handler is used to inject a random
72 state to `fn`. In those situations, `random_state` keyword will take no
73 effect.
74
75 :param str name: name of the sample site
76 :param fn: Python callable
77 :param numpy.ndarray obs: observed value
78 :param jax.random.PRNGKey random_state: an optional random key for `fn`.
79 :param sample_shape: Shape of samples to be drawn.
80 :return: sample from the stochastic `fn`.
81 """
82 # if there are no active Messengers, we just draw a sample and return it as expected:
83 if not _PYRO_STACK:
84 return fn(random_state=random_state, sample_shape=sample_shape)
85
86 # Otherwise, we initialize a message...
87 initial_msg = {
88 'type': 'sample',
89 'name': name,
90 'fn': fn,
91 'args': (),
92 'kwargs': {'random_state': random_state, 'sample_shape': sample_shape},
93 'value': obs,
94 'scale': 1.0,
95 'is_observed': obs is not None,
96 'intermediates': [],
97 'cond_indep_stack': [],
98 }
99
100 # ...and use apply_stack to send it to the Messengers
101 msg = apply_stack(initial_msg)
102 return msg['value']
103
104
105 def identity(x, *args, **kwargs):
106 return x
107
108
109 def param(name, init_value=None, **kwargs):
110 """
111 Annotate the given site as an optimizable parameter for use with
112 :mod:`jax.experimental.optimizers`. For an example of how `param` statements
113 can be used in inference algorithms, refer to :func:`~numpyro.svi.svi`.
114
115 :param str name: name of site.
116 :param numpy.ndarray init_value: initial value specified by the user. Note that
117 the onus of using this to initialize the optimizer is on the user /
118 inference algorithm, since there is no global parameter store in
119 NumPyro.
120 :return: value for the parameter. Unless wrapped inside a
121 handler like :class:`~numpyro.handlers.substitute`, this will simply
122 return the initial value.
123 """
124 # if there are no active Messengers, we just draw a sample and return it as expected:
125 if not _PYRO_STACK:
126 return init_value
127
128 # Otherwise, we initialize a message...
129 initial_msg = {
130 'type': 'param',
131 'name': name,
132 'fn': identity,
133 'args': (init_value,),
134 'kwargs': kwargs,
135 'value': None,
136 'scale': 1.0,
137 'cond_indep_stack': [],
138 }
139
140 # ...and use apply_stack to send it to the Messengers
141 msg = apply_stack(initial_msg)
142 return msg['value']
143
144
145 def module(name, nn, input_shape=None):
146 """
147 Declare a :mod:`~jax.experimental.stax` style neural network inside a
148 model so that its parameters are registered for optimization via
149 :func:`~numpyro.primitives.param` statements.
150
151 :param str name: name of the module to be registered.
152 :param tuple nn: a tuple of `(init_fn, apply_fn)` obtained by a :mod:`~jax.experimental.stax`
153 constructor function.
154 :param tuple input_shape: shape of the input taken by the
155 neural network.
156 :return: a `apply_fn` with bound parameters that takes an array
157 as an input and returns the neural network transformed output
158 array.
159 """
160 module_key = name + '$params'
161 nn_init, nn_apply = nn
162 nn_params = param(module_key)
163 if nn_params is None:
164 if input_shape is None:
165 raise ValueError('Valid value for `input_size` needed to initialize.')
166 rng = numpyro.sample(name + '$rng', PRNGIdentity())
167 _, nn_params = nn_init(rng, input_shape)
168 param(module_key, nn_params)
169 return jax.partial(nn_apply, nn_params)
170
171
172 class plate(Messenger):
173 """
174 Construct for annotating conditionally independent variables. Within a
175 `plate` context manager, `sample` sites will be automatically broadcasted to
176 the size of the plate. Additionally, a scale factor might be applied by
177 certain inference algorithms if `subsample_size` is specified.
178
179 :param str name: Name of the plate.
180 :param int size: Size of the plate.
181 :param int subsample_size: Optional argument denoting the size of the mini-batch.
182 This can be used to apply a scaling factor by inference algorithms. e.g.
183 when computing ELBO using a mini-batch.
184 :param int dim: Optional argument to specify which dimension in the tensor
185 is used as the plate dim. If `None` (default), the leftmost available dim
186 is allocated.
187 """
188 def __init__(self, name, size, subsample_size=None, dim=None):
189 self.name = name
190 self.size = size
191 self.subsample_size = size if subsample_size is None else subsample_size
192 if dim is not None and dim >= 0:
193 raise ValueError('dim arg must be negative.')
194 self.dim = dim
195 self._validate_and_set_dim()
196 super(plate, self).__init__()
197
198 def _validate_and_set_dim(self):
199 msg = {
200 'type': 'plate',
201 'fn': identity,
202 'name': self.name,
203 'args': (None,),
204 'kwargs': {},
205 'value': None,
206 'scale': 1.0,
207 'cond_indep_stack': [],
208 }
209 apply_stack(msg)
210 cond_indep_stack = msg['cond_indep_stack']
211 occupied_dims = {f.dim for f in cond_indep_stack}
212 dim = -1
213 while True:
214 if dim not in occupied_dims:
215 break
216 dim -= 1
217 if self.dim is None:
218 self.dim = dim
219 else:
220 assert self.dim not in occupied_dims
221
222 @staticmethod
223 def _get_batch_shape(cond_indep_stack):
224 n_dims = max(-f.dim for f in cond_indep_stack)
225 batch_shape = [1] * n_dims
226 for f in cond_indep_stack:
227 batch_shape[f.dim] = f.size
228 return tuple(batch_shape)
229
230 def process_message(self, msg):
231 cond_indep_stack = msg['cond_indep_stack']
232 frame = CondIndepStackFrame(self.name, self.dim, self.subsample_size)
233 cond_indep_stack.append(frame)
234 batch_shape = self._get_batch_shape(cond_indep_stack)
235 if 'sample_shape' in msg['kwargs']:
236 batch_shape = lax.broadcast_shapes(msg['kwargs']['sample_shape'], batch_shape)
237 msg['kwargs']['sample_shape'] = batch_shape
238 msg['scale'] = msg['scale'] * self.size / self.subsample_size
239
[end of numpyro/primitives.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/numpyro/primitives.py b/numpyro/primitives.py
--- a/numpyro/primitives.py
+++ b/numpyro/primitives.py
@@ -231,7 +231,20 @@
cond_indep_stack = msg['cond_indep_stack']
frame = CondIndepStackFrame(self.name, self.dim, self.subsample_size)
cond_indep_stack.append(frame)
- batch_shape = self._get_batch_shape(cond_indep_stack)
+ expected_shape = self._get_batch_shape(cond_indep_stack)
+ dist_batch_shape = msg['fn'].batch_shape if msg['type'] == 'sample' else ()
+ overlap_idx = len(expected_shape) - len(dist_batch_shape)
+ if overlap_idx < 0:
+ raise ValueError('Expected dimensions within plate = {}, which is less than the '
+ 'distribution\'s batch shape = {}.'.format(len(expected_shape), len(dist_batch_shape)))
+ trailing_shape = expected_shape[overlap_idx:]
+ # e.g. distribution with batch shape (1, 5) cannot be broadcast to (5, 5)
+ broadcast_shape = lax.broadcast_shapes(trailing_shape, dist_batch_shape)
+ if broadcast_shape != dist_batch_shape:
+ raise ValueError('Distribution batch shape = {} cannot be broadcast up to {}. '
+ 'Consider using unbatched distributions.'
+ .format(dist_batch_shape, broadcast_shape))
+ batch_shape = expected_shape[:overlap_idx]
if 'sample_shape' in msg['kwargs']:
batch_shape = lax.broadcast_shapes(msg['kwargs']['sample_shape'], batch_shape)
msg['kwargs']['sample_shape'] = batch_shape
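With that guard in place the original reproducer yields the expected shape; a quick self-check (the model is the same as in the issue, plus an assertion) might look like:

```python
import jax.numpy as np
import numpyro
import numpyro.distributions as dist
from numpyro.handlers import trace, seed

def model():
    with numpyro.plate('plate', 10):
        numpyro.sample('x', dist.Normal(np.zeros(10), 1))

# After the patch this is (10,) rather than (10, 10).
assert trace(seed(model, 0)).get_trace()['x']['value'].shape == (10,)
```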
|
{"golden_diff": "diff --git a/numpyro/primitives.py b/numpyro/primitives.py\n--- a/numpyro/primitives.py\n+++ b/numpyro/primitives.py\n@@ -231,7 +231,20 @@\n cond_indep_stack = msg['cond_indep_stack']\n frame = CondIndepStackFrame(self.name, self.dim, self.subsample_size)\n cond_indep_stack.append(frame)\n- batch_shape = self._get_batch_shape(cond_indep_stack)\n+ expected_shape = self._get_batch_shape(cond_indep_stack)\n+ dist_batch_shape = msg['fn'].batch_shape if msg['type'] == 'sample' else ()\n+ overlap_idx = len(expected_shape) - len(dist_batch_shape)\n+ if overlap_idx < 0:\n+ raise ValueError('Expected dimensions within plate = {}, which is less than the '\n+ 'distribution\\'s batch shape = {}.'.format(len(expected_shape), len(dist_batch_shape)))\n+ trailing_shape = expected_shape[overlap_idx:]\n+ # e.g. distribution with batch shape (1, 5) cannot be broadcast to (5, 5)\n+ broadcast_shape = lax.broadcast_shapes(trailing_shape, dist_batch_shape)\n+ if broadcast_shape != dist_batch_shape:\n+ raise ValueError('Distribution batch shape = {} cannot be broadcast up to {}. '\n+ 'Consider using unbatched distributions.'\n+ .format(dist_batch_shape, broadcast_shape))\n+ batch_shape = expected_shape[:overlap_idx]\n if 'sample_shape' in msg['kwargs']:\n batch_shape = lax.broadcast_shapes(msg['kwargs']['sample_shape'], batch_shape)\n msg['kwargs']['sample_shape'] = batch_shape\n", "issue": "`plate` has unexpected behaviour when dist has batch_shape\nI faced this issue while revising baseball example using `plate`. The repro code\r\n```\r\nimport jax.numpy as np\r\nimport numpyro\r\nimport numpyro.distributions as dist\r\nfrom numpyro.handlers import trace, seed\r\n\r\ndef model():\r\n with numpyro.plate('plate', 10):\r\n numpyro.sample('x', dist.Normal(np.zeros(10), 1))\r\n\r\ntrace(seed(model, 0)).get_trace()['x']['value'].shape\r\n```\r\nshould return `(10,)` but currently, it returns `(10, 10)`.\r\n\r\ncc @neerajprad \n", "before_files": [{"content": "from collections import namedtuple\nimport functools\n\nimport jax\nfrom jax import lax\n\nimport numpyro\nfrom numpyro.distributions.discrete import PRNGIdentity\n\n_PYRO_STACK = []\n\n\nCondIndepStackFrame = namedtuple('CondIndepStackFrame', ['name', 'dim', 'size'])\n\n\ndef apply_stack(msg):\n pointer = 0\n for pointer, handler in enumerate(reversed(_PYRO_STACK)):\n handler.process_message(msg)\n # When a Messenger sets the \"stop\" field of a message,\n # it prevents any Messengers above it on the stack from being applied.\n if msg.get(\"stop\"):\n break\n if msg['value'] is None:\n if msg['type'] == 'sample':\n msg['value'], msg['intermediates'] = msg['fn'](*msg['args'],\n sample_intermediates=True,\n **msg['kwargs'])\n else:\n msg['value'] = msg['fn'](*msg['args'], **msg['kwargs'])\n\n # A Messenger that sets msg[\"stop\"] == True also prevents application\n # of postprocess_message by Messengers above it on the stack\n # via the pointer variable from the process_message loop\n for handler in _PYRO_STACK[-pointer-1:]:\n handler.postprocess_message(msg)\n return msg\n\n\nclass Messenger(object):\n def __init__(self, fn=None):\n self.fn = fn\n functools.update_wrapper(self, fn, updated=[])\n\n def __enter__(self):\n _PYRO_STACK.append(self)\n\n def __exit__(self, *args, **kwargs):\n assert _PYRO_STACK[-1] is self\n _PYRO_STACK.pop()\n\n def process_message(self, msg):\n pass\n\n def postprocess_message(self, msg):\n pass\n\n def __call__(self, *args, **kwargs):\n with self:\n return self.fn(*args, **kwargs)\n\n\ndef sample(name, fn, 
obs=None, random_state=None, sample_shape=()):\n \"\"\"\n Returns a random sample from the stochastic function `fn`. This can have\n additional side effects when wrapped inside effect handlers like\n :class:`~numpyro.handlers.substitute`.\n\n .. note::\n By design, `sample` primitive is meant to be used inside a NumPyro model.\n Then :class:`~numpyro.handlers.seed` handler is used to inject a random\n state to `fn`. In those situations, `random_state` keyword will take no\n effect.\n\n :param str name: name of the sample site\n :param fn: Python callable\n :param numpy.ndarray obs: observed value\n :param jax.random.PRNGKey random_state: an optional random key for `fn`.\n :param sample_shape: Shape of samples to be drawn.\n :return: sample from the stochastic `fn`.\n \"\"\"\n # if there are no active Messengers, we just draw a sample and return it as expected:\n if not _PYRO_STACK:\n return fn(random_state=random_state, sample_shape=sample_shape)\n\n # Otherwise, we initialize a message...\n initial_msg = {\n 'type': 'sample',\n 'name': name,\n 'fn': fn,\n 'args': (),\n 'kwargs': {'random_state': random_state, 'sample_shape': sample_shape},\n 'value': obs,\n 'scale': 1.0,\n 'is_observed': obs is not None,\n 'intermediates': [],\n 'cond_indep_stack': [],\n }\n\n # ...and use apply_stack to send it to the Messengers\n msg = apply_stack(initial_msg)\n return msg['value']\n\n\ndef identity(x, *args, **kwargs):\n return x\n\n\ndef param(name, init_value=None, **kwargs):\n \"\"\"\n Annotate the given site as an optimizable parameter for use with\n :mod:`jax.experimental.optimizers`. For an example of how `param` statements\n can be used in inference algorithms, refer to :func:`~numpyro.svi.svi`.\n\n :param str name: name of site.\n :param numpy.ndarray init_value: initial value specified by the user. Note that\n the onus of using this to initialize the optimizer is on the user /\n inference algorithm, since there is no global parameter store in\n NumPyro.\n :return: value for the parameter. 
Unless wrapped inside a\n handler like :class:`~numpyro.handlers.substitute`, this will simply\n return the initial value.\n \"\"\"\n # if there are no active Messengers, we just draw a sample and return it as expected:\n if not _PYRO_STACK:\n return init_value\n\n # Otherwise, we initialize a message...\n initial_msg = {\n 'type': 'param',\n 'name': name,\n 'fn': identity,\n 'args': (init_value,),\n 'kwargs': kwargs,\n 'value': None,\n 'scale': 1.0,\n 'cond_indep_stack': [],\n }\n\n # ...and use apply_stack to send it to the Messengers\n msg = apply_stack(initial_msg)\n return msg['value']\n\n\ndef module(name, nn, input_shape=None):\n \"\"\"\n Declare a :mod:`~jax.experimental.stax` style neural network inside a\n model so that its parameters are registered for optimization via\n :func:`~numpyro.primitives.param` statements.\n\n :param str name: name of the module to be registered.\n :param tuple nn: a tuple of `(init_fn, apply_fn)` obtained by a :mod:`~jax.experimental.stax`\n constructor function.\n :param tuple input_shape: shape of the input taken by the\n neural network.\n :return: a `apply_fn` with bound parameters that takes an array\n as an input and returns the neural network transformed output\n array.\n \"\"\"\n module_key = name + '$params'\n nn_init, nn_apply = nn\n nn_params = param(module_key)\n if nn_params is None:\n if input_shape is None:\n raise ValueError('Valid value for `input_size` needed to initialize.')\n rng = numpyro.sample(name + '$rng', PRNGIdentity())\n _, nn_params = nn_init(rng, input_shape)\n param(module_key, nn_params)\n return jax.partial(nn_apply, nn_params)\n\n\nclass plate(Messenger):\n \"\"\"\n Construct for annotating conditionally independent variables. Within a\n `plate` context manager, `sample` sites will be automatically broadcasted to\n the size of the plate. Additionally, a scale factor might be applied by\n certain inference algorithms if `subsample_size` is specified.\n\n :param str name: Name of the plate.\n :param int size: Size of the plate.\n :param int subsample_size: Optional argument denoting the size of the mini-batch.\n This can be used to apply a scaling factor by inference algorithms. e.g.\n when computing ELBO using a mini-batch.\n :param int dim: Optional argument to specify which dimension in the tensor\n is used as the plate dim. 
If `None` (default), the leftmost available dim\n is allocated.\n \"\"\"\n def __init__(self, name, size, subsample_size=None, dim=None):\n self.name = name\n self.size = size\n self.subsample_size = size if subsample_size is None else subsample_size\n if dim is not None and dim >= 0:\n raise ValueError('dim arg must be negative.')\n self.dim = dim\n self._validate_and_set_dim()\n super(plate, self).__init__()\n\n def _validate_and_set_dim(self):\n msg = {\n 'type': 'plate',\n 'fn': identity,\n 'name': self.name,\n 'args': (None,),\n 'kwargs': {},\n 'value': None,\n 'scale': 1.0,\n 'cond_indep_stack': [],\n }\n apply_stack(msg)\n cond_indep_stack = msg['cond_indep_stack']\n occupied_dims = {f.dim for f in cond_indep_stack}\n dim = -1\n while True:\n if dim not in occupied_dims:\n break\n dim -= 1\n if self.dim is None:\n self.dim = dim\n else:\n assert self.dim not in occupied_dims\n\n @staticmethod\n def _get_batch_shape(cond_indep_stack):\n n_dims = max(-f.dim for f in cond_indep_stack)\n batch_shape = [1] * n_dims\n for f in cond_indep_stack:\n batch_shape[f.dim] = f.size\n return tuple(batch_shape)\n\n def process_message(self, msg):\n cond_indep_stack = msg['cond_indep_stack']\n frame = CondIndepStackFrame(self.name, self.dim, self.subsample_size)\n cond_indep_stack.append(frame)\n batch_shape = self._get_batch_shape(cond_indep_stack)\n if 'sample_shape' in msg['kwargs']:\n batch_shape = lax.broadcast_shapes(msg['kwargs']['sample_shape'], batch_shape)\n msg['kwargs']['sample_shape'] = batch_shape\n msg['scale'] = msg['scale'] * self.size / self.subsample_size\n", "path": "numpyro/primitives.py"}]}
| 3,281 | 358 |
gh_patches_debug_22365
|
rasdani/github-patches
|
git_diff
|
mampfes__hacs_waste_collection_schedule-1603
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[Bug]: BSR_DE seems not to work anymore
### I Have A Problem With:
A specific source
### What's Your Problem
Source BSR_DE is not importing anything.
This is the path to the calendar:
https://www.bsr.de/abfuhrkalender_ajax.php?script=dynamic_iCal_ajax&abf_strasse=Chaukenstr.,%2012524%20Berlin%20(Treptow-K%C3%B6penick)&abf_hausnr=3X&tab_control=Liste&abf_config_weihnachtsbaeume=&abf_config_restmuell=on&abf_config_biogut=on&abf_config_wertstoffe=on&abf_config_laubtonne=on
This is the iCal it should get:
BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//www.bsr.de//NONSGML kigkonsult.se iCalcreator 2.41.71//
CALSCALE:GREGORIAN
UID:65616338-3738-4064-a633-326437643934
BEGIN:VEVENT
UID:32386261-6536-4130-a166-356138626438
DTSTAMP:20231228T151659Z
DESCRIPTION:Abholung Hausmüll
DTSTART;TZID=Europe/Berlin:20231228T000000
LOCATION:Chaukenstr. 3X\,
SUMMARY:Abholung Hausmüll
END:VEVENT
BEGIN:VEVENT
UID:37383565-6638-4232-a331-633133633061
DTSTAMP:20231228T151659Z
DESCRIPTION:Abholung Biogut
DTSTART;TZID=Europe/Berlin:20240102T000000
LOCATION:Chaukenstr. 3X\,
SUMMARY:Abholung Biogut
END:VEVENT
BEGIN:VEVENT
UID:30626662-3731-4963-b836-653262613233
DTSTAMP:20231228T151659Z
DESCRIPTION:Abholung Wertstoffe (Abholung durch ALBA)
DTSTART;TZID=Europe/Berlin:20240109T000000
LOCATION:Chaukenstr. 3X\,
SUMMARY:Abholung Wertstoffe (Abholung durch ALBA)
END:VEVENT
BEGIN:VEVENT
UID:64356336-6132-4464-a339-646533373961
DTSTAMP:20231228T151659Z
DESCRIPTION:Abholung Hausmüll
DTSTART;TZID=Europe/Berlin:20240110T000000
LOCATION:Chaukenstr. 3X\,
SUMMARY:Abholung Hausmüll
END:VEVENT
BEGIN:VEVENT
UID:61636636-6661-4834-b063-653566636636
DTSTAMP:20231228T151659Z
DESCRIPTION:Abholung Biogut
DTSTART;TZID=Europe/Berlin:20240115T000000
LOCATION:Chaukenstr. 3X\,
SUMMARY:Abholung Biogut
END:VEVENT
BEGIN:VEVENT
UID:32326238-3862-4533-b361-353033303935
DTSTAMP:20231228T151659Z
DESCRIPTION:Abholung Wertstoffe (Abholung durch ALBA)
DTSTART;TZID=Europe/Berlin:20240123T000000
LOCATION:Chaukenstr. 3X\,
SUMMARY:Abholung Wertstoffe (Abholung durch ALBA)
END:VEVENT
BEGIN:VEVENT
UID:30396632-3239-4764-b439-336132306661
DTSTAMP:20231228T151659Z
DESCRIPTION:Abholung Hausmüll
DTSTART;TZID=Europe/Berlin:20240124T000000
LOCATION:Chaukenstr. 3X\,
SUMMARY:Abholung Hausmüll
END:VEVENT
END:VCALENDAR
### Source (if relevant)
BSR_DE
### Logs
```Shell
no relevant logs
```
### Relevant Configuration
```YAML
- name: bsr_de
args:
abf_strasse: Chaukenstr., 12524 Berlin (Treptow-Köpenick)
abf_hausnr: 3*
```
### Checklist Source Error
- [X] Use the example parameters for your source (often available in the documentation) (don't forget to restart Home Assistant after changing the configuration)
- [X] Checked that the website of your service provider is still working
- [X] Tested my attributes on the service provider website (if possible)
- [X] I have tested with the latest version of the integration (master) (for HACS in the 3 dot menu of the integration click on "Redownload" and choose master as version)
### Checklist Sensor Error
- [x] Checked in the Home Assistant Calendar tab if the event names match the types names (if types argument is used)
### Required
- [X] I have searched past (closed AND opened) issues to see if this bug has already been reported, and it hasn't been.
- [X] I understand that people give their precious time for free, and thus I've done my very best to make this problem as easy as possible to investigate.
</issue>
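A quick diagnostic in line with the checklist above is to request the quoted iCal endpoint directly and confirm the response still looks like a calendar; if it does, the breakage is on the integration side rather than BSR's. Sketch only, with the URL taken verbatim from the issue:

```python
import requests

url = (
    "https://www.bsr.de/abfuhrkalender_ajax.php?script=dynamic_iCal_ajax"
    "&abf_strasse=Chaukenstr.,%2012524%20Berlin%20(Treptow-K%C3%B6penick)"
    "&abf_hausnr=3X&tab_control=Liste&abf_config_weihnachtsbaeume="
    "&abf_config_restmuell=on&abf_config_biogut=on"
    "&abf_config_wertstoffe=on&abf_config_laubtonne=on"
)

resp = requests.get(url)
print(resp.status_code, resp.text.lstrip().startswith("BEGIN:VCALENDAR"))
```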
<code>
[start of custom_components/waste_collection_schedule/waste_collection_schedule/source/bsr_de.py]
1 import urllib.parse
2
3 import requests
4 from waste_collection_schedule import Collection # type: ignore[attr-defined]
5 from waste_collection_schedule.service.ICS import ICS
6
7 TITLE = "Berliner Stadtreinigungsbetriebe"
8 DESCRIPTION = "Source for Berliner Stadtreinigungsbetriebe waste collection."
9 URL = "https://bsr.de"
10 TEST_CASES = {
11 "Bahnhofstr., 12159 Berlin (Tempelhof-Schöneberg)": {
12 "abf_strasse": "Bahnhofstr., 12159 Berlin (Tempelhof-Schöneberg)",
13 "abf_hausnr": 1,
14 },
15 "Am Ried, 13467 Berlin (Reinickendorf)": {
16 "abf_strasse": "Am Ried, 13467 Berlin (Reinickendorf)",
17 "abf_hausnr": "11G",
18 },
19 }
20
21
22 def myquote(s):
23 # bsr uses strange quoting
24 return urllib.parse.quote(s, safe=",()")
25
26
27 class Source:
28 def __init__(self, abf_strasse, abf_hausnr):
29 self._abf_strasse = abf_strasse
30 self._abf_hausnr = abf_hausnr
31 self._ics = ICS(offset=1)
32
33 def fetch(self):
34 # get cookie
35 r = requests.get("https://www.bsr.de/abfuhrkalender-20520.php")
36 cookies = r.cookies
37
38 # get street name only (without PLZ)
39 street = self._abf_strasse.split(",")[0]
40
41 # start search using string name (without PLZ)
42 args = {"script": "dynamic_search", "step": 1, "q": street}
43 r = requests.get(
44 "https://www.bsr.de/abfuhrkalender_ajax.php", params=args, cookies=cookies
45 )
46
47 # retrieve house number list
48 args = {"script": "dynamic_search", "step": 2, "q": self._abf_strasse}
49 r = requests.get(
50 "https://www.bsr.de/abfuhrkalender_ajax.php", params=args, cookies=cookies
51 )
52
53 args = {
54 "abf_strasse": street,
55 "abf_hausnr": self._abf_hausnr,
56 "tab_control": "Jahr",
57 "abf_config_weihnachtsbaeume": "",
58 "abf_config_restmuell": "on",
59 "abf_config_biogut": "on",
60 "abf_config_wertstoffe": "on",
61 "abf_config_laubtonne": "on",
62 # "abf_selectmonth": "5 2020",
63 # "abf_datepicker": "28.04.2020",
64 # "listitems":7,
65 }
66 r = requests.post(
67 "https://www.bsr.de/abfuhrkalender_ajax.php?script=dynamic_kalender_ajax",
68 data=args,
69 cookies=cookies,
70 )
71
72 args = {
73 "script": "dynamic_iCal_ajax",
74 "abf_strasse": self._abf_strasse,
75 "abf_hausnr": self._abf_hausnr,
76 "tab_control": "Jahr",
77 "abf_config_weihnachtsbaeume": "",
78 "abf_config_restmuell": "on",
79 "abf_config_biogut": "on",
80 "abf_config_wertstoffe": "on",
81 "abf_config_laubtonne": "on",
82 # "abf_selectmonth": "5 2020",
83 # "listitems":7,
84 }
85
86 # create url using private url encoding
87 encoded = map(lambda key: f"{key}={myquote(str(args[key]))}", args.keys())
88 url = "https://www.bsr.de/abfuhrkalender_ajax.php?" + "&".join(encoded)
89 r = requests.get(url, cookies=cookies)
90
91 # parse ics file
92 dates = self._ics.convert(r.text)
93
94 entries = []
95 for d in dates:
96 entries.append(Collection(d[0], d[1]))
97 return entries
98
[end of custom_components/waste_collection_schedule/waste_collection_schedule/source/bsr_de.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/custom_components/waste_collection_schedule/waste_collection_schedule/source/bsr_de.py b/custom_components/waste_collection_schedule/waste_collection_schedule/source/bsr_de.py
--- a/custom_components/waste_collection_schedule/waste_collection_schedule/source/bsr_de.py
+++ b/custom_components/waste_collection_schedule/waste_collection_schedule/source/bsr_de.py
@@ -1,4 +1,5 @@
import urllib.parse
+import datetime
import requests
from waste_collection_schedule import Collection # type: ignore[attr-defined]
@@ -91,6 +92,22 @@
# parse ics file
dates = self._ics.convert(r.text)
+
+ now = datetime.datetime.now()
+ # in nov/dec already fetch a monthly ics for january
+ # as yearly ics isn't available until the 1. of january.
+ if now.month in [11, 12]:
+ args["tab_control"] = "Monat"
+ args["abf_selectmonth"] = "1 " + str(now.year + 1)
+
+ # create url using private url encoding
+ encoded = map(lambda key: f"{key}={myquote(str(args[key]))}", args.keys())
+ url = "https://www.bsr.de/abfuhrkalender_ajax.php?" + "&".join(encoded)
+ r = requests.get(url, cookies=cookies)
+
+ # parse ics file
+ dates.extend (self._ics.convert(r.text) )
+
entries = []
for d in dates:
entries.append(Collection(d[0], d[1]))
|
{"golden_diff": "diff --git a/custom_components/waste_collection_schedule/waste_collection_schedule/source/bsr_de.py b/custom_components/waste_collection_schedule/waste_collection_schedule/source/bsr_de.py\n--- a/custom_components/waste_collection_schedule/waste_collection_schedule/source/bsr_de.py\n+++ b/custom_components/waste_collection_schedule/waste_collection_schedule/source/bsr_de.py\n@@ -1,4 +1,5 @@\n import urllib.parse\n+import datetime\n \n import requests\n from waste_collection_schedule import Collection # type: ignore[attr-defined]\n@@ -91,6 +92,22 @@\n # parse ics file\n dates = self._ics.convert(r.text)\n \n+\n+ now = datetime.datetime.now()\n+ # in nov/dec already fetch a monthly ics for january\n+ # as yearly ics isn't available until the 1. of january.\n+ if now.month in [11, 12]:\n+ args[\"tab_control\"] = \"Monat\"\n+ args[\"abf_selectmonth\"] = \"1 \" + str(now.year + 1)\n+\n+ # create url using private url encoding\n+ encoded = map(lambda key: f\"{key}={myquote(str(args[key]))}\", args.keys())\n+ url = \"https://www.bsr.de/abfuhrkalender_ajax.php?\" + \"&\".join(encoded)\n+ r = requests.get(url, cookies=cookies)\n+\n+ # parse ics file\n+ dates.extend (self._ics.convert(r.text) )\n+\n entries = []\n for d in dates:\n entries.append(Collection(d[0], d[1]))\n", "issue": "[Bug]: BSR_DE seems not to work anymore\n### I Have A Problem With:\n\nA specific source\n\n### What's Your Problem\n\nSource BSR_DE is not importing anything.\r\n\r\nThis is the Path to the Calendar\r\nhttps://www.bsr.de/abfuhrkalender_ajax.php?script=dynamic_iCal_ajax&abf_strasse=Chaukenstr.,%2012524%20Berlin%20(Treptow-K%C3%B6penick)&abf_hausnr=3X&tab_control=Liste&abf_config_weihnachtsbaeume=&abf_config_restmuell=on&abf_config_biogut=on&abf_config_wertstoffe=on&abf_config_laubtonne=on\r\n\r\nthis is the Ical it should get.\r\n\r\nBEGIN:VCALENDAR\r\nVERSION:2.0\r\nPRODID:-//www.bsr.de//NONSGML kigkonsult.se iCalcreator 2.41.71//\r\nCALSCALE:GREGORIAN\r\nUID:65616338-3738-4064-a633-326437643934\r\nBEGIN:VEVENT\r\nUID:32386261-6536-4130-a166-356138626438\r\nDTSTAMP:20231228T151659Z\r\nDESCRIPTION:Abholung Hausm\u00fcll\r\nDTSTART;TZID=Europe/Berlin:20231228T000000\r\nLOCATION:Chaukenstr. 3X\\,\r\nSUMMARY:Abholung Hausm\u00fcll\r\nEND:VEVENT\r\nBEGIN:VEVENT\r\nUID:37383565-6638-4232-a331-633133633061\r\nDTSTAMP:20231228T151659Z\r\nDESCRIPTION:Abholung Biogut\r\nDTSTART;TZID=Europe/Berlin:20240102T000000\r\nLOCATION:Chaukenstr. 3X\\,\r\nSUMMARY:Abholung Biogut\r\nEND:VEVENT\r\nBEGIN:VEVENT\r\nUID:30626662-3731-4963-b836-653262613233\r\nDTSTAMP:20231228T151659Z\r\nDESCRIPTION:Abholung Wertstoffe (Abholung durch ALBA)\r\nDTSTART;TZID=Europe/Berlin:20240109T000000\r\nLOCATION:Chaukenstr. 3X\\,\r\nSUMMARY:Abholung Wertstoffe (Abholung durch ALBA)\r\nEND:VEVENT\r\nBEGIN:VEVENT\r\nUID:64356336-6132-4464-a339-646533373961\r\nDTSTAMP:20231228T151659Z\r\nDESCRIPTION:Abholung Hausm\u00fcll\r\nDTSTART;TZID=Europe/Berlin:20240110T000000\r\nLOCATION:Chaukenstr. 3X\\,\r\nSUMMARY:Abholung Hausm\u00fcll\r\nEND:VEVENT\r\nBEGIN:VEVENT\r\nUID:61636636-6661-4834-b063-653566636636\r\nDTSTAMP:20231228T151659Z\r\nDESCRIPTION:Abholung Biogut\r\nDTSTART;TZID=Europe/Berlin:20240115T000000\r\nLOCATION:Chaukenstr. 3X\\,\r\nSUMMARY:Abholung Biogut\r\nEND:VEVENT\r\nBEGIN:VEVENT\r\nUID:32326238-3862-4533-b361-353033303935\r\nDTSTAMP:20231228T151659Z\r\nDESCRIPTION:Abholung Wertstoffe (Abholung durch ALBA)\r\nDTSTART;TZID=Europe/Berlin:20240123T000000\r\nLOCATION:Chaukenstr. 
3X\\,\r\nSUMMARY:Abholung Wertstoffe (Abholung durch ALBA)\r\nEND:VEVENT\r\nBEGIN:VEVENT\r\nUID:30396632-3239-4764-b439-336132306661\r\nDTSTAMP:20231228T151659Z\r\nDESCRIPTION:Abholung Hausm\u00fcll\r\nDTSTART;TZID=Europe/Berlin:20240124T000000\r\nLOCATION:Chaukenstr. 3X\\,\r\nSUMMARY:Abholung Hausm\u00fcll\r\nEND:VEVENT\r\nEND:VCALENDAR\n\n### Source (if relevant)\n\nBSR_DE\n\n### Logs\n\n```Shell\nno relevant logs\n```\n\n\n### Relevant Configuration\n\n```YAML\n- name: bsr_de\r\n args:\r\n abf_strasse: Chaukenstr., 12524 Berlin (Treptow-K\u00f6penick)\r\n abf_hausnr: 3*\n```\n\n\n### Checklist Source Error\n\n- [X] Use the example parameters for your source (often available in the documentation) (don't forget to restart Home Assistant after changing the configuration)\n- [X] Checked that the website of your service provider is still working\n- [X] Tested my attributes on the service provider website (if possible)\n- [X] I have tested with the latest version of the integration (master) (for HACS in the 3 dot menu of the integration click on \"Redownload\" and choose master as version)\n\n### Checklist Sensor Error\n\n- [x] Checked in the Home Assistant Calendar tab if the event names match the types names (if types argument is used)\n\n### Required\n\n- [X] I have searched past (closed AND opened) issues to see if this bug has already been reported, and it hasn't been.\n- [X] I understand that people give their precious time for free, and thus I've done my very best to make this problem as easy as possible to investigate.\n", "before_files": [{"content": "import urllib.parse\n\nimport requests\nfrom waste_collection_schedule import Collection # type: ignore[attr-defined]\nfrom waste_collection_schedule.service.ICS import ICS\n\nTITLE = \"Berliner Stadtreinigungsbetriebe\"\nDESCRIPTION = \"Source for Berliner Stadtreinigungsbetriebe waste collection.\"\nURL = \"https://bsr.de\"\nTEST_CASES = {\n \"Bahnhofstr., 12159 Berlin (Tempelhof-Sch\u00f6neberg)\": {\n \"abf_strasse\": \"Bahnhofstr., 12159 Berlin (Tempelhof-Sch\u00f6neberg)\",\n \"abf_hausnr\": 1,\n },\n \"Am Ried, 13467 Berlin (Reinickendorf)\": {\n \"abf_strasse\": \"Am Ried, 13467 Berlin (Reinickendorf)\",\n \"abf_hausnr\": \"11G\",\n },\n}\n\n\ndef myquote(s):\n # bsr uses strange quoting\n return urllib.parse.quote(s, safe=\",()\")\n\n\nclass Source:\n def __init__(self, abf_strasse, abf_hausnr):\n self._abf_strasse = abf_strasse\n self._abf_hausnr = abf_hausnr\n self._ics = ICS(offset=1)\n\n def fetch(self):\n # get cookie\n r = requests.get(\"https://www.bsr.de/abfuhrkalender-20520.php\")\n cookies = r.cookies\n\n # get street name only (without PLZ)\n street = self._abf_strasse.split(\",\")[0]\n\n # start search using string name (without PLZ)\n args = {\"script\": \"dynamic_search\", \"step\": 1, \"q\": street}\n r = requests.get(\n \"https://www.bsr.de/abfuhrkalender_ajax.php\", params=args, cookies=cookies\n )\n\n # retrieve house number list\n args = {\"script\": \"dynamic_search\", \"step\": 2, \"q\": self._abf_strasse}\n r = requests.get(\n \"https://www.bsr.de/abfuhrkalender_ajax.php\", params=args, cookies=cookies\n )\n\n args = {\n \"abf_strasse\": street,\n \"abf_hausnr\": self._abf_hausnr,\n \"tab_control\": \"Jahr\",\n \"abf_config_weihnachtsbaeume\": \"\",\n \"abf_config_restmuell\": \"on\",\n \"abf_config_biogut\": \"on\",\n \"abf_config_wertstoffe\": \"on\",\n \"abf_config_laubtonne\": \"on\",\n # \"abf_selectmonth\": \"5 2020\",\n # \"abf_datepicker\": \"28.04.2020\",\n # \"listitems\":7,\n }\n r = 
requests.post(\n \"https://www.bsr.de/abfuhrkalender_ajax.php?script=dynamic_kalender_ajax\",\n data=args,\n cookies=cookies,\n )\n\n args = {\n \"script\": \"dynamic_iCal_ajax\",\n \"abf_strasse\": self._abf_strasse,\n \"abf_hausnr\": self._abf_hausnr,\n \"tab_control\": \"Jahr\",\n \"abf_config_weihnachtsbaeume\": \"\",\n \"abf_config_restmuell\": \"on\",\n \"abf_config_biogut\": \"on\",\n \"abf_config_wertstoffe\": \"on\",\n \"abf_config_laubtonne\": \"on\",\n # \"abf_selectmonth\": \"5 2020\",\n # \"listitems\":7,\n }\n\n # create url using private url encoding\n encoded = map(lambda key: f\"{key}={myquote(str(args[key]))}\", args.keys())\n url = \"https://www.bsr.de/abfuhrkalender_ajax.php?\" + \"&\".join(encoded)\n r = requests.get(url, cookies=cookies)\n\n # parse ics file\n dates = self._ics.convert(r.text)\n\n entries = []\n for d in dates:\n entries.append(Collection(d[0], d[1]))\n return entries\n", "path": "custom_components/waste_collection_schedule/waste_collection_schedule/source/bsr_de.py"}]}
| 3,165 | 351 |

| gh_patches_debug_19023 | rasdani/github-patches | git_diff | vyperlang__vyper-874 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add uint256 decimal support for convert
### What's your issue about?
Convert should allow conversion of uint256 -> decimal.
### How can it be fixed?
convert.py:57
#### Cute Animal Picture
^.^
</issue>
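For orientation before the code below: one way to read this request is to widen `to_decimal`'s accepted input types and clamp the scaled value so an oversized uint256 cannot overflow the decimal range. This sketch mirrors the shape of the fix recorded later in this entry and assumes the module's existing imports (`signature`, `LLLnode`, `BaseType`, `MemoryPositions`, `DECIMAL_DIVISOR`, `getpos`):

```python
# Sketch only - not presented as the final patch.
@signature(('int128', 'uint256'), 'str_literal')
def to_decimal(expr, args, kwargs, context):
    input = args[0]
    if input.typ.typ == 'uint256':
        # uint256 values can exceed the decimal range, so clamp after scaling
        return LLLnode.from_list(
            ['uclample', ['mul', input, DECIMAL_DIVISOR],
             ['mload', MemoryPositions.MAXDECIMAL]],
            typ=BaseType('decimal', input.typ.unit, input.typ.positional),
            pos=getpos(expr))
    return LLLnode.from_list(
        ['mul', input, DECIMAL_DIVISOR],
        typ=BaseType('decimal', input.typ.unit, input.typ.positional),
        pos=getpos(expr))
```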
<code>
[start of vyper/types/convert.py]
1 from vyper.functions.signature import (
2 signature
3 )
4 from vyper.parser.parser_utils import (
5 LLLnode,
6 getpos,
7 byte_array_to_num
8 )
9 from vyper.exceptions import (
10 InvalidLiteralException,
11 TypeMismatchException,
12 )
13 from vyper.types import (
14 BaseType,
15 )
16 from vyper.types import (
17 get_type,
18 )
19 from vyper.utils import (
20 DECIMAL_DIVISOR,
21 MemoryPositions,
22 SizeLimits
23 )
24
25
26 @signature(('int128', 'uint256', 'bytes32', 'bytes'), 'str_literal')
27 def to_int128(expr, args, kwargs, context):
28 in_node = args[0]
29 typ, len = get_type(in_node)
30 if typ in ('int128', 'uint256', 'bytes32'):
31 if in_node.typ.is_literal and not SizeLimits.in_bounds('int128', in_node.value):
32 raise InvalidLiteralException("Number out of range: {}".format(in_node.value), expr)
33 return LLLnode.from_list(
34 ['clamp', ['mload', MemoryPositions.MINNUM], in_node,
35 ['mload', MemoryPositions.MAXNUM]], typ=BaseType('int128', in_node.typ.unit), pos=getpos(expr)
36 )
37 else:
38 return byte_array_to_num(in_node, expr, 'int128')
39
40
41 @signature(('num_literal', 'int128', 'bytes32'), 'str_literal')
42 def to_uint256(expr, args, kwargs, context):
43 in_node = args[0]
44 typ, len = get_type(in_node)
45 if isinstance(in_node, int):
46
47 if not SizeLimits.in_bounds('uint256', in_node):
48 raise InvalidLiteralException("Number out of range: {}".format(in_node))
49 _unit = in_node.typ.unit if typ == 'int128' else None
50 return LLLnode.from_list(in_node, typ=BaseType('uint256', _unit), pos=getpos(expr))
51 elif isinstance(in_node, LLLnode) and typ in ('int128', 'num_literal'):
52 _unit = in_node.typ.unit if typ == 'int128' else None
53 return LLLnode.from_list(['clampge', in_node, 0], typ=BaseType('uint256', _unit), pos=getpos(expr))
54 elif isinstance(in_node, LLLnode) and typ in ('bytes32'):
55 return LLLnode(value=in_node.value, args=in_node.args, typ=BaseType('uint256'), pos=getpos(expr))
56 else:
57 raise InvalidLiteralException("Invalid input for uint256: %r" % in_node, expr)
58
59
60 @signature('int128', 'str_literal')
61 def to_decimal(expr, args, kwargs, context):
62 input = args[0]
63 return LLLnode.from_list(['mul', input, DECIMAL_DIVISOR], typ=BaseType('decimal', input.typ.unit, input.typ.positional),
64 pos=getpos(expr))
65
66
67 @signature(('int128', 'uint256', 'address', 'bytes'), 'str_literal')
68 def to_bytes32(expr, args, kwargs, context):
69 input = args[0]
70 typ, len = get_type(input)
71 if typ == 'bytes':
72 if len != 32:
73 raise TypeMismatchException("Unable to convert bytes[{}] to bytes32".format(len))
74 if input.location == "memory":
75 return LLLnode.from_list(
76 ['mload', ['add', input, 32]], typ=BaseType('bytes32')
77 )
78 elif input.location == "storage":
79 return LLLnode.from_list(
80 ['sload', ['add', ['sha3_32', input], 1]], typ=BaseType('bytes32')
81 )
82 else:
83 return LLLnode(value=input.value, args=input.args, typ=BaseType('bytes32'), pos=getpos(expr))
84
85
86 def convert(expr, context):
87 output_type = expr.args[1].s
88 if output_type in conversion_table:
89 return conversion_table[output_type](expr, context)
90 else:
91 raise Exception("Conversion to {} is invalid.".format(output_type))
92
93
94 conversion_table = {
95 'int128': to_int128,
96 'uint256': to_uint256,
97 'decimal': to_decimal,
98 'bytes32': to_bytes32,
99 }
100
[end of vyper/types/convert.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/vyper/types/convert.py b/vyper/types/convert.py
--- a/vyper/types/convert.py
+++ b/vyper/types/convert.py
@@ -57,11 +57,20 @@
raise InvalidLiteralException("Invalid input for uint256: %r" % in_node, expr)
-@signature('int128', 'str_literal')
+@signature(('int128', 'uint256'), 'str_literal')
def to_decimal(expr, args, kwargs, context):
input = args[0]
- return LLLnode.from_list(['mul', input, DECIMAL_DIVISOR], typ=BaseType('decimal', input.typ.unit, input.typ.positional),
- pos=getpos(expr))
+ if input.typ.typ == 'uint256':
+ return LLLnode.from_list(
+ ['uclample', ['mul', input, DECIMAL_DIVISOR], ['mload', MemoryPositions.MAXDECIMAL]],
+ typ=BaseType('decimal', input.typ.unit, input.typ.positional), pos=getpos(expr)
+ )
+ else:
+ return LLLnode.from_list(
+ ['mul', input, DECIMAL_DIVISOR],
+ typ=BaseType('decimal', input.typ.unit, input.typ.positional),
+ pos=getpos(expr)
+ )
@signature(('int128', 'uint256', 'address', 'bytes'), 'str_literal')
|
{"golden_diff": "diff --git a/vyper/types/convert.py b/vyper/types/convert.py\n--- a/vyper/types/convert.py\n+++ b/vyper/types/convert.py\n@@ -57,11 +57,20 @@\n raise InvalidLiteralException(\"Invalid input for uint256: %r\" % in_node, expr)\n \n \n-@signature('int128', 'str_literal')\n+@signature(('int128', 'uint256'), 'str_literal')\n def to_decimal(expr, args, kwargs, context):\n input = args[0]\n- return LLLnode.from_list(['mul', input, DECIMAL_DIVISOR], typ=BaseType('decimal', input.typ.unit, input.typ.positional),\n- pos=getpos(expr))\n+ if input.typ.typ == 'uint256':\n+ return LLLnode.from_list(\n+ ['uclample', ['mul', input, DECIMAL_DIVISOR], ['mload', MemoryPositions.MAXDECIMAL]],\n+ typ=BaseType('decimal', input.typ.unit, input.typ.positional), pos=getpos(expr)\n+ )\n+ else:\n+ return LLLnode.from_list(\n+ ['mul', input, DECIMAL_DIVISOR],\n+ typ=BaseType('decimal', input.typ.unit, input.typ.positional),\n+ pos=getpos(expr)\n+ )\n \n \n @signature(('int128', 'uint256', 'address', 'bytes'), 'str_literal')\n", "issue": "Add uint256 decimal support for convert\n### What's your issue about?\r\n\r\nConvert should allow conversion of uint256 -> decimal.\r\n\r\n### How can it be fixed?\r\n\r\nconvert.py:57\r\n\r\n#### Cute Animal Picture\r\n^.^\n", "before_files": [{"content": "from vyper.functions.signature import (\n signature\n)\nfrom vyper.parser.parser_utils import (\n LLLnode,\n getpos,\n byte_array_to_num\n)\nfrom vyper.exceptions import (\n InvalidLiteralException,\n TypeMismatchException,\n)\nfrom vyper.types import (\n BaseType,\n)\nfrom vyper.types import (\n get_type,\n)\nfrom vyper.utils import (\n DECIMAL_DIVISOR,\n MemoryPositions,\n SizeLimits\n)\n\n\n@signature(('int128', 'uint256', 'bytes32', 'bytes'), 'str_literal')\ndef to_int128(expr, args, kwargs, context):\n in_node = args[0]\n typ, len = get_type(in_node)\n if typ in ('int128', 'uint256', 'bytes32'):\n if in_node.typ.is_literal and not SizeLimits.in_bounds('int128', in_node.value):\n raise InvalidLiteralException(\"Number out of range: {}\".format(in_node.value), expr)\n return LLLnode.from_list(\n ['clamp', ['mload', MemoryPositions.MINNUM], in_node,\n ['mload', MemoryPositions.MAXNUM]], typ=BaseType('int128', in_node.typ.unit), pos=getpos(expr)\n )\n else:\n return byte_array_to_num(in_node, expr, 'int128')\n\n\n@signature(('num_literal', 'int128', 'bytes32'), 'str_literal')\ndef to_uint256(expr, args, kwargs, context):\n in_node = args[0]\n typ, len = get_type(in_node)\n if isinstance(in_node, int):\n\n if not SizeLimits.in_bounds('uint256', in_node):\n raise InvalidLiteralException(\"Number out of range: {}\".format(in_node))\n _unit = in_node.typ.unit if typ == 'int128' else None\n return LLLnode.from_list(in_node, typ=BaseType('uint256', _unit), pos=getpos(expr))\n elif isinstance(in_node, LLLnode) and typ in ('int128', 'num_literal'):\n _unit = in_node.typ.unit if typ == 'int128' else None\n return LLLnode.from_list(['clampge', in_node, 0], typ=BaseType('uint256', _unit), pos=getpos(expr))\n elif isinstance(in_node, LLLnode) and typ in ('bytes32'):\n return LLLnode(value=in_node.value, args=in_node.args, typ=BaseType('uint256'), pos=getpos(expr))\n else:\n raise InvalidLiteralException(\"Invalid input for uint256: %r\" % in_node, expr)\n\n\n@signature('int128', 'str_literal')\ndef to_decimal(expr, args, kwargs, context):\n input = args[0]\n return LLLnode.from_list(['mul', input, DECIMAL_DIVISOR], typ=BaseType('decimal', input.typ.unit, input.typ.positional),\n pos=getpos(expr))\n\n\n@signature(('int128', 
'uint256', 'address', 'bytes'), 'str_literal')\ndef to_bytes32(expr, args, kwargs, context):\n input = args[0]\n typ, len = get_type(input)\n if typ == 'bytes':\n if len != 32:\n raise TypeMismatchException(\"Unable to convert bytes[{}] to bytes32\".format(len))\n if input.location == \"memory\":\n return LLLnode.from_list(\n ['mload', ['add', input, 32]], typ=BaseType('bytes32')\n )\n elif input.location == \"storage\":\n return LLLnode.from_list(\n ['sload', ['add', ['sha3_32', input], 1]], typ=BaseType('bytes32')\n )\n else:\n return LLLnode(value=input.value, args=input.args, typ=BaseType('bytes32'), pos=getpos(expr))\n\n\ndef convert(expr, context):\n output_type = expr.args[1].s\n if output_type in conversion_table:\n return conversion_table[output_type](expr, context)\n else:\n raise Exception(\"Conversion to {} is invalid.\".format(output_type))\n\n\nconversion_table = {\n 'int128': to_int128,\n 'uint256': to_uint256,\n 'decimal': to_decimal,\n 'bytes32': to_bytes32,\n}\n", "path": "vyper/types/convert.py"}]}
| 1,758 | 321 |

| gh_patches_debug_3044 | rasdani/github-patches | git_diff | opendatacube__datacube-core-1374 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Incompatibilities with xarray > 2022.03
### Expected behaviour
ODC should work with the current version of `xarray`. In `setup.py` there's an exclusion of `2022.6.0`, but I don't think that's sufficient. It'd be worth digging up the commit/PR that made that change.
### Actual behaviour
Tests are failing.
```
FAILED tests/api/test_grid_workflow.py::test_gridworkflow_with_time_depth - AssertionError
FAILED tests/api/test_virtual.py::test_aggregate - ValueError: time already exists as coordinate or variable name.
```
### Steps to reproduce the behaviour
`pytest tests/`
### Environment information
* Which ``datacube --version`` are you using?
`develop` branch at `af59377327c363b9c52b55000b4024a0b3fbaa8b`
* What datacube deployment/environment are you running against?
- Mambaforge
- conda-forge
- Python 3.10
</issue>
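The diff recorded later in this entry resolves this by replacing the single-version exclusion with an upper bound on `xarray`. As a sketch of the relevant `install_requires` fragment (surrounding requirements elided):

```python
# setup.py fragment - the stopgap the report points toward,
# matching the diff recorded later in this entry.
install_requires = [
    # ...
    'xarray>=0.9,<2022.6',  # was: 'xarray>=0.9,!=2022.6.0'
]
```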
<code>
[start of setup.py]
1 #!/usr/bin/env python
2
3 from setuptools import setup, find_packages
4
5 tests_require = [
6 'hypothesis',
7 'pycodestyle',
8 'pylint',
9 'pytest',
10 'pytest-cov',
11 'pytest-timeout',
12 'pytest-httpserver',
13 'moto',
14 ]
15 doc_require = [
16 'Sphinx',
17 'sphinx_rtd_theme',
18 'sphinx_autodoc_typehints', # Propagate mypy info into docs
19 'sphinx-click',
20 'recommonmark',
21 'setuptools', # version related dependencies
22 'setuptools_scm[toml]',
23 ]
24
25 extras_require = {
26 'performance': ['ciso8601', 'bottleneck'],
27 'distributed': ['distributed', 'dask[distributed]'],
28 'doc': doc_require,
29 's3': ['boto3', 'botocore'],
30 'test': tests_require,
31 'cf': ['compliance-checker>=4.0.0'],
32 }
33
34 extras_require['dev'] = sorted(set(sum([extras_require[k] for k in [
35 'test',
36 'doc',
37 'performance',
38 's3',
39 'distributed',
40 ]], [])))
41
42 # An 'all' option, following ipython naming conventions.
43 extras_require['all'] = sorted(set(sum(extras_require.values(), [])))
44
45 extra_plugins = dict(read=[], write=[], index=[])
46
47 setup(
48 name='datacube',
49 python_requires='>=3.8.0',
50
51 url='https://github.com/opendatacube/datacube-core',
52 author='Open Data Cube',
53 maintainer='Open Data Cube',
54 maintainer_email='',
55 description='An analysis environment for satellite and other earth observation data',
56 long_description=open('README.rst').read(),
57 long_description_content_type='text/x-rst',
58 license='Apache License 2.0',
59 classifiers=[
60 "Development Status :: 4 - Beta",
61 "Intended Audience :: Developers",
62 "Intended Audience :: Science/Research",
63 "License :: OSI Approved :: Apache Software License",
64 "Natural Language :: English",
65 "Operating System :: MacOS :: MacOS X",
66 "Operating System :: POSIX",
67 "Operating System :: POSIX :: BSD",
68 "Operating System :: POSIX :: Linux",
69 "Operating System :: Microsoft :: Windows",
70 "Programming Language :: Python",
71 "Programming Language :: Python :: 3",
72 "Programming Language :: Python :: 3.8",
73 "Programming Language :: Python :: 3.9",
74 "Programming Language :: Python :: 3.10",
75 "Topic :: Scientific/Engineering :: GIS",
76 "Topic :: Scientific/Engineering :: Information Analysis",
77 ],
78
79 packages=find_packages(
80 exclude=('tests', 'tests.*',
81 'integration_tests', 'integration_tests.*')
82 ),
83 package_data={
84 '': ['*.yaml', '*/*.yaml'],
85 'datacube': ['py.typed'],
86 },
87 scripts=[],
88 install_requires=[
89 'affine',
90 'pyproj>=2.5',
91 'shapely>=1.6.4',
92 'cachetools',
93 'click>=5.0',
94 'cloudpickle>=0.4',
95 'dask[array]',
96 'distributed',
97 'jsonschema',
98 'netcdf4',
99 'numpy',
100 'psycopg2',
101 'lark',
102 'pandas',
103 'python-dateutil',
104 'pyyaml',
105 'rasterio>=1.3.2', # Warping broken in 1.3.0 and 1.3.1
106 'sqlalchemy',
107 'GeoAlchemy2',
108 'toolz',
109 'xarray>=0.9,!=2022.6.0', # >0.9 fixes most problems with `crs` attributes being lost
110 ],
111 extras_require=extras_require,
112 tests_require=tests_require,
113
114 entry_points={
115 'console_scripts': [
116 'datacube = datacube.scripts.cli_app:cli',
117 'datacube-search = datacube.scripts.search_tool:cli',
118 'datacube-worker = datacube.execution.worker:main',
119 ],
120 'datacube.plugins.io.read': [
121 'netcdf = datacube.drivers.netcdf.driver:reader_driver_init',
122 *extra_plugins['read'],
123 ],
124 'datacube.plugins.io.write': [
125 'netcdf = datacube.drivers.netcdf.driver:writer_driver_init',
126 *extra_plugins['write'],
127 ],
128 'datacube.plugins.index': [
129 'default = datacube.index.postgres.index:index_driver_init',
130 'null = datacube.index.null.index:index_driver_init',
131 'memory = datacube.index.memory.index:index_driver_init',
132 'postgis = datacube.index.postgis.index:index_driver_init',
133 *extra_plugins['index'],
134 ],
135 },
136 )
137
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -106,7 +106,7 @@
'sqlalchemy',
'GeoAlchemy2',
'toolz',
- 'xarray>=0.9,!=2022.6.0', # >0.9 fixes most problems with `crs` attributes being lost
+ 'xarray>=0.9,<2022.6', # >0.9 fixes most problems with `crs` attributes being lost
],
extras_require=extras_require,
tests_require=tests_require,
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -106,7 +106,7 @@\n 'sqlalchemy',\n 'GeoAlchemy2',\n 'toolz',\n- 'xarray>=0.9,!=2022.6.0', # >0.9 fixes most problems with `crs` attributes being lost\n+ 'xarray>=0.9,<2022.6', # >0.9 fixes most problems with `crs` attributes being lost\n ],\n extras_require=extras_require,\n tests_require=tests_require,\n", "issue": "Incompatibilities with xarray > 2022.03\n### Expected behaviour\r\nODC should work with current version of `xarray`. In `setup.py` there's an exclusion of `2022.6.0`, but I don't think that's sufficient. It'd be worth digging up the commit/PR that made that change.\r\n\r\n### Actual behaviour\r\n\r\nTests are failing.\r\n\r\n```\r\nFAILED tests/api/test_grid_workflow.py::test_gridworkflow_with_time_depth - AssertionError\r\nFAILED tests/api/test_virtual.py::test_aggregate - ValueError: time already exists as coordinate or variable name.\r\n```\r\n\r\n### Steps to reproduce the behaviour\r\n\r\n`pytest tests/`\r\n\r\n### Environment information\r\n\r\n* Which ``datacube --version`` are you using?\r\n\r\n `develop` branch at `af59377327c363b9c52b55000b4024a0b3fbaa8b`\r\n\r\n* What datacube deployment/enviornment are you running against?\r\n\r\n - Mambaforge\r\n - conda-forge \r\n - Python 3.10\r\n\r\n\r\n\r\n\n", "before_files": [{"content": "#!/usr/bin/env python\n\nfrom setuptools import setup, find_packages\n\ntests_require = [\n 'hypothesis',\n 'pycodestyle',\n 'pylint',\n 'pytest',\n 'pytest-cov',\n 'pytest-timeout',\n 'pytest-httpserver',\n 'moto',\n]\ndoc_require = [\n 'Sphinx',\n 'sphinx_rtd_theme',\n 'sphinx_autodoc_typehints', # Propagate mypy info into docs\n 'sphinx-click',\n 'recommonmark',\n 'setuptools', # version related dependencies\n 'setuptools_scm[toml]',\n]\n\nextras_require = {\n 'performance': ['ciso8601', 'bottleneck'],\n 'distributed': ['distributed', 'dask[distributed]'],\n 'doc': doc_require,\n 's3': ['boto3', 'botocore'],\n 'test': tests_require,\n 'cf': ['compliance-checker>=4.0.0'],\n}\n\nextras_require['dev'] = sorted(set(sum([extras_require[k] for k in [\n 'test',\n 'doc',\n 'performance',\n 's3',\n 'distributed',\n]], [])))\n\n# An 'all' option, following ipython naming conventions.\nextras_require['all'] = sorted(set(sum(extras_require.values(), [])))\n\nextra_plugins = dict(read=[], write=[], index=[])\n\nsetup(\n name='datacube',\n python_requires='>=3.8.0',\n\n url='https://github.com/opendatacube/datacube-core',\n author='Open Data Cube',\n maintainer='Open Data Cube',\n maintainer_email='',\n description='An analysis environment for satellite and other earth observation data',\n long_description=open('README.rst').read(),\n long_description_content_type='text/x-rst',\n license='Apache License 2.0',\n classifiers=[\n \"Development Status :: 4 - Beta\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: Science/Research\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Natural Language :: English\",\n \"Operating System :: MacOS :: MacOS X\",\n \"Operating System :: POSIX\",\n \"Operating System :: POSIX :: BSD\",\n \"Operating System :: POSIX :: Linux\",\n \"Operating System :: Microsoft :: Windows\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Topic :: Scientific/Engineering :: GIS\",\n \"Topic :: Scientific/Engineering :: Information Analysis\",\n ],\n\n 
packages=find_packages(\n exclude=('tests', 'tests.*',\n 'integration_tests', 'integration_tests.*')\n ),\n package_data={\n '': ['*.yaml', '*/*.yaml'],\n 'datacube': ['py.typed'],\n },\n scripts=[],\n install_requires=[\n 'affine',\n 'pyproj>=2.5',\n 'shapely>=1.6.4',\n 'cachetools',\n 'click>=5.0',\n 'cloudpickle>=0.4',\n 'dask[array]',\n 'distributed',\n 'jsonschema',\n 'netcdf4',\n 'numpy',\n 'psycopg2',\n 'lark',\n 'pandas',\n 'python-dateutil',\n 'pyyaml',\n 'rasterio>=1.3.2', # Warping broken in 1.3.0 and 1.3.1\n 'sqlalchemy',\n 'GeoAlchemy2',\n 'toolz',\n 'xarray>=0.9,!=2022.6.0', # >0.9 fixes most problems with `crs` attributes being lost\n ],\n extras_require=extras_require,\n tests_require=tests_require,\n\n entry_points={\n 'console_scripts': [\n 'datacube = datacube.scripts.cli_app:cli',\n 'datacube-search = datacube.scripts.search_tool:cli',\n 'datacube-worker = datacube.execution.worker:main',\n ],\n 'datacube.plugins.io.read': [\n 'netcdf = datacube.drivers.netcdf.driver:reader_driver_init',\n *extra_plugins['read'],\n ],\n 'datacube.plugins.io.write': [\n 'netcdf = datacube.drivers.netcdf.driver:writer_driver_init',\n *extra_plugins['write'],\n ],\n 'datacube.plugins.index': [\n 'default = datacube.index.postgres.index:index_driver_init',\n 'null = datacube.index.null.index:index_driver_init',\n 'memory = datacube.index.memory.index:index_driver_init',\n 'postgis = datacube.index.postgis.index:index_driver_init',\n *extra_plugins['index'],\n ],\n },\n)\n", "path": "setup.py"}]}
| 2,115 | 139 |

| gh_patches_debug_64032 | rasdani/github-patches | git_diff | holoviz__panel-779 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Typo in DataFrame example page
I'm trying out `pn.widget.DataFrame`. I noticed a typo on the example page `https://github.com/pyviz/panel/blob/45177aa5fa30c89ea26a942385bd7878e60dd323/examples/reference/widgets/DataFrame.ipynb`.
Under the list of Core options, `fit_column` should be `fit_columns`.
Side note: the explanation of `fit_columns` threw me off at first. I was thinking/hoping it would fit columns to a minimum width based on their contents, but instead it expanded them to the total widget width. I see I can mostly get what I want via a `widths` dict, so I'll go that route; I just wanted to point out that it was slightly confusing.
</issue>
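For context, a minimal sketch of how the options being discussed are passed to the widget, with parameter names taken from `panel/widgets/tables.py` shown below and made-up data:

```python
import pandas as pd
import panel as pn

pn.extension()

df = pd.DataFrame({"name": ["a", "b"], "value": [1, 2]})

table = pn.widgets.DataFrame(
    df,
    fit_columns=False,                   # don't stretch columns to the full widget width
    widths={"name": 120, "value": 80},   # per-column pixel widths
    width=400,                           # overall widget width, distinct from column widths
)
```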
<code>
[start of panel/widgets/tables.py]
1 from __future__ import absolute_import, division, unicode_literals
2
3
4 import param
5
6 from bokeh.models import ColumnDataSource
7 from bokeh.models.widgets import (
8 DataTable, TableColumn, NumberEditor, NumberFormatter,
9 DateFormatter, DateEditor, StringFormatter, StringEditor, IntEditor
10 )
11
12 from ..viewable import Layoutable
13 from ..util import isdatetime
14 from .base import Widget
15
16
17 class DataFrame(Widget):
18
19 editors = param.Dict(default={}, doc="""
20 Bokeh CellEditor to use for a particular column
21 (overrides the default chosen based on the type).""")
22
23 formatters = param.Dict(default={}, doc="""
24 Bokeh CellFormatter to use for a particular column
25 (overrides the default chosen based on the type).""")
26
27 fit_columns = param.Boolean(default=True, doc="""
28 Whether columns should be fit to the available width.
29 This results in no horizontal scrollbar showing up, but data
30 can get unreadable if there is no enough space available.""")
31
32 selection = param.List(default=[], doc="""
33 The currently selected rows of the table.""")
34
35 row_height = param.Integer(default=25, doc="""
36 The height of each table row.""")
37
38 widths = param.Dict(default={}, doc="""
39 A mapping from column name to column width.""")
40
41 value = param.Parameter(default=None)
42
43 _manual_params = ['value', 'editors', 'formatters', 'selection', 'width']
44
45 def __init__(self, value=None, **params):
46 super(DataFrame, self).__init__(value=value, **params)
47 self._renamed_cols = {}
48
49 def _get_columns(self):
50 if self.value is None:
51 return []
52
53 index = [self.value.index.name or 'index']
54 col_names = index + list(self.value.columns)
55 columns = []
56 for col in col_names:
57 if col in self.value.columns:
58 data = self.value[col]
59 else:
60 data = self.value.index
61 kind = data.dtype.kind
62 if kind == 'i':
63 formatter = NumberFormatter()
64 editor = IntEditor()
65 elif kind == 'f':
66 formatter = NumberFormatter(format='0,0.0[00000]')
67 editor = NumberEditor()
68 elif isdatetime(data):
69 formatter = DateFormatter(format='%Y-%m-%d %H:%M:%S')
70 editor = DateEditor()
71 else:
72 formatter = StringFormatter()
73 editor = StringEditor()
74 if col in self.editors:
75 editor = self.editors[col]
76 if col in self.formatters:
77 formatter = self.formatters[col]
78 if str(col) != col:
79 self._renamed_cols[str(col)] = col
80 width = self.widths.get(str(col))
81 column = TableColumn(field=str(col), title=str(col),
82 editor=editor, formatter=formatter,
83 width=width)
84 columns.append(column)
85 return columns
86
87 def _get_properties(self):
88 props = {p : getattr(self, p) for p in list(Layoutable.param)
89 if getattr(self, p) is not None}
90 if self.value is None:
91 data = {}
92 else:
93 data = {k if isinstance(k, str) else str(k): v
94 for k, v in ColumnDataSource.from_df(self.value).items()}
95 if props.get('height', None) is None:
96 length = max([len(v) for v in data.values()]) if data else 0
97 props['height'] = length * self.row_height + 30
98 props['source'] = ColumnDataSource(data=data)
99 props['columns'] = self._get_columns()
100 props['index_position'] = None
101 props['fit_columns'] = self.fit_columns
102 props['row_height'] = self.row_height
103 props['editable'] = not self.disabled
104 return props
105
106 def _process_param_change(self, msg):
107 if 'disabled' in msg:
108 msg['editable'] = not msg.pop('disabled')
109 return super(DataFrame, self)._process_param_change(msg)
110
111 def _get_model(self, doc, root=None, parent=None, comm=None):
112 model = DataTable(**self._get_properties())
113 if root is None:
114 root = model
115 self._link_props(model.source, ['data', ('patching', 'data')], doc, root, comm)
116 self._link_props(model.source.selected, ['indices'], doc, root, comm)
117 self._models[root.ref['id']] = (model, parent)
118 return model
119
120 def _manual_update(self, event, model, doc, root, parent, comm):
121 if event.name == 'value':
122 cds = model.source
123 data = {k if isinstance(k, str) else str(k): v
124 for k, v in ColumnDataSource.from_df(self.value).items()}
125 cds.data = data
126 model.columns = self._get_columns()
127 elif event.name == 'selection':
128 model.source.selected.indices = self.selection
129 else:
130 for col in model.columns:
131 if col.name in self.editors:
132 col.editor = self.editors[col.name]
133 if col.name in self.formatters:
134 col.formatter = self.formatters[col.name]
135 if col.name in self.widths:
136 col.width = self.widths[col.name]
137
138 def _process_events(self, events):
139 if 'data' in events:
140 data = events.pop('data')
141 updated = False
142 for k, v in data.items():
143 if k == 'index':
144 continue
145 k = self._renamed_cols.get(k, k)
146 if isinstance(v, dict):
147 v = [v for k, v in sorted(v.items(), key=lambda k: int(k[0]))]
148 try:
149 isequal = (self.value[k].values == v).all()
150 except:
151 isequal = False
152 if not isequal:
153 self.value[k] = v
154 updated = True
155 if updated:
156 self.param.trigger('value')
157 if 'indices' in events:
158 self.selected = events.pop('indices')
159 super(DataFrame, self)._process_events(events)
160
161 @property
162 def selected_dataframe(self):
163 """
164 Returns a DataFrame of the currently selected rows.
165 """
166 if not self.selection:
167 return self.value
168 return self.value.iloc[self.selection]
169
[end of panel/widgets/tables.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/panel/widgets/tables.py b/panel/widgets/tables.py
--- a/panel/widgets/tables.py
+++ b/panel/widgets/tables.py
@@ -25,7 +25,7 @@
(overrides the default chosen based on the type).""")
fit_columns = param.Boolean(default=True, doc="""
- Whether columns should be fit to the available width.
+ Whether columns should expand to the available width.
This results in no horizontal scrollbar showing up, but data
can get unreadable if there is no enough space available.""")
|
{"golden_diff": "diff --git a/panel/widgets/tables.py b/panel/widgets/tables.py\n--- a/panel/widgets/tables.py\n+++ b/panel/widgets/tables.py\n@@ -25,7 +25,7 @@\n (overrides the default chosen based on the type).\"\"\")\n \n fit_columns = param.Boolean(default=True, doc=\"\"\"\n- Whether columns should be fit to the available width.\n+ Whether columns should expand to the available width.\n This results in no horizontal scrollbar showing up, but data\n can get unreadable if there is no enough space available.\"\"\")\n", "issue": "Typo in DataFrame example page\nI'm trying out `pn.widget.DataFrame`. I noticed a typo on the example page `https://github.com/pyviz/panel/blob/45177aa5fa30c89ea26a942385bd7878e60dd323/examples/reference/widgets/DataFrame.ipynb`. \r\n \r\nUnder the list of Core options, `fit_column` should be `fit_columns`. \r\n \r\nSide note: The explanation of fit_columns threw me off at first. I was thinking/hoping it would fit to minimum width based on column contents, but instead it expanded to total widget width. I see I can mostly get what I want via a `width` dict so I'll go that route, just wanted to point out it was slightly confusing. \n", "before_files": [{"content": "from __future__ import absolute_import, division, unicode_literals\n\n\nimport param\n\nfrom bokeh.models import ColumnDataSource\nfrom bokeh.models.widgets import (\n DataTable, TableColumn, NumberEditor, NumberFormatter,\n DateFormatter, DateEditor, StringFormatter, StringEditor, IntEditor\n)\n\nfrom ..viewable import Layoutable\nfrom ..util import isdatetime\nfrom .base import Widget\n\n\nclass DataFrame(Widget):\n\n editors = param.Dict(default={}, doc=\"\"\"\n Bokeh CellEditor to use for a particular column\n (overrides the default chosen based on the type).\"\"\")\n\n formatters = param.Dict(default={}, doc=\"\"\"\n Bokeh CellFormatter to use for a particular column\n (overrides the default chosen based on the type).\"\"\")\n\n fit_columns = param.Boolean(default=True, doc=\"\"\"\n Whether columns should be fit to the available width.\n This results in no horizontal scrollbar showing up, but data\n can get unreadable if there is no enough space available.\"\"\")\n\n selection = param.List(default=[], doc=\"\"\"\n The currently selected rows of the table.\"\"\")\n\n row_height = param.Integer(default=25, doc=\"\"\"\n The height of each table row.\"\"\")\n\n widths = param.Dict(default={}, doc=\"\"\"\n A mapping from column name to column width.\"\"\")\n\n value = param.Parameter(default=None)\n\n _manual_params = ['value', 'editors', 'formatters', 'selection', 'width']\n\n def __init__(self, value=None, **params):\n super(DataFrame, self).__init__(value=value, **params)\n self._renamed_cols = {}\n\n def _get_columns(self):\n if self.value is None:\n return []\n\n index = [self.value.index.name or 'index']\n col_names = index + list(self.value.columns)\n columns = []\n for col in col_names:\n if col in self.value.columns:\n data = self.value[col]\n else:\n data = self.value.index\n kind = data.dtype.kind\n if kind == 'i':\n formatter = NumberFormatter()\n editor = IntEditor()\n elif kind == 'f':\n formatter = NumberFormatter(format='0,0.0[00000]')\n editor = NumberEditor()\n elif isdatetime(data):\n formatter = DateFormatter(format='%Y-%m-%d %H:%M:%S')\n editor = DateEditor()\n else:\n formatter = StringFormatter()\n editor = StringEditor()\n if col in self.editors:\n editor = self.editors[col]\n if col in self.formatters:\n formatter = self.formatters[col]\n if str(col) != col:\n 
self._renamed_cols[str(col)] = col\n width = self.widths.get(str(col))\n column = TableColumn(field=str(col), title=str(col),\n editor=editor, formatter=formatter,\n width=width)\n columns.append(column)\n return columns\n\n def _get_properties(self):\n props = {p : getattr(self, p) for p in list(Layoutable.param)\n if getattr(self, p) is not None}\n if self.value is None:\n data = {}\n else:\n data = {k if isinstance(k, str) else str(k): v\n for k, v in ColumnDataSource.from_df(self.value).items()}\n if props.get('height', None) is None:\n length = max([len(v) for v in data.values()]) if data else 0\n props['height'] = length * self.row_height + 30\n props['source'] = ColumnDataSource(data=data)\n props['columns'] = self._get_columns()\n props['index_position'] = None\n props['fit_columns'] = self.fit_columns\n props['row_height'] = self.row_height\n props['editable'] = not self.disabled\n return props\n\n def _process_param_change(self, msg):\n if 'disabled' in msg:\n msg['editable'] = not msg.pop('disabled')\n return super(DataFrame, self)._process_param_change(msg)\n \n def _get_model(self, doc, root=None, parent=None, comm=None):\n model = DataTable(**self._get_properties())\n if root is None:\n root = model\n self._link_props(model.source, ['data', ('patching', 'data')], doc, root, comm)\n self._link_props(model.source.selected, ['indices'], doc, root, comm)\n self._models[root.ref['id']] = (model, parent)\n return model\n\n def _manual_update(self, event, model, doc, root, parent, comm):\n if event.name == 'value':\n cds = model.source\n data = {k if isinstance(k, str) else str(k): v\n for k, v in ColumnDataSource.from_df(self.value).items()}\n cds.data = data\n model.columns = self._get_columns()\n elif event.name == 'selection':\n model.source.selected.indices = self.selection\n else:\n for col in model.columns:\n if col.name in self.editors:\n col.editor = self.editors[col.name]\n if col.name in self.formatters:\n col.formatter = self.formatters[col.name]\n if col.name in self.widths:\n col.width = self.widths[col.name]\n\n def _process_events(self, events):\n if 'data' in events:\n data = events.pop('data')\n updated = False\n for k, v in data.items():\n if k == 'index':\n continue\n k = self._renamed_cols.get(k, k)\n if isinstance(v, dict):\n v = [v for k, v in sorted(v.items(), key=lambda k: int(k[0]))]\n try:\n isequal = (self.value[k].values == v).all()\n except:\n isequal = False\n if not isequal:\n self.value[k] = v\n updated = True\n if updated:\n self.param.trigger('value')\n if 'indices' in events:\n self.selected = events.pop('indices')\n super(DataFrame, self)._process_events(events)\n\n @property\n def selected_dataframe(self):\n \"\"\"\n Returns a DataFrame of the currently selected rows.\n \"\"\"\n if not self.selection:\n return self.value\n return self.value.iloc[self.selection]\n", "path": "panel/widgets/tables.py"}]}
| 2,467 | 123 |

| gh_patches_debug_30195 | rasdani/github-patches | git_diff | pex-tool__pex-152 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
set user-agent to something pex-identifiable
Donald did some interesting research into where downloads were coming from on PyPI and realized that we have no custom user-agent to identify pex.
</issue>
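A rough sketch of what is being asked for, mirroring the fix recorded later in this entry: stamp outbound requests with a pex-specific, version-carrying `User-Agent` header. The import path follows the `pex.version` module referenced in that fix:

```python
# Sketch only - mirrors the recorded fix rather than inventing new API.
import requests

from pex.version import __version__ as PEX_VERSION

USER_AGENT = 'pex/%s' % PEX_VERSION

session = requests.session()
response = session.get(
    'https://pypi.org/simple/',           # any URL fetched through the context
    stream=True,
    headers={'User-Agent': USER_AGENT},   # identifies pex (and its version) to the server
)
```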
<code>
[start of pex/http.py]
1 # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
2 # Licensed under the Apache License, Version 2.0 (see LICENSE).
3
4 import contextlib
5 import hashlib
6 import os
7 import shutil
8 import uuid
9 from abc import abstractmethod
10 from email import message_from_string
11
12 from .common import safe_mkdtemp, safe_open
13 from .compatibility import PY3, AbstractClass
14 from .tracer import TRACER
15 from .variables import ENV
16
17 try:
18 import requests
19 except ImportError:
20 requests = None
21
22 try:
23 from cachecontrol import CacheControl
24 from cachecontrol.caches import FileCache
25 except ImportError:
26 CacheControl = FileCache = None
27
28 if PY3:
29 import urllib.request as urllib_request
30 else:
31 import urllib2 as urllib_request
32
33 # This is available as hashlib.algorithms_guaranteed in >=3.2 and as
34 # hashlib.algorithms in >=2.7, but not available in 2.6, so we enumerate
35 # here.
36 HASHLIB_ALGORITHMS = frozenset(['md5', 'sha1', 'sha224', 'sha256', 'sha384', 'sha512'])
37
38
39 class Context(AbstractClass):
40 """Encapsulate the networking necessary to do requirement resolution.
41
42 At a minimum, the Context must implement ``open(link)`` by returning a
43 file-like object. Reference implementations of ``read(link)`` and
44 ``fetch(link)`` are provided based upon ``open(link)`` but may be further
45 specialized by individual implementations.
46 """
47
48 DEFAULT_ENCODING = 'iso-8859-1'
49
50 class Error(Exception):
51 """Error base class for Contexts to wrap application-specific exceptions."""
52 pass
53
54 _REGISTRY = []
55
56 @classmethod
57 def register(cls, context_impl):
58 """Register a concrete implementation of a :class:`Context` to be recognized."""
59 cls._REGISTRY.insert(0, context_impl)
60
61 @classmethod
62 def get(cls):
63 for context_class in cls._REGISTRY:
64 try:
65 context = context_class()
66 TRACER.log('Constructed %s context %r' % (context.__class__.__name__, context), V=4)
67 return context
68 except cls.Error:
69 continue
70 raise cls.Error('Could not initialize a request context.')
71
72 @abstractmethod
73 def open(self, link):
74 """Return an open file-like object to the link.
75
76 :param link: The :class:`Link` to open.
77 """
78
79 def read(self, link):
80 """Return the binary content associated with the link.
81
82 :param link: The :class:`Link` to read.
83 """
84 with contextlib.closing(self.open(link)) as fp:
85 return fp.read()
86
87 def content(self, link):
88 """Return the encoded content associated with the link.
89
90 :param link: The :class:`Link` to read.
91 """
92
93 def fetch(self, link, into=None):
94 """Fetch the binary content associated with the link and write to a file.
95
96 :param link: The :class:`Link` to fetch.
97 :keyword into: If specified, write into the directory ``into``. If ``None``, creates a new
98 temporary directory that persists for the duration of the interpreter.
99 """
100 target = os.path.join(into or safe_mkdtemp(), link.filename)
101
102 if os.path.exists(target):
103 # Assume that if the local file already exists, it is safe to use.
104 return target
105
106 with TRACER.timed('Fetching %s' % link.url, V=2):
107 target_tmp = '%s.%s' % (target, uuid.uuid4())
108 with contextlib.closing(self.open(link)) as in_fp:
109 with safe_open(target_tmp, 'wb') as out_fp:
110 shutil.copyfileobj(in_fp, out_fp)
111
112 os.rename(target_tmp, target)
113 return target
114
115
116 class UrllibContext(Context):
117 """Default Python standard library Context."""
118
119 def open(self, link):
120 return urllib_request.urlopen(link.url)
121
122 def content(self, link):
123 if link.local:
124 raise self.Error('Context.content only works with remote URLs.')
125
126 with contextlib.closing(self.open(link)) as fp:
127 encoding = message_from_string(str(fp.headers)).get_content_charset(self.DEFAULT_ENCODING)
128 return fp.read().decode(encoding, 'replace')
129
130 def __init__(self, *args, **kw):
131 TRACER.log('Warning, using a UrllibContext which is known to be flaky.')
132 TRACER.log('Please build pex with the requests module for more reliable downloads.')
133 super(UrllibContext, self).__init__(*args, **kw)
134
135
136 Context.register(UrllibContext)
137
138
139 class StreamFilelike(object):
140 """A file-like object wrapper around requests streams that performs hash validation."""
141
142 @classmethod
143 def detect_algorithm(cls, link):
144 """Detect the hashing algorithm from the fragment in the link, if any."""
145 if any(link.fragment.startswith('%s=' % algorithm) for algorithm in HASHLIB_ALGORITHMS):
146 algorithm, value = link.fragment.split('=', 2)
147 try:
148 return hashlib.new(algorithm), value
149 except ValueError: # unsupported algorithm
150 return None, None
151 return None, None
152
153 def __init__(self, request, link, chunk_size=16384):
154 self._iterator = request.iter_content(chunk_size)
155 self.encoding = request.encoding
156 self._bytes = b''
157 self._link = link
158 self._hasher, self._hash_value = self.detect_algorithm(link)
159
160 def read(self, length=None):
161 while length is None or len(self._bytes) < length:
162 try:
163 next_chunk = next(self._iterator)
164 if self._hasher:
165 self._hasher.update(next_chunk)
166 self._bytes += next_chunk
167 except StopIteration:
168 self._validate()
169 break
170 if length is None:
171 length = len(self._bytes)
172 chunk, self._bytes = self._bytes[:length], self._bytes[length:]
173 return chunk
174
175 def _validate(self):
176 if self._hasher:
177 if self._hash_value != self._hasher.hexdigest():
178 raise Context.Error('%s failed checksum!' % (self._link.url))
179 else:
180 TRACER.log('Validated %s (%s)' % (self._link.filename, self._link.fragment), V=3)
181
182 def close(self):
183 pass
184
185
186 class RequestsContext(Context):
187 """A requests-based Context."""
188
189 @staticmethod
190 def _create_session(max_retries):
191 session = requests.session()
192 retrying_adapter = requests.adapters.HTTPAdapter(max_retries=max_retries)
193 session.mount('http://', retrying_adapter)
194 session.mount('https://', retrying_adapter)
195
196 return session
197
198 def __init__(self, session=None, verify=True, env=ENV):
199 if requests is None:
200 raise RuntimeError('requests is not available. Cannot use a RequestsContext.')
201
202 self._verify = verify
203
204 max_retries = env.PEX_HTTP_RETRIES
205 if max_retries < 0:
206 raise ValueError('max_retries may not be negative.')
207
208 self._max_retries = max_retries
209 self._session = session or self._create_session(self._max_retries)
210
211 def open(self, link):
212 # requests does not support file:// -- so we must short-circuit manually
213 if link.local:
214 return open(link.path, 'rb') # noqa: T802
215 for attempt in range(self._max_retries + 1):
216 try:
217 return StreamFilelike(self._session.get(link.url, verify=self._verify, stream=True), link)
218 except requests.exceptions.ReadTimeout:
219 # Connect timeouts are handled by the HTTPAdapter, unfortunately read timeouts are not
220 # so we'll retry them ourselves.
221 TRACER.log('Read timeout trying to fetch %s, retrying. %d retries remain.' % (
222 link.url,
223 self._max_retries - attempt))
224 except requests.exceptions.RequestException as e:
225 raise self.Error(e)
226
227 raise self.Error(
228 requests.packages.urllib3.exceptions.MaxRetryError(
229 None,
230 link,
231 'Exceeded max retries of %d' % self._max_retries))
232
233 def content(self, link):
234 if link.local:
235 raise self.Error('Context.content only works with remote URLs.')
236
237 with contextlib.closing(self.open(link)) as request:
238 return request.read().decode(request.encoding or self.DEFAULT_ENCODING, 'replace')
239
240
241 if requests:
242 Context.register(RequestsContext)
243
244
245 class CachedRequestsContext(RequestsContext):
246 """A requests-based Context with CacheControl support."""
247
248 DEFAULT_CACHE = '~/.pex/cache'
249
250 def __init__(self, cache=None, **kw):
251 self._cache = os.path.realpath(os.path.expanduser(cache or self.DEFAULT_CACHE))
252 super(CachedRequestsContext, self).__init__(
253 CacheControl(requests.session(), cache=FileCache(self._cache)), **kw)
254
255
256 if CacheControl:
257 Context.register(CachedRequestsContext)
258
[end of pex/http.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/pex/http.py b/pex/http.py
--- a/pex/http.py
+++ b/pex/http.py
@@ -13,6 +13,7 @@
from .compatibility import PY3, AbstractClass
from .tracer import TRACER
from .variables import ENV
+from .version import __version__ as PEX_VERSION
try:
import requests
@@ -185,6 +186,7 @@
class RequestsContext(Context):
"""A requests-based Context."""
+ USER_AGENT = 'pex/%s' % PEX_VERSION
@staticmethod
def _create_session(max_retries):
@@ -192,7 +194,6 @@
retrying_adapter = requests.adapters.HTTPAdapter(max_retries=max_retries)
session.mount('http://', retrying_adapter)
session.mount('https://', retrying_adapter)
-
return session
def __init__(self, session=None, verify=True, env=ENV):
@@ -214,7 +215,9 @@
return open(link.path, 'rb') # noqa: T802
for attempt in range(self._max_retries + 1):
try:
- return StreamFilelike(self._session.get(link.url, verify=self._verify, stream=True), link)
+ return StreamFilelike(self._session.get(
+ link.url, verify=self._verify, stream=True, headers={'User-Agent': self.USER_AGENT}),
+ link)
except requests.exceptions.ReadTimeout:
# Connect timeouts are handled by the HTTPAdapter, unfortunately read timeouts are not
# so we'll retry them ourselves.
|
{"golden_diff": "diff --git a/pex/http.py b/pex/http.py\n--- a/pex/http.py\n+++ b/pex/http.py\n@@ -13,6 +13,7 @@\n from .compatibility import PY3, AbstractClass\n from .tracer import TRACER\n from .variables import ENV\n+from .version import __version__ as PEX_VERSION\n \n try:\n import requests\n@@ -185,6 +186,7 @@\n \n class RequestsContext(Context):\n \"\"\"A requests-based Context.\"\"\"\n+ USER_AGENT = 'pex/%s' % PEX_VERSION\n \n @staticmethod\n def _create_session(max_retries):\n@@ -192,7 +194,6 @@\n retrying_adapter = requests.adapters.HTTPAdapter(max_retries=max_retries)\n session.mount('http://', retrying_adapter)\n session.mount('https://', retrying_adapter)\n-\n return session\n \n def __init__(self, session=None, verify=True, env=ENV):\n@@ -214,7 +215,9 @@\n return open(link.path, 'rb') # noqa: T802\n for attempt in range(self._max_retries + 1):\n try:\n- return StreamFilelike(self._session.get(link.url, verify=self._verify, stream=True), link)\n+ return StreamFilelike(self._session.get(\n+ link.url, verify=self._verify, stream=True, headers={'User-Agent': self.USER_AGENT}),\n+ link)\n except requests.exceptions.ReadTimeout:\n # Connect timeouts are handled by the HTTPAdapter, unfortunately read timeouts are not\n # so we'll retry them ourselves.\n", "issue": "set user-agent to something pex-identifiable\nDonald did some interesting research into where downloads were coming from on PyPI. Realized that we have no custom user-agent to identify pex.\n\n", "before_files": [{"content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\nimport contextlib\nimport hashlib\nimport os\nimport shutil\nimport uuid\nfrom abc import abstractmethod\nfrom email import message_from_string\n\nfrom .common import safe_mkdtemp, safe_open\nfrom .compatibility import PY3, AbstractClass\nfrom .tracer import TRACER\nfrom .variables import ENV\n\ntry:\n import requests\nexcept ImportError:\n requests = None\n\ntry:\n from cachecontrol import CacheControl\n from cachecontrol.caches import FileCache\nexcept ImportError:\n CacheControl = FileCache = None\n\nif PY3:\n import urllib.request as urllib_request\nelse:\n import urllib2 as urllib_request\n\n# This is available as hashlib.algorithms_guaranteed in >=3.2 and as\n# hashlib.algorithms in >=2.7, but not available in 2.6, so we enumerate\n# here.\nHASHLIB_ALGORITHMS = frozenset(['md5', 'sha1', 'sha224', 'sha256', 'sha384', 'sha512'])\n\n\nclass Context(AbstractClass):\n \"\"\"Encapsulate the networking necessary to do requirement resolution.\n\n At a minimum, the Context must implement ``open(link)`` by returning a\n file-like object. 
Reference implementations of ``read(link)`` and\n ``fetch(link)`` are provided based upon ``open(link)`` but may be further\n specialized by individual implementations.\n \"\"\"\n\n DEFAULT_ENCODING = 'iso-8859-1'\n\n class Error(Exception):\n \"\"\"Error base class for Contexts to wrap application-specific exceptions.\"\"\"\n pass\n\n _REGISTRY = []\n\n @classmethod\n def register(cls, context_impl):\n \"\"\"Register a concrete implementation of a :class:`Context` to be recognized.\"\"\"\n cls._REGISTRY.insert(0, context_impl)\n\n @classmethod\n def get(cls):\n for context_class in cls._REGISTRY:\n try:\n context = context_class()\n TRACER.log('Constructed %s context %r' % (context.__class__.__name__, context), V=4)\n return context\n except cls.Error:\n continue\n raise cls.Error('Could not initialize a request context.')\n\n @abstractmethod\n def open(self, link):\n \"\"\"Return an open file-like object to the link.\n\n :param link: The :class:`Link` to open.\n \"\"\"\n\n def read(self, link):\n \"\"\"Return the binary content associated with the link.\n\n :param link: The :class:`Link` to read.\n \"\"\"\n with contextlib.closing(self.open(link)) as fp:\n return fp.read()\n\n def content(self, link):\n \"\"\"Return the encoded content associated with the link.\n\n :param link: The :class:`Link` to read.\n \"\"\"\n\n def fetch(self, link, into=None):\n \"\"\"Fetch the binary content associated with the link and write to a file.\n\n :param link: The :class:`Link` to fetch.\n :keyword into: If specified, write into the directory ``into``. If ``None``, creates a new\n temporary directory that persists for the duration of the interpreter.\n \"\"\"\n target = os.path.join(into or safe_mkdtemp(), link.filename)\n\n if os.path.exists(target):\n # Assume that if the local file already exists, it is safe to use.\n return target\n\n with TRACER.timed('Fetching %s' % link.url, V=2):\n target_tmp = '%s.%s' % (target, uuid.uuid4())\n with contextlib.closing(self.open(link)) as in_fp:\n with safe_open(target_tmp, 'wb') as out_fp:\n shutil.copyfileobj(in_fp, out_fp)\n\n os.rename(target_tmp, target)\n return target\n\n\nclass UrllibContext(Context):\n \"\"\"Default Python standard library Context.\"\"\"\n\n def open(self, link):\n return urllib_request.urlopen(link.url)\n\n def content(self, link):\n if link.local:\n raise self.Error('Context.content only works with remote URLs.')\n\n with contextlib.closing(self.open(link)) as fp:\n encoding = message_from_string(str(fp.headers)).get_content_charset(self.DEFAULT_ENCODING)\n return fp.read().decode(encoding, 'replace')\n\n def __init__(self, *args, **kw):\n TRACER.log('Warning, using a UrllibContext which is known to be flaky.')\n TRACER.log('Please build pex with the requests module for more reliable downloads.')\n super(UrllibContext, self).__init__(*args, **kw)\n\n\nContext.register(UrllibContext)\n\n\nclass StreamFilelike(object):\n \"\"\"A file-like object wrapper around requests streams that performs hash validation.\"\"\"\n\n @classmethod\n def detect_algorithm(cls, link):\n \"\"\"Detect the hashing algorithm from the fragment in the link, if any.\"\"\"\n if any(link.fragment.startswith('%s=' % algorithm) for algorithm in HASHLIB_ALGORITHMS):\n algorithm, value = link.fragment.split('=', 2)\n try:\n return hashlib.new(algorithm), value\n except ValueError: # unsupported algorithm\n return None, None\n return None, None\n\n def __init__(self, request, link, chunk_size=16384):\n self._iterator = request.iter_content(chunk_size)\n self.encoding = 
request.encoding\n self._bytes = b''\n self._link = link\n self._hasher, self._hash_value = self.detect_algorithm(link)\n\n def read(self, length=None):\n while length is None or len(self._bytes) < length:\n try:\n next_chunk = next(self._iterator)\n if self._hasher:\n self._hasher.update(next_chunk)\n self._bytes += next_chunk\n except StopIteration:\n self._validate()\n break\n if length is None:\n length = len(self._bytes)\n chunk, self._bytes = self._bytes[:length], self._bytes[length:]\n return chunk\n\n def _validate(self):\n if self._hasher:\n if self._hash_value != self._hasher.hexdigest():\n raise Context.Error('%s failed checksum!' % (self._link.url))\n else:\n TRACER.log('Validated %s (%s)' % (self._link.filename, self._link.fragment), V=3)\n\n def close(self):\n pass\n\n\nclass RequestsContext(Context):\n \"\"\"A requests-based Context.\"\"\"\n\n @staticmethod\n def _create_session(max_retries):\n session = requests.session()\n retrying_adapter = requests.adapters.HTTPAdapter(max_retries=max_retries)\n session.mount('http://', retrying_adapter)\n session.mount('https://', retrying_adapter)\n\n return session\n\n def __init__(self, session=None, verify=True, env=ENV):\n if requests is None:\n raise RuntimeError('requests is not available. Cannot use a RequestsContext.')\n\n self._verify = verify\n\n max_retries = env.PEX_HTTP_RETRIES\n if max_retries < 0:\n raise ValueError('max_retries may not be negative.')\n\n self._max_retries = max_retries\n self._session = session or self._create_session(self._max_retries)\n\n def open(self, link):\n # requests does not support file:// -- so we must short-circuit manually\n if link.local:\n return open(link.path, 'rb') # noqa: T802\n for attempt in range(self._max_retries + 1):\n try:\n return StreamFilelike(self._session.get(link.url, verify=self._verify, stream=True), link)\n except requests.exceptions.ReadTimeout:\n # Connect timeouts are handled by the HTTPAdapter, unfortunately read timeouts are not\n # so we'll retry them ourselves.\n TRACER.log('Read timeout trying to fetch %s, retrying. %d retries remain.' % (\n link.url,\n self._max_retries - attempt))\n except requests.exceptions.RequestException as e:\n raise self.Error(e)\n\n raise self.Error(\n requests.packages.urllib3.exceptions.MaxRetryError(\n None,\n link,\n 'Exceeded max retries of %d' % self._max_retries))\n\n def content(self, link):\n if link.local:\n raise self.Error('Context.content only works with remote URLs.')\n\n with contextlib.closing(self.open(link)) as request:\n return request.read().decode(request.encoding or self.DEFAULT_ENCODING, 'replace')\n\n\nif requests:\n Context.register(RequestsContext)\n\n\nclass CachedRequestsContext(RequestsContext):\n \"\"\"A requests-based Context with CacheControl support.\"\"\"\n\n DEFAULT_CACHE = '~/.pex/cache'\n\n def __init__(self, cache=None, **kw):\n self._cache = os.path.realpath(os.path.expanduser(cache or self.DEFAULT_CACHE))\n super(CachedRequestsContext, self).__init__(\n CacheControl(requests.session(), cache=FileCache(self._cache)), **kw)\n\n\nif CacheControl:\n Context.register(CachedRequestsContext)\n", "path": "pex/http.py"}]}
| 3,231 | 356 |
gh_patches_debug_25447
|
rasdani/github-patches
|
git_diff
|
cookiecutter__cookiecutter-270
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add aliases for cookiecutters
Remembering the locations of cookiecutters can be a bit of a pain - even if it's just including the local path.
It would be useful to have an extra section in `cookiecutterrc` for aliases to cookiecutter locations. Something like:
```
aliases:
python: https://github.com/audreyr/cookiecutter-pypackage
django: https://github.com/marcofucci/cookiecutter-simple-django
docs: ~/samples/templates/documentation
```
The code would simply need to translate an alias into a path before doing any of the existing processing.
</issue>
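For illustration, a minimal sketch of the alias translation the issue describes is shown below. The function name `expand_alias`, the `aliases` key, and the `config_dict` shape are assumptions made for this sketch, not part of the proposal; the golden diff further below implements a richer version of the same idea under the name `expand_abbreviations` (with `gh:`/`bb:` prefix support).

```python
def expand_alias(input_dir, config_dict):
    """Return the template location an alias maps to, or input_dir unchanged.

    Assumes the user config (e.g. ~/.cookiecutterrc) may carry an 'aliases'
    mapping like the YAML block in the issue.
    """
    aliases = config_dict.get('aliases', {})
    return aliases.get(input_dir, input_dir)


# With the rc file from the issue, 'docs' resolves to the local template path:
config_dict = {'aliases': {'docs': '~/samples/templates/documentation'}}
print(expand_alias('docs', config_dict))     # ~/samples/templates/documentation
print(expand_alias('./mytpl', config_dict))  # ./mytpl (unchanged)
```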
<code>
[start of cookiecutter/main.py]
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3
4 """
5 cookiecutter.main
6 -----------------
7
8 Main entry point for the `cookiecutter` command.
9
10 The code in this module is also a good example of how to use Cookiecutter as a
11 library rather than a script.
12 """
13
14 from __future__ import unicode_literals
15 import argparse
16 import logging
17 import os
18 import sys
19
20 from . import __version__
21 from .config import get_user_config
22 from .prompt import prompt_for_config
23 from .generate import generate_context, generate_files
24 from .vcs import clone
25
26 logger = logging.getLogger(__name__)
27
28
29 def cookiecutter(input_dir, checkout=None, no_input=False):
30 """
31 API equivalent to using Cookiecutter at the command line.
32
33 :param input_dir: A directory containing a project template dir,
34 or a URL to git repo.
35 :param checkout: The branch, tag or commit ID to checkout after clone
36 """
37
38 # Get user config from ~/.cookiecutterrc or equivalent
39 # If no config file, sensible defaults from config.DEFAULT_CONFIG are used
40 config_dict = get_user_config()
41
42 # TODO: find a better way to tell if it's a repo URL
43 if "git@" in input_dir or "https://" in input_dir:
44 repo_dir = clone(
45 repo_url=input_dir,
46 checkout=checkout,
47 clone_to_dir=config_dict['cookiecutters_dir']
48 )
49 else:
50 # If it's a local repo, no need to clone or copy to your cookiecutters_dir
51 repo_dir = input_dir
52
53 context_file = os.path.join(repo_dir, 'cookiecutter.json')
54 logging.debug('context_file is {0}'.format(context_file))
55
56 context = generate_context(
57 context_file=context_file,
58 default_context=config_dict['default_context']
59 )
60
61 # prompt the user to manually configure at the command line.
62 # except when 'no-input' flag is set
63 if not no_input:
64 cookiecutter_dict = prompt_for_config(context)
65 context['cookiecutter'] = cookiecutter_dict
66
67 # Create project from local context and project template.
68 generate_files(
69 repo_dir=repo_dir,
70 context=context
71 )
72
73
74 def _get_parser():
75 parser = argparse.ArgumentParser(
76 description='Create a project from a Cookiecutter project template.'
77 )
78 parser.add_argument(
79 '--no-input',
80 action="store_true",
81 help='Do not prompt for parameters and only use cookiecutter.json '
82 'file content')
83 parser.add_argument(
84 'input_dir',
85 help='Cookiecutter project dir, e.g. cookiecutter-pypackage/'
86 )
87 parser.add_argument(
88 '-c', '--checkout',
89 help='branch, tag or commit to checkout after git clone'
90 )
91 cookiecutter_pkg_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
92 parser.add_argument(
93 '-V', '--version',
94 help="Show version information and exit.",
95 action='version',
96 version='Cookiecutter %s from %s (Python %s)' % (
97 __version__,
98 cookiecutter_pkg_dir,
99 sys.version[:3]
100 )
101 )
102 parser.add_argument(
103 '-v', '--verbose',
104 help='Print debug information',
105 action='store_true', default=False
106 )
107
108 return parser
109
110 def parse_cookiecutter_args(args):
111 """ Parse the command-line arguments to Cookiecutter. """
112 parser = _get_parser()
113 return parser.parse_args(args)
114
115
116 def main():
117 """ Entry point for the package, as defined in setup.py. """
118
119 args = parse_cookiecutter_args(sys.argv[1:])
120
121 if args.verbose:
122 logging.basicConfig(format='%(levelname)s %(filename)s: %(message)s', level=logging.DEBUG)
123 else:
124 # Log info and above to console
125 logging.basicConfig(
126 format='%(levelname)s: %(message)s',
127 level=logging.INFO
128 )
129
130 cookiecutter(args.input_dir, args.checkout, args.no_input)
131
132
133 if __name__ == '__main__':
134 main()
135
[end of cookiecutter/main.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/cookiecutter/main.py b/cookiecutter/main.py
--- a/cookiecutter/main.py
+++ b/cookiecutter/main.py
@@ -25,6 +25,34 @@
logger = logging.getLogger(__name__)
+builtin_abbreviations = {
+ 'gh': 'https://github.com/{0}.git',
+ 'bb': 'https://bitbucket.org/{0}',
+}
+
+def expand_abbreviations(input_dir, config_dict):
+ """
+ Expand abbreviations in a template name.
+
+ :param input_dir: The project template name.
+ :param config_dict: The user config, which will contain abbreviation
+ definitions.
+ """
+
+ abbreviations = builtin_abbreviations.copy()
+ abbreviations.update(config_dict.get('abbreviations', {}))
+
+ if input_dir in abbreviations:
+ return abbreviations[input_dir]
+
+ # Split on colon. If there is no colon, rest will be empty
+ # and prefix will be the whole input_dir
+ prefix, sep, rest = input_dir.partition(':')
+ if prefix in abbreviations:
+ return abbreviations[prefix].format(rest)
+
+ return input_dir
+
def cookiecutter(input_dir, checkout=None, no_input=False):
"""
@@ -39,6 +67,8 @@
# If no config file, sensible defaults from config.DEFAULT_CONFIG are used
config_dict = get_user_config()
+ input_dir = expand_abbreviations(input_dir, config_dict)
+
# TODO: find a better way to tell if it's a repo URL
if "git@" in input_dir or "https://" in input_dir:
repo_dir = clone(
|
{"golden_diff": "diff --git a/cookiecutter/main.py b/cookiecutter/main.py\n--- a/cookiecutter/main.py\n+++ b/cookiecutter/main.py\n@@ -25,6 +25,34 @@\n \n logger = logging.getLogger(__name__)\n \n+builtin_abbreviations = {\n+ 'gh': 'https://github.com/{0}.git',\n+ 'bb': 'https://bitbucket.org/{0}',\n+}\n+\n+def expand_abbreviations(input_dir, config_dict):\n+ \"\"\"\n+ Expand abbreviations in a template name.\n+\n+ :param input_dir: The project template name.\n+ :param config_dict: The user config, which will contain abbreviation\n+ definitions.\n+ \"\"\"\n+\n+ abbreviations = builtin_abbreviations.copy()\n+ abbreviations.update(config_dict.get('abbreviations', {}))\n+\n+ if input_dir in abbreviations:\n+ return abbreviations[input_dir]\n+\n+ # Split on colon. If there is no colon, rest will be empty\n+ # and prefix will be the whole input_dir\n+ prefix, sep, rest = input_dir.partition(':')\n+ if prefix in abbreviations:\n+ return abbreviations[prefix].format(rest)\n+\n+ return input_dir\n+\n \n def cookiecutter(input_dir, checkout=None, no_input=False):\n \"\"\"\n@@ -39,6 +67,8 @@\n # If no config file, sensible defaults from config.DEFAULT_CONFIG are used\n config_dict = get_user_config()\n \n+ input_dir = expand_abbreviations(input_dir, config_dict)\n+\n # TODO: find a better way to tell if it's a repo URL\n if \"git@\" in input_dir or \"https://\" in input_dir:\n repo_dir = clone(\n", "issue": "Add aliases for cookiecutters\nRemembering the locations of cookiecutters can be a bit of a pain - even if it's just including the local path.\n\nIt would be useful to have an extra section in `cookiecutterrc` for aliases to cookiecutter locations. Something like:\n\n```\naliases:\n python: https://github.com/audreyr/cookiecutter-pypackage\n django: https://github.com/marcofucci/cookiecutter-simple-django\n docs: ~/samples/templates/documentation\n```\n\nThe code would simply need to translate an alias into a path before doing any of the existing processing.\n\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\ncookiecutter.main\n-----------------\n\nMain entry point for the `cookiecutter` command.\n\nThe code in this module is also a good example of how to use Cookiecutter as a\nlibrary rather than a script.\n\"\"\"\n\nfrom __future__ import unicode_literals\nimport argparse\nimport logging\nimport os\nimport sys\n\nfrom . 
import __version__\nfrom .config import get_user_config\nfrom .prompt import prompt_for_config\nfrom .generate import generate_context, generate_files\nfrom .vcs import clone\n\nlogger = logging.getLogger(__name__)\n\n\ndef cookiecutter(input_dir, checkout=None, no_input=False):\n \"\"\"\n API equivalent to using Cookiecutter at the command line.\n\n :param input_dir: A directory containing a project template dir,\n or a URL to git repo.\n :param checkout: The branch, tag or commit ID to checkout after clone\n \"\"\"\n\n # Get user config from ~/.cookiecutterrc or equivalent\n # If no config file, sensible defaults from config.DEFAULT_CONFIG are used\n config_dict = get_user_config()\n\n # TODO: find a better way to tell if it's a repo URL\n if \"git@\" in input_dir or \"https://\" in input_dir:\n repo_dir = clone(\n repo_url=input_dir,\n checkout=checkout,\n clone_to_dir=config_dict['cookiecutters_dir']\n )\n else:\n # If it's a local repo, no need to clone or copy to your cookiecutters_dir\n repo_dir = input_dir\n\n context_file = os.path.join(repo_dir, 'cookiecutter.json')\n logging.debug('context_file is {0}'.format(context_file))\n\n context = generate_context(\n context_file=context_file,\n default_context=config_dict['default_context']\n )\n\n # prompt the user to manually configure at the command line.\n # except when 'no-input' flag is set\n if not no_input:\n cookiecutter_dict = prompt_for_config(context)\n context['cookiecutter'] = cookiecutter_dict\n\n # Create project from local context and project template.\n generate_files(\n repo_dir=repo_dir,\n context=context\n )\n\n\ndef _get_parser():\n parser = argparse.ArgumentParser(\n description='Create a project from a Cookiecutter project template.'\n )\n parser.add_argument(\n '--no-input',\n action=\"store_true\",\n help='Do not prompt for parameters and only use cookiecutter.json '\n 'file content')\n parser.add_argument(\n 'input_dir',\n help='Cookiecutter project dir, e.g. cookiecutter-pypackage/'\n )\n parser.add_argument(\n '-c', '--checkout',\n help='branch, tag or commit to checkout after git clone'\n )\n cookiecutter_pkg_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n parser.add_argument(\n '-V', '--version',\n help=\"Show version information and exit.\",\n action='version',\n version='Cookiecutter %s from %s (Python %s)' % (\n __version__,\n cookiecutter_pkg_dir,\n sys.version[:3]\n )\n )\n parser.add_argument(\n '-v', '--verbose',\n help='Print debug information',\n action='store_true', default=False\n )\n\n return parser\n\ndef parse_cookiecutter_args(args):\n \"\"\" Parse the command-line arguments to Cookiecutter. \"\"\"\n parser = _get_parser()\n return parser.parse_args(args)\n\n\ndef main():\n \"\"\" Entry point for the package, as defined in setup.py. \"\"\"\n\n args = parse_cookiecutter_args(sys.argv[1:])\n\n if args.verbose:\n logging.basicConfig(format='%(levelname)s %(filename)s: %(message)s', level=logging.DEBUG)\n else:\n # Log info and above to console\n logging.basicConfig(\n format='%(levelname)s: %(message)s',\n level=logging.INFO\n )\n\n cookiecutter(args.input_dir, args.checkout, args.no_input)\n\n\nif __name__ == '__main__':\n main()\n", "path": "cookiecutter/main.py"}]}
| 1,853 | 382 |
gh_patches_debug_7526
|
rasdani/github-patches
|
git_diff
|
internetarchive__openlibrary-9125
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Update Github Action "Recent Comment Slack Bot" to mark issues as Needs: Response
<!-- IMPORTANT: Before posting, be sure to redact or remove sensitive data, such as passwords, secret keys, session cookies, etc. -->
<!-- A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] -->
### Describe the problem that you'd like solved
<!-- A clear and concise description of what you want to happen. -->
```
def add_label_to_issues(ids):
owner = 'internetarchive'
repo = 'openlibrary'
labels_url = f"https://api.github.com/repos/{owner}/{repo}/issues/labels"
payload = {"labels": ["Needs: Response"]}
for issue_number in ids:
issue_labels_url = f"https://api.github.com/repos/{owner}/{repo}/issues/{issue_number}/labels"
response = requests.post(issue_labels_url, json=payload, headers=github_headers)
if __name__ == "__main__":
add_label_to_issues([9054, 9065])
```
to https://github.com/internetarchive/openlibrary/blob/master/scripts/gh_scripts/issue_comment_bot.py#L255
### Proposal & Constraints
<!-- What is the proposed solution / implementation? Is there a precedent of this approach succeeding elsewhere? -->
<!-- Which suggestions or requirements should be considered for how feature needs to appear or be implemented? -->
### Additional context
<!-- Add any other context or screenshots about the feature request here. -->
### Stakeholders
<!-- @ tag stakeholders of this bug -->
</issue>
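As a hedged sketch only, the proposal above can be folded into a small helper like the one below; the owner/repo values and the `github_headers` dict mirror the ones already defined in the script, and the call is otherwise a plain GitHub REST request. Note that the script's existing `add_label_to_issues` (shown further down) indexes `issue['number']`, which is why the golden diff for this record adds `'number': i['number']` to the dicts built in `filter_issues`.

```python
import requests

# Assumed to be the same headers dict the script already builds
# (an Authorization header is added when GITHUB_TOKEN is set).
github_headers = {
    'X-GitHub-Api-Version': '2022-11-28',
    'Accept': 'application/vnd.github+json',
}


def add_needs_response_label(issue_numbers):
    """Add the 'Needs: Response' label to each given GitHub issue number."""
    for number in issue_numbers:
        url = (
            'https://api.github.com/repos/internetarchive/openlibrary'
            f'/issues/{number}/labels'
        )
        requests.post(url, json={'labels': ['Needs: Response']}, headers=github_headers)


# add_needs_response_label([9054, 9065])  # the example from the issue
```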
<code>
[start of scripts/gh_scripts/issue_comment_bot.py]
1 #!/usr/bin/env python
2 """
3 Fetches Open Library GitHub issues that have been commented on
4 within some amount of time, in hours.
5
6 Writes links to each issue to given Slack channel.
7 """
8 import argparse
9 import errno
10 import os
11 import sys
12 import time
13
14 from datetime import datetime, timedelta
15 from typing import Any
16
17 import requests
18
19 # Maps lead label to GitHub username
20 lead_label_to_username = {
21 'Lead: @mekarpeles': 'mekarpeles',
22 'Lead: @cdrini': 'cdrini',
23 'Lead: @scottbarnes': 'scottbarnes',
24 'Lead: @seabelis': 'seabelis',
25 'Lead: @jimchamp': 'jimchamp',
26 }
27
28 # Maps GitHub username to Slack ID
29 username_to_slack_id = {
30 'mekarpeles': '<@mek>',
31 'cdrini': '<@cdrini>',
32 'scottbarnes': '<@U03MNR6T7FH>',
33 'seabelis': '<@UAHQ39ACT>',
34 'jimchamp': '<@U01ARTHG9EV>',
35 'hornc': '<@U0EUS8DV0>',
36 }
37
38 github_headers = {
39 'X-GitHub-Api-Version': '2022-11-28',
40 'Accept': 'application/vnd.github+json',
41 }
42
43
44 def fetch_issues(updated_since: str):
45 """
46 Fetches all GitHub issues that have been updated since the given date string and have at least one comment.
47
48 GitHub results are paginated. This functions appends each result to a list, and does so for all pages.
49 To keep API calls to a minimum, we request the maximum number of results per request (100 per page, as of writing).
50
51 Important: Updated issues need not have a recent comment. Update events include many other things, such as adding a
52 label to an issue, or moving an issue to a milestone. Issues returned by this function will require additional
53 processing in order to determine if they have recent comments.
54 """
55 # Make initial query for updated issues:
56 query = f'repo:internetarchive/openlibrary is:open is:issue comments:>0 updated:>{updated_since}'
57 p: dict[str, str | int] = {
58 'q': query,
59 'per_page': 100,
60 }
61 response = requests.get(
62 'https://api.github.com/search/issues', params=p, headers=github_headers
63 )
64 d = response.json()
65 results = d['items']
66
67 # Fetch additional updated issues, if any exist
68 def get_next_page(url: str):
69 """Returns list of issues and optional url for next page"""
70 resp = requests.get(url, headers=github_headers)
71 # Get issues
72 d = resp.json()
73 issues = d['items']
74 # Prepare url for next page
75 next = resp.links.get('next', {})
76 next_url = next.get('url', '')
77
78 return issues, next_url
79
80 links = response.links
81 next = links.get('next', {})
82 next_url = next.get('url', '')
83 while next_url:
84 # Make call with next link
85 issues, next_url = get_next_page(next_url)
86 results = results + issues
87
88 return results
89
90
91 def filter_issues(issues: list, since: datetime):
92 """
93 Returns list of issues that were not last responded to by staff.
94 Requires fetching the most recent comments for the given issues.
95 """
96 results = []
97
98 for i in issues:
99 # Fetch comments using URL from previous GitHub search results
100 comments_url = i.get('comments_url')
101 resp = requests.get(
102 comments_url, params={'per_page': 100}, headers=github_headers
103 )
104
105 # Ensure that we have the last page of comments
106 links = resp.links
107 last = links.get('last', {})
108 last_url = last.get('url', '')
109
110 if last_url:
111 resp = requests.get(last_url, headers=github_headers)
112
113 # Get last comment
114 comments = resp.json()
115 last_comment = comments[-1]
116
117 # Determine if last comment meets our criteria for Slack notifications
118 # First step: Ensure that the last comment was left after the given `since` datetime
119 created = datetime.fromisoformat(last_comment['created_at'])
120 # Removing timezone info to avoid TypeErrors, which occur when
121 # comparing a timezone-aware datetime with a timezone-naive datetime
122 created = created.replace(tzinfo=None)
123 if created > since:
124 # Next step: Determine if the last commenter is a staff member
125 last_commenter = last_comment['user']['login']
126 if last_commenter not in username_to_slack_id:
127 lead_label = find_lead_label(i.get('labels', []))
128 results.append(
129 {
130 'comment_url': last_comment['html_url'],
131 'commenter': last_commenter,
132 'issue_title': i['title'],
133 'lead_label': lead_label,
134 }
135 )
136
137 return results
138
139
140 def find_lead_label(labels: list[dict[str, Any]]) -> str:
141 """
142 Finds and returns the name of the first lead label found in the given list of GitHub labels.
143
144 Returns an empty string if no lead label is found
145 """
146 result = ''
147 for label in labels:
148 if label['name'].startswith('Lead:'):
149 result = label['name']
150 break
151
152 return result
153
154
155 def publish_digest(
156 issues: list[dict[str, str]],
157 slack_channel: str,
158 slack_token: str,
159 hours_passed: int,
160 ):
161 """
162 Creates a threaded Slack messaged containing a digest of recently commented GitHub issues.
163
164 Parent Slack message will say how many comments were left, and the timeframe. Each reply
165 will include a link to the comment, as well as additional information.
166 """
167 # Create the parent message
168 parent_thread_msg = (
169 f'{len(issues)} new GitHub comment(s) since {hours_passed} hour(s) ago'
170 )
171
172 response = requests.post(
173 'https://slack.com/api/chat.postMessage',
174 headers={
175 'Authorization': f"Bearer {slack_token}",
176 'Content-Type': 'application/json; charset=utf-8',
177 },
178 json={
179 'channel': slack_channel,
180 'text': parent_thread_msg,
181 },
182 )
183
184 if response.status_code != 200:
185 # XXX : Log this
186 print(f'Failed to send message to Slack. Status code: {response.status_code}')
187 # XXX : Add retry logic?
188 sys.exit(errno.ECOMM)
189
190 d = response.json()
191 # Store timestamp, which, along with the channel, uniquely identifies the parent thread
192 ts = d.get('ts')
193
194 def comment_on_thread(message: str):
195 """
196 Posts the given message as a reply to the parent message.
197 """
198 response = requests.post(
199 'https://slack.com/api/chat.postMessage',
200 headers={
201 'Authorization': f"Bearer {slack_token}",
202 'Content-Type': 'application/json; charset=utf-8',
203 },
204 json={
205 'channel': slack_channel,
206 'text': message,
207 'thread_ts': ts,
208 },
209 )
210 if response.status_code != 200:
211 # XXX : Check "ok" field for errors
212 # XXX : Log this
213 print(
214 f'Failed to POST slack message\n Status code: {response.status_code}\n Message: {message}'
215 )
216 # XXX : Retry logic?
217
218 for i in issues:
219 # Slack rate limit is roughly 1 request per second
220 time.sleep(1)
221
222 comment_url = i['comment_url']
223 issue_title = i['issue_title']
224 commenter = i['commenter']
225 message = f'<{comment_url}|Latest comment for: *{issue_title}*>\n'
226
227 username = lead_label_to_username.get(i['lead_label'], '')
228 slack_id = username_to_slack_id.get(username, '')
229 if slack_id:
230 message += f'Lead: {slack_id}\n'
231 elif i['lead_label']:
232 message += f'{i["lead_label"]}\n'
233 else:
234 message += 'Lead: N/A\n'
235
236 message += f'Commenter: *{commenter}*'
237 comment_on_thread(message)
238
239
240 def time_since(hours):
241 """Returns datetime and string representations of the current time, minus the given hour"""
242 now = datetime.now()
243 # XXX : Add a minute or two to the delta (to avoid dropping issues)?
244 since = now - timedelta(hours=hours)
245 return since, since.strftime('%Y-%m-%dT%H:%M:%S')
246
247
248 def add_label_to_issues(issues):
249 for issue in issues:
250 issue_labels_url = f"https://api.github.com/repos/internetarchive/openlibrary/issues/{issue['number']}/labels"
251 response = requests.post(
252 issue_labels_url,
253 json={"labels": ["Needs: Response"]},
254 headers=github_headers,
255 )
256
257
258 def start_job(args: argparse.Namespace):
259 """
260 Starts the new comment digest job.
261 """
262 since, date_string = time_since(args.hours)
263 issues = fetch_issues(date_string)
264
265 filtered_issues = filter_issues(issues, since)
266 add_label_to_issues(filtered_issues)
267 publish_digest(filtered_issues, args.channel, args.slack_token, args.hours)
268 print('Digest posted to Slack.')
269
270
271 def _get_parser() -> argparse.ArgumentParser:
272 """
273 Creates and returns an ArgumentParser containing default values which were
274 read from the config file.
275 """
276 parser = argparse.ArgumentParser(description=__doc__)
277 parser.add_argument(
278 'hours',
279 help='Fetch issues that have been updated since this many hours ago',
280 type=int,
281 )
282 parser.add_argument(
283 'channel',
284 help="Issues will be published to this Slack channel",
285 type=str,
286 )
287 parser.add_argument(
288 'slack_token',
289 metavar='slack-token',
290 help='Slack auth token',
291 type=str,
292 )
293
294 return parser
295
296
297 if __name__ == '__main__':
298 # Process command-line arguments and starts the notification job
299 parser = _get_parser()
300 args = parser.parse_args()
301
302 # If found, add token to GitHub request headers:
303 github_token = os.environ.get('GITHUB_TOKEN', '')
304 if github_token:
305 github_headers['Authorization'] = f'Bearer {github_token}'
306 start_job(args)
307
[end of scripts/gh_scripts/issue_comment_bot.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/scripts/gh_scripts/issue_comment_bot.py b/scripts/gh_scripts/issue_comment_bot.py
--- a/scripts/gh_scripts/issue_comment_bot.py
+++ b/scripts/gh_scripts/issue_comment_bot.py
@@ -127,6 +127,7 @@
lead_label = find_lead_label(i.get('labels', []))
results.append(
{
+ 'number': i['number'],
'comment_url': last_comment['html_url'],
'commenter': last_commenter,
'issue_title': i['title'],
|
{"golden_diff": "diff --git a/scripts/gh_scripts/issue_comment_bot.py b/scripts/gh_scripts/issue_comment_bot.py\n--- a/scripts/gh_scripts/issue_comment_bot.py\n+++ b/scripts/gh_scripts/issue_comment_bot.py\n@@ -127,6 +127,7 @@\n lead_label = find_lead_label(i.get('labels', []))\n results.append(\n {\n+ 'number': i['number'],\n 'comment_url': last_comment['html_url'],\n 'commenter': last_commenter,\n 'issue_title': i['title'],\n", "issue": "Update Github Action \"Recent Comment Slack Bot\" to mark issues as Needs: Response \n<!-- IMPORTANT: Before posting, be sure to redact or remove sensitive data, such as passwords, secret keys, session cookies, etc. -->\r\n\r\n<!-- A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] -->\r\n\r\n### Describe the problem that you'd like solved\r\n<!-- A clear and concise description of what you want to happen. -->\r\n\r\n```\r\ndef add_label_to_issues(ids):\r\n owner = 'internetarchive'\r\n repo = 'openlibrary'\r\n labels_url = f\"https://api.github.com/repos/{owner}/{repo}/issues/labels\"\r\n payload = {\"labels\": [\"Needs: Response\"]}\r\n for issue_number in ids:\r\n issue_labels_url = f\"https://api.github.com/repos/{owner}/{repo}/issues/{issue_number}/labels\"\r\n response = requests.post(issue_labels_url, json=payload, headers=github_headers)\r\n\r\nif __name__ == \"__main__\":\r\n add_label_to_issues([9054, 9065])\r\n```\r\n\r\nto https://github.com/internetarchive/openlibrary/blob/master/scripts/gh_scripts/issue_comment_bot.py#L255\r\n\r\n### Proposal & Constraints\r\n<!-- What is the proposed solution / implementation? Is there a precedent of this approach succeeding elsewhere? -->\r\n\r\n<!-- Which suggestions or requirements should be considered for how feature needs to appear or be implemented? -->\r\n\r\n### Additional context\r\n<!-- Add any other context or screenshots about the feature request here. -->\r\n\r\n### Stakeholders\r\n<!-- @ tag stakeholders of this bug -->\r\n\r\n\r\n\n", "before_files": [{"content": "#!/usr/bin/env python\n\"\"\"\nFetches Open Library GitHub issues that have been commented on\nwithin some amount of time, in hours.\n\nWrites links to each issue to given Slack channel.\n\"\"\"\nimport argparse\nimport errno\nimport os\nimport sys\nimport time\n\nfrom datetime import datetime, timedelta\nfrom typing import Any\n\nimport requests\n\n# Maps lead label to GitHub username\nlead_label_to_username = {\n 'Lead: @mekarpeles': 'mekarpeles',\n 'Lead: @cdrini': 'cdrini',\n 'Lead: @scottbarnes': 'scottbarnes',\n 'Lead: @seabelis': 'seabelis',\n 'Lead: @jimchamp': 'jimchamp',\n}\n\n# Maps GitHub username to Slack ID\nusername_to_slack_id = {\n 'mekarpeles': '<@mek>',\n 'cdrini': '<@cdrini>',\n 'scottbarnes': '<@U03MNR6T7FH>',\n 'seabelis': '<@UAHQ39ACT>',\n 'jimchamp': '<@U01ARTHG9EV>',\n 'hornc': '<@U0EUS8DV0>',\n}\n\ngithub_headers = {\n 'X-GitHub-Api-Version': '2022-11-28',\n 'Accept': 'application/vnd.github+json',\n}\n\n\ndef fetch_issues(updated_since: str):\n \"\"\"\n Fetches all GitHub issues that have been updated since the given date string and have at least one comment.\n\n GitHub results are paginated. This functions appends each result to a list, and does so for all pages.\n To keep API calls to a minimum, we request the maximum number of results per request (100 per page, as of writing).\n\n Important: Updated issues need not have a recent comment. Update events include many other things, such as adding a\n label to an issue, or moving an issue to a milestone. 
Issues returned by this function will require additional\n processing in order to determine if they have recent comments.\n \"\"\"\n # Make initial query for updated issues:\n query = f'repo:internetarchive/openlibrary is:open is:issue comments:>0 updated:>{updated_since}'\n p: dict[str, str | int] = {\n 'q': query,\n 'per_page': 100,\n }\n response = requests.get(\n 'https://api.github.com/search/issues', params=p, headers=github_headers\n )\n d = response.json()\n results = d['items']\n\n # Fetch additional updated issues, if any exist\n def get_next_page(url: str):\n \"\"\"Returns list of issues and optional url for next page\"\"\"\n resp = requests.get(url, headers=github_headers)\n # Get issues\n d = resp.json()\n issues = d['items']\n # Prepare url for next page\n next = resp.links.get('next', {})\n next_url = next.get('url', '')\n\n return issues, next_url\n\n links = response.links\n next = links.get('next', {})\n next_url = next.get('url', '')\n while next_url:\n # Make call with next link\n issues, next_url = get_next_page(next_url)\n results = results + issues\n\n return results\n\n\ndef filter_issues(issues: list, since: datetime):\n \"\"\"\n Returns list of issues that were not last responded to by staff.\n Requires fetching the most recent comments for the given issues.\n \"\"\"\n results = []\n\n for i in issues:\n # Fetch comments using URL from previous GitHub search results\n comments_url = i.get('comments_url')\n resp = requests.get(\n comments_url, params={'per_page': 100}, headers=github_headers\n )\n\n # Ensure that we have the last page of comments\n links = resp.links\n last = links.get('last', {})\n last_url = last.get('url', '')\n\n if last_url:\n resp = requests.get(last_url, headers=github_headers)\n\n # Get last comment\n comments = resp.json()\n last_comment = comments[-1]\n\n # Determine if last comment meets our criteria for Slack notifications\n # First step: Ensure that the last comment was left after the given `since` datetime\n created = datetime.fromisoformat(last_comment['created_at'])\n # Removing timezone info to avoid TypeErrors, which occur when\n # comparing a timezone-aware datetime with a timezone-naive datetime\n created = created.replace(tzinfo=None)\n if created > since:\n # Next step: Determine if the last commenter is a staff member\n last_commenter = last_comment['user']['login']\n if last_commenter not in username_to_slack_id:\n lead_label = find_lead_label(i.get('labels', []))\n results.append(\n {\n 'comment_url': last_comment['html_url'],\n 'commenter': last_commenter,\n 'issue_title': i['title'],\n 'lead_label': lead_label,\n }\n )\n\n return results\n\n\ndef find_lead_label(labels: list[dict[str, Any]]) -> str:\n \"\"\"\n Finds and returns the name of the first lead label found in the given list of GitHub labels.\n\n Returns an empty string if no lead label is found\n \"\"\"\n result = ''\n for label in labels:\n if label['name'].startswith('Lead:'):\n result = label['name']\n break\n\n return result\n\n\ndef publish_digest(\n issues: list[dict[str, str]],\n slack_channel: str,\n slack_token: str,\n hours_passed: int,\n):\n \"\"\"\n Creates a threaded Slack messaged containing a digest of recently commented GitHub issues.\n\n Parent Slack message will say how many comments were left, and the timeframe. 
Each reply\n will include a link to the comment, as well as additional information.\n \"\"\"\n # Create the parent message\n parent_thread_msg = (\n f'{len(issues)} new GitHub comment(s) since {hours_passed} hour(s) ago'\n )\n\n response = requests.post(\n 'https://slack.com/api/chat.postMessage',\n headers={\n 'Authorization': f\"Bearer {slack_token}\",\n 'Content-Type': 'application/json; charset=utf-8',\n },\n json={\n 'channel': slack_channel,\n 'text': parent_thread_msg,\n },\n )\n\n if response.status_code != 200:\n # XXX : Log this\n print(f'Failed to send message to Slack. Status code: {response.status_code}')\n # XXX : Add retry logic?\n sys.exit(errno.ECOMM)\n\n d = response.json()\n # Store timestamp, which, along with the channel, uniquely identifies the parent thread\n ts = d.get('ts')\n\n def comment_on_thread(message: str):\n \"\"\"\n Posts the given message as a reply to the parent message.\n \"\"\"\n response = requests.post(\n 'https://slack.com/api/chat.postMessage',\n headers={\n 'Authorization': f\"Bearer {slack_token}\",\n 'Content-Type': 'application/json; charset=utf-8',\n },\n json={\n 'channel': slack_channel,\n 'text': message,\n 'thread_ts': ts,\n },\n )\n if response.status_code != 200:\n # XXX : Check \"ok\" field for errors\n # XXX : Log this\n print(\n f'Failed to POST slack message\\n Status code: {response.status_code}\\n Message: {message}'\n )\n # XXX : Retry logic?\n\n for i in issues:\n # Slack rate limit is roughly 1 request per second\n time.sleep(1)\n\n comment_url = i['comment_url']\n issue_title = i['issue_title']\n commenter = i['commenter']\n message = f'<{comment_url}|Latest comment for: *{issue_title}*>\\n'\n\n username = lead_label_to_username.get(i['lead_label'], '')\n slack_id = username_to_slack_id.get(username, '')\n if slack_id:\n message += f'Lead: {slack_id}\\n'\n elif i['lead_label']:\n message += f'{i[\"lead_label\"]}\\n'\n else:\n message += 'Lead: N/A\\n'\n\n message += f'Commenter: *{commenter}*'\n comment_on_thread(message)\n\n\ndef time_since(hours):\n \"\"\"Returns datetime and string representations of the current time, minus the given hour\"\"\"\n now = datetime.now()\n # XXX : Add a minute or two to the delta (to avoid dropping issues)?\n since = now - timedelta(hours=hours)\n return since, since.strftime('%Y-%m-%dT%H:%M:%S')\n\n\ndef add_label_to_issues(issues):\n for issue in issues:\n issue_labels_url = f\"https://api.github.com/repos/internetarchive/openlibrary/issues/{issue['number']}/labels\"\n response = requests.post(\n issue_labels_url,\n json={\"labels\": [\"Needs: Response\"]},\n headers=github_headers,\n )\n\n\ndef start_job(args: argparse.Namespace):\n \"\"\"\n Starts the new comment digest job.\n \"\"\"\n since, date_string = time_since(args.hours)\n issues = fetch_issues(date_string)\n\n filtered_issues = filter_issues(issues, since)\n add_label_to_issues(filtered_issues)\n publish_digest(filtered_issues, args.channel, args.slack_token, args.hours)\n print('Digest posted to Slack.')\n\n\ndef _get_parser() -> argparse.ArgumentParser:\n \"\"\"\n Creates and returns an ArgumentParser containing default values which were\n read from the config file.\n \"\"\"\n parser = argparse.ArgumentParser(description=__doc__)\n parser.add_argument(\n 'hours',\n help='Fetch issues that have been updated since this many hours ago',\n type=int,\n )\n parser.add_argument(\n 'channel',\n help=\"Issues will be published to this Slack channel\",\n type=str,\n )\n parser.add_argument(\n 'slack_token',\n metavar='slack-token',\n 
help='Slack auth token',\n type=str,\n )\n\n return parser\n\n\nif __name__ == '__main__':\n # Process command-line arguments and starts the notification job\n parser = _get_parser()\n args = parser.parse_args()\n\n # If found, add token to GitHub request headers:\n github_token = os.environ.get('GITHUB_TOKEN', '')\n if github_token:\n github_headers['Authorization'] = f'Bearer {github_token}'\n start_job(args)\n", "path": "scripts/gh_scripts/issue_comment_bot.py"}]}
| 3,961 | 120 |
gh_patches_debug_38603
|
rasdani/github-patches
|
git_diff
|
docker__docker-py-347
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
SSLAdapter init_poolmanager error because requests.packages.urllib3.__version__ is invalid
In ssladapter.py,
``` python
try:
import requests.packages.urllib3 as urllib3
except ImportError:
import urllib3
...
if urllib3 and urllib_ver == 'dev' and \
StrictVersion(urllib_ver) > StrictVersion('1.5'):
kwargs['ssl_version'] = self.ssl_version
self.poolmanager = PoolManager(**kwargs)
```
`requests.packages.urllib3` will be imported, and its `__version__` is currently 'dev', so `StrictVersion(urllib_ver)` raises a ValueError: "invalid version number 'dev'"
</issue>
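To make the failure mode concrete, here is a small self-contained sketch (not docker-py's actual code): `StrictVersion` only accepts dotted numeric version strings, so the 'dev' marker has to be special-cased before any comparison. The helper name and the 1.5 threshold below are taken from the `can_override_ssl_version` helper that the golden diff for this record introduces.

```python
from distutils.version import StrictVersion

try:
    StrictVersion('dev')  # only versions like '1.7.1' parse
except ValueError as exc:
    print(exc)  # invalid version number 'dev'


def can_override_ssl_version(urllib_ver):
    """Guarded check: treat 'dev' builds as new enough, parse everything else."""
    if urllib_ver is None:
        return False
    if urllib_ver == 'dev':
        return True
    return StrictVersion(urllib_ver) > StrictVersion('1.5')


print(can_override_ssl_version('dev'))    # True
print(can_override_ssl_version('1.7.1'))  # True
print(can_override_ssl_version('1.4'))    # False
```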
<code>
[start of docker/tls.py]
1 import os
2
3 from . import errors
4 from .ssladapter import ssladapter
5
6
7 class TLSConfig(object):
8 cert = None
9 verify = None
10 ssl_version = None
11
12 def __init__(self, client_cert=None, ca_cert=None, verify=None,
13 ssl_version=None):
14 # Argument compatibility/mapping with
15 # http://docs.docker.com/examples/https/
16 # This diverges from the Docker CLI in that users can specify 'tls'
17 # here, but also disable any public/default CA pool verification by
18 # leaving tls_verify=False
19
20 # urllib3 sets a default ssl_version if ssl_version is None
21 # http://tinyurl.com/kxga8hb
22 self.ssl_version = ssl_version
23
24 # "tls" and "tls_verify" must have both or neither cert/key files
25 # In either case, Alert the user when both are expected, but any are
26 # missing.
27
28 if client_cert:
29 try:
30 tls_cert, tls_key = client_cert
31 except ValueError:
32 raise errors.TLSParameterError(
33 'client_config must be a tuple of'
34 ' (client certificate, key file)'
35 )
36
37 if not (tls_cert and tls_key) or (not os.path.isfile(tls_cert) or
38 not os.path.isfile(tls_key)):
39 raise errors.TLSParameterError(
40 'Path to a certificate and key files must be provided'
41 ' through the client_config param'
42 )
43 self.cert = (tls_cert, tls_key)
44
45 # Either set verify to True (public/default CA checks) or to the
46 # path of a CA Cert file.
47 if verify is not None:
48 if not ca_cert:
49 self.verify = verify
50 elif os.path.isfile(ca_cert):
51 if not verify:
52 raise errors.TLSParameterError(
53 'verify can not be False when a CA cert is'
54 ' provided.'
55 )
56 self.verify = ca_cert
57 else:
58 raise errors.TLSParameterError(
59 'Invalid CA certificate provided for `tls_ca_cert`.'
60 )
61
62 def configure_client(self, client):
63 client.ssl_version = self.ssl_version
64 if self.verify is not None:
65 client.verify = self.verify
66 if self.cert:
67 client.cert = self.cert
68 client.mount('https://', ssladapter.SSLAdapter(self.ssl_version))
69
[end of docker/tls.py]
[start of docker/ssladapter/ssladapter.py]
1 """ Resolves OpenSSL issues in some servers:
2 https://lukasa.co.uk/2013/01/Choosing_SSL_Version_In_Requests/
3 https://github.com/kennethreitz/requests/pull/799
4 """
5 from distutils.version import StrictVersion
6 from requests.adapters import HTTPAdapter
7 try:
8 import requests.packages.urllib3 as urllib3
9 except ImportError:
10 import urllib3
11
12
13 PoolManager = urllib3.poolmanager.PoolManager
14
15
16 class SSLAdapter(HTTPAdapter):
17 '''An HTTPS Transport Adapter that uses an arbitrary SSL version.'''
18 def __init__(self, ssl_version=None, **kwargs):
19 self.ssl_version = ssl_version
20 super(SSLAdapter, self).__init__(**kwargs)
21
22 def init_poolmanager(self, connections, maxsize, block=False):
23 urllib_ver = urllib3.__version__.split('-')[0]
24 kwargs = {
25 'num_pools': connections,
26 'maxsize': maxsize,
27 'block': block
28 }
29 if urllib3 and urllib_ver == 'dev' and \
30 StrictVersion(urllib_ver) > StrictVersion('1.5'):
31 kwargs['ssl_version'] = self.ssl_version
32
33 self.poolmanager = PoolManager(**kwargs)
34
[end of docker/ssladapter/ssladapter.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/docker/ssladapter/ssladapter.py b/docker/ssladapter/ssladapter.py
--- a/docker/ssladapter/ssladapter.py
+++ b/docker/ssladapter/ssladapter.py
@@ -15,19 +15,27 @@
class SSLAdapter(HTTPAdapter):
'''An HTTPS Transport Adapter that uses an arbitrary SSL version.'''
- def __init__(self, ssl_version=None, **kwargs):
+ def __init__(self, ssl_version=None, assert_hostname=None, **kwargs):
self.ssl_version = ssl_version
+ self.assert_hostname = assert_hostname
super(SSLAdapter, self).__init__(**kwargs)
def init_poolmanager(self, connections, maxsize, block=False):
- urllib_ver = urllib3.__version__.split('-')[0]
kwargs = {
'num_pools': connections,
'maxsize': maxsize,
- 'block': block
+ 'block': block,
+ 'assert_hostname': self.assert_hostname,
}
- if urllib3 and urllib_ver == 'dev' and \
- StrictVersion(urllib_ver) > StrictVersion('1.5'):
+ if self.can_override_ssl_version():
kwargs['ssl_version'] = self.ssl_version
self.poolmanager = PoolManager(**kwargs)
+
+ def can_override_ssl_version(self):
+ urllib_ver = urllib3.__version__.split('-')[0]
+ if urllib_ver is None:
+ return False
+ if urllib_ver == 'dev':
+ return True
+ return StrictVersion(urllib_ver) > StrictVersion('1.5')
diff --git a/docker/tls.py b/docker/tls.py
--- a/docker/tls.py
+++ b/docker/tls.py
@@ -10,7 +10,7 @@
ssl_version = None
def __init__(self, client_cert=None, ca_cert=None, verify=None,
- ssl_version=None):
+ ssl_version=None, assert_hostname=None):
# Argument compatibility/mapping with
# http://docs.docker.com/examples/https/
# This diverges from the Docker CLI in that users can specify 'tls'
@@ -20,6 +20,7 @@
# urllib3 sets a default ssl_version if ssl_version is None
# http://tinyurl.com/kxga8hb
self.ssl_version = ssl_version
+ self.assert_hostname = assert_hostname
# "tls" and "tls_verify" must have both or neither cert/key files
# In either case, Alert the user when both are expected, but any are
@@ -65,4 +66,7 @@
client.verify = self.verify
if self.cert:
client.cert = self.cert
- client.mount('https://', ssladapter.SSLAdapter(self.ssl_version))
+ client.mount('https://', ssladapter.SSLAdapter(
+ ssl_version=self.ssl_version,
+ assert_hostname=self.assert_hostname,
+ ))
|
{"golden_diff": "diff --git a/docker/ssladapter/ssladapter.py b/docker/ssladapter/ssladapter.py\n--- a/docker/ssladapter/ssladapter.py\n+++ b/docker/ssladapter/ssladapter.py\n@@ -15,19 +15,27 @@\n \n class SSLAdapter(HTTPAdapter):\n '''An HTTPS Transport Adapter that uses an arbitrary SSL version.'''\n- def __init__(self, ssl_version=None, **kwargs):\n+ def __init__(self, ssl_version=None, assert_hostname=None, **kwargs):\n self.ssl_version = ssl_version\n+ self.assert_hostname = assert_hostname\n super(SSLAdapter, self).__init__(**kwargs)\n \n def init_poolmanager(self, connections, maxsize, block=False):\n- urllib_ver = urllib3.__version__.split('-')[0]\n kwargs = {\n 'num_pools': connections,\n 'maxsize': maxsize,\n- 'block': block\n+ 'block': block,\n+ 'assert_hostname': self.assert_hostname,\n }\n- if urllib3 and urllib_ver == 'dev' and \\\n- StrictVersion(urllib_ver) > StrictVersion('1.5'):\n+ if self.can_override_ssl_version():\n kwargs['ssl_version'] = self.ssl_version\n \n self.poolmanager = PoolManager(**kwargs)\n+\n+ def can_override_ssl_version(self):\n+ urllib_ver = urllib3.__version__.split('-')[0]\n+ if urllib_ver is None:\n+ return False\n+ if urllib_ver == 'dev':\n+ return True\n+ return StrictVersion(urllib_ver) > StrictVersion('1.5')\ndiff --git a/docker/tls.py b/docker/tls.py\n--- a/docker/tls.py\n+++ b/docker/tls.py\n@@ -10,7 +10,7 @@\n ssl_version = None\n \n def __init__(self, client_cert=None, ca_cert=None, verify=None,\n- ssl_version=None):\n+ ssl_version=None, assert_hostname=None):\n # Argument compatibility/mapping with\n # http://docs.docker.com/examples/https/\n # This diverges from the Docker CLI in that users can specify 'tls'\n@@ -20,6 +20,7 @@\n # urllib3 sets a default ssl_version if ssl_version is None\n # http://tinyurl.com/kxga8hb\n self.ssl_version = ssl_version\n+ self.assert_hostname = assert_hostname\n \n # \"tls\" and \"tls_verify\" must have both or neither cert/key files\n # In either case, Alert the user when both are expected, but any are\n@@ -65,4 +66,7 @@\n client.verify = self.verify\n if self.cert:\n client.cert = self.cert\n- client.mount('https://', ssladapter.SSLAdapter(self.ssl_version))\n+ client.mount('https://', ssladapter.SSLAdapter(\n+ ssl_version=self.ssl_version,\n+ assert_hostname=self.assert_hostname,\n+ ))\n", "issue": "SSLAdapter init_poolmanager error because request.packages.urllib3.__version__ invalid\nIn ssladapter.py, \n\n``` python\ntry:\n import requests.packages.urllib3 as urllib3\nexcept ImportError:\n import urllib3\n...\n\n if urllib3 and urllib_ver == 'dev' and \\\n StrictVersion(urllib_ver) > StrictVersion('1.5'):\n kwargs['ssl_version'] = self.ssl_version\n\n self.poolmanager = PoolManager(**kwargs)\n```\n\n`requests.packages.urllib3` will be imported, and the `requests.packages.urllib3.__version__` is 'dev' at present, and the `StrictVersion(urllib_ver)` will raise a ValueError: \"invalid version number 'dev'\"\n\n", "before_files": [{"content": "import os\n\nfrom . 
import errors\nfrom .ssladapter import ssladapter\n\n\nclass TLSConfig(object):\n cert = None\n verify = None\n ssl_version = None\n\n def __init__(self, client_cert=None, ca_cert=None, verify=None,\n ssl_version=None):\n # Argument compatibility/mapping with\n # http://docs.docker.com/examples/https/\n # This diverges from the Docker CLI in that users can specify 'tls'\n # here, but also disable any public/default CA pool verification by\n # leaving tls_verify=False\n\n # urllib3 sets a default ssl_version if ssl_version is None\n # http://tinyurl.com/kxga8hb\n self.ssl_version = ssl_version\n\n # \"tls\" and \"tls_verify\" must have both or neither cert/key files\n # In either case, Alert the user when both are expected, but any are\n # missing.\n\n if client_cert:\n try:\n tls_cert, tls_key = client_cert\n except ValueError:\n raise errors.TLSParameterError(\n 'client_config must be a tuple of'\n ' (client certificate, key file)'\n )\n\n if not (tls_cert and tls_key) or (not os.path.isfile(tls_cert) or\n not os.path.isfile(tls_key)):\n raise errors.TLSParameterError(\n 'Path to a certificate and key files must be provided'\n ' through the client_config param'\n )\n self.cert = (tls_cert, tls_key)\n\n # Either set verify to True (public/default CA checks) or to the\n # path of a CA Cert file.\n if verify is not None:\n if not ca_cert:\n self.verify = verify\n elif os.path.isfile(ca_cert):\n if not verify:\n raise errors.TLSParameterError(\n 'verify can not be False when a CA cert is'\n ' provided.'\n )\n self.verify = ca_cert\n else:\n raise errors.TLSParameterError(\n 'Invalid CA certificate provided for `tls_ca_cert`.'\n )\n\n def configure_client(self, client):\n client.ssl_version = self.ssl_version\n if self.verify is not None:\n client.verify = self.verify\n if self.cert:\n client.cert = self.cert\n client.mount('https://', ssladapter.SSLAdapter(self.ssl_version))\n", "path": "docker/tls.py"}, {"content": "\"\"\" Resolves OpenSSL issues in some servers:\n https://lukasa.co.uk/2013/01/Choosing_SSL_Version_In_Requests/\n https://github.com/kennethreitz/requests/pull/799\n\"\"\"\nfrom distutils.version import StrictVersion\nfrom requests.adapters import HTTPAdapter\ntry:\n import requests.packages.urllib3 as urllib3\nexcept ImportError:\n import urllib3\n\n\nPoolManager = urllib3.poolmanager.PoolManager\n\n\nclass SSLAdapter(HTTPAdapter):\n '''An HTTPS Transport Adapter that uses an arbitrary SSL version.'''\n def __init__(self, ssl_version=None, **kwargs):\n self.ssl_version = ssl_version\n super(SSLAdapter, self).__init__(**kwargs)\n\n def init_poolmanager(self, connections, maxsize, block=False):\n urllib_ver = urllib3.__version__.split('-')[0]\n kwargs = {\n 'num_pools': connections,\n 'maxsize': maxsize,\n 'block': block\n }\n if urllib3 and urllib_ver == 'dev' and \\\n StrictVersion(urllib_ver) > StrictVersion('1.5'):\n kwargs['ssl_version'] = self.ssl_version\n\n self.poolmanager = PoolManager(**kwargs)\n", "path": "docker/ssladapter/ssladapter.py"}]}
| 1,673 | 639 |
gh_patches_debug_31623
|
rasdani/github-patches
|
git_diff
|
freedomofpress__securedrop-6742
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
SI issuing a 500 error and then timing out on `develop`
## Description
The SI is failing to start from the `develop` branch, first failing with a 500 error and the log messages below (with logs enabled), and then just timing out:
```
[Fri Feb 03 21:37:34.589966 2023] [wsgi:info] [pid 1330:tid 118957187065600] [remote 127.0.0.1:51894] mod_wsgi (pid=1330, process='source', application='localhost|'): Loading Python script file '/var/www/source.wsgi'.
[Fri Feb 03 21:37:34.591940 2023] [wsgi:error] [pid 1330:tid 118957187065600] [remote 127.0.0.1:51894] mod_wsgi (pid=1330): Failed to exec Python script file '/var/www/source.wsgi'.
[Fri Feb 03 21:37:34.592605 2023] [wsgi:error] [pid 1330:tid 118957187065600] [remote 127.0.0.1:51894] mod_wsgi (pid=1330): Exception occurred processing WSGI script '/var/www/source.wsgi'.
[Fri Feb 03 21:37:34.593310 2023] [wsgi:error] [pid 1330:tid 118957187065600] [remote 127.0.0.1:51894] Traceback (most recent call last):
[Fri Feb 03 21:37:34.593658 2023] [wsgi:error] [pid 1330:tid 118957187065600] [remote 127.0.0.1:51894] File "/var/www/source.wsgi", line 11, in <module>
[Fri Feb 03 21:37:34.593894 2023] [wsgi:error] [pid 1330:tid 118957187065600] [remote 127.0.0.1:51894] from source import app as application
[Fri Feb 03 21:37:34.594012 2023] [wsgi:error] [pid 1330:tid 118957187065600] [remote 127.0.0.1:51894] ImportError: cannot import name 'app' from 'source' (/var/www/securedrop/source.py)
```
## Steps to Reproduce
- set up a prod VM instance on `.2.5.1`
- build packages from `develop` with `make build-debs`
- step through the upgrade scenario to apply said debs
## Expected Behavior
- system is upgraded cleanly and SI and JI functionality is available
## Actual Behavior
- JI is available, SI errors out with an initial 500 error as described.
## Comments
This looks to have been introduced in https://github.com/freedomofpress/securedrop/pull/6563, specifically in the change in https://github.com/freedomofpress/securedrop/blob/c9e2a7a61c486e38e4fcf06b6196b59a115ce152/securedrop/source.py#L6. Since `app` is now defined only within the `__name__ == '__main__'` block, it isn't defined when run via WSGI (as opposed to directly within the dev env).
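A minimal sketch of one way to restore a module-level `app` in `source.py` (mirroring how `journalist.py` is laid out) so that `from source import app` keeps working under mod_wsgi; the dev-server startup stays behind the `__main__` guard. This is a sketch, not the exact change:
```python
# source.py (sketch only)
from sdconfig import SecureDropConfig
from source_app import create_app

config = SecureDropConfig.get_current()
app = create_app(config)  # imported by source.wsgi at WSGI startup

if __name__ == "__main__":  # pragma: no cover
    debug = getattr(config, "env", "prod") != "prod"
    app.run(debug=debug, host="0.0.0.0", port=8080)  # nosec
```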
</issue>
<code>
[start of securedrop/journalist.py]
1 from encryption import EncryptionManager, GpgKeyNotFoundError
2 from execution import asynchronous
3 from journalist_app import create_app
4 from models import Source
5 from sdconfig import SecureDropConfig
6
7 config = SecureDropConfig.get_current()
8 app = create_app(config)
9
10
11 @asynchronous
12 def prime_keycache() -> None:
13 """Pre-load the source public keys into Redis."""
14 with app.app_context():
15 encryption_mgr = EncryptionManager.get_default()
16 for source in Source.query.filter_by(pending=False, deleted_at=None).all():
17 try:
18 encryption_mgr.get_source_public_key(source.filesystem_id)
19 except GpgKeyNotFoundError:
20 pass
21
22
23 prime_keycache()
24
25
26 if __name__ == "__main__": # pragma: no cover
27 debug = getattr(config, "env", "prod") != "prod"
28 # nosemgrep: python.flask.security.audit.app-run-param-config.avoid_app_run_with_bad_host
29 app.run(debug=debug, host="0.0.0.0", port=8081) # nosec
30
[end of securedrop/journalist.py]
[start of securedrop/source.py]
1 from sdconfig import SecureDropConfig
2 from source_app import create_app
3
4 if __name__ == "__main__": # pragma: no cover
5 config = SecureDropConfig.get_current()
6 app = create_app(config)
7 debug = getattr(config, "env", "prod") != "prod"
8 # nosemgrep: python.flask.security.audit.app-run-param-config.avoid_app_run_with_bad_host
9 app.run(debug=debug, host="0.0.0.0", port=8080) # nosec
10
[end of securedrop/source.py]
[start of securedrop/source_app/__init__.py]
1 import os
2 import time
3 from pathlib import Path
4 from typing import Optional, Tuple
5
6 import i18n
7 import template_filters
8 import version
9 import werkzeug
10 from db import db
11 from flask import Flask, g, redirect, render_template, request, session, url_for
12 from flask_babel import gettext
13 from flask_wtf.csrf import CSRFError, CSRFProtect
14 from models import InstanceConfig
15 from request_that_secures_file_uploads import RequestThatSecuresFileUploads
16 from sdconfig import SecureDropConfig
17 from source_app import api, info, main
18 from source_app.decorators import ignore_static
19 from source_app.utils import clear_session_and_redirect_to_logged_out_page
20
21
22 def get_logo_url(app: Flask) -> str:
23 if not app.static_folder:
24 raise FileNotFoundError
25
26 custom_logo_filename = "i/custom_logo.png"
27 default_logo_filename = "i/logo.png"
28 custom_logo_path = Path(app.static_folder) / custom_logo_filename
29 default_logo_path = Path(app.static_folder) / default_logo_filename
30 if custom_logo_path.is_file():
31 return url_for("static", filename=custom_logo_filename)
32 elif default_logo_path.is_file():
33 return url_for("static", filename=default_logo_filename)
34
35 raise FileNotFoundError
36
37
38 def create_app(config: SecureDropConfig) -> Flask:
39 app = Flask(
40 __name__,
41 template_folder=str(config.SOURCE_TEMPLATES_DIR.absolute()),
42 static_folder=config.STATIC_DIR.absolute(),
43 )
44 app.request_class = RequestThatSecuresFileUploads
45 app.config.from_object(config.SOURCE_APP_FLASK_CONFIG_CLS)
46
47 i18n.configure(config, app)
48
49 @app.before_request
50 @ignore_static
51 def setup_i18n() -> None:
52 """Store i18n-related values in Flask's special g object"""
53 i18n.set_locale(config)
54
55 # The default CSRF token expiration is 1 hour. Since large uploads can
56 # take longer than an hour over Tor, we increase the valid window to 24h.
57 app.config["WTF_CSRF_TIME_LIMIT"] = 60 * 60 * 24
58 CSRFProtect(app)
59
60 app.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = False
61 app.config["SQLALCHEMY_DATABASE_URI"] = config.DATABASE_URI
62 db.init_app(app)
63
64 @app.errorhandler(CSRFError)
65 def handle_csrf_error(e: CSRFError) -> werkzeug.Response:
66 return clear_session_and_redirect_to_logged_out_page(flask_session=session)
67
68 app.jinja_env.trim_blocks = True
69 app.jinja_env.lstrip_blocks = True
70 app.jinja_env.globals["version"] = version.__version__
71 # Exported to source templates for being included in instructions
72 app.jinja_env.globals["submission_key_fpr"] = config.JOURNALIST_KEY
73 app.jinja_env.filters["rel_datetime_format"] = template_filters.rel_datetime_format
74 app.jinja_env.filters["nl2br"] = template_filters.nl2br
75 app.jinja_env.filters["filesizeformat"] = template_filters.filesizeformat
76 app.jinja_env.filters["html_datetime_format"] = template_filters.html_datetime_format
77 app.jinja_env.add_extension("jinja2.ext.do")
78
79 for module in [main, info, api]:
80 app.register_blueprint(module.make_blueprint(config)) # type: ignore
81
82 # before_request hooks are executed in order of declaration, so set up g object
83 # before the potential tor2web 403 response.
84 @app.before_request
85 @ignore_static
86 def setup_g() -> Optional[werkzeug.Response]:
87 if InstanceConfig.get_default(refresh=True).organization_name:
88 g.organization_name = ( # pylint: disable=assigning-non-slot
89 InstanceConfig.get_default().organization_name
90 )
91 else:
92 g.organization_name = gettext("SecureDrop") # pylint: disable=assigning-non-slot
93
94 try:
95 g.logo = get_logo_url(app) # pylint: disable=assigning-non-slot
96 except FileNotFoundError:
97 app.logger.error("Site logo not found.")
98
99 return None
100
101 @app.before_request
102 @ignore_static
103 def check_tor2web() -> Optional[werkzeug.Response]:
104 # TODO: expand header checking logic to catch modern tor2web proxies
105 if "X-tor2web" in request.headers:
106 if request.path != url_for("info.tor2web_warning"):
107 return redirect(url_for("info.tor2web_warning"))
108 return None
109
110 @app.errorhandler(404)
111 def page_not_found(error: werkzeug.exceptions.HTTPException) -> Tuple[str, int]:
112 return render_template("notfound.html"), 404
113
114 @app.errorhandler(500)
115 def internal_error(error: werkzeug.exceptions.HTTPException) -> Tuple[str, int]:
116 return render_template("error.html"), 500
117
118 # Obscure the creation time of source private keys by touching them all
119 # on startup.
120 private_keys = config.GPG_KEY_DIR / "private-keys-v1.d"
121 now = time.time()
122 for entry in os.scandir(private_keys):
123 if not entry.is_file() or not entry.name.endswith(".key"):
124 continue
125 os.utime(entry.path, times=(now, now))
126 # So the ctime is also updated
127 os.chmod(entry.path, entry.stat().st_mode)
128
129 return app
130
[end of securedrop/source_app/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/securedrop/journalist.py b/securedrop/journalist.py
--- a/securedrop/journalist.py
+++ b/securedrop/journalist.py
@@ -5,6 +5,7 @@
from sdconfig import SecureDropConfig
config = SecureDropConfig.get_current()
+# app is imported by journalist.wsgi
app = create_app(config)
diff --git a/securedrop/source.py b/securedrop/source.py
--- a/securedrop/source.py
+++ b/securedrop/source.py
@@ -1,9 +1,11 @@
from sdconfig import SecureDropConfig
from source_app import create_app
+config = SecureDropConfig.get_current()
+# app is imported by source.wsgi
+app = create_app(config)
+
if __name__ == "__main__": # pragma: no cover
- config = SecureDropConfig.get_current()
- app = create_app(config)
debug = getattr(config, "env", "prod") != "prod"
# nosemgrep: python.flask.security.audit.app-run-param-config.avoid_app_run_with_bad_host
app.run(debug=debug, host="0.0.0.0", port=8080) # nosec
diff --git a/securedrop/source_app/__init__.py b/securedrop/source_app/__init__.py
--- a/securedrop/source_app/__init__.py
+++ b/securedrop/source_app/__init__.py
@@ -118,12 +118,15 @@
# Obscure the creation time of source private keys by touching them all
# on startup.
private_keys = config.GPG_KEY_DIR / "private-keys-v1.d"
- now = time.time()
- for entry in os.scandir(private_keys):
- if not entry.is_file() or not entry.name.endswith(".key"):
- continue
- os.utime(entry.path, times=(now, now))
- # So the ctime is also updated
- os.chmod(entry.path, entry.stat().st_mode)
+ # The folder may not exist yet in some dev/testing setups,
+ # and if it doesn't exist there's no mtime to obscure.
+ if private_keys.is_dir():
+ now = time.time()
+ for entry in os.scandir(private_keys):
+ if not entry.is_file() or not entry.name.endswith(".key"):
+ continue
+ os.utime(entry.path, times=(now, now))
+ # So the ctime is also updated
+ os.chmod(entry.path, entry.stat().st_mode)
return app
|
{"golden_diff": "diff --git a/securedrop/journalist.py b/securedrop/journalist.py\n--- a/securedrop/journalist.py\n+++ b/securedrop/journalist.py\n@@ -5,6 +5,7 @@\n from sdconfig import SecureDropConfig\n \n config = SecureDropConfig.get_current()\n+# app is imported by journalist.wsgi\n app = create_app(config)\n \n \ndiff --git a/securedrop/source.py b/securedrop/source.py\n--- a/securedrop/source.py\n+++ b/securedrop/source.py\n@@ -1,9 +1,11 @@\n from sdconfig import SecureDropConfig\n from source_app import create_app\n \n+config = SecureDropConfig.get_current()\n+# app is imported by source.wsgi\n+app = create_app(config)\n+\n if __name__ == \"__main__\": # pragma: no cover\n- config = SecureDropConfig.get_current()\n- app = create_app(config)\n debug = getattr(config, \"env\", \"prod\") != \"prod\"\n # nosemgrep: python.flask.security.audit.app-run-param-config.avoid_app_run_with_bad_host\n app.run(debug=debug, host=\"0.0.0.0\", port=8080) # nosec\ndiff --git a/securedrop/source_app/__init__.py b/securedrop/source_app/__init__.py\n--- a/securedrop/source_app/__init__.py\n+++ b/securedrop/source_app/__init__.py\n@@ -118,12 +118,15 @@\n # Obscure the creation time of source private keys by touching them all\n # on startup.\n private_keys = config.GPG_KEY_DIR / \"private-keys-v1.d\"\n- now = time.time()\n- for entry in os.scandir(private_keys):\n- if not entry.is_file() or not entry.name.endswith(\".key\"):\n- continue\n- os.utime(entry.path, times=(now, now))\n- # So the ctime is also updated\n- os.chmod(entry.path, entry.stat().st_mode)\n+ # The folder may not exist yet in some dev/testing setups,\n+ # and if it doesn't exist there's no mtime to obscure.\n+ if private_keys.is_dir():\n+ now = time.time()\n+ for entry in os.scandir(private_keys):\n+ if not entry.is_file() or not entry.name.endswith(\".key\"):\n+ continue\n+ os.utime(entry.path, times=(now, now))\n+ # So the ctime is also updated\n+ os.chmod(entry.path, entry.stat().st_mode)\n \n return app\n", "issue": "SI issuing a 500 error and then timing out on `develop`\n## Description\r\nThe SI is failing to start from the `develop` branch, first failing with a 500 error and the log messages below (with logs enabled, and then just timing out:\r\n```\r\n[Fri Feb 03 21:37:34.589966 2023] [wsgi:info] [pid 1330:tid 118957187065600] [remote 127.0.0.1:51894] mod_wsgi (pid=1330, process='source', application='localhost|'): Loading Python script file '/var/www/source.wsgi'.\r\n[Fri Feb 03 21:37:34.591940 2023] [wsgi:error] [pid 1330:tid 118957187065600] [remote 127.0.0.1:51894] mod_wsgi (pid=1330): Failed to exec Python script file '/var/www/source.wsgi'.\r\n[Fri Feb 03 21:37:34.592605 2023] [wsgi:error] [pid 1330:tid 118957187065600] [remote 127.0.0.1:51894] mod_wsgi (pid=1330): Exception occurred processing WSGI script '/var/www/source.wsgi'.\r\n[Fri Feb 03 21:37:34.593310 2023] [wsgi:error] [pid 1330:tid 118957187065600] [remote 127.0.0.1:51894] Traceback (most recent call last):\r\n[Fri Feb 03 21:37:34.593658 2023] [wsgi:error] [pid 1330:tid 118957187065600] [remote 127.0.0.1:51894] File \"/var/www/source.wsgi\", line 11, in <module>\r\n[Fri Feb 03 21:37:34.593894 2023] [wsgi:error] [pid 1330:tid 118957187065600] [remote 127.0.0.1:51894] from source import app as application\r\n[Fri Feb 03 21:37:34.594012 2023] [wsgi:error] [pid 1330:tid 118957187065600] [remote 127.0.0.1:51894] ImportError: cannot import name 'app' from 'source' (/var/www/securedrop/source.py)\r\n```\r\n\r\n## Steps to Reproduce\r\n\r\n- set up a prod VM instance on 
`.2.5.1` \r\n- build packages from `develop` with `make build-debs`\r\n- step through the upgrade scenario to apply said debs\r\n\r\n## Expected Behavior\r\n- system is upgraded cleanly and SI and JI functionality is available\r\n\r\n## Actual Behavior\r\n- JI is available, SI errors out with an initial 500 error as described.\r\n\r\n## Comments\r\n\r\nThis looks to have been introduced in https://github.com/freedomofpress/securedrop/pull/6563, specifically in the change in https://github.com/freedomofpress/securedrop/blob/c9e2a7a61c486e38e4fcf06b6196b59a115ce152/securedrop/source.py#L6 . Since `app` is now defined only within the `__name__ == 'main'` block, it isn't defined when run via WSGI (as opposed to directly within the dev env).\r\n\n", "before_files": [{"content": "from encryption import EncryptionManager, GpgKeyNotFoundError\nfrom execution import asynchronous\nfrom journalist_app import create_app\nfrom models import Source\nfrom sdconfig import SecureDropConfig\n\nconfig = SecureDropConfig.get_current()\napp = create_app(config)\n\n\n@asynchronous\ndef prime_keycache() -> None:\n \"\"\"Pre-load the source public keys into Redis.\"\"\"\n with app.app_context():\n encryption_mgr = EncryptionManager.get_default()\n for source in Source.query.filter_by(pending=False, deleted_at=None).all():\n try:\n encryption_mgr.get_source_public_key(source.filesystem_id)\n except GpgKeyNotFoundError:\n pass\n\n\nprime_keycache()\n\n\nif __name__ == \"__main__\": # pragma: no cover\n debug = getattr(config, \"env\", \"prod\") != \"prod\"\n # nosemgrep: python.flask.security.audit.app-run-param-config.avoid_app_run_with_bad_host\n app.run(debug=debug, host=\"0.0.0.0\", port=8081) # nosec\n", "path": "securedrop/journalist.py"}, {"content": "from sdconfig import SecureDropConfig\nfrom source_app import create_app\n\nif __name__ == \"__main__\": # pragma: no cover\n config = SecureDropConfig.get_current()\n app = create_app(config)\n debug = getattr(config, \"env\", \"prod\") != \"prod\"\n # nosemgrep: python.flask.security.audit.app-run-param-config.avoid_app_run_with_bad_host\n app.run(debug=debug, host=\"0.0.0.0\", port=8080) # nosec\n", "path": "securedrop/source.py"}, {"content": "import os\nimport time\nfrom pathlib import Path\nfrom typing import Optional, Tuple\n\nimport i18n\nimport template_filters\nimport version\nimport werkzeug\nfrom db import db\nfrom flask import Flask, g, redirect, render_template, request, session, url_for\nfrom flask_babel import gettext\nfrom flask_wtf.csrf import CSRFError, CSRFProtect\nfrom models import InstanceConfig\nfrom request_that_secures_file_uploads import RequestThatSecuresFileUploads\nfrom sdconfig import SecureDropConfig\nfrom source_app import api, info, main\nfrom source_app.decorators import ignore_static\nfrom source_app.utils import clear_session_and_redirect_to_logged_out_page\n\n\ndef get_logo_url(app: Flask) -> str:\n if not app.static_folder:\n raise FileNotFoundError\n\n custom_logo_filename = \"i/custom_logo.png\"\n default_logo_filename = \"i/logo.png\"\n custom_logo_path = Path(app.static_folder) / custom_logo_filename\n default_logo_path = Path(app.static_folder) / default_logo_filename\n if custom_logo_path.is_file():\n return url_for(\"static\", filename=custom_logo_filename)\n elif default_logo_path.is_file():\n return url_for(\"static\", filename=default_logo_filename)\n\n raise FileNotFoundError\n\n\ndef create_app(config: SecureDropConfig) -> Flask:\n app = Flask(\n __name__,\n 
template_folder=str(config.SOURCE_TEMPLATES_DIR.absolute()),\n static_folder=config.STATIC_DIR.absolute(),\n )\n app.request_class = RequestThatSecuresFileUploads\n app.config.from_object(config.SOURCE_APP_FLASK_CONFIG_CLS)\n\n i18n.configure(config, app)\n\n @app.before_request\n @ignore_static\n def setup_i18n() -> None:\n \"\"\"Store i18n-related values in Flask's special g object\"\"\"\n i18n.set_locale(config)\n\n # The default CSRF token expiration is 1 hour. Since large uploads can\n # take longer than an hour over Tor, we increase the valid window to 24h.\n app.config[\"WTF_CSRF_TIME_LIMIT\"] = 60 * 60 * 24\n CSRFProtect(app)\n\n app.config[\"SQLALCHEMY_TRACK_MODIFICATIONS\"] = False\n app.config[\"SQLALCHEMY_DATABASE_URI\"] = config.DATABASE_URI\n db.init_app(app)\n\n @app.errorhandler(CSRFError)\n def handle_csrf_error(e: CSRFError) -> werkzeug.Response:\n return clear_session_and_redirect_to_logged_out_page(flask_session=session)\n\n app.jinja_env.trim_blocks = True\n app.jinja_env.lstrip_blocks = True\n app.jinja_env.globals[\"version\"] = version.__version__\n # Exported to source templates for being included in instructions\n app.jinja_env.globals[\"submission_key_fpr\"] = config.JOURNALIST_KEY\n app.jinja_env.filters[\"rel_datetime_format\"] = template_filters.rel_datetime_format\n app.jinja_env.filters[\"nl2br\"] = template_filters.nl2br\n app.jinja_env.filters[\"filesizeformat\"] = template_filters.filesizeformat\n app.jinja_env.filters[\"html_datetime_format\"] = template_filters.html_datetime_format\n app.jinja_env.add_extension(\"jinja2.ext.do\")\n\n for module in [main, info, api]:\n app.register_blueprint(module.make_blueprint(config)) # type: ignore\n\n # before_request hooks are executed in order of declaration, so set up g object\n # before the potential tor2web 403 response.\n @app.before_request\n @ignore_static\n def setup_g() -> Optional[werkzeug.Response]:\n if InstanceConfig.get_default(refresh=True).organization_name:\n g.organization_name = ( # pylint: disable=assigning-non-slot\n InstanceConfig.get_default().organization_name\n )\n else:\n g.organization_name = gettext(\"SecureDrop\") # pylint: disable=assigning-non-slot\n\n try:\n g.logo = get_logo_url(app) # pylint: disable=assigning-non-slot\n except FileNotFoundError:\n app.logger.error(\"Site logo not found.\")\n\n return None\n\n @app.before_request\n @ignore_static\n def check_tor2web() -> Optional[werkzeug.Response]:\n # TODO: expand header checking logic to catch modern tor2web proxies\n if \"X-tor2web\" in request.headers:\n if request.path != url_for(\"info.tor2web_warning\"):\n return redirect(url_for(\"info.tor2web_warning\"))\n return None\n\n @app.errorhandler(404)\n def page_not_found(error: werkzeug.exceptions.HTTPException) -> Tuple[str, int]:\n return render_template(\"notfound.html\"), 404\n\n @app.errorhandler(500)\n def internal_error(error: werkzeug.exceptions.HTTPException) -> Tuple[str, int]:\n return render_template(\"error.html\"), 500\n\n # Obscure the creation time of source private keys by touching them all\n # on startup.\n private_keys = config.GPG_KEY_DIR / \"private-keys-v1.d\"\n now = time.time()\n for entry in os.scandir(private_keys):\n if not entry.is_file() or not entry.name.endswith(\".key\"):\n continue\n os.utime(entry.path, times=(now, now))\n # So the ctime is also updated\n os.chmod(entry.path, entry.stat().st_mode)\n\n return app\n", "path": "securedrop/source_app/__init__.py"}]}
| 3,417 | 573 |
gh_patches_debug_20461
|
rasdani/github-patches
|
git_diff
|
mathesar-foundation__mathesar-3125
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Couldn't create new rows for the tables that use IDENTITY for their id columns.
## Description
<!-- A clear and concise description of what the bug is. -->
We have moved to a different method of id generation for our default id column, as discussed [here](https://github.com/centerofci/mathesar/pull/3016); however, this results in an inability to create new rows using the UI.

For some reason the call to the `records/` api isn't being made. Using the browsable api works as expected.
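Judging from the default-handling code below (`get_column_default_dict`), the server side likely trips over the IDENTITY default when reading column defaults, since an `Identity` server default is not a plain text default with an `.arg`. A sketch of the kind of guard that avoids the crash; the helper name is made up for illustration, and this assumes SQLAlchemy reflects the IDENTITY default as an `Identity` object:
```python
# Sketch only: read a column default without choking on IDENTITY columns.
from sqlalchemy import Identity


def default_sql_text(column):
    default = column.server_default
    if default is None:
        return None
    # Identity defaults carry no textual `.arg`, so don't str() them.
    return "identity" if isinstance(default, Identity) else str(default.arg)
```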
## Expected behavior
<!-- A clear and concise description of what you expected to happen. -->
We should be able to use the UI to create rows for this kind of table.
## To Reproduce
<!-- How can we recreate this bug? Please try to provide a Minimal, Complete, and Verifiable (http://stackoverflow.com/help/mcve) example if code-related. -->
- Create a new table using the psql shell: `SELECT msar.add_mathesar_table(2200, 'test_table', NULL, NULL, NULL);` <-- This will create a new table in your `public` schema.
- Sync external changes.
- Try to add new rows to that table.
</issue>
<code>
[start of db/columns/operations/select.py]
1 import warnings
2
3 from sqlalchemy import and_, asc, cast, select, text, exists
4
5 from db.columns.exceptions import DynamicDefaultWarning
6 from db.connection import execute_msar_func_with_engine
7 from db.tables.operations.select import reflect_table_from_oid
8 from db.utils import execute_statement, get_pg_catalog_table
9
10
11 def get_column_attnum_from_names_as_map(table_oid, column_names, engine, metadata, connection_to_use=None):
12 statement = _get_columns_attnum_from_names(table_oid, column_names, engine, metadata=metadata)
13 attnums_tuple = execute_statement(engine, statement, connection_to_use).fetchall()
14 name_attnum_map = {attnum_tuple['attname']: attnum_tuple['attnum'] for attnum_tuple in attnums_tuple}
15 return name_attnum_map
16
17
18 def get_columns_attnum_from_names(table_oid, column_names, engine, metadata, connection_to_use=None):
19 """
20 Returns the respective list of attnum of the column names passed.
21 The order is based on the column order in the table and not by the order of the column names argument.
22 """
23 statement = _get_columns_attnum_from_names(table_oid, column_names, engine=engine, metadata=metadata)
24 attnums_tuple = execute_statement(engine, statement, connection_to_use).fetchall()
25 attnums = [attnum_tuple[0] for attnum_tuple in attnums_tuple]
26 return attnums
27
28
29 def get_column_attnum_from_name(table_oid, column_name, engine, metadata, connection_to_use=None):
30 statement = _get_columns_attnum_from_names(table_oid, [column_name], engine=engine, metadata=metadata)
31 return execute_statement(engine, statement, connection_to_use).scalar()
32
33
34 def _get_columns_attnum_from_names(table_oid, column_names, engine, metadata):
35 pg_attribute = get_pg_catalog_table("pg_attribute", engine=engine, metadata=metadata)
36 sel = select(pg_attribute.c.attnum, pg_attribute.c.attname).where(
37 and_(
38 pg_attribute.c.attrelid == table_oid,
39 pg_attribute.c.attname.in_(column_names)
40 )
41 ).order_by(asc(pg_attribute.c.attnum))
42 return sel
43
44
45 def get_column_attnums_from_tables(table_oids, engine, metadata, connection_to_use=None):
46 pg_attribute = get_pg_catalog_table("pg_attribute", engine, metadata=metadata)
47 sel = select(pg_attribute.c.attnum, pg_attribute.c.attrelid.label('table_oid')).where(
48 and_(
49 pg_attribute.c.attrelid.in_(table_oids),
50 # Ignore system columns
51 pg_attribute.c.attnum > 0,
52 # Ignore removed columns
53 pg_attribute.c.attisdropped.is_(False)
54 )
55 )
56 results = execute_statement(engine, sel, connection_to_use).fetchall()
57 return results
58
59
60 def get_map_of_attnum_and_table_oid_to_column_name(table_oids, engine, metadata, connection_to_use=None):
61 """
62 Order determined by the column order in the table.
63 """
64 triples_of_col_info = _get_triples_of_column_name_and_attnum_and_table_oid(
65 table_oids, None, engine, metadata, connection_to_use
66 )
67 return {
68 (attnum, table_oid): column_name
69 for column_name, attnum, table_oid
70 in triples_of_col_info
71 }
72
73
74 def get_column_names_from_attnums(table_oid, attnums, engine, metadata, connection_to_use=None):
75 return list(get_map_of_attnum_to_column_name(table_oid, attnums, engine, metadata, connection_to_use).values())
76
77
78 def get_map_of_attnum_to_column_name(table_oid, attnums, engine, metadata, connection_to_use=None):
79 """
80 Order determined by the column order in the table.
81 """
82 triples_of_col_info = _get_triples_of_column_name_and_attnum_and_table_oid(
83 [table_oid], attnums, engine, metadata, connection_to_use
84 )
85 return {
86 attnum: column_name
87 for column_name, attnum, _
88 in triples_of_col_info
89 }
90
91
92 def _get_triples_of_column_name_and_attnum_and_table_oid(
93 table_oids, attnums, engine, metadata, connection_to_use
94 ):
95 statement = _statement_for_triples_of_column_name_and_attnum_and_table_oid(
96 table_oids, attnums, engine, metadata
97 )
98 return execute_statement(engine, statement, connection_to_use).fetchall()
99
100
101 def get_column_default(table_oid, attnum, engine, metadata, connection_to_use=None):
102 default_dict = get_column_default_dict(
103 table_oid,
104 attnum,
105 engine,
106 metadata=metadata,
107 connection_to_use=connection_to_use,
108 )
109 if default_dict is not None:
110 return default_dict['value']
111
112
113 def get_column_default_dict(table_oid, attnum, engine, metadata, connection_to_use=None):
114 column = get_column_from_oid_and_attnum(
115 table_oid=table_oid,
116 attnum=attnum,
117 engine=engine,
118 metadata=metadata,
119 connection_to_use=connection_to_use,
120 )
121
122 if column.server_default is None:
123 return
124
125 is_dynamic = execute_msar_func_with_engine(
126 engine, 'is_default_possibly_dynamic', table_oid, attnum
127 ).fetchone()[0]
128 sql_text = str(column.server_default.arg)
129
130 if is_dynamic:
131 warnings.warn(
132 "Dynamic column defaults are read only", DynamicDefaultWarning
133 )
134 default_value = sql_text
135 else:
136 # Defaults are often stored as text with SQL casts appended
137 # Ex: "'test default string'::character varying" or "'2020-01-01'::date"
138 # Here, we execute the cast to get the proper python value
139 default_value = execute_statement(
140 engine,
141 select(cast(text(sql_text), column.type)),
142 connection_to_use
143 ).scalar()
144
145 return {"value": default_value, "is_dynamic": is_dynamic}
146
147
148 def determine_whether_column_contains_data(
149 table_oid, column_name, engine, metadata, connection_to_use=None
150 ):
151 """
152 Given a column, return True if it contains data, False otherwise.
153 """
154 sa_table = reflect_table_from_oid(
155 table_oid, engine, metadata=metadata, connection_to_use=connection_to_use,
156 )
157 sel = select(exists(1).where(sa_table.columns[column_name] != None)) # noqa
158 contains_data = execute_statement(engine, sel, connection_to_use).scalar()
159 return contains_data
160
161
162 def get_column_from_oid_and_attnum(table_oid, attnum, engine, metadata, connection_to_use=None):
163 sa_table = reflect_table_from_oid(table_oid, engine, metadata=metadata, connection_to_use=connection_to_use)
164 column_name = get_column_name_from_attnum(table_oid, attnum, engine, metadata=metadata, connection_to_use=connection_to_use)
165 sa_column = sa_table.columns[column_name]
166 return sa_column
167
168
169 def get_column_name_from_attnum(table_oid, attnum, engine, metadata, connection_to_use=None):
170 statement = _statement_for_triples_of_column_name_and_attnum_and_table_oid(
171 [table_oid], [attnum], engine, metadata=metadata,
172 )
173 column_name = execute_statement(engine, statement, connection_to_use).scalar()
174 return column_name
175
176
177 def _statement_for_triples_of_column_name_and_attnum_and_table_oid(
178 table_oids, attnums, engine, metadata
179 ):
180 """
181 Returns (column name, column attnum, column table's oid) tuples for each column that's in the
182 tables specified via `table_oids`, and, when `attnums` is not None, that has an attnum
183 specified in `attnums`.
184
185 The order is based on the column order in the table and not on the order of the arguments.
186 """
187 pg_attribute = get_pg_catalog_table("pg_attribute", engine, metadata=metadata)
188 sel = select(pg_attribute.c.attname, pg_attribute.c.attnum, pg_attribute.c.attrelid)
189 wasnt_dropped = pg_attribute.c.attisdropped.is_(False)
190 table_oid_matches = pg_attribute.c.attrelid.in_(table_oids)
191 conditions = [wasnt_dropped, table_oid_matches]
192 if attnums is not None:
193 attnum_matches = pg_attribute.c.attnum.in_(attnums)
194 conditions.append(attnum_matches)
195 else:
196 attnum_positive = pg_attribute.c.attnum > 0
197 conditions.append(attnum_positive)
198 sel = sel.where(and_(*conditions))
199 return sel
200
[end of db/columns/operations/select.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/db/columns/operations/select.py b/db/columns/operations/select.py
--- a/db/columns/operations/select.py
+++ b/db/columns/operations/select.py
@@ -1,6 +1,6 @@
import warnings
-from sqlalchemy import and_, asc, cast, select, text, exists
+from sqlalchemy import and_, asc, cast, select, text, exists, Identity
from db.columns.exceptions import DynamicDefaultWarning
from db.connection import execute_msar_func_with_engine
@@ -118,14 +118,16 @@
metadata=metadata,
connection_to_use=connection_to_use,
)
+ default = column.server_default
- if column.server_default is None:
+ if default is None:
return
is_dynamic = execute_msar_func_with_engine(
engine, 'is_default_possibly_dynamic', table_oid, attnum
).fetchone()[0]
- sql_text = str(column.server_default.arg)
+
+ sql_text = str(default.arg) if not isinstance(default, Identity) else 'identity'
if is_dynamic:
warnings.warn(
|
{"golden_diff": "diff --git a/db/columns/operations/select.py b/db/columns/operations/select.py\n--- a/db/columns/operations/select.py\n+++ b/db/columns/operations/select.py\n@@ -1,6 +1,6 @@\n import warnings\n \n-from sqlalchemy import and_, asc, cast, select, text, exists\n+from sqlalchemy import and_, asc, cast, select, text, exists, Identity\n \n from db.columns.exceptions import DynamicDefaultWarning\n from db.connection import execute_msar_func_with_engine\n@@ -118,14 +118,16 @@\n metadata=metadata,\n connection_to_use=connection_to_use,\n )\n+ default = column.server_default\n \n- if column.server_default is None:\n+ if default is None:\n return\n \n is_dynamic = execute_msar_func_with_engine(\n engine, 'is_default_possibly_dynamic', table_oid, attnum\n ).fetchone()[0]\n- sql_text = str(column.server_default.arg)\n+\n+ sql_text = str(default.arg) if not isinstance(default, Identity) else 'identity'\n \n if is_dynamic:\n warnings.warn(\n", "issue": "Couldn't create new rows for the tables that use IDENTITY for their id columns.\n## Description\r\n<!-- A clear and concise description of what the bug is. -->\r\nWe have moved to using a different method for ids generations for our default id column as discussed [here](https://github.com/centerofci/mathesar/pull/3016), however this results in an inablility to create new rows using the ui.\r\n\r\n\r\n\r\nFor some reason the call to the `records/` api isn't being made. Using the browsable api works as expected.\r\n\r\n## Expected behavior\r\n<!-- A clear and concise description of what you expected to happen. -->\r\nWe should be able to use the ui to create rows for this kind of tables. \r\n\r\n## To Reproduce\r\n<!-- How can we recreate this bug? Please try to provide a Minimal, Complete, and Verifiable (http://stackoverflow.com/help/mcve) example if code-related. -->\r\n- Create a new table using the psql shell: `SELECT msar.add_mathesar_table(2200, 'test_table', NULL, NULL, NULL);` <-- This will create a new table in your `public` shema.\r\n- Sync external changes.\r\n- Try to add new rows to that table. 
\r\n\r\n\n", "before_files": [{"content": "import warnings\n\nfrom sqlalchemy import and_, asc, cast, select, text, exists\n\nfrom db.columns.exceptions import DynamicDefaultWarning\nfrom db.connection import execute_msar_func_with_engine\nfrom db.tables.operations.select import reflect_table_from_oid\nfrom db.utils import execute_statement, get_pg_catalog_table\n\n\ndef get_column_attnum_from_names_as_map(table_oid, column_names, engine, metadata, connection_to_use=None):\n statement = _get_columns_attnum_from_names(table_oid, column_names, engine, metadata=metadata)\n attnums_tuple = execute_statement(engine, statement, connection_to_use).fetchall()\n name_attnum_map = {attnum_tuple['attname']: attnum_tuple['attnum'] for attnum_tuple in attnums_tuple}\n return name_attnum_map\n\n\ndef get_columns_attnum_from_names(table_oid, column_names, engine, metadata, connection_to_use=None):\n \"\"\"\n Returns the respective list of attnum of the column names passed.\n The order is based on the column order in the table and not by the order of the column names argument.\n \"\"\"\n statement = _get_columns_attnum_from_names(table_oid, column_names, engine=engine, metadata=metadata)\n attnums_tuple = execute_statement(engine, statement, connection_to_use).fetchall()\n attnums = [attnum_tuple[0] for attnum_tuple in attnums_tuple]\n return attnums\n\n\ndef get_column_attnum_from_name(table_oid, column_name, engine, metadata, connection_to_use=None):\n statement = _get_columns_attnum_from_names(table_oid, [column_name], engine=engine, metadata=metadata)\n return execute_statement(engine, statement, connection_to_use).scalar()\n\n\ndef _get_columns_attnum_from_names(table_oid, column_names, engine, metadata):\n pg_attribute = get_pg_catalog_table(\"pg_attribute\", engine=engine, metadata=metadata)\n sel = select(pg_attribute.c.attnum, pg_attribute.c.attname).where(\n and_(\n pg_attribute.c.attrelid == table_oid,\n pg_attribute.c.attname.in_(column_names)\n )\n ).order_by(asc(pg_attribute.c.attnum))\n return sel\n\n\ndef get_column_attnums_from_tables(table_oids, engine, metadata, connection_to_use=None):\n pg_attribute = get_pg_catalog_table(\"pg_attribute\", engine, metadata=metadata)\n sel = select(pg_attribute.c.attnum, pg_attribute.c.attrelid.label('table_oid')).where(\n and_(\n pg_attribute.c.attrelid.in_(table_oids),\n # Ignore system columns\n pg_attribute.c.attnum > 0,\n # Ignore removed columns\n pg_attribute.c.attisdropped.is_(False)\n )\n )\n results = execute_statement(engine, sel, connection_to_use).fetchall()\n return results\n\n\ndef get_map_of_attnum_and_table_oid_to_column_name(table_oids, engine, metadata, connection_to_use=None):\n \"\"\"\n Order determined by the column order in the table.\n \"\"\"\n triples_of_col_info = _get_triples_of_column_name_and_attnum_and_table_oid(\n table_oids, None, engine, metadata, connection_to_use\n )\n return {\n (attnum, table_oid): column_name\n for column_name, attnum, table_oid\n in triples_of_col_info\n }\n\n\ndef get_column_names_from_attnums(table_oid, attnums, engine, metadata, connection_to_use=None):\n return list(get_map_of_attnum_to_column_name(table_oid, attnums, engine, metadata, connection_to_use).values())\n\n\ndef get_map_of_attnum_to_column_name(table_oid, attnums, engine, metadata, connection_to_use=None):\n \"\"\"\n Order determined by the column order in the table.\n \"\"\"\n triples_of_col_info = _get_triples_of_column_name_and_attnum_and_table_oid(\n [table_oid], attnums, engine, metadata, connection_to_use\n )\n return {\n 
attnum: column_name\n for column_name, attnum, _\n in triples_of_col_info\n }\n\n\ndef _get_triples_of_column_name_and_attnum_and_table_oid(\n table_oids, attnums, engine, metadata, connection_to_use\n):\n statement = _statement_for_triples_of_column_name_and_attnum_and_table_oid(\n table_oids, attnums, engine, metadata\n )\n return execute_statement(engine, statement, connection_to_use).fetchall()\n\n\ndef get_column_default(table_oid, attnum, engine, metadata, connection_to_use=None):\n default_dict = get_column_default_dict(\n table_oid,\n attnum,\n engine,\n metadata=metadata,\n connection_to_use=connection_to_use,\n )\n if default_dict is not None:\n return default_dict['value']\n\n\ndef get_column_default_dict(table_oid, attnum, engine, metadata, connection_to_use=None):\n column = get_column_from_oid_and_attnum(\n table_oid=table_oid,\n attnum=attnum,\n engine=engine,\n metadata=metadata,\n connection_to_use=connection_to_use,\n )\n\n if column.server_default is None:\n return\n\n is_dynamic = execute_msar_func_with_engine(\n engine, 'is_default_possibly_dynamic', table_oid, attnum\n ).fetchone()[0]\n sql_text = str(column.server_default.arg)\n\n if is_dynamic:\n warnings.warn(\n \"Dynamic column defaults are read only\", DynamicDefaultWarning\n )\n default_value = sql_text\n else:\n # Defaults are often stored as text with SQL casts appended\n # Ex: \"'test default string'::character varying\" or \"'2020-01-01'::date\"\n # Here, we execute the cast to get the proper python value\n default_value = execute_statement(\n engine,\n select(cast(text(sql_text), column.type)),\n connection_to_use\n ).scalar()\n\n return {\"value\": default_value, \"is_dynamic\": is_dynamic}\n\n\ndef determine_whether_column_contains_data(\n table_oid, column_name, engine, metadata, connection_to_use=None\n):\n \"\"\"\n Given a column, return True if it contains data, False otherwise.\n \"\"\"\n sa_table = reflect_table_from_oid(\n table_oid, engine, metadata=metadata, connection_to_use=connection_to_use,\n )\n sel = select(exists(1).where(sa_table.columns[column_name] != None)) # noqa\n contains_data = execute_statement(engine, sel, connection_to_use).scalar()\n return contains_data\n\n\ndef get_column_from_oid_and_attnum(table_oid, attnum, engine, metadata, connection_to_use=None):\n sa_table = reflect_table_from_oid(table_oid, engine, metadata=metadata, connection_to_use=connection_to_use)\n column_name = get_column_name_from_attnum(table_oid, attnum, engine, metadata=metadata, connection_to_use=connection_to_use)\n sa_column = sa_table.columns[column_name]\n return sa_column\n\n\ndef get_column_name_from_attnum(table_oid, attnum, engine, metadata, connection_to_use=None):\n statement = _statement_for_triples_of_column_name_and_attnum_and_table_oid(\n [table_oid], [attnum], engine, metadata=metadata,\n )\n column_name = execute_statement(engine, statement, connection_to_use).scalar()\n return column_name\n\n\ndef _statement_for_triples_of_column_name_and_attnum_and_table_oid(\n table_oids, attnums, engine, metadata\n):\n \"\"\"\n Returns (column name, column attnum, column table's oid) tuples for each column that's in the\n tables specified via `table_oids`, and, when `attnums` is not None, that has an attnum\n specified in `attnums`.\n\n The order is based on the column order in the table and not on the order of the arguments.\n \"\"\"\n pg_attribute = get_pg_catalog_table(\"pg_attribute\", engine, metadata=metadata)\n sel = select(pg_attribute.c.attname, pg_attribute.c.attnum, 
pg_attribute.c.attrelid)\n wasnt_dropped = pg_attribute.c.attisdropped.is_(False)\n table_oid_matches = pg_attribute.c.attrelid.in_(table_oids)\n conditions = [wasnt_dropped, table_oid_matches]\n if attnums is not None:\n attnum_matches = pg_attribute.c.attnum.in_(attnums)\n conditions.append(attnum_matches)\n else:\n attnum_positive = pg_attribute.c.attnum > 0\n conditions.append(attnum_positive)\n sel = sel.where(and_(*conditions))\n return sel\n", "path": "db/columns/operations/select.py"}]}
| 3,265 | 242 |
gh_patches_debug_19397
|
rasdani/github-patches
|
git_diff
|
elastic__apm-agent-python-766
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
'UnixDomainSocketConnection' object has no attribute 'port'
AttributeError: 'UnixDomainSocketConnection' object has no attribute 'port'
elastic-apm==5.5.1
```
File "django_redis/cache.py", line 32, in _decorator
return method(self, *args, **kwargs)
File "django_redis/cache.py", line 80, in get
return self.client.get(key, default=default, version=version,
File "django_redis/client/default.py", line 203, in get
value = client.get(key)
File "redis/client.py", line 976, in get
return self.execute_command('GET', name)
File "elasticapm/instrumentation/packages/base.py", line 210, in call_if_sampling
return self.call(module, method, wrapped, instance, args, kwargs)
File "elasticapm/instrumentation/packages/redis.py", line 66, in call
return wrapped(*args, **kwargs)
File "redis/client.py", line 667, in execute_command
connection.send_command(*args)
File "redis/connection.py", line 610, in send_command
self.send_packed_command(self.pack_command(*args))
File "elasticapm/instrumentation/packages/base.py", line 210, in call_if_sampling
return self.call(module, method, wrapped, instance, args, kwargs)
File "elasticapm/instrumentation/packages/redis.py", line 90, in call
port = int(instance.port) if instance.port else None
```
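A sketch of a connection-aware way to build the destination info, assuming TCP connections expose `host`/`port` while unix-domain-socket connections only expose a filesystem `path`; the helper name is made up for illustration:
```python
# Sketch only: avoid touching .port on unix domain socket connections.
def destination_info_for(connection):
    info = {"service": {"name": "redis", "resource": "redis", "type": "db"}}
    if hasattr(connection, "port"):
        info["address"] = connection.host
        info["port"] = connection.port
    elif hasattr(connection, "path"):
        info["address"] = "unix://" + connection.path
        info["port"] = None
    return info
```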
</issue>
<code>
[start of elasticapm/instrumentation/packages/redis.py]
1 # BSD 3-Clause License
2 #
3 # Copyright (c) 2019, Elasticsearch BV
4 # All rights reserved.
5 #
6 # Redistribution and use in source and binary forms, with or without
7 # modification, are permitted provided that the following conditions are met:
8 #
9 # * Redistributions of source code must retain the above copyright notice, this
10 # list of conditions and the following disclaimer.
11 #
12 # * Redistributions in binary form must reproduce the above copyright notice,
13 # this list of conditions and the following disclaimer in the documentation
14 # and/or other materials provided with the distribution.
15 #
16 # * Neither the name of the copyright holder nor the names of its
17 # contributors may be used to endorse or promote products derived from
18 # this software without specific prior written permission.
19 #
20 # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
23 # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
24 # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
26 # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
27 # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
28 # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
29 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30
31 from __future__ import absolute_import
32
33 from elasticapm.instrumentation.packages.base import AbstractInstrumentedModule
34 from elasticapm.traces import capture_span, execution_context
35
36
37 class Redis3CheckMixin(object):
38 instrument_list_3 = []
39 instrument_list = []
40
41 def get_instrument_list(self):
42 try:
43 from redis import VERSION
44
45 if VERSION[0] >= 3:
46 return self.instrument_list_3
47 return self.instrument_list
48 except ImportError:
49 return self.instrument_list
50
51
52 class RedisInstrumentation(Redis3CheckMixin, AbstractInstrumentedModule):
53 name = "redis"
54
55 # no need to instrument StrictRedis in redis-py >= 3.0
56 instrument_list_3 = [("redis.client", "Redis.execute_command")]
57 instrument_list = [("redis.client", "Redis.execute_command"), ("redis.client", "StrictRedis.execute_command")]
58
59 def call(self, module, method, wrapped, instance, args, kwargs):
60 if len(args) > 0:
61 wrapped_name = str(args[0])
62 else:
63 wrapped_name = self.get_wrapped_name(wrapped, instance, method)
64
65 with capture_span(wrapped_name, span_type="db", span_subtype="redis", span_action="query", leaf=True):
66 return wrapped(*args, **kwargs)
67
68
69 class RedisPipelineInstrumentation(Redis3CheckMixin, AbstractInstrumentedModule):
70 name = "redis"
71
72 # BasePipeline has been renamed to Pipeline in redis-py 3
73 instrument_list_3 = [("redis.client", "Pipeline.execute")]
74 instrument_list = [("redis.client", "BasePipeline.execute")]
75
76 def call(self, module, method, wrapped, instance, args, kwargs):
77 wrapped_name = self.get_wrapped_name(wrapped, instance, method)
78 with capture_span(wrapped_name, span_type="db", span_subtype="redis", span_action="query", leaf=True):
79 return wrapped(*args, **kwargs)
80
81
82 class RedisConnectionInstrumentation(AbstractInstrumentedModule):
83 name = "redis"
84
85 instrument_list = (("redis.connection", "Connection.send_packed_command"),)
86
87 def call(self, module, method, wrapped, instance, args, kwargs):
88 span = execution_context.get_span()
89 if span and span.subtype == "redis":
90 port = int(instance.port) if instance.port else None
91 destination_info = {
92 "address": instance.host,
93 "port": port,
94 "service": {"name": "redis", "resource": "redis", "type": "db"},
95 }
96 span.context["destination"] = destination_info
97 return wrapped(*args, **kwargs)
98
[end of elasticapm/instrumentation/packages/redis.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/elasticapm/instrumentation/packages/redis.py b/elasticapm/instrumentation/packages/redis.py
--- a/elasticapm/instrumentation/packages/redis.py
+++ b/elasticapm/instrumentation/packages/redis.py
@@ -87,11 +87,16 @@
def call(self, module, method, wrapped, instance, args, kwargs):
span = execution_context.get_span()
if span and span.subtype == "redis":
- port = int(instance.port) if instance.port else None
- destination_info = {
- "address": instance.host,
- "port": port,
- "service": {"name": "redis", "resource": "redis", "type": "db"},
- }
- span.context["destination"] = destination_info
+ span.context["destination"] = get_destination_info(instance)
return wrapped(*args, **kwargs)
+
+
+def get_destination_info(connection):
+ destination_info = {"service": {"name": "redis", "resource": "redis", "type": "db"}}
+ if hasattr(connection, "port"):
+ destination_info["port"] = connection.port
+ destination_info["address"] = connection.host
+ elif hasattr(connection, "path"):
+ destination_info["port"] = None
+ destination_info["address"] = "unix://" + connection.path
+ return destination_info
|
{"golden_diff": "diff --git a/elasticapm/instrumentation/packages/redis.py b/elasticapm/instrumentation/packages/redis.py\n--- a/elasticapm/instrumentation/packages/redis.py\n+++ b/elasticapm/instrumentation/packages/redis.py\n@@ -87,11 +87,16 @@\n def call(self, module, method, wrapped, instance, args, kwargs):\n span = execution_context.get_span()\n if span and span.subtype == \"redis\":\n- port = int(instance.port) if instance.port else None\n- destination_info = {\n- \"address\": instance.host,\n- \"port\": port,\n- \"service\": {\"name\": \"redis\", \"resource\": \"redis\", \"type\": \"db\"},\n- }\n- span.context[\"destination\"] = destination_info\n+ span.context[\"destination\"] = get_destination_info(instance)\n return wrapped(*args, **kwargs)\n+\n+\n+def get_destination_info(connection):\n+ destination_info = {\"service\": {\"name\": \"redis\", \"resource\": \"redis\", \"type\": \"db\"}}\n+ if hasattr(connection, \"port\"):\n+ destination_info[\"port\"] = connection.port\n+ destination_info[\"address\"] = connection.host\n+ elif hasattr(connection, \"path\"):\n+ destination_info[\"port\"] = None\n+ destination_info[\"address\"] = \"unix://\" + connection.path\n+ return destination_info\n", "issue": "'UnixDomainSocketConnection' object has no attribute 'port'\nAttributeError: 'UnixDomainSocketConnection' object has no attribute 'port'\r\nelastic-apm==5.5.1\r\n\r\n```\r\n File \"django_redis/cache.py\", line 32, in _decorator\r\n return method(self, *args, **kwargs)\r\n File \"django_redis/cache.py\", line 80, in get\r\n return self.client.get(key, default=default, version=version,\r\n File \"django_redis/client/default.py\", line 203, in get\r\n value = client.get(key)\r\n File \"redis/client.py\", line 976, in get\r\n return self.execute_command('GET', name)\r\n File \"elasticapm/instrumentation/packages/base.py\", line 210, in call_if_sampling\r\n return self.call(module, method, wrapped, instance, args, kwargs)\r\n File \"elasticapm/instrumentation/packages/redis.py\", line 66, in call\r\n return wrapped(*args, **kwargs)\r\n File \"redis/client.py\", line 667, in execute_command\r\n connection.send_command(*args)\r\n File \"redis/connection.py\", line 610, in send_command\r\n self.send_packed_command(self.pack_command(*args))\r\n File \"elasticapm/instrumentation/packages/base.py\", line 210, in call_if_sampling\r\n return self.call(module, method, wrapped, instance, args, kwargs)\r\n File \"elasticapm/instrumentation/packages/redis.py\", line 90, in call\r\n port = int(instance.port) if instance.port else None\r\n```\n", "before_files": [{"content": "# BSD 3-Clause License\n#\n# Copyright (c) 2019, Elasticsearch BV\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# * Neither the name of the copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nfrom __future__ import absolute_import\n\nfrom elasticapm.instrumentation.packages.base import AbstractInstrumentedModule\nfrom elasticapm.traces import capture_span, execution_context\n\n\nclass Redis3CheckMixin(object):\n instrument_list_3 = []\n instrument_list = []\n\n def get_instrument_list(self):\n try:\n from redis import VERSION\n\n if VERSION[0] >= 3:\n return self.instrument_list_3\n return self.instrument_list\n except ImportError:\n return self.instrument_list\n\n\nclass RedisInstrumentation(Redis3CheckMixin, AbstractInstrumentedModule):\n name = \"redis\"\n\n # no need to instrument StrictRedis in redis-py >= 3.0\n instrument_list_3 = [(\"redis.client\", \"Redis.execute_command\")]\n instrument_list = [(\"redis.client\", \"Redis.execute_command\"), (\"redis.client\", \"StrictRedis.execute_command\")]\n\n def call(self, module, method, wrapped, instance, args, kwargs):\n if len(args) > 0:\n wrapped_name = str(args[0])\n else:\n wrapped_name = self.get_wrapped_name(wrapped, instance, method)\n\n with capture_span(wrapped_name, span_type=\"db\", span_subtype=\"redis\", span_action=\"query\", leaf=True):\n return wrapped(*args, **kwargs)\n\n\nclass RedisPipelineInstrumentation(Redis3CheckMixin, AbstractInstrumentedModule):\n name = \"redis\"\n\n # BasePipeline has been renamed to Pipeline in redis-py 3\n instrument_list_3 = [(\"redis.client\", \"Pipeline.execute\")]\n instrument_list = [(\"redis.client\", \"BasePipeline.execute\")]\n\n def call(self, module, method, wrapped, instance, args, kwargs):\n wrapped_name = self.get_wrapped_name(wrapped, instance, method)\n with capture_span(wrapped_name, span_type=\"db\", span_subtype=\"redis\", span_action=\"query\", leaf=True):\n return wrapped(*args, **kwargs)\n\n\nclass RedisConnectionInstrumentation(AbstractInstrumentedModule):\n name = \"redis\"\n\n instrument_list = ((\"redis.connection\", \"Connection.send_packed_command\"),)\n\n def call(self, module, method, wrapped, instance, args, kwargs):\n span = execution_context.get_span()\n if span and span.subtype == \"redis\":\n port = int(instance.port) if instance.port else None\n destination_info = {\n \"address\": instance.host,\n \"port\": port,\n \"service\": {\"name\": \"redis\", \"resource\": \"redis\", \"type\": \"db\"},\n }\n span.context[\"destination\"] = destination_info\n return wrapped(*args, **kwargs)\n", "path": "elasticapm/instrumentation/packages/redis.py"}]}
| 1,988 | 306 |
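The patch stored (in escaped form) in the record above replaces the direct `instance.port` access with a small helper, because redis-py's `UnixDomainSocketConnection` exposes `path` rather than `host`/`port`. A standalone sketch of that helper's behaviour follows — the two `Fake*` classes are illustrative stand-ins, not real redis-py types:

```python
# Sketch of the destination-info helper from the patch above; FakeTCPConnection
# and FakeUnixConnection only stand in for redis-py's Connection and
# UnixDomainSocketConnection objects.
class FakeTCPConnection:
    host = "localhost"
    port = 6379


class FakeUnixConnection:
    path = "/var/run/redis.sock"


def get_destination_info(connection):
    destination_info = {"service": {"name": "redis", "resource": "redis", "type": "db"}}
    if hasattr(connection, "port"):
        destination_info["port"] = connection.port
        destination_info["address"] = connection.host
    elif hasattr(connection, "path"):
        destination_info["port"] = None
        destination_info["address"] = "unix://" + connection.path
    return destination_info


print(get_destination_info(FakeTCPConnection()))   # port 6379, address 'localhost'
print(get_destination_info(FakeUnixConnection()))  # port None, address 'unix:///var/run/redis.sock'
```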
gh_patches_debug_56
|
rasdani/github-patches
|
git_diff
|
Anselmoo__spectrafit-660
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[Docs]: Update release drafter
### Is there an existing issue for this?
- [X] I have searched the existing issues
### Current Missing Information in the Docs
- Link to the complete changes of the latest release.
- Exclude auto commits in the contributor list
### Anything else?
_No response_
### Code of Conduct
- [X] I agree to follow this project's Code of Conduct
</issue>
<code>
[start of spectrafit/__init__.py]
1 """SpectraFit, fast command line tool for fitting data."""
2 __version__ = "1.0.0a0"
3
[end of spectrafit/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/spectrafit/__init__.py b/spectrafit/__init__.py
--- a/spectrafit/__init__.py
+++ b/spectrafit/__init__.py
@@ -1,2 +1,2 @@
"""SpectraFit, fast command line tool for fitting data."""
-__version__ = "1.0.0a0"
+__version__ = "1.0.0a1"
|
{"golden_diff": "diff --git a/spectrafit/__init__.py b/spectrafit/__init__.py\n--- a/spectrafit/__init__.py\n+++ b/spectrafit/__init__.py\n@@ -1,2 +1,2 @@\n \"\"\"SpectraFit, fast command line tool for fitting data.\"\"\"\n-__version__ = \"1.0.0a0\"\n+__version__ = \"1.0.0a1\"\n", "issue": "[Docs]: Update release drafter\n### Is there an existing issue for this?\n\n- [X] I have searched the existing issues\n\n### Current Missing Information in the Docs\n\n- Link to the complete changes of the latest release.\r\n- Exclude auto commits in the contributor list\n\n### Anything else?\n\n_No response_\n\n### Code of Conduct\n\n- [X] I agree to follow this project's Code of Conduct\n", "before_files": [{"content": "\"\"\"SpectraFit, fast command line tool for fitting data.\"\"\"\n__version__ = \"1.0.0a0\"\n", "path": "spectrafit/__init__.py"}]}
| 650 | 96 |
gh_patches_debug_6709
|
rasdani/github-patches
|
git_diff
|
ethereum__web3.py-3103
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Web3 crashes in time-based gas price strategies with empty chain
* Version: 4.7.1
* Python: 3.6
* OS: linux
### What was wrong?
Running our test suite, web3 crashes when estimating a time-based gas price. It is probably caused by not having any transactions in our test chain. When the probabilities are calculated they are empty (an empty tuple), which then causes an `IndexError` at line 104 of `_compute_gas_price` (`first = probabilities[0]` → `IndexError: tuple index out of range`).
It might be the case that there is a problem with our geth client during runtime.
```
File "/home/circleci/raiden/raiden/network/rpc/client.py", line 346, in gas_price
return int(self.web3.eth.generateGasPrice())
File "/home/circleci/venv-3.7/lib/python3.7/site-packages/web3/eth.py", line 385, in generateGasPrice
return self.gasPriceStrategy(self.web3, transaction_params)
File "/home/circleci/venv-3.7/lib/python3.7/site-packages/web3/gas_strategies/time_based.py", line 168, in time_based_gas_price_strategy
gas_price = _compute_gas_price(probabilities, probability / 100)
File "/home/circleci/venv-3.7/lib/python3.7/site-packages/web3/gas_strategies/time_based.py", line 104, in _compute_gas_price
first = probabilities[0]
IndexError: tuple index out of range
```
### How can it be fixed?
However, I would suggest handling the edge case where the tuple is empty rather than unconditionally accessing `first = probabilities[0]`.
I'm happy to give more information when needed
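
To make the suggestion concrete, here is a stripped-down sketch of the kind of guard I mean — standalone and simplified, not the actual `_compute_gas_price` (the fallback value and the interpolation step are placeholders):

```python
import collections

Probability = collections.namedtuple("Probability", ["gas_price", "prob"])


def compute_gas_price(probabilities, desired_probability, fallback_gas_price):
    # Guarded version: an empty chain produces an empty probabilities tuple,
    # so return a caller-supplied fallback instead of indexing into it.
    if not probabilities:
        return fallback_gas_price
    first, last = probabilities[0], probabilities[-1]
    if desired_probability >= first.prob:
        return int(first.gas_price)
    if desired_probability <= last.prob:
        return int(last.gas_price)
    # (the real implementation interpolates between neighbouring entries here)
    return int(last.gas_price)


print(compute_gas_price((), 0.98, fallback_gas_price=10 * 10**9))  # 10000000000, no crash
```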
</issue>
<code>
[start of web3/gas_strategies/time_based.py]
1 import collections
2 import math
3 import operator
4 from typing import (
5 Iterable,
6 Sequence,
7 Tuple,
8 )
9
10 from eth_typing import (
11 ChecksumAddress,
12 )
13 from eth_utils import (
14 to_tuple,
15 )
16 from eth_utils.toolz import (
17 curry,
18 groupby,
19 sliding_window,
20 )
21 from hexbytes import (
22 HexBytes,
23 )
24
25 from web3 import (
26 Web3,
27 )
28 from web3._utils.math import (
29 percentile,
30 )
31 from web3.exceptions import (
32 InsufficientData,
33 Web3ValidationError,
34 )
35 from web3.types import (
36 BlockNumber,
37 GasPriceStrategy,
38 TxParams,
39 Wei,
40 )
41
42 MinerData = collections.namedtuple(
43 "MinerData", ["miner", "num_blocks", "min_gas_price", "low_percentile_gas_price"]
44 )
45 Probability = collections.namedtuple("Probability", ["gas_price", "prob"])
46
47
48 def _get_avg_block_time(w3: Web3, sample_size: int) -> float:
49 latest = w3.eth.get_block("latest")
50
51 constrained_sample_size = min(sample_size, latest["number"])
52 if constrained_sample_size == 0:
53 raise Web3ValidationError("Constrained sample size is 0")
54
55 oldest = w3.eth.get_block(BlockNumber(latest["number"] - constrained_sample_size))
56 return (latest["timestamp"] - oldest["timestamp"]) / constrained_sample_size
57
58
59 def _get_weighted_avg_block_time(w3: Web3, sample_size: int) -> float:
60 latest_block_number = w3.eth.get_block("latest")["number"]
61 constrained_sample_size = min(sample_size, latest_block_number)
62 if constrained_sample_size == 0:
63 raise Web3ValidationError("Constrained sample size is 0")
64 oldest_block = w3.eth.get_block(
65 BlockNumber(latest_block_number - constrained_sample_size)
66 )
67 oldest_block_number = oldest_block["number"]
68 prev_timestamp = oldest_block["timestamp"]
69 weighted_sum = 0.0
70 sum_of_weights = 0.0
71 for i in range(oldest_block_number + 1, latest_block_number + 1):
72 curr_timestamp = w3.eth.get_block(BlockNumber(i))["timestamp"]
73 time = curr_timestamp - prev_timestamp
74 weight = (i - oldest_block_number) / constrained_sample_size
75 weighted_sum += time * weight
76 sum_of_weights += weight
77 prev_timestamp = curr_timestamp
78 return weighted_sum / sum_of_weights
79
80
81 def _get_raw_miner_data(
82 w3: Web3, sample_size: int
83 ) -> Iterable[Tuple[ChecksumAddress, HexBytes, Wei]]:
84 latest = w3.eth.get_block("latest", full_transactions=True)
85
86 for transaction in latest["transactions"]:
87 # type ignored b/c actual transaction is TxData not HexBytes
88 yield (latest["miner"], latest["hash"], transaction["gasPrice"]) # type: ignore
89
90 block = latest
91
92 for _ in range(sample_size - 1):
93 if block["number"] == 0:
94 break
95
96 # we intentionally trace backwards using parent hashes rather than
97 # block numbers to make caching the data easier to implement.
98 block = w3.eth.get_block(block["parentHash"], full_transactions=True)
99 for transaction in block["transactions"]:
100 # type ignored b/c actual transaction is TxData not HexBytes
101 yield (block["miner"], block["hash"], transaction["gasPrice"]) # type: ignore # noqa: E501
102
103
104 def _aggregate_miner_data(
105 raw_data: Iterable[Tuple[ChecksumAddress, HexBytes, Wei]]
106 ) -> Iterable[MinerData]:
107 data_by_miner = groupby(0, raw_data)
108
109 for miner, miner_data in data_by_miner.items():
110 _, block_hashes, gas_prices = map(set, zip(*miner_data))
111 try:
112 # types ignored b/c mypy has trouble inferring gas_prices: Sequence[Wei]
113 price_percentile = percentile(gas_prices, percentile=20) # type: ignore
114 except InsufficientData:
115 price_percentile = min(gas_prices) # type: ignore
116 yield MinerData(
117 miner,
118 len(set(block_hashes)),
119 min(gas_prices), # type: ignore
120 price_percentile,
121 )
122
123
124 @to_tuple
125 def _compute_probabilities(
126 miner_data: Iterable[MinerData], wait_blocks: int, sample_size: int
127 ) -> Iterable[Probability]:
128 """
129 Computes the probabilities that a txn will be accepted at each of the gas
130 prices accepted by the miners.
131 """
132 miner_data_by_price = tuple(
133 sorted(
134 miner_data,
135 key=operator.attrgetter("low_percentile_gas_price"),
136 reverse=True,
137 )
138 )
139 for idx in range(len(miner_data_by_price)):
140 low_percentile_gas_price = miner_data_by_price[idx].low_percentile_gas_price
141 num_blocks_accepting_price = sum(
142 m.num_blocks for m in miner_data_by_price[idx:]
143 )
144 inv_prob_per_block = (sample_size - num_blocks_accepting_price) / sample_size
145 probability_accepted = 1 - inv_prob_per_block**wait_blocks
146 yield Probability(low_percentile_gas_price, probability_accepted)
147
148
149 def _compute_gas_price(
150 probabilities: Sequence[Probability], desired_probability: float
151 ) -> Wei:
152 """
153 Given a sorted range of ``Probability`` named-tuples returns a gas price
154 computed based on where the ``desired_probability`` would fall within the
155 range.
156
157 :param probabilities: An iterable of `Probability` named-tuples
158 sorted in reverse order.
159 :param desired_probability: An floating point representation of the desired
160 probability. (e.g. ``85% -> 0.85``)
161 """
162 first = probabilities[0]
163 last = probabilities[-1]
164
165 if desired_probability >= first.prob:
166 return Wei(int(first.gas_price))
167 elif desired_probability <= last.prob:
168 return Wei(int(last.gas_price))
169
170 for left, right in sliding_window(2, probabilities):
171 if desired_probability < right.prob:
172 continue
173 elif desired_probability > left.prob:
174 # This code block should never be reachable as it would indicate
175 # that we already passed by the probability window in which our
176 # `desired_probability` is located.
177 raise Exception("Invariant")
178
179 adj_prob = desired_probability - right.prob
180 window_size = left.prob - right.prob
181 position = adj_prob / window_size
182 gas_window_size = left.gas_price - right.gas_price
183 gas_price = int(math.ceil(right.gas_price + gas_window_size * position))
184 return Wei(gas_price)
185 else:
186 # The initial `if/else` clause in this function handles the case where
187 # the `desired_probability` is either above or below the min/max
188 # probability found in the `probabilities`.
189 #
190 # With these two cases handled, the only way this code block should be
191 # reachable would be if the `probabilities` were not sorted correctly.
192 # Otherwise, the `desired_probability` **must** fall between two of the
193 # values in the `probabilities``.
194 raise Exception("Invariant")
195
196
197 @curry
198 def construct_time_based_gas_price_strategy(
199 max_wait_seconds: int,
200 sample_size: int = 120,
201 probability: int = 98,
202 weighted: bool = False,
203 ) -> GasPriceStrategy:
204 """
205 A gas pricing strategy that uses recently mined block data to derive a gas
206 price for which a transaction is likely to be mined within X seconds with
207 probability P. If the weighted kwarg is True, more recent block
208 times will be more heavily weighted.
209
210 :param max_wait_seconds: The desired maximum number of seconds the
211 transaction should take to mine.
212 :param sample_size: The number of recent blocks to sample
213 :param probability: An integer representation of the desired probability
214 that the transaction will be mined within ``max_wait_seconds``. 0 means 0%
215 and 100 means 100%.
216 """
217
218 def time_based_gas_price_strategy(w3: Web3, transaction_params: TxParams) -> Wei:
219 if weighted:
220 avg_block_time = _get_weighted_avg_block_time(w3, sample_size=sample_size)
221 else:
222 avg_block_time = _get_avg_block_time(w3, sample_size=sample_size)
223
224 wait_blocks = int(math.ceil(max_wait_seconds / avg_block_time))
225 raw_miner_data = _get_raw_miner_data(w3, sample_size=sample_size)
226 miner_data = _aggregate_miner_data(raw_miner_data)
227
228 probabilities = _compute_probabilities(
229 miner_data,
230 wait_blocks=wait_blocks,
231 sample_size=sample_size,
232 )
233
234 gas_price = _compute_gas_price(probabilities, probability / 100)
235 return gas_price
236
237 return time_based_gas_price_strategy
238
239
240 # fast: mine within 1 minute
241 fast_gas_price_strategy = construct_time_based_gas_price_strategy(
242 max_wait_seconds=60,
243 sample_size=120,
244 )
245 # medium: mine within 10 minutes
246 medium_gas_price_strategy = construct_time_based_gas_price_strategy(
247 max_wait_seconds=600,
248 sample_size=120,
249 )
250 # slow: mine within 1 hour (60 minutes)
251 slow_gas_price_strategy = construct_time_based_gas_price_strategy(
252 max_wait_seconds=60 * 60,
253 sample_size=120,
254 )
255 # glacial: mine within the next 24 hours.
256 glacial_gas_price_strategy = construct_time_based_gas_price_strategy(
257 max_wait_seconds=24 * 60 * 60,
258 sample_size=720,
259 )
260
[end of web3/gas_strategies/time_based.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/web3/gas_strategies/time_based.py b/web3/gas_strategies/time_based.py
--- a/web3/gas_strategies/time_based.py
+++ b/web3/gas_strategies/time_based.py
@@ -216,6 +216,10 @@
"""
def time_based_gas_price_strategy(w3: Web3, transaction_params: TxParams) -> Wei:
+ # return gas price when no transactions available to sample
+ if w3.eth.get_block("latest")["number"] == 0:
+ return w3.eth.gas_price
+
if weighted:
avg_block_time = _get_weighted_avg_block_time(w3, sample_size=sample_size)
else:
|
{"golden_diff": "diff --git a/web3/gas_strategies/time_based.py b/web3/gas_strategies/time_based.py\n--- a/web3/gas_strategies/time_based.py\n+++ b/web3/gas_strategies/time_based.py\n@@ -216,6 +216,10 @@\n \"\"\"\n \n def time_based_gas_price_strategy(w3: Web3, transaction_params: TxParams) -> Wei:\n+ # return gas price when no transactions available to sample\n+ if w3.eth.get_block(\"latest\")[\"number\"] == 0:\n+ return w3.eth.gas_price\n+\n if weighted:\n avg_block_time = _get_weighted_avg_block_time(w3, sample_size=sample_size)\n else:\n", "issue": "Web3 crashes in time-based gas price strategies with empty chain\n* Version: 4.7.1\r\n* Python: 3.6\r\n* OS: linux\r\n\r\n### What was wrong?\r\n\r\nRunning our test suite web3 crashes when estimating time based gas price. Probably it is caused by not having any transactions in our test chain. However when probabilities are calculated they are empty (empty tuple) which causes then an Index Error in ` '104, in _compute_gas_price\\r\\n first = probabilities[0]\\r\\nIndexError: tuple index out of range\\`\r\n\r\nIt might be the case that there is a problem with our geth client during runtime.\r\n\r\n\r\n```\r\nFile \u201c/home/circleci/raiden/raiden/network/rpc/client.py\u201d, line 346, in gas_price\r\nreturn int(self.web3.eth.generateGasPrice())\r\nFile \u201c/home/circleci/venv-3.7/lib/python3.7/site-packages/web3/eth.py\u201d, line 385, in generateGasPrice\r\nreturn self.gasPriceStrategy(self.web3, transaction_params)\r\nFile \u201c/home/circleci/venv-3.7/lib/python3.7/site-packages/web3/gas_strategies/time_based.py\u201d, line 168, in time_based_gas_price_strategy\r\ngas_price = _compute_gas_price(probabilities, probability / 100)\r\nFile \u201c/home/circleci/venv-3.7/lib/python3.7/site-packages/web3/gas_strategies/time_based.py\u201d, line 104, in _compute_gas_price\r\nfirst = probabilities[0]\r\nIndexError: tuple index out of range\r\n```\r\n\r\n\r\n### How can it be fixed?\r\n\r\nHowever I would suggest handling the edge case that there is an empty tuple and not unconditionally access `first = probabilities[0] `\r\n\r\nI'm happy to give more information when needed\n", "before_files": [{"content": "import collections\nimport math\nimport operator\nfrom typing import (\n Iterable,\n Sequence,\n Tuple,\n)\n\nfrom eth_typing import (\n ChecksumAddress,\n)\nfrom eth_utils import (\n to_tuple,\n)\nfrom eth_utils.toolz import (\n curry,\n groupby,\n sliding_window,\n)\nfrom hexbytes import (\n HexBytes,\n)\n\nfrom web3 import (\n Web3,\n)\nfrom web3._utils.math import (\n percentile,\n)\nfrom web3.exceptions import (\n InsufficientData,\n Web3ValidationError,\n)\nfrom web3.types import (\n BlockNumber,\n GasPriceStrategy,\n TxParams,\n Wei,\n)\n\nMinerData = collections.namedtuple(\n \"MinerData\", [\"miner\", \"num_blocks\", \"min_gas_price\", \"low_percentile_gas_price\"]\n)\nProbability = collections.namedtuple(\"Probability\", [\"gas_price\", \"prob\"])\n\n\ndef _get_avg_block_time(w3: Web3, sample_size: int) -> float:\n latest = w3.eth.get_block(\"latest\")\n\n constrained_sample_size = min(sample_size, latest[\"number\"])\n if constrained_sample_size == 0:\n raise Web3ValidationError(\"Constrained sample size is 0\")\n\n oldest = w3.eth.get_block(BlockNumber(latest[\"number\"] - constrained_sample_size))\n return (latest[\"timestamp\"] - oldest[\"timestamp\"]) / constrained_sample_size\n\n\ndef _get_weighted_avg_block_time(w3: Web3, sample_size: int) -> float:\n latest_block_number = w3.eth.get_block(\"latest\")[\"number\"]\n 
constrained_sample_size = min(sample_size, latest_block_number)\n if constrained_sample_size == 0:\n raise Web3ValidationError(\"Constrained sample size is 0\")\n oldest_block = w3.eth.get_block(\n BlockNumber(latest_block_number - constrained_sample_size)\n )\n oldest_block_number = oldest_block[\"number\"]\n prev_timestamp = oldest_block[\"timestamp\"]\n weighted_sum = 0.0\n sum_of_weights = 0.0\n for i in range(oldest_block_number + 1, latest_block_number + 1):\n curr_timestamp = w3.eth.get_block(BlockNumber(i))[\"timestamp\"]\n time = curr_timestamp - prev_timestamp\n weight = (i - oldest_block_number) / constrained_sample_size\n weighted_sum += time * weight\n sum_of_weights += weight\n prev_timestamp = curr_timestamp\n return weighted_sum / sum_of_weights\n\n\ndef _get_raw_miner_data(\n w3: Web3, sample_size: int\n) -> Iterable[Tuple[ChecksumAddress, HexBytes, Wei]]:\n latest = w3.eth.get_block(\"latest\", full_transactions=True)\n\n for transaction in latest[\"transactions\"]:\n # type ignored b/c actual transaction is TxData not HexBytes\n yield (latest[\"miner\"], latest[\"hash\"], transaction[\"gasPrice\"]) # type: ignore\n\n block = latest\n\n for _ in range(sample_size - 1):\n if block[\"number\"] == 0:\n break\n\n # we intentionally trace backwards using parent hashes rather than\n # block numbers to make caching the data easier to implement.\n block = w3.eth.get_block(block[\"parentHash\"], full_transactions=True)\n for transaction in block[\"transactions\"]:\n # type ignored b/c actual transaction is TxData not HexBytes\n yield (block[\"miner\"], block[\"hash\"], transaction[\"gasPrice\"]) # type: ignore # noqa: E501\n\n\ndef _aggregate_miner_data(\n raw_data: Iterable[Tuple[ChecksumAddress, HexBytes, Wei]]\n) -> Iterable[MinerData]:\n data_by_miner = groupby(0, raw_data)\n\n for miner, miner_data in data_by_miner.items():\n _, block_hashes, gas_prices = map(set, zip(*miner_data))\n try:\n # types ignored b/c mypy has trouble inferring gas_prices: Sequence[Wei]\n price_percentile = percentile(gas_prices, percentile=20) # type: ignore\n except InsufficientData:\n price_percentile = min(gas_prices) # type: ignore\n yield MinerData(\n miner,\n len(set(block_hashes)),\n min(gas_prices), # type: ignore\n price_percentile,\n )\n\n\n@to_tuple\ndef _compute_probabilities(\n miner_data: Iterable[MinerData], wait_blocks: int, sample_size: int\n) -> Iterable[Probability]:\n \"\"\"\n Computes the probabilities that a txn will be accepted at each of the gas\n prices accepted by the miners.\n \"\"\"\n miner_data_by_price = tuple(\n sorted(\n miner_data,\n key=operator.attrgetter(\"low_percentile_gas_price\"),\n reverse=True,\n )\n )\n for idx in range(len(miner_data_by_price)):\n low_percentile_gas_price = miner_data_by_price[idx].low_percentile_gas_price\n num_blocks_accepting_price = sum(\n m.num_blocks for m in miner_data_by_price[idx:]\n )\n inv_prob_per_block = (sample_size - num_blocks_accepting_price) / sample_size\n probability_accepted = 1 - inv_prob_per_block**wait_blocks\n yield Probability(low_percentile_gas_price, probability_accepted)\n\n\ndef _compute_gas_price(\n probabilities: Sequence[Probability], desired_probability: float\n) -> Wei:\n \"\"\"\n Given a sorted range of ``Probability`` named-tuples returns a gas price\n computed based on where the ``desired_probability`` would fall within the\n range.\n\n :param probabilities: An iterable of `Probability` named-tuples\n sorted in reverse order.\n :param desired_probability: An floating point representation of the 
desired\n probability. (e.g. ``85% -> 0.85``)\n \"\"\"\n first = probabilities[0]\n last = probabilities[-1]\n\n if desired_probability >= first.prob:\n return Wei(int(first.gas_price))\n elif desired_probability <= last.prob:\n return Wei(int(last.gas_price))\n\n for left, right in sliding_window(2, probabilities):\n if desired_probability < right.prob:\n continue\n elif desired_probability > left.prob:\n # This code block should never be reachable as it would indicate\n # that we already passed by the probability window in which our\n # `desired_probability` is located.\n raise Exception(\"Invariant\")\n\n adj_prob = desired_probability - right.prob\n window_size = left.prob - right.prob\n position = adj_prob / window_size\n gas_window_size = left.gas_price - right.gas_price\n gas_price = int(math.ceil(right.gas_price + gas_window_size * position))\n return Wei(gas_price)\n else:\n # The initial `if/else` clause in this function handles the case where\n # the `desired_probability` is either above or below the min/max\n # probability found in the `probabilities`.\n #\n # With these two cases handled, the only way this code block should be\n # reachable would be if the `probabilities` were not sorted correctly.\n # Otherwise, the `desired_probability` **must** fall between two of the\n # values in the `probabilities``.\n raise Exception(\"Invariant\")\n\n\n@curry\ndef construct_time_based_gas_price_strategy(\n max_wait_seconds: int,\n sample_size: int = 120,\n probability: int = 98,\n weighted: bool = False,\n) -> GasPriceStrategy:\n \"\"\"\n A gas pricing strategy that uses recently mined block data to derive a gas\n price for which a transaction is likely to be mined within X seconds with\n probability P. If the weighted kwarg is True, more recent block\n times will be more heavily weighted.\n\n :param max_wait_seconds: The desired maximum number of seconds the\n transaction should take to mine.\n :param sample_size: The number of recent blocks to sample\n :param probability: An integer representation of the desired probability\n that the transaction will be mined within ``max_wait_seconds``. 0 means 0%\n and 100 means 100%.\n \"\"\"\n\n def time_based_gas_price_strategy(w3: Web3, transaction_params: TxParams) -> Wei:\n if weighted:\n avg_block_time = _get_weighted_avg_block_time(w3, sample_size=sample_size)\n else:\n avg_block_time = _get_avg_block_time(w3, sample_size=sample_size)\n\n wait_blocks = int(math.ceil(max_wait_seconds / avg_block_time))\n raw_miner_data = _get_raw_miner_data(w3, sample_size=sample_size)\n miner_data = _aggregate_miner_data(raw_miner_data)\n\n probabilities = _compute_probabilities(\n miner_data,\n wait_blocks=wait_blocks,\n sample_size=sample_size,\n )\n\n gas_price = _compute_gas_price(probabilities, probability / 100)\n return gas_price\n\n return time_based_gas_price_strategy\n\n\n# fast: mine within 1 minute\nfast_gas_price_strategy = construct_time_based_gas_price_strategy(\n max_wait_seconds=60,\n sample_size=120,\n)\n# medium: mine within 10 minutes\nmedium_gas_price_strategy = construct_time_based_gas_price_strategy(\n max_wait_seconds=600,\n sample_size=120,\n)\n# slow: mine within 1 hour (60 minutes)\nslow_gas_price_strategy = construct_time_based_gas_price_strategy(\n max_wait_seconds=60 * 60,\n sample_size=120,\n)\n# glacial: mine within the next 24 hours.\nglacial_gas_price_strategy = construct_time_based_gas_price_strategy(\n max_wait_seconds=24 * 60 * 60,\n sample_size=720,\n)\n", "path": "web3/gas_strategies/time_based.py"}]}
| 3,729 | 158 |
gh_patches_debug_13687
|
rasdani/github-patches
|
git_diff
|
vispy__vispy-2092
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
memory surge after minimizing the vispy window
Hi, thank you very much for working on vispy. Recently, I found a bug related to memory consumption. It appears after minimizing the vispy window, with a memory surge rate of about 50 MB/second. This surge doesn't exist even if you bring other programs to the top level. The only way to trigger it, based on my experience so far, is to minimize the window by either clicking the "Minimize" icon on the window or the taskbar icon of the vispy window.
Please try the two scripts provided in the Examples folder in the vispy github: Examples\demo\scene\scrolling_plots.py and Examples\demo\gloo\realtime_signals.py
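
A workaround that seems to stop the growth on my machine is flushing the GL context at the end of the timer callback — shown below against the scrolling_plots demo's `update` function; I'm not sure it is the proper fix:

```python
# Patched update() from examples/demo/scene/scrolling_plots.py (np, N, lines and
# canvas are the names already defined in that demo). The extra flush() pushes
# queued GL commands out every tick, which seems to stop them piling up while
# the window is minimized.
def update(ev):
    m = 50
    data = np.random.normal(size=(N, m), scale=0.3)
    data[data > 1] += 4
    lines.roll_data(data)
    canvas.context.flush()  # workaround: avoids the memory surge when minimized
```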
Below is my environment:
OS: Windows 10 ver.1909
Python: 3.8.3
vispy: 0.6.4 or github master
pyqt5: 5.14.2
Thanks! Appreciate the help.
</issue>
<code>
[start of examples/demo/gloo/realtime_signals.py]
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3 # vispy: gallery 2
4 # Copyright (c) Vispy Development Team. All Rights Reserved.
5 # Distributed under the (new) BSD License. See LICENSE.txt for more info.
6
7 """
8 Multiple real-time digital signals with GLSL-based clipping.
9 """
10
11 from vispy import gloo
12 from vispy import app
13 import numpy as np
14 import math
15
16 # Number of cols and rows in the table.
17 nrows = 16
18 ncols = 20
19
20 # Number of signals.
21 m = nrows*ncols
22
23 # Number of samples per signal.
24 n = 1000
25
26 # Various signal amplitudes.
27 amplitudes = .1 + .2 * np.random.rand(m, 1).astype(np.float32)
28
29 # Generate the signals as a (m, n) array.
30 y = amplitudes * np.random.randn(m, n).astype(np.float32)
31
32 # Color of each vertex (TODO: make it more efficient by using a GLSL-based
33 # color map and the index).
34 color = np.repeat(np.random.uniform(size=(m, 3), low=.5, high=.9),
35 n, axis=0).astype(np.float32)
36
37 # Signal 2D index of each vertex (row and col) and x-index (sample index
38 # within each signal).
39 index = np.c_[np.repeat(np.repeat(np.arange(ncols), nrows), n),
40 np.repeat(np.tile(np.arange(nrows), ncols), n),
41 np.tile(np.arange(n), m)].astype(np.float32)
42
43 VERT_SHADER = """
44 #version 120
45
46 // y coordinate of the position.
47 attribute float a_position;
48
49 // row, col, and time index.
50 attribute vec3 a_index;
51 varying vec3 v_index;
52
53 // 2D scaling factor (zooming).
54 uniform vec2 u_scale;
55
56 // Size of the table.
57 uniform vec2 u_size;
58
59 // Number of samples per signal.
60 uniform float u_n;
61
62 // Color.
63 attribute vec3 a_color;
64 varying vec4 v_color;
65
66 // Varying variables used for clipping in the fragment shader.
67 varying vec2 v_position;
68 varying vec4 v_ab;
69
70 void main() {
71 float nrows = u_size.x;
72 float ncols = u_size.y;
73
74 // Compute the x coordinate from the time index.
75 float x = -1 + 2*a_index.z / (u_n-1);
76 vec2 position = vec2(x - (1 - 1 / u_scale.x), a_position);
77
78 // Find the affine transformation for the subplots.
79 vec2 a = vec2(1./ncols, 1./nrows)*.9;
80 vec2 b = vec2(-1 + 2*(a_index.x+.5) / ncols,
81 -1 + 2*(a_index.y+.5) / nrows);
82 // Apply the static subplot transformation + scaling.
83 gl_Position = vec4(a*u_scale*position+b, 0.0, 1.0);
84
85 v_color = vec4(a_color, 1.);
86 v_index = a_index;
87
88 // For clipping test in the fragment shader.
89 v_position = gl_Position.xy;
90 v_ab = vec4(a, b);
91 }
92 """
93
94 FRAG_SHADER = """
95 #version 120
96
97 varying vec4 v_color;
98 varying vec3 v_index;
99
100 varying vec2 v_position;
101 varying vec4 v_ab;
102
103 void main() {
104 gl_FragColor = v_color;
105
106 // Discard the fragments between the signals (emulate glMultiDrawArrays).
107 if ((fract(v_index.x) > 0.) || (fract(v_index.y) > 0.))
108 discard;
109
110 // Clipping test.
111 vec2 test = abs((v_position.xy-v_ab.zw)/v_ab.xy);
112 if ((test.x > 1) || (test.y > 1))
113 discard;
114 }
115 """
116
117
118 class Canvas(app.Canvas):
119 def __init__(self):
120 app.Canvas.__init__(self, title='Use your wheel to zoom!',
121 keys='interactive')
122 self.program = gloo.Program(VERT_SHADER, FRAG_SHADER)
123 self.program['a_position'] = y.reshape(-1, 1)
124 self.program['a_color'] = color
125 self.program['a_index'] = index
126 self.program['u_scale'] = (1., 1.)
127 self.program['u_size'] = (nrows, ncols)
128 self.program['u_n'] = n
129
130 gloo.set_viewport(0, 0, *self.physical_size)
131
132 self._timer = app.Timer('auto', connect=self.on_timer, start=True)
133
134 gloo.set_state(clear_color='black', blend=True,
135 blend_func=('src_alpha', 'one_minus_src_alpha'))
136
137 self.show()
138
139 def on_resize(self, event):
140 gloo.set_viewport(0, 0, *event.physical_size)
141
142 def on_mouse_wheel(self, event):
143 dx = np.sign(event.delta[1]) * .05
144 scale_x, scale_y = self.program['u_scale']
145 scale_x_new, scale_y_new = (scale_x * math.exp(2.5*dx),
146 scale_y * math.exp(0.0*dx))
147 self.program['u_scale'] = (max(1, scale_x_new), max(1, scale_y_new))
148 self.update()
149
150 def on_timer(self, event):
151 """Add some data at the end of each signal (real-time signals)."""
152 k = 10
153 y[:, :-k] = y[:, k:]
154 y[:, -k:] = amplitudes * np.random.randn(m, k)
155
156 self.program['a_position'].set_data(y.ravel().astype(np.float32))
157 self.update()
158
159 def on_draw(self, event):
160 gloo.clear()
161 self.program.draw('line_strip')
162
163 if __name__ == '__main__':
164 c = Canvas()
165 app.run()
166
[end of examples/demo/gloo/realtime_signals.py]
[start of examples/demo/scene/scrolling_plots.py]
1 # -*- coding: utf-8 -*-
2 # Copyright (c) Vispy Development Team. All Rights Reserved.
3 # Distributed under the (new) BSD License. See LICENSE.txt for more info.
4 # vispy: gallery 30
5 """
6 Show 10,000 realtime scrolling plots
7 """
8 from vispy import app, scene
9 import numpy as np
10
11
12 canvas = scene.SceneCanvas(keys='interactive', show=True, size=(1024, 768))
13 grid = canvas.central_widget.add_grid()
14 view = grid.add_view(0, 1)
15 view.camera = scene.MagnifyCamera(mag=1, size_factor=0.5, radius_ratio=0.6)
16
17 # Add axes
18 yax = scene.AxisWidget(orientation='left')
19 yax.stretch = (0.05, 1)
20 grid.add_widget(yax, 0, 0)
21 yax.link_view(view)
22
23 xax = scene.AxisWidget(orientation='bottom')
24 xax.stretch = (1, 0.05)
25 grid.add_widget(xax, 1, 1)
26 xax.link_view(view)
27
28
29 N = 4900
30 M = 2000
31 cols = int(N**0.5)
32 view.camera.rect = (0, 0, cols, N/cols)
33
34 lines = scene.ScrollingLines(n_lines=N, line_size=M, columns=cols, dx=0.8/M,
35 cell_size=(1, 8), parent=view.scene)
36 lines.transform = scene.STTransform(scale=(1, 1/8.))
37
38
39 def update(ev):
40 m = 50
41 data = np.random.normal(size=(N, m), scale=0.3)
42 data[data > 1] += 4
43 lines.roll_data(data)
44
45 timer = app.Timer(connect=update, interval=0)
46 timer.start()
47
48
49 if __name__ == '__main__':
50 import sys
51 if sys.flags.interactive != 1:
52 app.run()
53
[end of examples/demo/scene/scrolling_plots.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/examples/demo/gloo/realtime_signals.py b/examples/demo/gloo/realtime_signals.py
--- a/examples/demo/gloo/realtime_signals.py
+++ b/examples/demo/gloo/realtime_signals.py
@@ -155,6 +155,7 @@
self.program['a_position'].set_data(y.ravel().astype(np.float32))
self.update()
+ self.context.flush() # prevent memory leak when minimized
def on_draw(self, event):
gloo.clear()
diff --git a/examples/demo/scene/scrolling_plots.py b/examples/demo/scene/scrolling_plots.py
--- a/examples/demo/scene/scrolling_plots.py
+++ b/examples/demo/scene/scrolling_plots.py
@@ -41,6 +41,7 @@
data = np.random.normal(size=(N, m), scale=0.3)
data[data > 1] += 4
lines.roll_data(data)
+ canvas.context.flush() # prevent memory leak when minimized
timer = app.Timer(connect=update, interval=0)
timer.start()
|
{"golden_diff": "diff --git a/examples/demo/gloo/realtime_signals.py b/examples/demo/gloo/realtime_signals.py\n--- a/examples/demo/gloo/realtime_signals.py\n+++ b/examples/demo/gloo/realtime_signals.py\n@@ -155,6 +155,7 @@\n \n self.program['a_position'].set_data(y.ravel().astype(np.float32))\n self.update()\n+ self.context.flush() # prevent memory leak when minimized\n \n def on_draw(self, event):\n gloo.clear()\ndiff --git a/examples/demo/scene/scrolling_plots.py b/examples/demo/scene/scrolling_plots.py\n--- a/examples/demo/scene/scrolling_plots.py\n+++ b/examples/demo/scene/scrolling_plots.py\n@@ -41,6 +41,7 @@\n data = np.random.normal(size=(N, m), scale=0.3)\n data[data > 1] += 4\n lines.roll_data(data)\n+ canvas.context.flush() # prevent memory leak when minimized\n \n timer = app.Timer(connect=update, interval=0)\n timer.start()\n", "issue": "memory surge after minimizing the vispy window\nHi, thank you very much for working on vispy. Recently, I found a bug related to memory consumption. It appears after minimizing the vispy window, with a memory surge rate of about 50 MB/second. This surge doesn't exist even if you bring other programs to the top level. The only way to trigger it, based on my experience so far, is to minimize the window by either clicking the \"Minimize\" icon on the window or the taskbar icon of the vispy window. \r\n\r\nPlease try the two scripts provided in the Examples folder in the vispy github: Examples\\demo\\scene\\scrolling_plots.py and Examples\\demo\\gloo\\realtime_signals.py\r\n\r\nBelow is my environment:\r\nOS: Windows 10 ver.1909\r\nPython: 3.8.3\r\nvispy: 0.6.4 or github master\r\npyqt5: 5.14.2\r\n\r\nThanks! Appreciate the help. \r\n\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# vispy: gallery 2\n# Copyright (c) Vispy Development Team. All Rights Reserved.\n# Distributed under the (new) BSD License. 
See LICENSE.txt for more info.\n\n\"\"\"\nMultiple real-time digital signals with GLSL-based clipping.\n\"\"\"\n\nfrom vispy import gloo\nfrom vispy import app\nimport numpy as np\nimport math\n\n# Number of cols and rows in the table.\nnrows = 16\nncols = 20\n\n# Number of signals.\nm = nrows*ncols\n\n# Number of samples per signal.\nn = 1000\n\n# Various signal amplitudes.\namplitudes = .1 + .2 * np.random.rand(m, 1).astype(np.float32)\n\n# Generate the signals as a (m, n) array.\ny = amplitudes * np.random.randn(m, n).astype(np.float32)\n\n# Color of each vertex (TODO: make it more efficient by using a GLSL-based\n# color map and the index).\ncolor = np.repeat(np.random.uniform(size=(m, 3), low=.5, high=.9),\n n, axis=0).astype(np.float32)\n\n# Signal 2D index of each vertex (row and col) and x-index (sample index\n# within each signal).\nindex = np.c_[np.repeat(np.repeat(np.arange(ncols), nrows), n),\n np.repeat(np.tile(np.arange(nrows), ncols), n),\n np.tile(np.arange(n), m)].astype(np.float32)\n\nVERT_SHADER = \"\"\"\n#version 120\n\n// y coordinate of the position.\nattribute float a_position;\n\n// row, col, and time index.\nattribute vec3 a_index;\nvarying vec3 v_index;\n\n// 2D scaling factor (zooming).\nuniform vec2 u_scale;\n\n// Size of the table.\nuniform vec2 u_size;\n\n// Number of samples per signal.\nuniform float u_n;\n\n// Color.\nattribute vec3 a_color;\nvarying vec4 v_color;\n\n// Varying variables used for clipping in the fragment shader.\nvarying vec2 v_position;\nvarying vec4 v_ab;\n\nvoid main() {\n float nrows = u_size.x;\n float ncols = u_size.y;\n\n // Compute the x coordinate from the time index.\n float x = -1 + 2*a_index.z / (u_n-1);\n vec2 position = vec2(x - (1 - 1 / u_scale.x), a_position);\n\n // Find the affine transformation for the subplots.\n vec2 a = vec2(1./ncols, 1./nrows)*.9;\n vec2 b = vec2(-1 + 2*(a_index.x+.5) / ncols,\n -1 + 2*(a_index.y+.5) / nrows);\n // Apply the static subplot transformation + scaling.\n gl_Position = vec4(a*u_scale*position+b, 0.0, 1.0);\n\n v_color = vec4(a_color, 1.);\n v_index = a_index;\n\n // For clipping test in the fragment shader.\n v_position = gl_Position.xy;\n v_ab = vec4(a, b);\n}\n\"\"\"\n\nFRAG_SHADER = \"\"\"\n#version 120\n\nvarying vec4 v_color;\nvarying vec3 v_index;\n\nvarying vec2 v_position;\nvarying vec4 v_ab;\n\nvoid main() {\n gl_FragColor = v_color;\n\n // Discard the fragments between the signals (emulate glMultiDrawArrays).\n if ((fract(v_index.x) > 0.) 
|| (fract(v_index.y) > 0.))\n discard;\n\n // Clipping test.\n vec2 test = abs((v_position.xy-v_ab.zw)/v_ab.xy);\n if ((test.x > 1) || (test.y > 1))\n discard;\n}\n\"\"\"\n\n\nclass Canvas(app.Canvas):\n def __init__(self):\n app.Canvas.__init__(self, title='Use your wheel to zoom!',\n keys='interactive')\n self.program = gloo.Program(VERT_SHADER, FRAG_SHADER)\n self.program['a_position'] = y.reshape(-1, 1)\n self.program['a_color'] = color\n self.program['a_index'] = index\n self.program['u_scale'] = (1., 1.)\n self.program['u_size'] = (nrows, ncols)\n self.program['u_n'] = n\n\n gloo.set_viewport(0, 0, *self.physical_size)\n\n self._timer = app.Timer('auto', connect=self.on_timer, start=True)\n\n gloo.set_state(clear_color='black', blend=True,\n blend_func=('src_alpha', 'one_minus_src_alpha'))\n\n self.show()\n\n def on_resize(self, event):\n gloo.set_viewport(0, 0, *event.physical_size)\n\n def on_mouse_wheel(self, event):\n dx = np.sign(event.delta[1]) * .05\n scale_x, scale_y = self.program['u_scale']\n scale_x_new, scale_y_new = (scale_x * math.exp(2.5*dx),\n scale_y * math.exp(0.0*dx))\n self.program['u_scale'] = (max(1, scale_x_new), max(1, scale_y_new))\n self.update()\n\n def on_timer(self, event):\n \"\"\"Add some data at the end of each signal (real-time signals).\"\"\"\n k = 10\n y[:, :-k] = y[:, k:]\n y[:, -k:] = amplitudes * np.random.randn(m, k)\n\n self.program['a_position'].set_data(y.ravel().astype(np.float32))\n self.update()\n\n def on_draw(self, event):\n gloo.clear()\n self.program.draw('line_strip')\n\nif __name__ == '__main__':\n c = Canvas()\n app.run()\n", "path": "examples/demo/gloo/realtime_signals.py"}, {"content": "# -*- coding: utf-8 -*-\n# Copyright (c) Vispy Development Team. All Rights Reserved.\n# Distributed under the (new) BSD License. See LICENSE.txt for more info.\n# vispy: gallery 30\n\"\"\"\nShow 10,000 realtime scrolling plots\n\"\"\"\nfrom vispy import app, scene\nimport numpy as np\n\n\ncanvas = scene.SceneCanvas(keys='interactive', show=True, size=(1024, 768))\ngrid = canvas.central_widget.add_grid()\nview = grid.add_view(0, 1)\nview.camera = scene.MagnifyCamera(mag=1, size_factor=0.5, radius_ratio=0.6)\n\n# Add axes\nyax = scene.AxisWidget(orientation='left')\nyax.stretch = (0.05, 1)\ngrid.add_widget(yax, 0, 0)\nyax.link_view(view)\n\nxax = scene.AxisWidget(orientation='bottom')\nxax.stretch = (1, 0.05)\ngrid.add_widget(xax, 1, 1)\nxax.link_view(view)\n\n\nN = 4900\nM = 2000\ncols = int(N**0.5)\nview.camera.rect = (0, 0, cols, N/cols)\n\nlines = scene.ScrollingLines(n_lines=N, line_size=M, columns=cols, dx=0.8/M,\n cell_size=(1, 8), parent=view.scene)\nlines.transform = scene.STTransform(scale=(1, 1/8.))\n\n\ndef update(ev):\n m = 50\n data = np.random.normal(size=(N, m), scale=0.3)\n data[data > 1] += 4\n lines.roll_data(data)\n\ntimer = app.Timer(connect=update, interval=0)\ntimer.start()\n\n\nif __name__ == '__main__':\n import sys\n if sys.flags.interactive != 1:\n app.run()\n", "path": "examples/demo/scene/scrolling_plots.py"}]}
| 3,000 | 236 |
gh_patches_debug_29823
|
rasdani/github-patches
|
git_diff
|
Project-MONAI__MONAI-971
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Preventing imports of all monai submodules
When importing monai in any way, all submodules are automatically made available, e.g.:
`import monai` results in the root level `import transforms` being suddenly possible (instead of needing `from monai import transforms`)
This seems to me like undesirable behavior: any project that has a **local** `transforms` submodule is suddenly unable to import it, because this results in a name conflict with the imported monai modules.
When looking at any other package, similar behavior doesn't occur: after performing `import torch`, one doesn't expect `import utils` to import `torch.utils`(!)
To conclude:
--- Current (undesirable) situation ---
```python
import monai
import transforms # no error
# do something with transforms
# e.g. transforms.Compose([...])
```
--- Desired situation ---
```python
import monai
# do something with transforms
# e.g. monai.transforms.Compose([...])
```
Or, with specific import:
```python
from monai import transforms
# do something with transforms
# e.g. transforms.Compose([...])
```
And when no local module is present:
```python
import monai
import transforms
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
ModuleNotFoundError: No module named 'transforms'
```
The same holds for all monai submodules (`utils`, `metrics`, `losses`, etc.)
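
As far as I can tell, the culprit is `load_submodules` in `monai/utils/module.py`: it calls `walk_packages(basemod.__path__)` without a `prefix`, so each submodule gets loaded and registered under its bare name. A small illustration of the difference (example output abbreviated; exact names depend on the installed version):

```python
# Suspected cause: without a prefix, walk_packages yields bare submodule names
# such as "transforms"; load_submodules then loads each module under that bare
# name, which is why a later "import transforms" resolves to monai.transforms.
from pkgutil import walk_packages

import monai

bare = [name for _, name, _ in walk_packages(monai.__path__)]
qualified = [name for _, name, _ in walk_packages(monai.__path__, prefix=monai.__name__ + ".")]

print(bare[:3])       # e.g. ['config', 'data', 'engines']  -- collision-prone top-level names
print(qualified[:3])  # e.g. ['monai.config', 'monai.data', 'monai.engines']
```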
</issue>
<code>
[start of monai/utils/module.py]
1 # Copyright 2020 MONAI Consortium
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at
5 # http://www.apache.org/licenses/LICENSE-2.0
6 # Unless required by applicable law or agreed to in writing, software
7 # distributed under the License is distributed on an "AS IS" BASIS,
8 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
9 # See the License for the specific language governing permissions and
10 # limitations under the License.
11
12 from importlib import import_module
13 from pkgutil import walk_packages
14 from re import match
15 from typing import Any, Callable, Tuple
16
17 OPTIONAL_IMPORT_MSG_FMT = "{}"
18
19
20 def export(modname):
21 """
22 Make the decorated object a member of the named module. This will also add the object under its aliases if it has
23 a `__aliases__` member, thus this decorator should be before the `alias` decorator to pick up those names. Alias
24 names which conflict with package names or existing members will be ignored.
25 """
26
27 def _inner(obj):
28 mod = import_module(modname)
29 if not hasattr(mod, obj.__name__):
30 setattr(mod, obj.__name__, obj)
31
32 # add the aliases for `obj` to the target module
33 for alias in getattr(obj, "__aliases__", ()):
34 if not hasattr(mod, alias):
35 setattr(mod, alias, obj)
36
37 return obj
38
39 return _inner
40
41
42 def load_submodules(basemod, load_all: bool = True, exclude_pattern: str = "(.*[tT]est.*)|(_.*)"):
43 """
44 Traverse the source of the module structure starting with module `basemod`, loading all packages plus all files if
45 `loadAll` is True, excluding anything whose name matches `excludePattern`.
46 """
47 submodules = []
48
49 for importer, name, is_pkg in walk_packages(basemod.__path__):
50 if (is_pkg or load_all) and match(exclude_pattern, name) is None:
51 mod = import_module(basemod.__name__ + "." + name) # why do I need to do this first?
52 importer.find_module(name).load_module(name)
53 submodules.append(mod)
54
55 return submodules
56
57
58 @export("monai.utils")
59 def get_full_type_name(typeobj):
60 module = typeobj.__module__
61 if module is None or module == str.__class__.__module__:
62 return typeobj.__name__ # Avoid reporting __builtin__
63 else:
64 return module + "." + typeobj.__name__
65
66
67 def min_version(the_module, min_version_str: str = "") -> bool:
68 """
69 Convert version strings into tuples of int and compare them.
70
71 Returns True if the module's version is greater or equal to the 'min_version'.
72 When min_version_str is not provided, it always returns True.
73 """
74 if min_version_str:
75 mod_version = tuple(int(x) for x in the_module.__version__.split(".")[:2])
76 required = tuple(int(x) for x in min_version_str.split(".")[:2])
77 return mod_version >= required
78 return True # always valid version
79
80
81 def exact_version(the_module, version_str: str = "") -> bool:
82 """
83 Returns True if the module's __version__ matches version_str
84 """
85 return bool(the_module.__version__ == version_str)
86
87
88 def optional_import(
89 module: str,
90 version: str = "",
91 version_checker: Callable[..., bool] = min_version,
92 name: str = "",
93 descriptor: str = OPTIONAL_IMPORT_MSG_FMT,
94 version_args=None,
95 allow_namespace_pkg: bool = False,
96 ) -> Tuple[Any, bool]:
97 """
98 Imports an optional module specified by `module` string.
99 Any importing related exceptions will be stored, and exceptions raise lazily
100 when attempting to use the failed-to-import module.
101
102 Args:
103 module: name of the module to be imported.
104 version: version string used by the version_checker.
105 version_checker: a callable to check the module version, Defaults to monai.utils.min_version.
106 name: a non-module attribute (such as method/class) to import from the imported module.
107 descriptor: a format string for the final error message when using a not imported module.
108 version_args: additional parameters to the version checker.
109 allow_namespace_pkg: whether importing a namespace package is allowed. Defaults to False.
110
111 Returns:
112 The imported module and a boolean flag indicating whether the import is successful.
113
114 Examples::
115
116 >>> torch, flag = optional_import('torch', '1.1')
117 >>> print(torch, flag)
118 <module 'torch' from 'python/lib/python3.6/site-packages/torch/__init__.py'> True
119
120 >>> the_module, flag = optional_import('unknown_module')
121 >>> print(flag)
122 False
123 >>> the_module.method # trying to access a module which is not imported
124 AttributeError: Optional import: import unknown_module (No module named 'unknown_module').
125
126 >>> torch, flag = optional_import('torch', '42', exact_version)
127 >>> torch.nn # trying to access a module for which there isn't a proper version imported
128 AttributeError: Optional import: import torch (requires version '42' by 'exact_version').
129
130 >>> conv, flag = optional_import('torch.nn.functional', '1.0', name='conv1d')
131 >>> print(conv)
132 <built-in method conv1d of type object at 0x11a49eac0>
133
134 >>> conv, flag = optional_import('torch.nn.functional', '42', name='conv1d')
135 >>> conv() # trying to use a function from the not successfully imported module (due to unmatched version)
136 AttributeError: Optional import: from torch.nn.functional import conv1d (requires version '42' by 'min_version').
137 """
138
139 tb = None
140 exception_str = ""
141 if name:
142 actual_cmd = f"from {module} import {name}"
143 else:
144 actual_cmd = f"import {module}"
145 try:
146 pkg = __import__(module) # top level module
147 the_module = import_module(module)
148 if not allow_namespace_pkg:
149 is_namespace = getattr(the_module, "__file__", None) is None and hasattr(the_module, "__path__")
150 assert not is_namespace
151 if name: # user specified to load class/function/... from the module
152 the_module = getattr(the_module, name)
153 except Exception as import_exception: # any exceptions during import
154 tb = import_exception.__traceback__
155 exception_str = f"{import_exception}"
156 else: # found the module
157 if version_args and version_checker(pkg, f"{version}", version_args):
158 return the_module, True
159 if not version_args and version_checker(pkg, f"{version}"):
160 return the_module, True
161
162 # preparing lazy error message
163 msg = descriptor.format(actual_cmd)
164 if version and tb is None: # a pure version issue
165 msg += f" (requires '{module} {version}' by '{version_checker.__name__}')"
166 if exception_str:
167 msg += f" ({exception_str})"
168
169 class _LazyRaise:
170 def __init__(self, *_args, **_kwargs):
171 _default_msg = (
172 f"Optional import: {msg}."
173 + "\n\nFor details about installing the optional dependencies, please visit:"
174 + "\n https://docs.monai.io/en/latest/installation.html#installing-the-recommended-dependencies"
175 )
176 if tb is None:
177 self._exception = AttributeError(_default_msg)
178 else:
179 self._exception = AttributeError(_default_msg).with_traceback(tb)
180
181 def __getattr__(self, name):
182 """
183 Raises:
184 AttributeError: When you call this method.
185 """
186 raise self._exception
187
188 def __call__(self, *_args, **_kwargs):
189 """
190 Raises:
191 AttributeError: When you call this method.
192 """
193 raise self._exception
194
195 return _LazyRaise(), False
196
[end of monai/utils/module.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/monai/utils/module.py b/monai/utils/module.py
--- a/monai/utils/module.py
+++ b/monai/utils/module.py
@@ -9,6 +9,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+import sys
from importlib import import_module
from pkgutil import walk_packages
from re import match
@@ -42,20 +43,19 @@
def load_submodules(basemod, load_all: bool = True, exclude_pattern: str = "(.*[tT]est.*)|(_.*)"):
"""
Traverse the source of the module structure starting with module `basemod`, loading all packages plus all files if
- `loadAll` is True, excluding anything whose name matches `excludePattern`.
+ `load_all` is True, excluding anything whose name matches `exclude_pattern`.
"""
submodules = []
- for importer, name, is_pkg in walk_packages(basemod.__path__):
- if (is_pkg or load_all) and match(exclude_pattern, name) is None:
- mod = import_module(basemod.__name__ + "." + name) # why do I need to do this first?
+ for importer, name, is_pkg in walk_packages(basemod.__path__, prefix=basemod.__name__ + "."):
+ if (is_pkg or load_all) and name not in sys.modules and match(exclude_pattern, name) is None:
+ mod = import_module(name)
importer.find_module(name).load_module(name)
submodules.append(mod)
return submodules
-@export("monai.utils")
def get_full_type_name(typeobj):
module = typeobj.__module__
if module is None or module == str.__class__.__module__:
|
{"golden_diff": "diff --git a/monai/utils/module.py b/monai/utils/module.py\n--- a/monai/utils/module.py\n+++ b/monai/utils/module.py\n@@ -9,6 +9,7 @@\n # See the License for the specific language governing permissions and\n # limitations under the License.\n \n+import sys\n from importlib import import_module\n from pkgutil import walk_packages\n from re import match\n@@ -42,20 +43,19 @@\n def load_submodules(basemod, load_all: bool = True, exclude_pattern: str = \"(.*[tT]est.*)|(_.*)\"):\n \"\"\"\n Traverse the source of the module structure starting with module `basemod`, loading all packages plus all files if\n- `loadAll` is True, excluding anything whose name matches `excludePattern`.\n+ `load_all` is True, excluding anything whose name matches `exclude_pattern`.\n \"\"\"\n submodules = []\n \n- for importer, name, is_pkg in walk_packages(basemod.__path__):\n- if (is_pkg or load_all) and match(exclude_pattern, name) is None:\n- mod = import_module(basemod.__name__ + \".\" + name) # why do I need to do this first?\n+ for importer, name, is_pkg in walk_packages(basemod.__path__, prefix=basemod.__name__ + \".\"):\n+ if (is_pkg or load_all) and name not in sys.modules and match(exclude_pattern, name) is None:\n+ mod = import_module(name)\n importer.find_module(name).load_module(name)\n submodules.append(mod)\n \n return submodules\n \n \n-@export(\"monai.utils\")\n def get_full_type_name(typeobj):\n module = typeobj.__module__\n if module is None or module == str.__class__.__module__:\n", "issue": "Preventing imports of all monai submodules\nWhen importing monai in any way, all submodules are automatically made available, e.g.:\r\n`import monai` results in the root level `import transforms` being suddenly possible (instead of needing `from monai import transforms`)\r\n\r\nThis seems to me like undesirable behavior: any project that has a **local** `transforms` submodule is suddenly unable to import it, because this results in a name conflict with the imported monai modules.\r\nWhen looking at any other package, similar behavior doesn't occur: after performing `import torch`, one doesn't expect `import utils` to import `torch.utils`(!)\r\n\r\nTo conclude:\r\n--- Current (undesirable) situation ---\r\n```python\r\nimport monai\r\nimport transforms # no error\r\n\r\n# do something with transforms\r\n# e.g. transforms.Compose([...])\r\n```\r\n\r\n--- Desired situation ---\r\n```python\r\nimport monai\r\n\r\n# do something with transforms\r\n# e.g. monai.transforms.Compose([...])\r\n```\r\nOr, with specific import:\r\n```python\r\nfrom monai import transforms\r\n\r\n# do something with transforms\r\n# e.g. 
transforms.Compose([...])\r\n```\r\nAnd when no local module is present:\r\n```python\r\nimport monai\r\nimport transforms\r\nTraceback (most recent call last):\r\n File \"<stdin>\", line 1, in <module>\r\nModuleNotFoundError: No module named 'transforms'\r\n```\r\n\r\nThe same holds for all monai submodules (`utils`, `metrics`, `losses`, etc.)\r\n\n", "before_files": [{"content": "# Copyright 2020 MONAI Consortium\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom importlib import import_module\nfrom pkgutil import walk_packages\nfrom re import match\nfrom typing import Any, Callable, Tuple\n\nOPTIONAL_IMPORT_MSG_FMT = \"{}\"\n\n\ndef export(modname):\n \"\"\"\n Make the decorated object a member of the named module. This will also add the object under its aliases if it has\n a `__aliases__` member, thus this decorator should be before the `alias` decorator to pick up those names. Alias\n names which conflict with package names or existing members will be ignored.\n \"\"\"\n\n def _inner(obj):\n mod = import_module(modname)\n if not hasattr(mod, obj.__name__):\n setattr(mod, obj.__name__, obj)\n\n # add the aliases for `obj` to the target module\n for alias in getattr(obj, \"__aliases__\", ()):\n if not hasattr(mod, alias):\n setattr(mod, alias, obj)\n\n return obj\n\n return _inner\n\n\ndef load_submodules(basemod, load_all: bool = True, exclude_pattern: str = \"(.*[tT]est.*)|(_.*)\"):\n \"\"\"\n Traverse the source of the module structure starting with module `basemod`, loading all packages plus all files if\n `loadAll` is True, excluding anything whose name matches `excludePattern`.\n \"\"\"\n submodules = []\n\n for importer, name, is_pkg in walk_packages(basemod.__path__):\n if (is_pkg or load_all) and match(exclude_pattern, name) is None:\n mod = import_module(basemod.__name__ + \".\" + name) # why do I need to do this first?\n importer.find_module(name).load_module(name)\n submodules.append(mod)\n\n return submodules\n\n\n@export(\"monai.utils\")\ndef get_full_type_name(typeobj):\n module = typeobj.__module__\n if module is None or module == str.__class__.__module__:\n return typeobj.__name__ # Avoid reporting __builtin__\n else:\n return module + \".\" + typeobj.__name__\n\n\ndef min_version(the_module, min_version_str: str = \"\") -> bool:\n \"\"\"\n Convert version strings into tuples of int and compare them.\n\n Returns True if the module's version is greater or equal to the 'min_version'.\n When min_version_str is not provided, it always returns True.\n \"\"\"\n if min_version_str:\n mod_version = tuple(int(x) for x in the_module.__version__.split(\".\")[:2])\n required = tuple(int(x) for x in min_version_str.split(\".\")[:2])\n return mod_version >= required\n return True # always valid version\n\n\ndef exact_version(the_module, version_str: str = \"\") -> bool:\n \"\"\"\n Returns True if the module's __version__ matches version_str\n \"\"\"\n return bool(the_module.__version__ == version_str)\n\n\ndef optional_import(\n module: str,\n version: str = \"\",\n version_checker: 
Callable[..., bool] = min_version,\n name: str = \"\",\n descriptor: str = OPTIONAL_IMPORT_MSG_FMT,\n version_args=None,\n allow_namespace_pkg: bool = False,\n) -> Tuple[Any, bool]:\n \"\"\"\n Imports an optional module specified by `module` string.\n Any importing related exceptions will be stored, and exceptions raise lazily\n when attempting to use the failed-to-import module.\n\n Args:\n module: name of the module to be imported.\n version: version string used by the version_checker.\n version_checker: a callable to check the module version, Defaults to monai.utils.min_version.\n name: a non-module attribute (such as method/class) to import from the imported module.\n descriptor: a format string for the final error message when using a not imported module.\n version_args: additional parameters to the version checker.\n allow_namespace_pkg: whether importing a namespace package is allowed. Defaults to False.\n\n Returns:\n The imported module and a boolean flag indicating whether the import is successful.\n\n Examples::\n\n >>> torch, flag = optional_import('torch', '1.1')\n >>> print(torch, flag)\n <module 'torch' from 'python/lib/python3.6/site-packages/torch/__init__.py'> True\n\n >>> the_module, flag = optional_import('unknown_module')\n >>> print(flag)\n False\n >>> the_module.method # trying to access a module which is not imported\n AttributeError: Optional import: import unknown_module (No module named 'unknown_module').\n\n >>> torch, flag = optional_import('torch', '42', exact_version)\n >>> torch.nn # trying to access a module for which there isn't a proper version imported\n AttributeError: Optional import: import torch (requires version '42' by 'exact_version').\n\n >>> conv, flag = optional_import('torch.nn.functional', '1.0', name='conv1d')\n >>> print(conv)\n <built-in method conv1d of type object at 0x11a49eac0>\n\n >>> conv, flag = optional_import('torch.nn.functional', '42', name='conv1d')\n >>> conv() # trying to use a function from the not successfully imported module (due to unmatched version)\n AttributeError: Optional import: from torch.nn.functional import conv1d (requires version '42' by 'min_version').\n \"\"\"\n\n tb = None\n exception_str = \"\"\n if name:\n actual_cmd = f\"from {module} import {name}\"\n else:\n actual_cmd = f\"import {module}\"\n try:\n pkg = __import__(module) # top level module\n the_module = import_module(module)\n if not allow_namespace_pkg:\n is_namespace = getattr(the_module, \"__file__\", None) is None and hasattr(the_module, \"__path__\")\n assert not is_namespace\n if name: # user specified to load class/function/... 
from the module\n the_module = getattr(the_module, name)\n except Exception as import_exception: # any exceptions during import\n tb = import_exception.__traceback__\n exception_str = f\"{import_exception}\"\n else: # found the module\n if version_args and version_checker(pkg, f\"{version}\", version_args):\n return the_module, True\n if not version_args and version_checker(pkg, f\"{version}\"):\n return the_module, True\n\n # preparing lazy error message\n msg = descriptor.format(actual_cmd)\n if version and tb is None: # a pure version issue\n msg += f\" (requires '{module} {version}' by '{version_checker.__name__}')\"\n if exception_str:\n msg += f\" ({exception_str})\"\n\n class _LazyRaise:\n def __init__(self, *_args, **_kwargs):\n _default_msg = (\n f\"Optional import: {msg}.\"\n + \"\\n\\nFor details about installing the optional dependencies, please visit:\"\n + \"\\n https://docs.monai.io/en/latest/installation.html#installing-the-recommended-dependencies\"\n )\n if tb is None:\n self._exception = AttributeError(_default_msg)\n else:\n self._exception = AttributeError(_default_msg).with_traceback(tb)\n\n def __getattr__(self, name):\n \"\"\"\n Raises:\n AttributeError: When you call this method.\n \"\"\"\n raise self._exception\n\n def __call__(self, *_args, **_kwargs):\n \"\"\"\n Raises:\n AttributeError: When you call this method.\n \"\"\"\n raise self._exception\n\n return _LazyRaise(), False\n", "path": "monai/utils/module.py"}]}
| 3,092 | 402 |
gh_patches_debug_62442 | rasdani/github-patches | git_diff | comic__grand-challenge.org-1162 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Reader study completed message is visible when study is not completed

</issue>
<code>
[start of app/grandchallenge/reader_studies/templatetags/get_ground_truth.py]
1 from django import template
2
3 register = template.Library()
4
5
6 @register.simple_tag
7 def get_ground_truth(obj, image, question):
8 """Get the auth token for the user."""
9 ground_truths = obj.statistics["ground_truths"]
10 return ground_truths[image][question]
11
[end of app/grandchallenge/reader_studies/templatetags/get_ground_truth.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/app/grandchallenge/reader_studies/templatetags/get_ground_truth.py b/app/grandchallenge/reader_studies/templatetags/get_ground_truth.py
--- a/app/grandchallenge/reader_studies/templatetags/get_ground_truth.py
+++ b/app/grandchallenge/reader_studies/templatetags/get_ground_truth.py
@@ -5,6 +5,7 @@
@register.simple_tag
def get_ground_truth(obj, image, question):
- """Get the auth token for the user."""
+ """Get the ground truth value for the image/question combination in reader
+ study obj."""
ground_truths = obj.statistics["ground_truths"]
return ground_truths[image][question]
|
{"golden_diff": "diff --git a/app/grandchallenge/reader_studies/templatetags/get_ground_truth.py b/app/grandchallenge/reader_studies/templatetags/get_ground_truth.py\n--- a/app/grandchallenge/reader_studies/templatetags/get_ground_truth.py\n+++ b/app/grandchallenge/reader_studies/templatetags/get_ground_truth.py\n@@ -5,6 +5,7 @@\n \n @register.simple_tag\n def get_ground_truth(obj, image, question):\n- \"\"\"Get the auth token for the user.\"\"\"\n+ \"\"\"Get the ground truth value for the image/question combination in reader\n+ study obj.\"\"\"\n ground_truths = obj.statistics[\"ground_truths\"]\n return ground_truths[image][question]\n", "issue": "Reader study completed message is visible when study is not completed\n\r\n\n", "before_files": [{"content": "from django import template\n\nregister = template.Library()\n\n\[email protected]_tag\ndef get_ground_truth(obj, image, question):\n \"\"\"Get the auth token for the user.\"\"\"\n ground_truths = obj.statistics[\"ground_truths\"]\n return ground_truths[image][question]\n", "path": "app/grandchallenge/reader_studies/templatetags/get_ground_truth.py"}]}
| 726 | 159 |
gh_patches_debug_25949 | rasdani/github-patches | git_diff | larq__larq-319 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Refactor layer test to use pytest parameterized test
Currently we use a mix of unittests written in `pytest` style and tests using `tf.test.TestCase` in larq. In #313 I added some simple `pytest` fixtures that allow to run tests in both eager and graph mode.
The only part of the test suite that uses `tf.test.TestCase` are the [layer tests](https://github.com/larq/larq/blob/f6f9277a006f6cb07b1e5f9bc591a087f2261152/larq/layers_test.py#L73-L187) which rely on [`keras_parameterized.run_all_keras_modes`](https://github.com/tensorflow/tensorflow/blob/669080135924d204a1c67dd556223d794efbf664/tensorflow/python/keras/keras_parameterized.py#L176-L303) and a forked/copied version of [`testing_utils. layer_test`](https://github.com/tensorflow/tensorflow/blob/669080135924d204a1c67dd556223d794efbf664/tensorflow/python/keras/testing_utils.py#L75-L261).
I think it would be a lot cleaner to write a simple [`pytest` fixture](https://pytest.org/en/latest/fixture.html#fixture) similar to #313 that allows us to run a normal [`pytest` parameterized test](https://pytest.org/en/latest/parametrize.html#pytest-mark-parametrize) in all Keras modes and adapt the layer tests to use it. That way we can get rid of `absl.testing` and some requirements on TensorFlow internals.
</issue>
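For illustration, a minimal sketch of the kind of combined fixture described above, reusing the `tensorflow.python.eager.context` managers already used in `larq/conftest.py` (quoted below); the fixture name `run_in_all_keras_modes` and the exact set of modes are placeholders, not the project's implementation:

```python
import pytest
from tensorflow.python.eager import context


@pytest.fixture(params=["graph", "tf_eager", "tf_keras_eager"])
def run_in_all_keras_modes(request):
    """Set up graph or eager mode and yield whether Keras should run eagerly."""
    if request.param == "graph":
        with context.graph_mode():
            yield False
    else:
        with context.eager_mode():
            # Only the "tf_keras_eager" case asks Keras itself to run eagerly.
            yield request.param == "tf_keras_eager"
```

A layer test could then accept this fixture as an argument and forward the yielded flag to Keras when compiling the model under test.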
<code>
[start of setup.py]
1 from setuptools import setup, find_packages
2
3
4 def readme():
5 with open("README.md", "r") as f:
6 return f.read()
7
8
9 setup(
10 name="larq",
11 version="0.7.4",
12 python_requires=">=3.6",
13 author="Plumerai",
14 author_email="[email protected]",
15 description="An Open Source Machine Learning Library for Training Binarized Neural Networks",
16 long_description=readme(),
17 long_description_content_type="text/markdown",
18 url="https://larq.dev/",
19 packages=find_packages(exclude=["larq.snapshots"]),
20 license="Apache 2.0",
21 install_requires=[
22 "numpy >= 1.15.4, < 2.0",
23 "terminaltables>=3.1.0",
24 "dataclasses ; python_version<'3.7'",
25 ],
26 extras_require={
27 "tensorflow": ["tensorflow>=1.14.0"],
28 "tensorflow_gpu": ["tensorflow-gpu>=1.14.0"],
29 "test": [
30 "absl-py==0.8.1",
31 "pytest==5.2.2",
32 "pytest-cov==2.8.1",
33 "pytest-xdist==1.30.0",
34 "snapshottest==0.5.1",
35 ],
36 "docs": [
37 "mkdocs==1.0.4",
38 "mkdocs-material==4.4.3",
39 "pymdown-extensions==6.1",
40 "mknotebooks==0.1.7",
41 "mkdocs-minify-plugin==0.2.1",
42 "larq-zoo==0.4.2",
43 "altair==3.2.0",
44 "pandas==0.25.3",
45 ],
46 },
47 classifiers=[
48 "Development Status :: 4 - Beta",
49 "Intended Audience :: Developers",
50 "Intended Audience :: Education",
51 "Intended Audience :: Science/Research",
52 "License :: OSI Approved :: Apache Software License",
53 "Programming Language :: Python :: 3",
54 "Programming Language :: Python :: 3 :: Only",
55 "Programming Language :: Python :: 3.6",
56 "Programming Language :: Python :: 3.7",
57 "Topic :: Scientific/Engineering",
58 "Topic :: Scientific/Engineering :: Mathematics",
59 "Topic :: Scientific/Engineering :: Artificial Intelligence",
60 "Topic :: Software Development",
61 "Topic :: Software Development :: Libraries",
62 "Topic :: Software Development :: Libraries :: Python Modules",
63 ],
64 )
65
[end of setup.py]
[start of larq/conftest.py]
1 import pytest
2 from tensorflow.python.eager import context
3
4
5 @pytest.fixture
6 def eager_mode():
7 """pytest fixture for running test in eager mode"""
8 with context.eager_mode():
9 yield
10
11
12 @pytest.fixture
13 def graph_mode():
14 """pytest fixture for running test in graph mode"""
15 with context.graph_mode():
16 yield
17
18
19 @pytest.fixture(params=["eager", "graph"])
20 def eager_and_graph_mode(request):
21 """pytest fixture for running test in eager and graph mode"""
22 with getattr(context, f"{request.param}_mode")():
23 yield request.param
24
[end of larq/conftest.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/larq/conftest.py b/larq/conftest.py
--- a/larq/conftest.py
+++ b/larq/conftest.py
@@ -1,4 +1,5 @@
import pytest
+import tensorflow as tf
from tensorflow.python.eager import context
@@ -21,3 +22,27 @@
"""pytest fixture for running test in eager and graph mode"""
with getattr(context, f"{request.param}_mode")():
yield request.param
+
+
[email protected](params=["graph", "tf_eager", "tf_keras_eager"])
+def keras_should_run_eagerly(request):
+ """Fixture to run in graph and two eager modes.
+
+ The modes are:
+ - Graph mode
+ - TensorFlow eager and Keras eager
+ - TensorFlow eager and Keras not eager
+
+ The `tf.context` sets graph/eager mode for TensorFlow. The yield is True if Keras
+ should run eagerly.
+ """
+
+ if request.param == "graph":
+ if int(tf.__version__[0]) >= 2:
+ pytest.skip("Skipping graph mode for TensorFlow 2+.")
+
+ with context.graph_mode():
+ yield
+ else:
+ with context.eager_mode():
+ yield request.param == "tf_keras_eager"
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -27,7 +27,6 @@
"tensorflow": ["tensorflow>=1.14.0"],
"tensorflow_gpu": ["tensorflow-gpu>=1.14.0"],
"test": [
- "absl-py==0.8.1",
"pytest==5.2.2",
"pytest-cov==2.8.1",
"pytest-xdist==1.30.0",
|
{"golden_diff": "diff --git a/larq/conftest.py b/larq/conftest.py\n--- a/larq/conftest.py\n+++ b/larq/conftest.py\n@@ -1,4 +1,5 @@\n import pytest\n+import tensorflow as tf\n from tensorflow.python.eager import context\n \n \n@@ -21,3 +22,27 @@\n \"\"\"pytest fixture for running test in eager and graph mode\"\"\"\n with getattr(context, f\"{request.param}_mode\")():\n yield request.param\n+\n+\[email protected](params=[\"graph\", \"tf_eager\", \"tf_keras_eager\"])\n+def keras_should_run_eagerly(request):\n+ \"\"\"Fixture to run in graph and two eager modes.\n+\n+ The modes are:\n+ - Graph mode\n+ - TensorFlow eager and Keras eager\n+ - TensorFlow eager and Keras not eager\n+\n+ The `tf.context` sets graph/eager mode for TensorFlow. The yield is True if Keras\n+ should run eagerly.\n+ \"\"\"\n+\n+ if request.param == \"graph\":\n+ if int(tf.__version__[0]) >= 2:\n+ pytest.skip(\"Skipping graph mode for TensorFlow 2+.\")\n+\n+ with context.graph_mode():\n+ yield\n+ else:\n+ with context.eager_mode():\n+ yield request.param == \"tf_keras_eager\"\ndiff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -27,7 +27,6 @@\n \"tensorflow\": [\"tensorflow>=1.14.0\"],\n \"tensorflow_gpu\": [\"tensorflow-gpu>=1.14.0\"],\n \"test\": [\n- \"absl-py==0.8.1\",\n \"pytest==5.2.2\",\n \"pytest-cov==2.8.1\",\n \"pytest-xdist==1.30.0\",\n", "issue": "Refactor layer test to use pytest parameterized test\nCurrently we use a mix of unittests written in `pytest` style and tests using `tf.test.TestCase` in larq. In #313 I added some simple `pytest` fixtures that allow to run tests in both eager and graph mode.\r\n\r\nThe only part of the test suite that uses `tf.test.TestCase` are the [layer tests](https://github.com/larq/larq/blob/f6f9277a006f6cb07b1e5f9bc591a087f2261152/larq/layers_test.py#L73-L187) which rely on [`keras_parameterized.run_all_keras_modes`](https://github.com/tensorflow/tensorflow/blob/669080135924d204a1c67dd556223d794efbf664/tensorflow/python/keras/keras_parameterized.py#L176-L303) and a forked/copied version of [`testing_utils. layer_test`](https://github.com/tensorflow/tensorflow/blob/669080135924d204a1c67dd556223d794efbf664/tensorflow/python/keras/testing_utils.py#L75-L261).\r\n\r\nI think it would be a lot cleaner to write a simple [`pytest` fixture](https://pytest.org/en/latest/fixture.html#fixture) similar to #313 that allows us to run a normal [`pytest` parameterized test](https://pytest.org/en/latest/parametrize.html#pytest-mark-parametrize) in all Keras modes and adapt the layer tests to use it. 
That way we can get rid of `absl.testing` and some requirements on TensorFlow internals.\n", "before_files": [{"content": "from setuptools import setup, find_packages\n\n\ndef readme():\n with open(\"README.md\", \"r\") as f:\n return f.read()\n\n\nsetup(\n name=\"larq\",\n version=\"0.7.4\",\n python_requires=\">=3.6\",\n author=\"Plumerai\",\n author_email=\"[email protected]\",\n description=\"An Open Source Machine Learning Library for Training Binarized Neural Networks\",\n long_description=readme(),\n long_description_content_type=\"text/markdown\",\n url=\"https://larq.dev/\",\n packages=find_packages(exclude=[\"larq.snapshots\"]),\n license=\"Apache 2.0\",\n install_requires=[\n \"numpy >= 1.15.4, < 2.0\",\n \"terminaltables>=3.1.0\",\n \"dataclasses ; python_version<'3.7'\",\n ],\n extras_require={\n \"tensorflow\": [\"tensorflow>=1.14.0\"],\n \"tensorflow_gpu\": [\"tensorflow-gpu>=1.14.0\"],\n \"test\": [\n \"absl-py==0.8.1\",\n \"pytest==5.2.2\",\n \"pytest-cov==2.8.1\",\n \"pytest-xdist==1.30.0\",\n \"snapshottest==0.5.1\",\n ],\n \"docs\": [\n \"mkdocs==1.0.4\",\n \"mkdocs-material==4.4.3\",\n \"pymdown-extensions==6.1\",\n \"mknotebooks==0.1.7\",\n \"mkdocs-minify-plugin==0.2.1\",\n \"larq-zoo==0.4.2\",\n \"altair==3.2.0\",\n \"pandas==0.25.3\",\n ],\n },\n classifiers=[\n \"Development Status :: 4 - Beta\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: Education\",\n \"Intended Audience :: Science/Research\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3 :: Only\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Topic :: Scientific/Engineering\",\n \"Topic :: Scientific/Engineering :: Mathematics\",\n \"Topic :: Scientific/Engineering :: Artificial Intelligence\",\n \"Topic :: Software Development\",\n \"Topic :: Software Development :: Libraries\",\n \"Topic :: Software Development :: Libraries :: Python Modules\",\n ],\n)\n", "path": "setup.py"}, {"content": "import pytest\nfrom tensorflow.python.eager import context\n\n\[email protected]\ndef eager_mode():\n \"\"\"pytest fixture for running test in eager mode\"\"\"\n with context.eager_mode():\n yield\n\n\[email protected]\ndef graph_mode():\n \"\"\"pytest fixture for running test in graph mode\"\"\"\n with context.graph_mode():\n yield\n\n\[email protected](params=[\"eager\", \"graph\"])\ndef eager_and_graph_mode(request):\n \"\"\"pytest fixture for running test in eager and graph mode\"\"\"\n with getattr(context, f\"{request.param}_mode\")():\n yield request.param\n", "path": "larq/conftest.py"}]}
| 1,802 | 416 |
gh_patches_debug_54573 | rasdani/github-patches | git_diff | zulip__zulip-29008 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Clean up beta app installation instructions
Following up on #28884, we should convert the beta app installation instructions into our standard tabbed instructions format. I'm not sure about the best tab labels. Perhaps something like this?
- Most systems
- Linux with APT
</issue>
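For illustration, a rough sketch of what the conversion involves, based on the `{start_tabs}`/`{tab|...}`/`{end_tabs}` syntax handled by the file quoted below; the tab keys `most-systems` and `linux-with-apt` are assumptions derived from the suggested labels:

```python
# Hypothetical additions to TAB_SECTION_LABELS in tabbed_sections.py:
new_labels = {
    "most-systems": "Most systems",
    "linux-with-apt": "Linux with APT",
}

# The installation page would then wrap its instructions roughly like this
# (step content elided):
#
#   {start_tabs}
#   {tab|most-systems}
#   ...generic beta app steps...
#   {tab|linux-with-apt}
#   ...APT repository steps...
#   {end_tabs}
```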
<code>
[start of zerver/lib/markdown/tabbed_sections.py]
1 import re
2 from typing import Any, Dict, List, Mapping, Optional
3
4 import markdown
5 from markdown.extensions import Extension
6 from markdown.preprocessors import Preprocessor
7 from typing_extensions import override
8
9 from zerver.lib.markdown.priorities import PREPROCESSOR_PRIORITES
10
11 START_TABBED_SECTION_REGEX = re.compile(r"^\{start_tabs\}$")
12 END_TABBED_SECTION_REGEX = re.compile(r"^\{end_tabs\}$")
13 TAB_CONTENT_REGEX = re.compile(r"^\{tab\|([^}]+)\}$")
14
15 TABBED_SECTION_TEMPLATE = """
16 <div class="tabbed-section {tab_class}" markdown="1">
17 {nav_bar}
18 <div class="blocks">
19 {blocks}
20 </div>
21 </div>
22 """.strip()
23
24 NAV_BAR_TEMPLATE = """
25 <ul class="nav">
26 {tabs}
27 </ul>
28 """.strip()
29
30 NAV_LIST_ITEM_TEMPLATE = """
31 <li data-tab-key="{data_tab_key}" tabindex="0">{label}</li>
32 """.strip()
33
34 DIV_TAB_CONTENT_TEMPLATE = """
35 <div data-tab-key="{data_tab_key}" markdown="1">
36 {content}
37 </div>
38 """.strip()
39
40 # If adding new entries here, also check if you need to update
41 # tabbed-instructions.js
42 TAB_SECTION_LABELS = {
43 "desktop-web": "Desktop/Web",
44 "ios": "iOS",
45 "android": "Android",
46 "mac": "macOS",
47 "windows": "Windows",
48 "linux": "Linux",
49 "python": "Python",
50 "js": "JavaScript",
51 "curl": "curl",
52 "zulip-send": "zulip-send",
53 "web": "Web",
54 "desktop": "Desktop",
55 "mobile": "Mobile",
56 "mm-default": "Default installation",
57 "mm-cloud": "Cloud instance",
58 "mm-docker": "Docker",
59 "mm-gitlab-omnibus": "GitLab Omnibus",
60 "mm-self-hosting-cloud-export": "Self hosting (cloud export)",
61 "require-invitations": "Require invitations",
62 "allow-anyone-to-join": "Allow anyone to join",
63 "restrict-by-email-domain": "Restrict by email domain",
64 "zoom": "Zoom",
65 "jitsi-meet": "Jitsi Meet",
66 "bigbluebutton": "BigBlueButton",
67 "disable": "Disabled",
68 "chrome": "Chrome",
69 "firefox": "Firefox",
70 "desktop-app": "Desktop app",
71 "system-proxy-settings": "System proxy settings",
72 "custom-proxy-settings": "Custom proxy settings",
73 "stream": "From a stream view",
74 "not-stream": "From other views",
75 "via-recent-conversations": "Via recent conversations",
76 "via-inbox-view": "Via inbox view",
77 "via-left-sidebar": "Via left sidebar",
78 "instructions-for-all-platforms": "Instructions for all platforms",
79 "public-streams": "Public streams",
80 "private-streams": "Private streams",
81 "web-public-streams": "Web-public streams",
82 "via-user-card": "Via user card",
83 "via-user-profile": "Via user profile",
84 "via-organization-settings": "Via organization settings",
85 "via-personal-settings": "Via personal settings",
86 "via-stream-settings": "Via stream settings",
87 "default-subdomain": "Default subdomain",
88 "custom-subdomain": "Custom subdomain",
89 "zulip-cloud-standard": "Zulip Cloud Standard",
90 "zulip-cloud-plus": "Zulip Cloud Plus",
91 "request-sponsorship": "Request sponsorship",
92 "request-education-pricing": "Request education pricing",
93 "zulip-cloud": "Zulip Cloud",
94 "self-hosting": "Self hosting",
95 "okta": "Okta",
96 "onelogin": "OneLogin",
97 "azuread": "AzureAD",
98 "keycloak": "Keycloak",
99 "auth0": "Auth0",
100 "logged-in": "If you are logged in",
101 "logged-out": "If you are logged out",
102 "user": "User",
103 "bot": "Bot",
104 "on-sign-up": "On sign-up",
105 "via-paste": "Via paste",
106 "via-drag-and-drop": "Via drag-and-drop",
107 "via-markdown": "Via Markdown",
108 "via-compose-box-buttons": "Via compose box buttons",
109 "stream-compose": "Compose to a stream",
110 "dm-compose": "Compose a DM",
111 "v8": "Zulip Server 8.0+",
112 "v6": "Zulip Server 6.0+",
113 "v4": "Zulip Server 4.0+",
114 "all-versions": "All versions",
115 "for-a-bot": "For a bot",
116 "for-yourself": "For yourself",
117 }
118
119
120 class TabbedSectionsGenerator(Extension):
121 @override
122 def extendMarkdown(self, md: markdown.Markdown) -> None:
123 md.preprocessors.register(
124 TabbedSectionsPreprocessor(md, self.getConfigs()),
125 "tabbed_sections",
126 PREPROCESSOR_PRIORITES["tabbed_sections"],
127 )
128
129
130 class TabbedSectionsPreprocessor(Preprocessor):
131 def __init__(self, md: markdown.Markdown, config: Mapping[str, Any]) -> None:
132 super().__init__(md)
133
134 @override
135 def run(self, lines: List[str]) -> List[str]:
136 tab_section = self.parse_tabs(lines)
137 while tab_section:
138 if "tabs" in tab_section:
139 tab_class = "has-tabs"
140 else:
141 tab_class = "no-tabs"
142 tab_section["tabs"] = [
143 {
144 "tab_key": "instructions-for-all-platforms",
145 "start": tab_section["start_tabs_index"],
146 }
147 ]
148 nav_bar = self.generate_nav_bar(tab_section)
149 content_blocks = self.generate_content_blocks(tab_section, lines)
150 rendered_tabs = TABBED_SECTION_TEMPLATE.format(
151 tab_class=tab_class, nav_bar=nav_bar, blocks=content_blocks
152 )
153
154 start = tab_section["start_tabs_index"]
155 end = tab_section["end_tabs_index"] + 1
156 lines = [*lines[:start], rendered_tabs, *lines[end:]]
157 tab_section = self.parse_tabs(lines)
158 return lines
159
160 def generate_content_blocks(self, tab_section: Dict[str, Any], lines: List[str]) -> str:
161 tab_content_blocks = []
162 for index, tab in enumerate(tab_section["tabs"]):
163 start_index = tab["start"] + 1
164 try:
165 # If there are more tabs, we can use the starting index
166 # of the next tab as the ending index of the previous one
167 end_index = tab_section["tabs"][index + 1]["start"]
168 except IndexError:
169 # Otherwise, just use the end of the entire section
170 end_index = tab_section["end_tabs_index"]
171
172 content = "\n".join(lines[start_index:end_index]).strip()
173 tab_content_block = DIV_TAB_CONTENT_TEMPLATE.format(
174 data_tab_key=tab["tab_key"],
175 # Wrapping the content in two newlines is necessary here.
176 # If we don't do this, the inner Markdown does not get
177 # rendered properly.
178 content=f"\n{content}\n",
179 )
180 tab_content_blocks.append(tab_content_block)
181 return "\n".join(tab_content_blocks)
182
183 def generate_nav_bar(self, tab_section: Dict[str, Any]) -> str:
184 li_elements = []
185 for tab in tab_section["tabs"]:
186 tab_key = tab.get("tab_key")
187 tab_label = TAB_SECTION_LABELS.get(tab_key)
188 if tab_label is None:
189 raise ValueError(
190 f"Tab '{tab_key}' is not present in TAB_SECTION_LABELS in zerver/lib/markdown/tabbed_sections.py"
191 )
192
193 li = NAV_LIST_ITEM_TEMPLATE.format(data_tab_key=tab_key, label=tab_label)
194 li_elements.append(li)
195
196 return NAV_BAR_TEMPLATE.format(tabs="\n".join(li_elements))
197
198 def parse_tabs(self, lines: List[str]) -> Optional[Dict[str, Any]]:
199 block: Dict[str, Any] = {}
200 for index, line in enumerate(lines):
201 start_match = START_TABBED_SECTION_REGEX.search(line)
202 if start_match:
203 block["start_tabs_index"] = index
204
205 tab_content_match = TAB_CONTENT_REGEX.search(line)
206 if tab_content_match:
207 block.setdefault("tabs", [])
208 tab = {"start": index, "tab_key": tab_content_match.group(1)}
209 block["tabs"].append(tab)
210
211 end_match = END_TABBED_SECTION_REGEX.search(line)
212 if end_match:
213 block["end_tabs_index"] = index
214 break
215 return block
216
217
218 def makeExtension(*args: Any, **kwargs: str) -> TabbedSectionsGenerator:
219 return TabbedSectionsGenerator(**kwargs)
220
[end of zerver/lib/markdown/tabbed_sections.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/zerver/lib/markdown/tabbed_sections.py b/zerver/lib/markdown/tabbed_sections.py
--- a/zerver/lib/markdown/tabbed_sections.py
+++ b/zerver/lib/markdown/tabbed_sections.py
@@ -46,6 +46,8 @@
"mac": "macOS",
"windows": "Windows",
"linux": "Linux",
+ "most-systems": "Most systems",
+ "linux-with-apt": "Linux with APT",
"python": "Python",
"js": "JavaScript",
"curl": "curl",
|
{"golden_diff": "diff --git a/zerver/lib/markdown/tabbed_sections.py b/zerver/lib/markdown/tabbed_sections.py\n--- a/zerver/lib/markdown/tabbed_sections.py\n+++ b/zerver/lib/markdown/tabbed_sections.py\n@@ -46,6 +46,8 @@\n \"mac\": \"macOS\",\n \"windows\": \"Windows\",\n \"linux\": \"Linux\",\n+ \"most-systems\": \"Most systems\",\n+ \"linux-with-apt\": \"Linux with APT\",\n \"python\": \"Python\",\n \"js\": \"JavaScript\",\n \"curl\": \"curl\",\n", "issue": "Clean up beta app installation instructions\nFollowing up on #28884, we should convert the beta app installation instructions into our standard tabbed instructions format. I'm not sure about the best tab labels. Perhaps something like this?\r\n\r\n- Most systems\r\n- Linux with APT\r\n\n", "before_files": [{"content": "import re\nfrom typing import Any, Dict, List, Mapping, Optional\n\nimport markdown\nfrom markdown.extensions import Extension\nfrom markdown.preprocessors import Preprocessor\nfrom typing_extensions import override\n\nfrom zerver.lib.markdown.priorities import PREPROCESSOR_PRIORITES\n\nSTART_TABBED_SECTION_REGEX = re.compile(r\"^\\{start_tabs\\}$\")\nEND_TABBED_SECTION_REGEX = re.compile(r\"^\\{end_tabs\\}$\")\nTAB_CONTENT_REGEX = re.compile(r\"^\\{tab\\|([^}]+)\\}$\")\n\nTABBED_SECTION_TEMPLATE = \"\"\"\n<div class=\"tabbed-section {tab_class}\" markdown=\"1\">\n{nav_bar}\n<div class=\"blocks\">\n{blocks}\n</div>\n</div>\n\"\"\".strip()\n\nNAV_BAR_TEMPLATE = \"\"\"\n<ul class=\"nav\">\n{tabs}\n</ul>\n\"\"\".strip()\n\nNAV_LIST_ITEM_TEMPLATE = \"\"\"\n<li data-tab-key=\"{data_tab_key}\" tabindex=\"0\">{label}</li>\n\"\"\".strip()\n\nDIV_TAB_CONTENT_TEMPLATE = \"\"\"\n<div data-tab-key=\"{data_tab_key}\" markdown=\"1\">\n{content}\n</div>\n\"\"\".strip()\n\n# If adding new entries here, also check if you need to update\n# tabbed-instructions.js\nTAB_SECTION_LABELS = {\n \"desktop-web\": \"Desktop/Web\",\n \"ios\": \"iOS\",\n \"android\": \"Android\",\n \"mac\": \"macOS\",\n \"windows\": \"Windows\",\n \"linux\": \"Linux\",\n \"python\": \"Python\",\n \"js\": \"JavaScript\",\n \"curl\": \"curl\",\n \"zulip-send\": \"zulip-send\",\n \"web\": \"Web\",\n \"desktop\": \"Desktop\",\n \"mobile\": \"Mobile\",\n \"mm-default\": \"Default installation\",\n \"mm-cloud\": \"Cloud instance\",\n \"mm-docker\": \"Docker\",\n \"mm-gitlab-omnibus\": \"GitLab Omnibus\",\n \"mm-self-hosting-cloud-export\": \"Self hosting (cloud export)\",\n \"require-invitations\": \"Require invitations\",\n \"allow-anyone-to-join\": \"Allow anyone to join\",\n \"restrict-by-email-domain\": \"Restrict by email domain\",\n \"zoom\": \"Zoom\",\n \"jitsi-meet\": \"Jitsi Meet\",\n \"bigbluebutton\": \"BigBlueButton\",\n \"disable\": \"Disabled\",\n \"chrome\": \"Chrome\",\n \"firefox\": \"Firefox\",\n \"desktop-app\": \"Desktop app\",\n \"system-proxy-settings\": \"System proxy settings\",\n \"custom-proxy-settings\": \"Custom proxy settings\",\n \"stream\": \"From a stream view\",\n \"not-stream\": \"From other views\",\n \"via-recent-conversations\": \"Via recent conversations\",\n \"via-inbox-view\": \"Via inbox view\",\n \"via-left-sidebar\": \"Via left sidebar\",\n \"instructions-for-all-platforms\": \"Instructions for all platforms\",\n \"public-streams\": \"Public streams\",\n \"private-streams\": \"Private streams\",\n \"web-public-streams\": \"Web-public streams\",\n \"via-user-card\": \"Via user card\",\n \"via-user-profile\": \"Via user profile\",\n \"via-organization-settings\": \"Via organization settings\",\n \"via-personal-settings\": \"Via 
personal settings\",\n \"via-stream-settings\": \"Via stream settings\",\n \"default-subdomain\": \"Default subdomain\",\n \"custom-subdomain\": \"Custom subdomain\",\n \"zulip-cloud-standard\": \"Zulip Cloud Standard\",\n \"zulip-cloud-plus\": \"Zulip Cloud Plus\",\n \"request-sponsorship\": \"Request sponsorship\",\n \"request-education-pricing\": \"Request education pricing\",\n \"zulip-cloud\": \"Zulip Cloud\",\n \"self-hosting\": \"Self hosting\",\n \"okta\": \"Okta\",\n \"onelogin\": \"OneLogin\",\n \"azuread\": \"AzureAD\",\n \"keycloak\": \"Keycloak\",\n \"auth0\": \"Auth0\",\n \"logged-in\": \"If you are logged in\",\n \"logged-out\": \"If you are logged out\",\n \"user\": \"User\",\n \"bot\": \"Bot\",\n \"on-sign-up\": \"On sign-up\",\n \"via-paste\": \"Via paste\",\n \"via-drag-and-drop\": \"Via drag-and-drop\",\n \"via-markdown\": \"Via Markdown\",\n \"via-compose-box-buttons\": \"Via compose box buttons\",\n \"stream-compose\": \"Compose to a stream\",\n \"dm-compose\": \"Compose a DM\",\n \"v8\": \"Zulip Server 8.0+\",\n \"v6\": \"Zulip Server 6.0+\",\n \"v4\": \"Zulip Server 4.0+\",\n \"all-versions\": \"All versions\",\n \"for-a-bot\": \"For a bot\",\n \"for-yourself\": \"For yourself\",\n}\n\n\nclass TabbedSectionsGenerator(Extension):\n @override\n def extendMarkdown(self, md: markdown.Markdown) -> None:\n md.preprocessors.register(\n TabbedSectionsPreprocessor(md, self.getConfigs()),\n \"tabbed_sections\",\n PREPROCESSOR_PRIORITES[\"tabbed_sections\"],\n )\n\n\nclass TabbedSectionsPreprocessor(Preprocessor):\n def __init__(self, md: markdown.Markdown, config: Mapping[str, Any]) -> None:\n super().__init__(md)\n\n @override\n def run(self, lines: List[str]) -> List[str]:\n tab_section = self.parse_tabs(lines)\n while tab_section:\n if \"tabs\" in tab_section:\n tab_class = \"has-tabs\"\n else:\n tab_class = \"no-tabs\"\n tab_section[\"tabs\"] = [\n {\n \"tab_key\": \"instructions-for-all-platforms\",\n \"start\": tab_section[\"start_tabs_index\"],\n }\n ]\n nav_bar = self.generate_nav_bar(tab_section)\n content_blocks = self.generate_content_blocks(tab_section, lines)\n rendered_tabs = TABBED_SECTION_TEMPLATE.format(\n tab_class=tab_class, nav_bar=nav_bar, blocks=content_blocks\n )\n\n start = tab_section[\"start_tabs_index\"]\n end = tab_section[\"end_tabs_index\"] + 1\n lines = [*lines[:start], rendered_tabs, *lines[end:]]\n tab_section = self.parse_tabs(lines)\n return lines\n\n def generate_content_blocks(self, tab_section: Dict[str, Any], lines: List[str]) -> str:\n tab_content_blocks = []\n for index, tab in enumerate(tab_section[\"tabs\"]):\n start_index = tab[\"start\"] + 1\n try:\n # If there are more tabs, we can use the starting index\n # of the next tab as the ending index of the previous one\n end_index = tab_section[\"tabs\"][index + 1][\"start\"]\n except IndexError:\n # Otherwise, just use the end of the entire section\n end_index = tab_section[\"end_tabs_index\"]\n\n content = \"\\n\".join(lines[start_index:end_index]).strip()\n tab_content_block = DIV_TAB_CONTENT_TEMPLATE.format(\n data_tab_key=tab[\"tab_key\"],\n # Wrapping the content in two newlines is necessary here.\n # If we don't do this, the inner Markdown does not get\n # rendered properly.\n content=f\"\\n{content}\\n\",\n )\n tab_content_blocks.append(tab_content_block)\n return \"\\n\".join(tab_content_blocks)\n\n def generate_nav_bar(self, tab_section: Dict[str, Any]) -> str:\n li_elements = []\n for tab in tab_section[\"tabs\"]:\n tab_key = tab.get(\"tab_key\")\n tab_label = 
TAB_SECTION_LABELS.get(tab_key)\n if tab_label is None:\n raise ValueError(\n f\"Tab '{tab_key}' is not present in TAB_SECTION_LABELS in zerver/lib/markdown/tabbed_sections.py\"\n )\n\n li = NAV_LIST_ITEM_TEMPLATE.format(data_tab_key=tab_key, label=tab_label)\n li_elements.append(li)\n\n return NAV_BAR_TEMPLATE.format(tabs=\"\\n\".join(li_elements))\n\n def parse_tabs(self, lines: List[str]) -> Optional[Dict[str, Any]]:\n block: Dict[str, Any] = {}\n for index, line in enumerate(lines):\n start_match = START_TABBED_SECTION_REGEX.search(line)\n if start_match:\n block[\"start_tabs_index\"] = index\n\n tab_content_match = TAB_CONTENT_REGEX.search(line)\n if tab_content_match:\n block.setdefault(\"tabs\", [])\n tab = {\"start\": index, \"tab_key\": tab_content_match.group(1)}\n block[\"tabs\"].append(tab)\n\n end_match = END_TABBED_SECTION_REGEX.search(line)\n if end_match:\n block[\"end_tabs_index\"] = index\n break\n return block\n\n\ndef makeExtension(*args: Any, **kwargs: str) -> TabbedSectionsGenerator:\n return TabbedSectionsGenerator(**kwargs)\n", "path": "zerver/lib/markdown/tabbed_sections.py"}]}
| 3,076 | 128 |
gh_patches_debug_7229 | rasdani/github-patches | git_diff | docker__docker-py-1150 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Client.build crashes when trying to pull a new image if HttpHeaders are set in config file
``` python
import docker
c = docker.Client()
c.build('https://github.com/docker/compose.git')
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-3-d78c607c9627> in <module>()
----> 1 c.build('https://github.com/docker/compose.git')
/home/joffrey/.envs/pydocker/local/lib/python2.7/site-packages/docker/api/build.pyc in build(self, path, tag, quiet, fileobj, nocache, rm, stream, timeout, custom_context, encoding, pull, forcerm, dockerfile, container_limits, decode, buildargs, gzip)
102 headers=headers,
103 stream=stream,
--> 104 timeout=timeout,
105 )
106
/home/joffrey/.envs/pydocker/local/lib/python2.7/site-packages/docker/utils/decorators.pyc in inner(self, *args, **kwargs)
44 kwargs['headers'] = self._auth_configs['HttpHeaders']
45 else:
---> 46 kwargs['headers'].update(self._auth_configs['HttpHeaders'])
47 return f(self, *args, **kwargs)
48 return inner
AttributeError: 'NoneType' object has no attribute 'update'
```
</issue>
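The traceback suggests that `build()` passes `headers=None` explicitly, so the key is present but holds `None`. A minimal, self-contained illustration of that failure mode and of a falsy-check guard (not docker-py code):

```python
auth_configs = {"HttpHeaders": {"User-Agent": "example"}}  # assumed config-file headers
kwargs = {"headers": None}  # key exists, value is None, as implied by the traceback

# Presence-only check: falls into the else branch and crashes.
try:
    if "headers" not in kwargs:
        kwargs["headers"] = auth_configs["HttpHeaders"]
    else:
        kwargs["headers"].update(auth_configs["HttpHeaders"])
except AttributeError as exc:
    print("reproduced:", exc)  # 'NoneType' object has no attribute 'update'

# Falsy check: treats a missing key and an explicit None the same way.
if not kwargs.get("headers"):
    kwargs["headers"] = auth_configs["HttpHeaders"]
else:
    kwargs["headers"].update(auth_configs["HttpHeaders"])
print(kwargs["headers"])  # {'User-Agent': 'example'}
```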
<code>
[start of docker/utils/decorators.py]
1 import functools
2
3 from .. import errors
4 from . import utils
5
6
7 def check_resource(f):
8 @functools.wraps(f)
9 def wrapped(self, resource_id=None, *args, **kwargs):
10 if resource_id is None:
11 if kwargs.get('container'):
12 resource_id = kwargs.pop('container')
13 elif kwargs.get('image'):
14 resource_id = kwargs.pop('image')
15 if isinstance(resource_id, dict):
16 resource_id = resource_id.get('Id')
17 if not resource_id:
18 raise errors.NullResource(
19 'image or container param is undefined'
20 )
21 return f(self, resource_id, *args, **kwargs)
22 return wrapped
23
24
25 def minimum_version(version):
26 def decorator(f):
27 @functools.wraps(f)
28 def wrapper(self, *args, **kwargs):
29 if utils.version_lt(self._version, version):
30 raise errors.InvalidVersion(
31 '{0} is not available for version < {1}'.format(
32 f.__name__, version
33 )
34 )
35 return f(self, *args, **kwargs)
36 return wrapper
37 return decorator
38
39
40 def update_headers(f):
41 def inner(self, *args, **kwargs):
42 if 'HttpHeaders' in self._auth_configs:
43 if 'headers' not in kwargs:
44 kwargs['headers'] = self._auth_configs['HttpHeaders']
45 else:
46 kwargs['headers'].update(self._auth_configs['HttpHeaders'])
47 return f(self, *args, **kwargs)
48 return inner
49
[end of docker/utils/decorators.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/docker/utils/decorators.py b/docker/utils/decorators.py
--- a/docker/utils/decorators.py
+++ b/docker/utils/decorators.py
@@ -40,7 +40,7 @@
def update_headers(f):
def inner(self, *args, **kwargs):
if 'HttpHeaders' in self._auth_configs:
- if 'headers' not in kwargs:
+ if not kwargs.get('headers'):
kwargs['headers'] = self._auth_configs['HttpHeaders']
else:
kwargs['headers'].update(self._auth_configs['HttpHeaders'])
|
{"golden_diff": "diff --git a/docker/utils/decorators.py b/docker/utils/decorators.py\n--- a/docker/utils/decorators.py\n+++ b/docker/utils/decorators.py\n@@ -40,7 +40,7 @@\n def update_headers(f):\n def inner(self, *args, **kwargs):\n if 'HttpHeaders' in self._auth_configs:\n- if 'headers' not in kwargs:\n+ if not kwargs.get('headers'):\n kwargs['headers'] = self._auth_configs['HttpHeaders']\n else:\n kwargs['headers'].update(self._auth_configs['HttpHeaders'])\n", "issue": "Client.build crashes when trying to pull a new image if HttpHeaders are set in config file\n``` python\n\nimport docker\n\nc = docker.Client()\nc.build('https://github.com/docker/compose.git')\n---------------------------------------------------------------------------\nAttributeError Traceback (most recent call last)\n<ipython-input-3-d78c607c9627> in <module>()\n----> 1 c.build('https://github.com/docker/compose.git')\n\n/home/joffrey/.envs/pydocker/local/lib/python2.7/site-packages/docker/api/build.pyc in build(self, path, tag, quiet, fileobj, nocache, rm, stream, timeout, custom_context, encoding, pull, forcerm, dockerfile, container_limits, decode, buildargs, gzip)\n 102 headers=headers,\n 103 stream=stream,\n--> 104 timeout=timeout,\n 105 )\n 106 \n\n/home/joffrey/.envs/pydocker/local/lib/python2.7/site-packages/docker/utils/decorators.pyc in inner(self, *args, **kwargs)\n 44 kwargs['headers'] = self._auth_configs['HttpHeaders']\n 45 else:\n---> 46 kwargs['headers'].update(self._auth_configs['HttpHeaders'])\n 47 return f(self, *args, **kwargs)\n 48 return inner\n\nAttributeError: 'NoneType' object has no attribute 'update'\n```\n\n", "before_files": [{"content": "import functools\n\nfrom .. import errors\nfrom . import utils\n\n\ndef check_resource(f):\n @functools.wraps(f)\n def wrapped(self, resource_id=None, *args, **kwargs):\n if resource_id is None:\n if kwargs.get('container'):\n resource_id = kwargs.pop('container')\n elif kwargs.get('image'):\n resource_id = kwargs.pop('image')\n if isinstance(resource_id, dict):\n resource_id = resource_id.get('Id')\n if not resource_id:\n raise errors.NullResource(\n 'image or container param is undefined'\n )\n return f(self, resource_id, *args, **kwargs)\n return wrapped\n\n\ndef minimum_version(version):\n def decorator(f):\n @functools.wraps(f)\n def wrapper(self, *args, **kwargs):\n if utils.version_lt(self._version, version):\n raise errors.InvalidVersion(\n '{0} is not available for version < {1}'.format(\n f.__name__, version\n )\n )\n return f(self, *args, **kwargs)\n return wrapper\n return decorator\n\n\ndef update_headers(f):\n def inner(self, *args, **kwargs):\n if 'HttpHeaders' in self._auth_configs:\n if 'headers' not in kwargs:\n kwargs['headers'] = self._auth_configs['HttpHeaders']\n else:\n kwargs['headers'].update(self._auth_configs['HttpHeaders'])\n return f(self, *args, **kwargs)\n return inner\n", "path": "docker/utils/decorators.py"}]}
| 1,268 | 126 |
gh_patches_debug_13723 | rasdani/github-patches | git_diff | easybuilders__easybuild-easyblocks-1464 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
CUDA stubs path missing
In order for the NVML and other libraries to be found, one needs to add
$CUDA_ROOT/lib64/stubs to $LIBRARY_PATH (not $LD_LIBRARY_PATH)
</issue>
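The stub libraries under `lib64/stubs` (for example a stub `libnvidia-ml.so`) exist only to satisfy the linker on machines without a driver, so they belong on the link-time `$LIBRARY_PATH` rather than the run-time `$LD_LIBRARY_PATH`. A minimal sketch of how the module-environment guesses could express this, assuming the `guesses` dict built in `make_module_req_guess()` in the file quoted below:

```python
guesses = {}  # stands in for the dict built in make_module_req_guess()

# Link-time search path gains the stub libraries; run-time entries stay as-is.
guesses.update({
    'LIBRARY_PATH': ['lib64', 'lib64/stubs'],
})
```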
<code>
[start of easybuild/easyblocks/c/cuda.py]
1 ##
2 # This file is an EasyBuild reciPY as per https://github.com/easybuilders/easybuild
3 #
4 # Copyright:: Copyright 2012-2018 Cyprus Institute / CaSToRC, Uni.Lu, NTUA, Ghent University, Forschungszentrum Juelich GmbH
5 # Authors:: George Tsouloupas <[email protected]>, Fotis Georgatos <[email protected]>, Kenneth Hoste, Damian Alvarez
6 # License:: MIT/GPL
7 # $Id$
8 #
9 # This work implements a part of the HPCBIOS project and is a component of the policy:
10 # http://hpcbios.readthedocs.org/en/latest/HPCBIOS_2012-99.html
11 ##
12 """
13 EasyBuild support for CUDA, implemented as an easyblock
14
15 Ref: https://speakerdeck.com/ajdecon/introduction-to-the-cuda-toolkit-for-building-applications
16
17 @author: George Tsouloupas (Cyprus Institute)
18 @author: Fotis Georgatos (Uni.lu)
19 @author: Kenneth Hoste (Ghent University)
20 @author: Damian Alvarez (Forschungszentrum Juelich)
21 """
22 import os
23 import stat
24
25 from distutils.version import LooseVersion
26
27 from easybuild.easyblocks.generic.binary import Binary
28 from easybuild.framework.easyconfig import CUSTOM
29 from easybuild.tools.filetools import adjust_permissions, patch_perl_script_autoflush, write_file
30 from easybuild.tools.run import run_cmd, run_cmd_qa
31 from easybuild.tools.systemtools import get_shared_lib_ext
32
33 # Wrapper script definition
34 WRAPPER_TEMPLATE = """#!/bin/sh
35 echo "$@" | grep -e '-ccbin' -e '--compiler-bindir' > /dev/null
36 if [ $? -eq 0 ];
37 then
38 echo "ERROR: do not set -ccbin or --compiler-bindir when using the `basename $0` wrapper"
39 else
40 nvcc -ccbin=%s "$@"
41 exit $?
42 fi """
43
44 class EB_CUDA(Binary):
45 """
46 Support for installing CUDA.
47 """
48
49 @staticmethod
50 def extra_options():
51 """Create a set of wrappers based on a list determined by the easyconfig file"""
52 extra_vars = {
53 'host_compilers': [None, "Host compilers for which a wrapper will be generated", CUSTOM]
54 }
55 return Binary.extra_options(extra_vars)
56
57 def extract_step(self):
58 """Extract installer to have more control, e.g. options, patching Perl scripts, etc."""
59 execpath = self.src[0]['path']
60 run_cmd("/bin/sh " + execpath + " --noexec --nox11 --target " + self.builddir)
61 self.src[0]['finalpath'] = self.builddir
62
63 def install_step(self):
64 """Install CUDA using Perl install script."""
65
66 # define how to run the installer
67 # script has /usr/bin/perl hardcoded, but we want to have control over which perl is being used
68 if LooseVersion(self.version) <= LooseVersion("5"):
69 install_script = "install-linux.pl"
70 self.cfg.update('installopts', '--prefix=%s' % self.installdir)
71 else:
72 install_script = "cuda-installer.pl"
73 # note: also including samples (via "-samplespath=%(installdir)s -samples") would require libglut
74 self.cfg.update('installopts', "-verbose -silent -toolkitpath=%s -toolkit" % self.installdir)
75
76 cmd = "%(preinstallopts)s perl ./%(script)s %(installopts)s" % {
77 'preinstallopts': self.cfg['preinstallopts'],
78 'script': install_script,
79 'installopts': self.cfg['installopts']
80 }
81
82 # prepare for running install script autonomously
83 qanda = {}
84 stdqa = {
85 # this question is only asked if CUDA tools are already available system-wide
86 r"Would you like to remove all CUDA files under .*? (yes/no/abort): ": "no",
87 }
88 noqanda = [
89 r"^Configuring",
90 r"Installation Complete",
91 r"Verifying archive integrity.*",
92 r"^Uncompressing NVIDIA CUDA",
93 r".* -> .*",
94 ]
95
96 # patch install script to handle Q&A autonomously
97 patch_perl_script_autoflush(os.path.join(self.builddir, install_script))
98
99 # make sure $DISPLAY is not defined, which may lead to (weird) problems
100 # this is workaround for not being able to specify --nox11 to the Perl install scripts
101 if 'DISPLAY' in os.environ:
102 os.environ.pop('DISPLAY')
103
104 #overriding maxhits default value to 300 (300s wait for nothing to change in the output without seeing a known question)
105 run_cmd_qa(cmd, qanda, std_qa=stdqa, no_qa=noqanda, log_all=True, simple=True, maxhits=300)
106
107 def post_install_step(self):
108 """Create wrappers for the specified host compilers"""
109 def create_wrapper(wrapper_name, wrapper_comp):
110 """Create for a particular compiler, with a particular name"""
111 wrapper_f = os.path.join(self.installdir, 'bin', wrapper_name)
112 write_file(wrapper_f, WRAPPER_TEMPLATE % wrapper_comp)
113 adjust_permissions(wrapper_f, stat.S_IXUSR|stat.S_IRUSR|stat.S_IXGRP|stat.S_IRGRP|stat.S_IXOTH|stat.S_IROTH)
114
115 # Prepare wrappers to handle a default host compiler other than g++
116 for comp in (self.cfg['host_compilers'] or []):
117 create_wrapper('nvcc_%s' % comp, comp)
118
119 super(EB_CUDA, self).post_install_step()
120
121 def sanity_check_step(self):
122 """Custom sanity check for CUDA."""
123 shlib_ext = get_shared_lib_ext()
124
125 chk_libdir = ["lib64"]
126
127 # Versions higher than 6 do not provide 32 bit libraries
128 if LooseVersion(self.version) < LooseVersion("6"):
129 chk_libdir += ["lib"]
130
131 custom_paths = {
132 'files': ["bin/%s" % x for x in ["fatbinary", "nvcc", "nvlink", "ptxas"]] +
133 ["%s/lib%s.%s" % (x, y, shlib_ext) for x in chk_libdir for y in ["cublas", "cudart", "cufft",
134 "curand", "cusparse"]],
135 'dirs': ["include"],
136 }
137
138 if LooseVersion(self.version) < LooseVersion('7'):
139 custom_paths['files'].append('open64/bin/nvopencc')
140 if LooseVersion(self.version) >= LooseVersion('7'):
141 custom_paths['files'].append("extras/CUPTI/lib64/libcupti.%s" % shlib_ext)
142 custom_paths['dirs'].append("extras/CUPTI/include")
143
144
145 super(EB_CUDA, self).sanity_check_step(custom_paths=custom_paths)
146
147 def make_module_req_guess(self):
148 """Specify CUDA custom values for PATH etc."""
149
150 guesses = super(EB_CUDA, self).make_module_req_guess()
151
152 # The dirs should be in the order ['open64/bin', 'bin']
153 bin_path = []
154 if LooseVersion(self.version) < LooseVersion('7'):
155 bin_path.append('open64/bin')
156 bin_path.append('bin')
157
158 lib_path = ['lib64']
159 inc_path = ['include']
160 if LooseVersion(self.version) >= LooseVersion('7'):
161 lib_path.append('extras/CUPTI/lib64')
162 inc_path.append('extras/CUPTI/include')
163
164 guesses.update({
165 'PATH': bin_path,
166 'LD_LIBRARY_PATH': lib_path,
167 'CPATH': inc_path,
168 'CUDA_HOME': [''],
169 'CUDA_ROOT': [''],
170 'CUDA_PATH': [''],
171 })
172
173 return guesses
174
[end of easybuild/easyblocks/c/cuda.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/easybuild/easyblocks/c/cuda.py b/easybuild/easyblocks/c/cuda.py
--- a/easybuild/easyblocks/c/cuda.py
+++ b/easybuild/easyblocks/c/cuda.py
@@ -160,10 +160,14 @@
if LooseVersion(self.version) >= LooseVersion('7'):
lib_path.append('extras/CUPTI/lib64')
inc_path.append('extras/CUPTI/include')
+ bin_path.append('nvvm/bin')
+ lib_path.append('nvvm/lib64')
+ inc_path.append('nvvm/include')
guesses.update({
'PATH': bin_path,
'LD_LIBRARY_PATH': lib_path,
+ 'LIBRARY_PATH': ['lib64', 'lib64/stubs'],
'CPATH': inc_path,
'CUDA_HOME': [''],
'CUDA_ROOT': [''],
|
{"golden_diff": "diff --git a/easybuild/easyblocks/c/cuda.py b/easybuild/easyblocks/c/cuda.py\n--- a/easybuild/easyblocks/c/cuda.py\n+++ b/easybuild/easyblocks/c/cuda.py\n@@ -160,10 +160,14 @@\n if LooseVersion(self.version) >= LooseVersion('7'):\n lib_path.append('extras/CUPTI/lib64')\n inc_path.append('extras/CUPTI/include')\n+ bin_path.append('nvvm/bin')\n+ lib_path.append('nvvm/lib64')\n+ inc_path.append('nvvm/include')\n \n guesses.update({\n 'PATH': bin_path,\n 'LD_LIBRARY_PATH': lib_path,\n+ 'LIBRARY_PATH': ['lib64', 'lib64/stubs'],\n 'CPATH': inc_path,\n 'CUDA_HOME': [''],\n 'CUDA_ROOT': [''],\n", "issue": "CUDA stubs path missing\nIn order for the NVML and other libraries to be found, one needs to add \r\n$CUDA_ROOT/lib64/stubs to $LIBRARY_PATH (not $LD_LIBRARY_PATH)\r\n\r\n\n", "before_files": [{"content": "##\n# This file is an EasyBuild reciPY as per https://github.com/easybuilders/easybuild\n#\n# Copyright:: Copyright 2012-2018 Cyprus Institute / CaSToRC, Uni.Lu, NTUA, Ghent University, Forschungszentrum Juelich GmbH\n# Authors:: George Tsouloupas <[email protected]>, Fotis Georgatos <[email protected]>, Kenneth Hoste, Damian Alvarez\n# License:: MIT/GPL\n# $Id$\n#\n# This work implements a part of the HPCBIOS project and is a component of the policy:\n# http://hpcbios.readthedocs.org/en/latest/HPCBIOS_2012-99.html\n##\n\"\"\"\nEasyBuild support for CUDA, implemented as an easyblock\n\nRef: https://speakerdeck.com/ajdecon/introduction-to-the-cuda-toolkit-for-building-applications\n\n@author: George Tsouloupas (Cyprus Institute)\n@author: Fotis Georgatos (Uni.lu)\n@author: Kenneth Hoste (Ghent University)\n@author: Damian Alvarez (Forschungszentrum Juelich)\n\"\"\"\nimport os\nimport stat\n\nfrom distutils.version import LooseVersion\n\nfrom easybuild.easyblocks.generic.binary import Binary\nfrom easybuild.framework.easyconfig import CUSTOM\nfrom easybuild.tools.filetools import adjust_permissions, patch_perl_script_autoflush, write_file\nfrom easybuild.tools.run import run_cmd, run_cmd_qa\nfrom easybuild.tools.systemtools import get_shared_lib_ext\n\n# Wrapper script definition\nWRAPPER_TEMPLATE = \"\"\"#!/bin/sh\necho \"$@\" | grep -e '-ccbin' -e '--compiler-bindir' > /dev/null\nif [ $? -eq 0 ];\nthen\n echo \"ERROR: do not set -ccbin or --compiler-bindir when using the `basename $0` wrapper\"\nelse\n nvcc -ccbin=%s \"$@\"\n exit $?\nfi \"\"\"\n\nclass EB_CUDA(Binary):\n \"\"\"\n Support for installing CUDA.\n \"\"\"\n\n @staticmethod\n def extra_options():\n \"\"\"Create a set of wrappers based on a list determined by the easyconfig file\"\"\"\n extra_vars = {\n 'host_compilers': [None, \"Host compilers for which a wrapper will be generated\", CUSTOM]\n }\n return Binary.extra_options(extra_vars)\n\n def extract_step(self):\n \"\"\"Extract installer to have more control, e.g. 
options, patching Perl scripts, etc.\"\"\"\n execpath = self.src[0]['path']\n run_cmd(\"/bin/sh \" + execpath + \" --noexec --nox11 --target \" + self.builddir)\n self.src[0]['finalpath'] = self.builddir\n\n def install_step(self):\n \"\"\"Install CUDA using Perl install script.\"\"\"\n\n # define how to run the installer\n # script has /usr/bin/perl hardcoded, but we want to have control over which perl is being used\n if LooseVersion(self.version) <= LooseVersion(\"5\"):\n install_script = \"install-linux.pl\"\n self.cfg.update('installopts', '--prefix=%s' % self.installdir)\n else:\n install_script = \"cuda-installer.pl\"\n # note: also including samples (via \"-samplespath=%(installdir)s -samples\") would require libglut\n self.cfg.update('installopts', \"-verbose -silent -toolkitpath=%s -toolkit\" % self.installdir)\n\n cmd = \"%(preinstallopts)s perl ./%(script)s %(installopts)s\" % {\n 'preinstallopts': self.cfg['preinstallopts'],\n 'script': install_script,\n 'installopts': self.cfg['installopts']\n }\n\n # prepare for running install script autonomously\n qanda = {}\n stdqa = {\n # this question is only asked if CUDA tools are already available system-wide\n r\"Would you like to remove all CUDA files under .*? (yes/no/abort): \": \"no\",\n }\n noqanda = [\n r\"^Configuring\",\n r\"Installation Complete\",\n r\"Verifying archive integrity.*\",\n r\"^Uncompressing NVIDIA CUDA\",\n r\".* -> .*\",\n ]\n\n # patch install script to handle Q&A autonomously\n patch_perl_script_autoflush(os.path.join(self.builddir, install_script))\n\n # make sure $DISPLAY is not defined, which may lead to (weird) problems\n # this is workaround for not being able to specify --nox11 to the Perl install scripts\n if 'DISPLAY' in os.environ:\n os.environ.pop('DISPLAY')\n\n #overriding maxhits default value to 300 (300s wait for nothing to change in the output without seeing a known question)\n run_cmd_qa(cmd, qanda, std_qa=stdqa, no_qa=noqanda, log_all=True, simple=True, maxhits=300)\n\n def post_install_step(self):\n \"\"\"Create wrappers for the specified host compilers\"\"\"\n def create_wrapper(wrapper_name, wrapper_comp):\n \"\"\"Create for a particular compiler, with a particular name\"\"\"\n wrapper_f = os.path.join(self.installdir, 'bin', wrapper_name)\n write_file(wrapper_f, WRAPPER_TEMPLATE % wrapper_comp)\n adjust_permissions(wrapper_f, stat.S_IXUSR|stat.S_IRUSR|stat.S_IXGRP|stat.S_IRGRP|stat.S_IXOTH|stat.S_IROTH)\n\n # Prepare wrappers to handle a default host compiler other than g++\n for comp in (self.cfg['host_compilers'] or []):\n create_wrapper('nvcc_%s' % comp, comp)\n\n super(EB_CUDA, self).post_install_step()\n\n def sanity_check_step(self):\n \"\"\"Custom sanity check for CUDA.\"\"\"\n shlib_ext = get_shared_lib_ext()\n\n chk_libdir = [\"lib64\"]\n\n # Versions higher than 6 do not provide 32 bit libraries\n if LooseVersion(self.version) < LooseVersion(\"6\"):\n chk_libdir += [\"lib\"]\n\n custom_paths = {\n 'files': [\"bin/%s\" % x for x in [\"fatbinary\", \"nvcc\", \"nvlink\", \"ptxas\"]] +\n [\"%s/lib%s.%s\" % (x, y, shlib_ext) for x in chk_libdir for y in [\"cublas\", \"cudart\", \"cufft\",\n \"curand\", \"cusparse\"]],\n 'dirs': [\"include\"],\n }\n\n if LooseVersion(self.version) < LooseVersion('7'):\n custom_paths['files'].append('open64/bin/nvopencc')\n if LooseVersion(self.version) >= LooseVersion('7'):\n custom_paths['files'].append(\"extras/CUPTI/lib64/libcupti.%s\" % shlib_ext)\n custom_paths['dirs'].append(\"extras/CUPTI/include\")\n\n\n super(EB_CUDA, 
self).sanity_check_step(custom_paths=custom_paths)\n\n def make_module_req_guess(self):\n \"\"\"Specify CUDA custom values for PATH etc.\"\"\"\n\n guesses = super(EB_CUDA, self).make_module_req_guess()\n\n # The dirs should be in the order ['open64/bin', 'bin']\n bin_path = []\n if LooseVersion(self.version) < LooseVersion('7'):\n bin_path.append('open64/bin')\n bin_path.append('bin')\n\n lib_path = ['lib64']\n inc_path = ['include']\n if LooseVersion(self.version) >= LooseVersion('7'):\n lib_path.append('extras/CUPTI/lib64')\n inc_path.append('extras/CUPTI/include')\n\n guesses.update({\n 'PATH': bin_path,\n 'LD_LIBRARY_PATH': lib_path,\n 'CPATH': inc_path,\n 'CUDA_HOME': [''],\n 'CUDA_ROOT': [''],\n 'CUDA_PATH': [''],\n })\n\n return guesses\n", "path": "easybuild/easyblocks/c/cuda.py"}]}
| 2,752 | 201 |
gh_patches_debug_29587
|
rasdani/github-patches
|
git_diff
|
cocotb__cocotb-2198
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Use setuptools_scm for generating a version number
[setuptools_scm](https://github.com/pypa/setuptools_scm/) can automatically generate version numbers based on git tags and versioning information. This will allow us to exactly identify which commit a user is reporting an issue with if it is not a released version. It will also remove one step of the release process.
</issue>
<code>
[start of setup.py]
1 #!/usr/bin/env python
2 ###############################################################################
3 # Copyright (c) 2013 Potential Ventures Ltd
4 # Copyright (c) 2013 SolarFlare Communications Inc
5 # All rights reserved.
6 #
7 # Redistribution and use in source and binary forms, with or without
8 # modification, are permitted provided that the following conditions are met:
9 # * Redistributions of source code must retain the above copyright
10 # notice, this list of conditions and the following disclaimer.
11 # * Redistributions in binary form must reproduce the above copyright
12 # notice, this list of conditions and the following disclaimer in the
13 # documentation and/or other materials provided with the distribution.
14 # * Neither the name of Potential Ventures Ltd,
15 # SolarFlare Communications Inc nor the
16 # names of its contributors may be used to endorse or promote products
17 # derived from this software without specific prior written permission.
18 #
19 # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
20 # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
21 # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22 # DISCLAIMED. IN NO EVENT SHALL POTENTIAL VENTURES LTD BE LIABLE FOR ANY
23 # DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
24 # (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
25 # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
26 # ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27 # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
28 # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 ###############################################################################
30
31 import sys
32 if sys.version_info[:2] < (3, 5):
33 msg = [
34 "This version of cocotb requires at least Python 3.5,",
35 "you are running Python %d.%d.%d." % (
36 sys.version_info[0], sys.version_info[1], sys.version_info[2])
37 ]
38 if sys.version_info[0] == 2:
39 msg += [
40 "If you have Python 3 installed on your machine try ",
41 "using 'python3 -m pip' instead of 'pip' to install cocotb."
42 ]
43 msg += [
44 "For more information please refer to the documentation at ",
45 "https://cocotb.readthedocs.io."
46 ]
47
48 raise SystemExit("\n".join(msg))
49
50 import logging
51 from setuptools import setup
52 from setuptools import find_packages
53 from os import path, walk
54 from io import StringIO
55
56 # Note: cocotb is not installed properly yet and is missing dependencies and binaries
57 # We can still import other files next to setup.py, as long as they're in MANIFEST.in
58 # The below line is necessary for PEP517 support
59 sys.path.append(path.dirname(__file__))
60 from cocotb_build_libs import get_ext, build_ext
61
62
63 def read_file(fname):
64 with open(path.join(path.dirname(__file__), fname), encoding='utf8') as f:
65 return f.read()
66
67
68 def package_files(directory):
69 paths = []
70 for (fpath, directories, filenames) in walk(directory):
71 for filename in filenames:
72 paths.append(path.join('..', fpath, filename))
73 return paths
74
75
76 # this sets the __version__ variable
77 exec(read_file(path.join('cocotb', '_version.py')))
78
79 # store log from build_libs and display at the end in verbose mode
80 # see https://github.com/pypa/pip/issues/6634
81 log_stream = StringIO()
82 handler = logging.StreamHandler(log_stream)
83 log = logging.getLogger("cocotb._build_libs")
84 log.setLevel(logging.INFO)
85 log.addHandler(handler)
86
87 setup(
88 name='cocotb',
89 cmdclass={'build_ext': build_ext},
90 version=__version__, # noqa: F821
91 description='cocotb is a coroutine based cosimulation library for writing VHDL and Verilog testbenches in Python.',
92 url='https://docs.cocotb.org',
93 license='BSD',
94 long_description=read_file('README.md'),
95 long_description_content_type='text/markdown',
96 author='Chris Higgs, Stuart Hodgson',
97 maintainer='cocotb contributors',
98 maintainer_email='[email protected]',
99 install_requires=[],
100 python_requires='>=3.5',
101 packages=find_packages(),
102 package_data={
103 'cocotb': (
104 package_files('cocotb/share/makefiles') + # noqa: W504
105 package_files('cocotb/share/include') + # noqa: W504
106 package_files('cocotb/share/def') +
107 package_files('cocotb/share/lib/verilator')
108 )
109 },
110 ext_modules=get_ext(),
111 entry_points={
112 'console_scripts': [
113 'cocotb-config=cocotb.config:main',
114 ]
115 },
116 platforms='any',
117 classifiers=[
118 "Programming Language :: Python :: 3",
119 "Programming Language :: Python :: 3.5",
120 "Programming Language :: Python :: 3.6",
121 "Programming Language :: Python :: 3.7",
122 "Programming Language :: Python :: 3.8",
123 "Programming Language :: Python :: 3.9",
124 "License :: OSI Approved :: BSD License",
125 "Topic :: Scientific/Engineering :: Electronic Design Automation (EDA)",
126 ],
127
128 # these appear in the sidebar on PyPI
129 project_urls={
130 "Bug Tracker": "https://github.com/cocotb/cocotb/issues",
131 "Source Code": "https://github.com/cocotb/cocotb",
132 "Documentation": "https://docs.cocotb.org",
133 },
134 )
135
136 print(log_stream.getvalue())
137
[end of setup.py]
[start of cocotb/_version.py]
1 # Package versioning solution originally found here:
2 # http://stackoverflow.com/q/458550
3
4 # Store the version here so:
5 # 1) we don't load dependencies by storing it in __init__.py
6 # 2) we can import it in setup.py for the same reason
7 # 3) we can import it into your module
8 __version__ = '1.5.0.dev0'
9
[end of cocotb/_version.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/cocotb/_version.py b/cocotb/_version.py
deleted file mode 100644
--- a/cocotb/_version.py
+++ /dev/null
@@ -1,8 +0,0 @@
-# Package versioning solution originally found here:
-# http://stackoverflow.com/q/458550
-
-# Store the version here so:
-# 1) we don't load dependencies by storing it in __init__.py
-# 2) we can import it in setup.py for the same reason
-# 3) we can import it into your module
-__version__ = '1.5.0.dev0'
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -73,9 +73,6 @@
return paths
-# this sets the __version__ variable
-exec(read_file(path.join('cocotb', '_version.py')))
-
# store log from build_libs and display at the end in verbose mode
# see https://github.com/pypa/pip/issues/6634
log_stream = StringIO()
@@ -87,7 +84,11 @@
setup(
name='cocotb',
cmdclass={'build_ext': build_ext},
- version=__version__, # noqa: F821
+ use_scm_version=dict(
+ write_to='cocotb/_version.py',
+ write_to_template='__version__ = {version!r}',
+ version_scheme='release-branch-semver'
+ ),
description='cocotb is a coroutine based cosimulation library for writing VHDL and Verilog testbenches in Python.',
url='https://docs.cocotb.org',
license='BSD',
@@ -96,6 +97,7 @@
author='Chris Higgs, Stuart Hodgson',
maintainer='cocotb contributors',
maintainer_email='[email protected]',
+ setup_requires=['setuptools_scm'],
install_requires=[],
python_requires='>=3.5',
packages=find_packages(),
|
{"golden_diff": "diff --git a/cocotb/_version.py b/cocotb/_version.py\ndeleted file mode 100644\n--- a/cocotb/_version.py\n+++ /dev/null\n@@ -1,8 +0,0 @@\n-# Package versioning solution originally found here:\n-# http://stackoverflow.com/q/458550\n-\n-# Store the version here so:\n-# 1) we don't load dependencies by storing it in __init__.py\n-# 2) we can import it in setup.py for the same reason\n-# 3) we can import it into your module\n-__version__ = '1.5.0.dev0'\ndiff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -73,9 +73,6 @@\n return paths\n \n \n-# this sets the __version__ variable\n-exec(read_file(path.join('cocotb', '_version.py')))\n-\n # store log from build_libs and display at the end in verbose mode\n # see https://github.com/pypa/pip/issues/6634\n log_stream = StringIO()\n@@ -87,7 +84,11 @@\n setup(\n name='cocotb',\n cmdclass={'build_ext': build_ext},\n- version=__version__, # noqa: F821\n+ use_scm_version=dict(\n+ write_to='cocotb/_version.py',\n+ write_to_template='__version__ = {version!r}',\n+ version_scheme='release-branch-semver'\n+ ),\n description='cocotb is a coroutine based cosimulation library for writing VHDL and Verilog testbenches in Python.',\n url='https://docs.cocotb.org',\n license='BSD',\n@@ -96,6 +97,7 @@\n author='Chris Higgs, Stuart Hodgson',\n maintainer='cocotb contributors',\n maintainer_email='[email protected]',\n+ setup_requires=['setuptools_scm'],\n install_requires=[],\n python_requires='>=3.5',\n packages=find_packages(),\n", "issue": "Use setuptools_scm for generating a version number\n[setuptools_scm](https://github.com/pypa/setuptools_scm/) can automatically generate version numbers based on git tags and versioning information. This will allow us to exactly identify which commit a user is reporting an issue with if it is not a released version. It will also remove one step of the release process.\n", "before_files": [{"content": "#!/usr/bin/env python\n###############################################################################\n# Copyright (c) 2013 Potential Ventures Ltd\n# Copyright (c) 2013 SolarFlare Communications Inc\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n# * Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# * Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n# * Neither the name of Potential Ventures Ltd,\n# SolarFlare Communications Inc nor the\n# names of its contributors may be used to endorse or promote products\n# derived from this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\n# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. 
IN NO EVENT SHALL POTENTIAL VENTURES LTD BE LIABLE FOR ANY\n# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n###############################################################################\n\nimport sys\nif sys.version_info[:2] < (3, 5):\n msg = [\n \"This version of cocotb requires at least Python 3.5,\",\n \"you are running Python %d.%d.%d.\" % (\n sys.version_info[0], sys.version_info[1], sys.version_info[2])\n ]\n if sys.version_info[0] == 2:\n msg += [\n \"If you have Python 3 installed on your machine try \",\n \"using 'python3 -m pip' instead of 'pip' to install cocotb.\"\n ]\n msg += [\n \"For more information please refer to the documentation at \",\n \"https://cocotb.readthedocs.io.\"\n ]\n\n raise SystemExit(\"\\n\".join(msg))\n\nimport logging\nfrom setuptools import setup\nfrom setuptools import find_packages\nfrom os import path, walk\nfrom io import StringIO\n\n# Note: cocotb is not installed properly yet and is missing dependencies and binaries\n# We can still import other files next to setup.py, as long as they're in MANIFEST.in\n# The below line is necessary for PEP517 support\nsys.path.append(path.dirname(__file__))\nfrom cocotb_build_libs import get_ext, build_ext\n\n\ndef read_file(fname):\n with open(path.join(path.dirname(__file__), fname), encoding='utf8') as f:\n return f.read()\n\n\ndef package_files(directory):\n paths = []\n for (fpath, directories, filenames) in walk(directory):\n for filename in filenames:\n paths.append(path.join('..', fpath, filename))\n return paths\n\n\n# this sets the __version__ variable\nexec(read_file(path.join('cocotb', '_version.py')))\n\n# store log from build_libs and display at the end in verbose mode\n# see https://github.com/pypa/pip/issues/6634\nlog_stream = StringIO()\nhandler = logging.StreamHandler(log_stream)\nlog = logging.getLogger(\"cocotb._build_libs\")\nlog.setLevel(logging.INFO)\nlog.addHandler(handler)\n\nsetup(\n name='cocotb',\n cmdclass={'build_ext': build_ext},\n version=__version__, # noqa: F821\n description='cocotb is a coroutine based cosimulation library for writing VHDL and Verilog testbenches in Python.',\n url='https://docs.cocotb.org',\n license='BSD',\n long_description=read_file('README.md'),\n long_description_content_type='text/markdown',\n author='Chris Higgs, Stuart Hodgson',\n maintainer='cocotb contributors',\n maintainer_email='[email protected]',\n install_requires=[],\n python_requires='>=3.5',\n packages=find_packages(),\n package_data={\n 'cocotb': (\n package_files('cocotb/share/makefiles') + # noqa: W504\n package_files('cocotb/share/include') + # noqa: W504\n package_files('cocotb/share/def') +\n package_files('cocotb/share/lib/verilator')\n )\n },\n ext_modules=get_ext(),\n entry_points={\n 'console_scripts': [\n 'cocotb-config=cocotb.config:main',\n ]\n },\n platforms='any',\n classifiers=[\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"License :: OSI Approved :: 
BSD License\",\n \"Topic :: Scientific/Engineering :: Electronic Design Automation (EDA)\",\n ],\n\n # these appear in the sidebar on PyPI\n project_urls={\n \"Bug Tracker\": \"https://github.com/cocotb/cocotb/issues\",\n \"Source Code\": \"https://github.com/cocotb/cocotb\",\n \"Documentation\": \"https://docs.cocotb.org\",\n },\n)\n\nprint(log_stream.getvalue())\n", "path": "setup.py"}, {"content": "# Package versioning solution originally found here:\n# http://stackoverflow.com/q/458550\n\n# Store the version here so:\n# 1) we don't load dependencies by storing it in __init__.py\n# 2) we can import it in setup.py for the same reason\n# 3) we can import it into your module\n__version__ = '1.5.0.dev0'\n", "path": "cocotb/_version.py"}]}
| 2,275 | 469 |
gh_patches_debug_32041
|
rasdani/github-patches
|
git_diff
|
nautobot__nautobot-5398
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Job Buttons do not honor the Job task_queue
<!--
NOTE: IF YOUR ISSUE DOES NOT FOLLOW THIS TEMPLATE, IT WILL BE CLOSED.
This form is only for reporting reproducible bugs. If you need assistance
with Nautobot installation, or if you have a general question, please start a
discussion instead: https://github.com/nautobot/nautobot/discussions
Please describe the environment in which you are running Nautobot. Be sure
that you are running an unmodified instance of the latest stable release
before submitting a bug report, and that any plugins have been disabled.
-->
### Environment
* Nautobot version (Docker tag too if applicable): 1.6.11
* Python version: 3.11
* Database platform, version: NA
* Middleware(s): NA
<!--
Describe in detail the exact steps that someone else can take to reproduce
this bug using the current stable release of Nautobot. Begin with the
creation of any necessary database objects and call out every operation
being performed explicitly. If reporting a bug in the REST API, be sure to
reconstruct the raw HTTP request(s) being made: Don't rely on a client
library such as pynautobot.
-->
### Steps to Reproduce
1. Create a JobButtonReciever that uses a queue other than default
2. Create a Job Button that points to the JobButtonReciever and connect it to a contenttype (like dcim.devices)
3. Click the Job Button on the contenttype from above.
<!-- What did you expect to happen? -->
### Expected Behavior
The Job runs under the queue that is configured for JobButtonReceiver.
<!-- What happened instead? -->
### Observed Behavior
The Job runs under the default queue (which in my case causes an error, but may not in other use cases).
</issue>
<code>
[start of nautobot/extras/templatetags/job_buttons.py]
1 from collections import OrderedDict
2
3 from django import template
4 from django.contrib.contenttypes.models import ContentType
5 from django.urls import reverse
6 from django.utils.html import format_html
7 from django.utils.safestring import mark_safe
8
9 from nautobot.core.utils.data import render_jinja2
10 from nautobot.extras.models import Job, JobButton
11
12 register = template.Library()
13
14 GROUP_DROPDOWN = """
15 <div class="btn-group">
16 <button type="button" class="btn btn-sm btn-{group_button_class} dropdown-toggle" data-toggle="dropdown">
17 {group_name} <span class="caret"></span>
18 </button>
19 <ul class="dropdown-menu pull-right">
20 {grouped_buttons}
21 </ul>
22 </div>
23 """
24
25 HIDDEN_INPUTS = """
26 <input type="hidden" name="csrfmiddlewaretoken" value="{csrf_token}">
27 <input type="hidden" name="object_pk" value="{object_pk}">
28 <input type="hidden" name="object_model_name" value="{object_model_name}">
29 <input type="hidden" name="_schedule_type" value="immediately">
30 <input type="hidden" name="_return_url" value="{redirect_path}">
31 """
32
33 NO_CONFIRM_BUTTON = """
34 <button type="submit" form="form_id_{button_id}" class="btn btn-sm btn-{button_class}" {disabled}>{button_text}</button>
35 """
36
37 NO_CONFIRM_FORM = """
38 <form id="form_id_{button_id}" action="{button_url}" method="post" class="form">
39 {hidden_inputs}
40 </form>
41 """
42
43 CONFIRM_BUTTON = """
44 <button type="button" class="btn btn-sm btn-{button_class}" data-toggle="modal" data-target="#confirm_modal_id_{button_id}" {disabled}>
45 {button_text}
46 </button>
47 """
48
49 CONFIRM_MODAL = """
50 <div class="modal fade" id="confirm_modal_id_{button_id}" tabindex="-1" role="dialog" aria-labelledby="confirm_modal_label_{button_id}">
51 <div class="modal-dialog" role="document">
52 <div class="modal-content">
53 <div class="modal-header">
54 <button type="button" class="close" data-dismiss="modal" aria-label="Close"><span aria-hidden="true">×</span></button>
55 <h4 class="modal-title" id="confirm_modal_label_{button_id}">Confirmation</h4>
56 </div>
57 <form id="form_id_{button_id}" action="{button_url}" method="post" class="form">
58 <div class="modal-body">
59 {hidden_inputs}
60 Run Job <strong>'{job}'</strong> with object <strong>'{object}'</strong>?
61 </div>
62 <div class="modal-footer">
63 <button type="button" class="btn btn-default" data-dismiss="modal">Cancel</button>
64 <button type="submit" class="btn btn-primary">Confirm</button>
65 </div>
66 </form>
67 </div>
68 </div>
69 </div>
70 """
71
72 SAFE_EMPTY_STR = mark_safe("") # noqa: S308 # suspicious-mark-safe-usage -- this one is safe
73
74
75 def _render_job_button_for_obj(job_button, obj, context, content_type):
76 """
77 Helper method for job_buttons templatetag to reduce repetition of code.
78
79 Returns:
80 (str, str): (button_html, form_html)
81 """
82 # Pass select context data when rendering the JobButton text as Jinja2
83 button_context = {
84 "obj": obj,
85 "debug": context.get("debug", False), # django.template.context_processors.debug
86 "request": context["request"], # django.template.context_processors.request
87 "user": context["user"], # django.contrib.auth.context_processors.auth
88 "perms": context["perms"], # django.contrib.auth.context_processors.auth
89 }
90 try:
91 text_rendered = render_jinja2(job_button.text, button_context)
92 except Exception as exc:
93 return (
94 format_html(
95 '<a class="btn btn-sm btn-{}" disabled="disabled" title="{}"><i class="mdi mdi-alert"></i> {}</a>\n',
96 "default" if not job_button.group_name else "link",
97 exc,
98 job_button.name,
99 ),
100 SAFE_EMPTY_STR,
101 )
102
103 if not text_rendered:
104 return (SAFE_EMPTY_STR, SAFE_EMPTY_STR)
105
106 # Disable buttons if the user doesn't have permission to run the underlying Job.
107 has_run_perm = Job.objects.check_perms(context["user"], instance=job_button.job, action="run")
108 hidden_inputs = format_html(
109 HIDDEN_INPUTS,
110 csrf_token=context["csrf_token"],
111 object_pk=obj.pk,
112 object_model_name=f"{content_type.app_label}.{content_type.model}",
113 redirect_path=context["request"].path,
114 )
115 template_args = {
116 "button_id": job_button.pk,
117 "button_text": text_rendered,
118 "button_class": job_button.button_class if not job_button.group_name else "link",
119 "button_url": reverse("extras:job_run", kwargs={"pk": job_button.job.pk}),
120 "object": obj,
121 "job": job_button.job,
122 "hidden_inputs": hidden_inputs,
123 "disabled": "" if has_run_perm else "disabled",
124 }
125
126 if job_button.confirmation:
127 return (
128 format_html(CONFIRM_BUTTON, **template_args),
129 format_html(CONFIRM_MODAL, **template_args),
130 )
131 else:
132 return (
133 format_html(NO_CONFIRM_BUTTON, **template_args),
134 format_html(NO_CONFIRM_FORM, **template_args),
135 )
136
137
138 @register.simple_tag(takes_context=True)
139 def job_buttons(context, obj):
140 """
141 Render all applicable job buttons for the given object.
142 """
143 content_type = ContentType.objects.get_for_model(obj)
144 # We will enforce "run" permission later in deciding which buttons to show as disabled.
145 buttons = JobButton.objects.filter(content_types=content_type)
146 if not buttons:
147 return SAFE_EMPTY_STR
148
149 buttons_html = forms_html = SAFE_EMPTY_STR
150 group_names = OrderedDict()
151
152 for jb in buttons:
153 # Organize job buttons by group for later processing
154 if jb.group_name:
155 group_names.setdefault(jb.group_name, []).append(jb)
156
157 # Render and add non-grouped buttons
158 else:
159 button_html, form_html = _render_job_button_for_obj(jb, obj, context, content_type)
160 buttons_html += button_html
161 forms_html += form_html
162
163 # Add grouped buttons to template
164 for group_name, buttons in group_names.items():
165 group_button_class = buttons[0].button_class
166
167 buttons_rendered = SAFE_EMPTY_STR
168
169 for jb in buttons:
170 # Render grouped buttons as list items
171 button_html, form_html = _render_job_button_for_obj(jb, obj, context, content_type)
172 buttons_rendered += format_html("<li>{}</li>", button_html)
173 forms_html += form_html
174
175 if buttons_rendered:
176 buttons_html += format_html(
177 GROUP_DROPDOWN,
178 group_button_class=group_button_class,
179 group_name=group_name,
180 grouped_buttons=buttons_rendered,
181 )
182
183 # We want all of the buttons first and then any modals and forms so the buttons render properly
184 return buttons_html + forms_html
185
[end of nautobot/extras/templatetags/job_buttons.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/nautobot/extras/templatetags/job_buttons.py b/nautobot/extras/templatetags/job_buttons.py
--- a/nautobot/extras/templatetags/job_buttons.py
+++ b/nautobot/extras/templatetags/job_buttons.py
@@ -1,6 +1,7 @@
from collections import OrderedDict
from django import template
+from django.conf import settings
from django.contrib.contenttypes.models import ContentType
from django.urls import reverse
from django.utils.html import format_html
@@ -27,6 +28,7 @@
<input type="hidden" name="object_pk" value="{object_pk}">
<input type="hidden" name="object_model_name" value="{object_model_name}">
<input type="hidden" name="_schedule_type" value="immediately">
+<input type="hidden" name="_task_queue" value="{task_queue}">
<input type="hidden" name="_return_url" value="{redirect_path}">
"""
@@ -105,12 +107,17 @@
# Disable buttons if the user doesn't have permission to run the underlying Job.
has_run_perm = Job.objects.check_perms(context["user"], instance=job_button.job, action="run")
+ try:
+ _task_queue = job_button.job.task_queues[0]
+ except IndexError:
+ _task_queue = settings.CELERY_TASK_DEFAULT_QUEUE
hidden_inputs = format_html(
HIDDEN_INPUTS,
csrf_token=context["csrf_token"],
object_pk=obj.pk,
object_model_name=f"{content_type.app_label}.{content_type.model}",
redirect_path=context["request"].path,
+ task_queue=_task_queue,
)
template_args = {
"button_id": job_button.pk,
|
{"golden_diff": "diff --git a/nautobot/extras/templatetags/job_buttons.py b/nautobot/extras/templatetags/job_buttons.py\n--- a/nautobot/extras/templatetags/job_buttons.py\n+++ b/nautobot/extras/templatetags/job_buttons.py\n@@ -1,6 +1,7 @@\n from collections import OrderedDict\n \n from django import template\n+from django.conf import settings\n from django.contrib.contenttypes.models import ContentType\n from django.urls import reverse\n from django.utils.html import format_html\n@@ -27,6 +28,7 @@\n <input type=\"hidden\" name=\"object_pk\" value=\"{object_pk}\">\n <input type=\"hidden\" name=\"object_model_name\" value=\"{object_model_name}\">\n <input type=\"hidden\" name=\"_schedule_type\" value=\"immediately\">\n+<input type=\"hidden\" name=\"_task_queue\" value=\"{task_queue}\">\n <input type=\"hidden\" name=\"_return_url\" value=\"{redirect_path}\">\n \"\"\"\n \n@@ -105,12 +107,17 @@\n \n # Disable buttons if the user doesn't have permission to run the underlying Job.\n has_run_perm = Job.objects.check_perms(context[\"user\"], instance=job_button.job, action=\"run\")\n+ try:\n+ _task_queue = job_button.job.task_queues[0]\n+ except IndexError:\n+ _task_queue = settings.CELERY_TASK_DEFAULT_QUEUE\n hidden_inputs = format_html(\n HIDDEN_INPUTS,\n csrf_token=context[\"csrf_token\"],\n object_pk=obj.pk,\n object_model_name=f\"{content_type.app_label}.{content_type.model}\",\n redirect_path=context[\"request\"].path,\n+ task_queue=_task_queue,\n )\n template_args = {\n \"button_id\": job_button.pk,\n", "issue": "Job Buttons do not honor the Job task_queue\n<!--\r\n NOTE: IF YOUR ISSUE DOES NOT FOLLOW THIS TEMPLATE, IT WILL BE CLOSED.\r\n\r\n This form is only for reporting reproducible bugs. If you need assistance\r\n with Nautobot installation, or if you have a general question, please start a\r\n discussion instead: https://github.com/nautobot/nautobot/discussions\r\n\r\n Please describe the environment in which you are running Nautobot. Be sure\r\n that you are running an unmodified instance of the latest stable release\r\n before submitting a bug report, and that any plugins have been disabled.\r\n-->\r\n### Environment\r\n* Nautobot version (Docker tag too if applicable): 1.6.11\r\n* Python version: 3.11\r\n* Database platform, version: NA\r\n* Middleware(s): NA\r\n\r\n<!--\r\n Describe in detail the exact steps that someone else can take to reproduce\r\n this bug using the current stable release of Nautobot. Begin with the\r\n creation of any necessary database objects and call out every operation\r\n being performed explicitly. If reporting a bug in the REST API, be sure to\r\n reconstruct the raw HTTP request(s) being made: Don't rely on a client\r\n library such as pynautobot.\r\n-->\r\n### Steps to Reproduce\r\n1. Create a JobButtonReciever that uses a queue other than default\r\n2. Create a Job Button that points to the JobButtonReciever and connect it to a contenttype (like dcim.devices)\r\n3. Click the Job Button on the contenttype from above.\r\n\r\n<!-- What did you expect to happen? -->\r\n### Expected Behavior\r\nThe Job runs under the queue that is configured for JobButtonReceiver.\r\n\r\n<!-- What happened instead? 
-->\r\n### Observed Behavior\r\nThe Job runs under the default queue (which in my case causes an error, but may not in other use cases).\n", "before_files": [{"content": "from collections import OrderedDict\n\nfrom django import template\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.urls import reverse\nfrom django.utils.html import format_html\nfrom django.utils.safestring import mark_safe\n\nfrom nautobot.core.utils.data import render_jinja2\nfrom nautobot.extras.models import Job, JobButton\n\nregister = template.Library()\n\nGROUP_DROPDOWN = \"\"\"\n<div class=\"btn-group\">\n <button type=\"button\" class=\"btn btn-sm btn-{group_button_class} dropdown-toggle\" data-toggle=\"dropdown\">\n {group_name} <span class=\"caret\"></span>\n </button>\n <ul class=\"dropdown-menu pull-right\">\n {grouped_buttons}\n </ul>\n</div>\n\"\"\"\n\nHIDDEN_INPUTS = \"\"\"\n<input type=\"hidden\" name=\"csrfmiddlewaretoken\" value=\"{csrf_token}\">\n<input type=\"hidden\" name=\"object_pk\" value=\"{object_pk}\">\n<input type=\"hidden\" name=\"object_model_name\" value=\"{object_model_name}\">\n<input type=\"hidden\" name=\"_schedule_type\" value=\"immediately\">\n<input type=\"hidden\" name=\"_return_url\" value=\"{redirect_path}\">\n\"\"\"\n\nNO_CONFIRM_BUTTON = \"\"\"\n<button type=\"submit\" form=\"form_id_{button_id}\" class=\"btn btn-sm btn-{button_class}\" {disabled}>{button_text}</button>\n\"\"\"\n\nNO_CONFIRM_FORM = \"\"\"\n<form id=\"form_id_{button_id}\" action=\"{button_url}\" method=\"post\" class=\"form\">\n {hidden_inputs}\n</form>\n\"\"\"\n\nCONFIRM_BUTTON = \"\"\"\n<button type=\"button\" class=\"btn btn-sm btn-{button_class}\" data-toggle=\"modal\" data-target=\"#confirm_modal_id_{button_id}\" {disabled}>\n {button_text}\n</button>\n\"\"\"\n\nCONFIRM_MODAL = \"\"\"\n<div class=\"modal fade\" id=\"confirm_modal_id_{button_id}\" tabindex=\"-1\" role=\"dialog\" aria-labelledby=\"confirm_modal_label_{button_id}\">\n <div class=\"modal-dialog\" role=\"document\">\n <div class=\"modal-content\">\n <div class=\"modal-header\">\n <button type=\"button\" class=\"close\" data-dismiss=\"modal\" aria-label=\"Close\"><span aria-hidden=\"true\">×</span></button>\n <h4 class=\"modal-title\" id=\"confirm_modal_label_{button_id}\">Confirmation</h4>\n </div>\n <form id=\"form_id_{button_id}\" action=\"{button_url}\" method=\"post\" class=\"form\">\n <div class=\"modal-body\">\n {hidden_inputs}\n Run Job <strong>'{job}'</strong> with object <strong>'{object}'</strong>?\n </div>\n <div class=\"modal-footer\">\n <button type=\"button\" class=\"btn btn-default\" data-dismiss=\"modal\">Cancel</button>\n <button type=\"submit\" class=\"btn btn-primary\">Confirm</button>\n </div>\n </form>\n </div>\n </div>\n</div>\n\"\"\"\n\nSAFE_EMPTY_STR = mark_safe(\"\") # noqa: S308 # suspicious-mark-safe-usage -- this one is safe\n\n\ndef _render_job_button_for_obj(job_button, obj, context, content_type):\n \"\"\"\n Helper method for job_buttons templatetag to reduce repetition of code.\n\n Returns:\n (str, str): (button_html, form_html)\n \"\"\"\n # Pass select context data when rendering the JobButton text as Jinja2\n button_context = {\n \"obj\": obj,\n \"debug\": context.get(\"debug\", False), # django.template.context_processors.debug\n \"request\": context[\"request\"], # django.template.context_processors.request\n \"user\": context[\"user\"], # django.contrib.auth.context_processors.auth\n \"perms\": context[\"perms\"], # django.contrib.auth.context_processors.auth\n }\n try:\n 
text_rendered = render_jinja2(job_button.text, button_context)\n except Exception as exc:\n return (\n format_html(\n '<a class=\"btn btn-sm btn-{}\" disabled=\"disabled\" title=\"{}\"><i class=\"mdi mdi-alert\"></i> {}</a>\\n',\n \"default\" if not job_button.group_name else \"link\",\n exc,\n job_button.name,\n ),\n SAFE_EMPTY_STR,\n )\n\n if not text_rendered:\n return (SAFE_EMPTY_STR, SAFE_EMPTY_STR)\n\n # Disable buttons if the user doesn't have permission to run the underlying Job.\n has_run_perm = Job.objects.check_perms(context[\"user\"], instance=job_button.job, action=\"run\")\n hidden_inputs = format_html(\n HIDDEN_INPUTS,\n csrf_token=context[\"csrf_token\"],\n object_pk=obj.pk,\n object_model_name=f\"{content_type.app_label}.{content_type.model}\",\n redirect_path=context[\"request\"].path,\n )\n template_args = {\n \"button_id\": job_button.pk,\n \"button_text\": text_rendered,\n \"button_class\": job_button.button_class if not job_button.group_name else \"link\",\n \"button_url\": reverse(\"extras:job_run\", kwargs={\"pk\": job_button.job.pk}),\n \"object\": obj,\n \"job\": job_button.job,\n \"hidden_inputs\": hidden_inputs,\n \"disabled\": \"\" if has_run_perm else \"disabled\",\n }\n\n if job_button.confirmation:\n return (\n format_html(CONFIRM_BUTTON, **template_args),\n format_html(CONFIRM_MODAL, **template_args),\n )\n else:\n return (\n format_html(NO_CONFIRM_BUTTON, **template_args),\n format_html(NO_CONFIRM_FORM, **template_args),\n )\n\n\[email protected]_tag(takes_context=True)\ndef job_buttons(context, obj):\n \"\"\"\n Render all applicable job buttons for the given object.\n \"\"\"\n content_type = ContentType.objects.get_for_model(obj)\n # We will enforce \"run\" permission later in deciding which buttons to show as disabled.\n buttons = JobButton.objects.filter(content_types=content_type)\n if not buttons:\n return SAFE_EMPTY_STR\n\n buttons_html = forms_html = SAFE_EMPTY_STR\n group_names = OrderedDict()\n\n for jb in buttons:\n # Organize job buttons by group for later processing\n if jb.group_name:\n group_names.setdefault(jb.group_name, []).append(jb)\n\n # Render and add non-grouped buttons\n else:\n button_html, form_html = _render_job_button_for_obj(jb, obj, context, content_type)\n buttons_html += button_html\n forms_html += form_html\n\n # Add grouped buttons to template\n for group_name, buttons in group_names.items():\n group_button_class = buttons[0].button_class\n\n buttons_rendered = SAFE_EMPTY_STR\n\n for jb in buttons:\n # Render grouped buttons as list items\n button_html, form_html = _render_job_button_for_obj(jb, obj, context, content_type)\n buttons_rendered += format_html(\"<li>{}</li>\", button_html)\n forms_html += form_html\n\n if buttons_rendered:\n buttons_html += format_html(\n GROUP_DROPDOWN,\n group_button_class=group_button_class,\n group_name=group_name,\n grouped_buttons=buttons_rendered,\n )\n\n # We want all of the buttons first and then any modals and forms so the buttons render properly\n return buttons_html + forms_html\n", "path": "nautobot/extras/templatetags/job_buttons.py"}]}
| 2,963 | 380 |
gh_patches_debug_13544
|
rasdani/github-patches
|
git_diff
|
Azure__azure-cli-extensions-93
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Change help for az webapp new
### Extension name (the extension in question)
webapp
### Description of issue (in as much detail as possible)
currently the help information for the command return the
Command
az webapp new: Create and deploy a node web app.
Requested changes:
- add the callout for .net core apps
- add explicit mention that the command is in preview
-----
</issue>
<code>
[start of src/webapp/setup.py]
1 #!/usr/bin/env python
2
3 # --------------------------------------------------------------------------------------------
4 # Copyright (c) Microsoft Corporation. All rights reserved.
5 # Licensed under the MIT License. See License.txt in the project root for license information.
6 # --------------------------------------------------------------------------------------------
7
8 from codecs import open
9 from setuptools import setup, find_packages
10
11 VERSION = "0.1.0"
12
13 CLASSIFIERS = [
14 'Development Status :: 4 - Beta',
15 'Intended Audience :: Developers',
16 'Intended Audience :: System Administrators',
17 'Programming Language :: Python',
18 'Programming Language :: Python :: 2',
19 'Programming Language :: Python :: 2.7',
20 'Programming Language :: Python :: 3',
21 'Programming Language :: Python :: 3.4',
22 'Programming Language :: Python :: 3.5',
23 'Programming Language :: Python :: 3.6',
24 'License :: OSI Approved :: MIT License',
25 ]
26
27 DEPENDENCIES = []
28
29 setup(
30 name='webapp',
31 version=VERSION,
32 description='An Azure CLI Extension to manage appservice resources',
33 long_description='An Azure CLI Extension to manage appservice resources',
34 license='MIT',
35 author='Sisira Panchagnula',
36 author_email='[email protected]',
37 url='https://github.com/Azure/azure-cli-extensions',
38 classifiers=CLASSIFIERS,
39 packages=find_packages(exclude=["tests"]),
40 install_requires=DEPENDENCIES
41 )
42
[end of src/webapp/setup.py]
[start of src/webapp/azext_webapp/_help.py]
1 # --------------------------------------------------------------------------------------------
2 # Copyright (c) Microsoft Corporation. All rights reserved.
3 # Licensed under the MIT License. See License.txt in the project root for license information.
4 # --------------------------------------------------------------------------------------------
5
6 from knack.help_files import helps
7
8
9 helps['webapp new'] = """
10 type: command
11 short-summary: Create and deploy a node web app
12 examples:
13 - name: Create a web app with the default configuration.
14 text: >
15 az webapp new -n MyUniqueAppName --dryrun \n
16 az webapp new -n MyUniqueAppName -l locationName
17 """
18
[end of src/webapp/azext_webapp/_help.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/src/webapp/azext_webapp/_help.py b/src/webapp/azext_webapp/_help.py
--- a/src/webapp/azext_webapp/_help.py
+++ b/src/webapp/azext_webapp/_help.py
@@ -8,7 +8,8 @@
helps['webapp new'] = """
type: command
- short-summary: Create and deploy a node web app
+ short-summary: Experimental command to create and deploy a web app.
+ Current supports Node on Linux & .NET Core on Windows.
examples:
- name: Create a web app with the default configuration.
text: >
diff --git a/src/webapp/setup.py b/src/webapp/setup.py
--- a/src/webapp/setup.py
+++ b/src/webapp/setup.py
@@ -8,7 +8,7 @@
from codecs import open
from setuptools import setup, find_packages
-VERSION = "0.1.0"
+VERSION = "0.1.1"
CLASSIFIERS = [
'Development Status :: 4 - Beta',
|
{"golden_diff": "diff --git a/src/webapp/azext_webapp/_help.py b/src/webapp/azext_webapp/_help.py\n--- a/src/webapp/azext_webapp/_help.py\n+++ b/src/webapp/azext_webapp/_help.py\n@@ -8,7 +8,8 @@\n \n helps['webapp new'] = \"\"\"\n type: command\n- short-summary: Create and deploy a node web app\n+ short-summary: Experimental command to create and deploy a web app.\n+ Current supports Node on Linux & .NET Core on Windows.\n examples:\n - name: Create a web app with the default configuration.\n text: >\ndiff --git a/src/webapp/setup.py b/src/webapp/setup.py\n--- a/src/webapp/setup.py\n+++ b/src/webapp/setup.py\n@@ -8,7 +8,7 @@\n from codecs import open\n from setuptools import setup, find_packages\n \n-VERSION = \"0.1.0\"\n+VERSION = \"0.1.1\"\n \n CLASSIFIERS = [\n 'Development Status :: 4 - Beta',\n", "issue": "Change help for az webapp new\n### Extension name (the extension in question)\r\nwebapp\r\n\r\n### Description of issue (in as much detail as possible)\r\ncurrently the help information for the command return the \r\n\r\nCommand\r\n az webapp new: Create and deploy a node web app.\r\n\r\nRequested changes:\r\n\r\n- add the callout for .net core apps\r\n- add explicit mention that the command is in preview\r\n\r\n-----\r\n\r\n\n", "before_files": [{"content": "#!/usr/bin/env python\n\n# --------------------------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See License.txt in the project root for license information.\n# --------------------------------------------------------------------------------------------\n\nfrom codecs import open\nfrom setuptools import setup, find_packages\n\nVERSION = \"0.1.0\"\n\nCLASSIFIERS = [\n 'Development Status :: 4 - Beta',\n 'Intended Audience :: Developers',\n 'Intended Audience :: System Administrators',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'License :: OSI Approved :: MIT License',\n]\n\nDEPENDENCIES = []\n\nsetup(\n name='webapp',\n version=VERSION,\n description='An Azure CLI Extension to manage appservice resources',\n long_description='An Azure CLI Extension to manage appservice resources',\n license='MIT',\n author='Sisira Panchagnula',\n author_email='[email protected]',\n url='https://github.com/Azure/azure-cli-extensions',\n classifiers=CLASSIFIERS,\n packages=find_packages(exclude=[\"tests\"]),\n install_requires=DEPENDENCIES\n)\n", "path": "src/webapp/setup.py"}, {"content": "# --------------------------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See License.txt in the project root for license information.\n# --------------------------------------------------------------------------------------------\n\nfrom knack.help_files import helps\n\n\nhelps['webapp new'] = \"\"\"\n type: command\n short-summary: Create and deploy a node web app\n examples:\n - name: Create a web app with the default configuration.\n text: >\n az webapp new -n MyUniqueAppName --dryrun \\n\n az webapp new -n MyUniqueAppName -l locationName\n\"\"\"\n", "path": "src/webapp/azext_webapp/_help.py"}]}
| 1,181 | 233 |
gh_patches_debug_23257
|
rasdani/github-patches
|
git_diff
|
getredash__redash-2799
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add front-end extension capability
This is similar in concept to https://github.com/getredash/redash/pull/2354/files
</issue>
<code>
[start of redash/extensions.py]
1 from pkg_resources import iter_entry_points
2
3
4 def init_extensions(app):
5 """
6 Load the Redash extensions for the given Redash Flask app.
7 """
8 if not hasattr(app, 'redash_extensions'):
9 app.redash_extensions = {}
10
11 for entry_point in iter_entry_points('redash.extensions'):
12 app.logger.info('Loading Redash extension %s.', entry_point.name)
13 extension = entry_point.load()
14 app.redash_extensions[entry_point.name] = extension(app)
15
[end of redash/extensions.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/redash/extensions.py b/redash/extensions.py
--- a/redash/extensions.py
+++ b/redash/extensions.py
@@ -1,4 +1,5 @@
-from pkg_resources import iter_entry_points
+import os
+from pkg_resources import iter_entry_points, resource_isdir, resource_listdir
def init_extensions(app):
@@ -10,5 +11,20 @@
for entry_point in iter_entry_points('redash.extensions'):
app.logger.info('Loading Redash extension %s.', entry_point.name)
- extension = entry_point.load()
- app.redash_extensions[entry_point.name] = extension(app)
+ try:
+ extension = entry_point.load()
+ app.redash_extensions[entry_point.name] = {
+ "entry_function": extension(app),
+ "resources_list": []
+ }
+ except ImportError:
+ app.logger.info('%s does not have a callable and will not be loaded.', entry_point.name)
+ (root_module, _) = os.path.splitext(entry_point.module_name)
+ content_folder_relative = os.path.join(entry_point.name, 'bundle')
+
+ # If it's a frontend extension only, store a list of files in the bundle directory.
+ if resource_isdir(root_module, content_folder_relative):
+ app.redash_extensions[entry_point.name] = {
+ "entry_function": None,
+ "resources_list": resource_listdir(root_module, content_folder_relative)
+ }
|
{"golden_diff": "diff --git a/redash/extensions.py b/redash/extensions.py\n--- a/redash/extensions.py\n+++ b/redash/extensions.py\n@@ -1,4 +1,5 @@\n-from pkg_resources import iter_entry_points\n+import os\n+from pkg_resources import iter_entry_points, resource_isdir, resource_listdir\n \n \n def init_extensions(app):\n@@ -10,5 +11,20 @@\n \n for entry_point in iter_entry_points('redash.extensions'):\n app.logger.info('Loading Redash extension %s.', entry_point.name)\n- extension = entry_point.load()\n- app.redash_extensions[entry_point.name] = extension(app)\n+ try:\n+ extension = entry_point.load()\n+ app.redash_extensions[entry_point.name] = {\n+ \"entry_function\": extension(app),\n+ \"resources_list\": []\n+ }\n+ except ImportError:\n+ app.logger.info('%s does not have a callable and will not be loaded.', entry_point.name)\n+ (root_module, _) = os.path.splitext(entry_point.module_name)\n+ content_folder_relative = os.path.join(entry_point.name, 'bundle')\n+\n+ # If it's a frontend extension only, store a list of files in the bundle directory.\n+ if resource_isdir(root_module, content_folder_relative):\n+ app.redash_extensions[entry_point.name] = {\n+ \"entry_function\": None,\n+ \"resources_list\": resource_listdir(root_module, content_folder_relative)\n+ }\n", "issue": "Add front-end extension capability\nThis is similar in concept to https://github.com/getredash/redash/pull/2354/files\n", "before_files": [{"content": "from pkg_resources import iter_entry_points\n\n\ndef init_extensions(app):\n \"\"\"\n Load the Redash extensions for the given Redash Flask app.\n \"\"\"\n if not hasattr(app, 'redash_extensions'):\n app.redash_extensions = {}\n\n for entry_point in iter_entry_points('redash.extensions'):\n app.logger.info('Loading Redash extension %s.', entry_point.name)\n extension = entry_point.load()\n app.redash_extensions[entry_point.name] = extension(app)\n", "path": "redash/extensions.py"}]}
| 686 | 317 |
gh_patches_debug_17786
|
rasdani/github-patches
|
git_diff
|
lmfit__lmfit-py-152
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Cannot deploy to pypi repo dues to tuples in the `setup.py` attributes
Due to a python-bug (http://bugs.python.org/issue19610) i cannot install and deploy lmfit with `python setup install`
I discovered this issue while trying to fix #149
</issue>
<code>
[start of setup.py]
1 #!/usr/bin/env python
2 # from distutils.core import setup
3 from setuptools import setup
4
5 import lmfit as lmfit
6 import numpy, scipy
7
8 long_desc = """A library for least-squares minimization and data fitting in
9 Python. Built on top of scipy.optimize, lmfit provides a Parameter object
10 which can be set as fixed or free, can have upper and/or lower bounds, or
11 can be written in terms of algebraic constraints of other Parameters. The
12 user writes a function to be minimized as a function of these Parameters,
13 and the scipy.optimize methods are used to find the optimal values for the
14 Parameters. The Levenberg-Marquardt (leastsq) is the default minimization
15 algorithm, and provides estimated standard errors and correlations between
16 varied Parameters. Other minimization methods, including Nelder-Mead's
17 downhill simplex, Powell's method, BFGS, Sequential Least Squares, and
18 others are also supported. Bounds and contraints can be placed on
19 Parameters for all of these methods.
20
21 In addition, methods for explicitly calculating confidence intervals are
22 provided for exploring minmization problems where the approximation of
23 estimating Parameter uncertainties from the covariance matrix is
24 questionable. """
25
26
27 setup(name = 'lmfit',
28 version = lmfit.__version__,
29 author = 'LMFit Development Team',
30 author_email = '[email protected]',
31 url = 'http://lmfit.github.io/lmfit-py/',
32 download_url = 'http://lmfit.github.io//lmfit-py/',
33 requires = ('numpy', 'scipy'),
34 license = 'BSD',
35 description = "Least-Squares Minimization with Bounds and Constraints",
36 long_description = long_desc,
37 platforms = ('Windows', 'Linux', 'Mac OS X'),
38 classifiers=['Intended Audience :: Science/Research',
39 'Operating System :: OS Independent',
40 'Programming Language :: Python',
41 'Topic :: Scientific/Engineering',
42 ],
43 # test_suite='nose.collector',
44 # test_requires=['Nose'],
45 package_dir = {'lmfit': 'lmfit'},
46 packages = ['lmfit', 'lmfit.ui', 'lmfit.uncertainties'],
47 )
48
49
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -30,11 +30,11 @@
author_email = '[email protected]',
url = 'http://lmfit.github.io/lmfit-py/',
download_url = 'http://lmfit.github.io//lmfit-py/',
- requires = ('numpy', 'scipy'),
+ requires = ['numpy', 'scipy'],
license = 'BSD',
description = "Least-Squares Minimization with Bounds and Constraints",
long_description = long_desc,
- platforms = ('Windows', 'Linux', 'Mac OS X'),
+ platforms = ['Windows', 'Linux', 'Mac OS X'],
classifiers=['Intended Audience :: Science/Research',
'Operating System :: OS Independent',
'Programming Language :: Python',
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -30,11 +30,11 @@\n author_email = '[email protected]',\n url = 'http://lmfit.github.io/lmfit-py/',\n download_url = 'http://lmfit.github.io//lmfit-py/',\n- requires = ('numpy', 'scipy'),\n+ requires = ['numpy', 'scipy'],\n license = 'BSD',\n description = \"Least-Squares Minimization with Bounds and Constraints\",\n long_description = long_desc,\n- platforms = ('Windows', 'Linux', 'Mac OS X'),\n+ platforms = ['Windows', 'Linux', 'Mac OS X'],\n classifiers=['Intended Audience :: Science/Research',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n", "issue": "Cannot deploy to pypi repo dues to tuples in the `setup.py` attributes\nDue to a python-bug (http://bugs.python.org/issue19610) i cannot install and deploy lmfit with `python setup install`\n\nI discovered this issue while trying to fix #149 \n\n", "before_files": [{"content": "#!/usr/bin/env python\n# from distutils.core import setup\nfrom setuptools import setup\n\nimport lmfit as lmfit\nimport numpy, scipy\n\nlong_desc = \"\"\"A library for least-squares minimization and data fitting in\nPython. Built on top of scipy.optimize, lmfit provides a Parameter object\nwhich can be set as fixed or free, can have upper and/or lower bounds, or\ncan be written in terms of algebraic constraints of other Parameters. The\nuser writes a function to be minimized as a function of these Parameters,\nand the scipy.optimize methods are used to find the optimal values for the\nParameters. The Levenberg-Marquardt (leastsq) is the default minimization\nalgorithm, and provides estimated standard errors and correlations between\nvaried Parameters. Other minimization methods, including Nelder-Mead's\ndownhill simplex, Powell's method, BFGS, Sequential Least Squares, and\nothers are also supported. Bounds and contraints can be placed on\nParameters for all of these methods.\n\nIn addition, methods for explicitly calculating confidence intervals are\nprovided for exploring minmization problems where the approximation of\nestimating Parameter uncertainties from the covariance matrix is\nquestionable. \"\"\"\n\n\nsetup(name = 'lmfit',\n version = lmfit.__version__,\n author = 'LMFit Development Team',\n author_email = '[email protected]',\n url = 'http://lmfit.github.io/lmfit-py/',\n download_url = 'http://lmfit.github.io//lmfit-py/',\n requires = ('numpy', 'scipy'),\n license = 'BSD',\n description = \"Least-Squares Minimization with Bounds and Constraints\",\n long_description = long_desc,\n platforms = ('Windows', 'Linux', 'Mac OS X'),\n classifiers=['Intended Audience :: Science/Research',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Topic :: Scientific/Engineering',\n ],\n # test_suite='nose.collector',\n # test_requires=['Nose'],\n package_dir = {'lmfit': 'lmfit'},\n packages = ['lmfit', 'lmfit.ui', 'lmfit.uncertainties'],\n )\n\n", "path": "setup.py"}]}
| 1,151 | 185 |
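The one-line change recorded above replaces tuple literals with lists for the `requires` and `platforms` metadata passed to `setup()`; the CPython report linked in the issue (bpo-19610) concerns `distutils` becoming strict about the types accepted for such fields. A minimal sketch of the list form follows — the package name and version are placeholders, not taken from the record:

```python
# Hypothetical setup.py fragment: metadata sequences passed as lists, which
# setuptools accepts, instead of the tuples that triggered the install failure.
from setuptools import setup

setup(
    name="example_pkg",        # placeholder, not the real project name
    version="0.1.0",           # placeholder version
    requires=["numpy", "scipy"],                  # list, not tuple
    platforms=["Windows", "Linux", "Mac OS X"],   # list, not tuple
)
```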
gh_patches_debug_8538
|
rasdani/github-patches
|
git_diff
|
qtile__qtile-1768
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
widget.CheckUpdates not hidden when no updates are available
<!--
Please do not ask general questions here! There are [community
contact](https://github.com/qtile/qtile#community) options for that.
-->
# Issue description
<!--
A brief discussion of what failed and how it failed. A description of
what you tried is helpful, i.e. "When I use lazy.kill() on a window I get
the following stack trace" instead of "Closing windows doesn't work".
-->
The documentation for [`CheckUpdates`](http://docs.qtile.org/en/latest/manual/ref/widgets.html#checkupdates) `display_format` mentions that the given format-specifier will only be used when there are updates available - giving rise to the assumption that the widget would be invisible when no updates are available (which seems very reasonable).
This doesn't seem to be the case for me, though. I am permanently seeing a `0`. So either the documentation or the implementation might need a little tweak.
# Qtile version
<!--
Please include the exact commit hash of the version of Qtile that failed.
-->
0.15.1
</issue>
<code>
[start of libqtile/widget/check_updates.py]
1 # Copyright (c) 2015 Ali Mousavi
2 #
3 # Permission is hereby granted, free of charge, to any person obtaining a copy
4 # of this software and associated documentation files (the "Software"), to deal
5 # in the Software without restriction, including without limitation the rights
6 # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
7 # copies of the Software, and to permit persons to whom the Software is
8 # furnished to do so, subject to the following conditions:
9 #
10 # The above copyright notice and this permission notice shall be included in
11 # all copies or substantial portions of the Software.
12 #
13 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14 # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
16 # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
17 # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
18 # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
19 # SOFTWARE.
20
21 import os
22 from subprocess import CalledProcessError, Popen
23
24 from libqtile.log_utils import logger
25 from libqtile.widget import base
26
27
28 class CheckUpdates(base.ThreadedPollText):
29 """Shows number of pending updates in different unix systems"""
30 orientations = base.ORIENTATION_HORIZONTAL
31 defaults = [
32 ("distro", "Arch", "Name of your distribution"),
33 ("custom_command", None, "Custom shell command for checking updates (counts the lines of the output)"),
34 ("update_interval", 60, "Update interval in seconds."),
35 ('execute', None, 'Command to execute on click'),
36 ("display_format", "Updates: {updates}", "Display format if updates available"),
37 ("colour_no_updates", "ffffff", "Colour when there's no updates."),
38 ("colour_have_updates", "ffffff", "Colour when there are updates."),
39 ("restart_indicator", "", "Indicator to represent reboot is required. (Ubuntu only)")
40 ]
41
42 def __init__(self, **config):
43 base.ThreadedPollText.__init__(self, **config)
44 self.add_defaults(CheckUpdates.defaults)
45
46 # format: "Distro": ("cmd", "number of lines to subtract from output")
47 self.cmd_dict = {"Arch": ("pacman -Qu", 0),
48 "Arch_checkupdates": ("checkupdates", 0),
49 "Arch_Sup": ("pacman -Sup", 1),
50 "Arch_yay": ("yay -Qu", 0),
51 "Debian": ("apt-show-versions -u -b", 0),
52 "Ubuntu": ("aptitude search ~U", 0),
53 "Fedora": ("dnf list updates", 3),
54 "FreeBSD": ("pkg_version -I -l '<'", 0),
55 "Mandriva": ("urpmq --auto-select", 0)
56 }
57
58 # Check if distro name is valid.
59 try:
60 self.cmd = self.cmd_dict[self.distro][0].split()
61 self.subtr = self.cmd_dict[self.distro][1]
62 except KeyError:
63 distros = sorted(self.cmd_dict.keys())
64 logger.error(self.distro + ' is not a valid distro name. ' +
65 'Use one of the list: ' + str(distros) + '.')
66 self.cmd = None
67
68 def _check_updates(self):
69 # type: () -> str
70 try:
71 if self.custom_command is None:
72 updates = self.call_process(self.cmd)
73 else:
74 updates = self.call_process(self.custom_command, shell=True)
75 self.subtr = 0
76 except CalledProcessError:
77 updates = ""
78 num_updates = str(len(updates.splitlines()) - self.subtr)
79
80 if self.restart_indicator and os.path.exists('/var/run/reboot-required'):
81 num_updates += self.restart_indicator
82
83 self._set_colour(num_updates)
84 return self.display_format.format(**{"updates": num_updates})
85
86 def _set_colour(self, num_updates):
87 # type: (str) -> None
88 if not num_updates.startswith("0"):
89 self.layout.colour = self.colour_have_updates
90 else:
91 self.layout.colour = self.colour_no_updates
92
93 def poll(self):
94 # type: () -> str
95 if not self.cmd:
96 return "N/A"
97 return self._check_updates()
98
99 def button_press(self, x, y, button):
100 # type: (int, int, int) -> None
101 base.ThreadedPollText.button_press(self, x, y, button)
102 if button == 1 and self.execute is not None:
103 Popen(self.execute, shell=True)
104
[end of libqtile/widget/check_updates.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/libqtile/widget/check_updates.py b/libqtile/widget/check_updates.py
--- a/libqtile/widget/check_updates.py
+++ b/libqtile/widget/check_updates.py
@@ -75,7 +75,11 @@
self.subtr = 0
except CalledProcessError:
updates = ""
- num_updates = str(len(updates.splitlines()) - self.subtr)
+ num_updates = len(updates.splitlines()) - self.subtr
+
+ if num_updates == 0:
+ return ""
+ num_updates = str(num_updates)
if self.restart_indicator and os.path.exists('/var/run/reboot-required'):
num_updates += self.restart_indicator
|
{"golden_diff": "diff --git a/libqtile/widget/check_updates.py b/libqtile/widget/check_updates.py\n--- a/libqtile/widget/check_updates.py\n+++ b/libqtile/widget/check_updates.py\n@@ -75,7 +75,11 @@\n self.subtr = 0\n except CalledProcessError:\n updates = \"\"\n- num_updates = str(len(updates.splitlines()) - self.subtr)\n+ num_updates = len(updates.splitlines()) - self.subtr\n+\n+ if num_updates == 0:\n+ return \"\"\n+ num_updates = str(num_updates)\n \n if self.restart_indicator and os.path.exists('/var/run/reboot-required'):\n num_updates += self.restart_indicator\n", "issue": "widget.CheckUpdates not hidden when no updates are available\n<!--\r\nPlease do not ask general questions here! There are [community\r\ncontact](https://github.com/qtile/qtile#community) options for that.\r\n-->\r\n\r\n# Issue description\r\n\r\n<!--\r\nA brief discussion of what failed and how it failed. A description of\r\nwhat you tried is helpful, i.e. \"When I use lazy.kill() on a window I get\r\nthe following stack trace\" instead of \"Closing windows doesn't work\".\r\n-->\r\n\r\nThe documentation for [`CheckUpdates`](http://docs.qtile.org/en/latest/manual/ref/widgets.html#checkupdates) `display_format` mentions that the given format-specifier will only be used when there are updates available - giving rise to the assumption that the widget would be invisible when no updates are available (which seems very reasonable).\r\n\r\nThis doesn't seem to be the case for me, though. I am permanently seeing a `0`. So either the documentation or the implementation might need a little tweak.\r\n\r\n# Qtile version\r\n\r\n<!--\r\nPlease include the exact commit hash of the version of Qtile that failed.\r\n-->\r\n\r\n0.15.1\n", "before_files": [{"content": "# Copyright (c) 2015 Ali Mousavi\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\nimport os\nfrom subprocess import CalledProcessError, Popen\n\nfrom libqtile.log_utils import logger\nfrom libqtile.widget import base\n\n\nclass CheckUpdates(base.ThreadedPollText):\n \"\"\"Shows number of pending updates in different unix systems\"\"\"\n orientations = base.ORIENTATION_HORIZONTAL\n defaults = [\n (\"distro\", \"Arch\", \"Name of your distribution\"),\n (\"custom_command\", None, \"Custom shell command for checking updates (counts the lines of the output)\"),\n (\"update_interval\", 60, \"Update interval in seconds.\"),\n ('execute', None, 'Command to execute on click'),\n (\"display_format\", \"Updates: {updates}\", \"Display format if updates available\"),\n (\"colour_no_updates\", \"ffffff\", \"Colour when there's no updates.\"),\n (\"colour_have_updates\", \"ffffff\", \"Colour when there are updates.\"),\n (\"restart_indicator\", \"\", \"Indicator to represent reboot is required. (Ubuntu only)\")\n ]\n\n def __init__(self, **config):\n base.ThreadedPollText.__init__(self, **config)\n self.add_defaults(CheckUpdates.defaults)\n\n # format: \"Distro\": (\"cmd\", \"number of lines to subtract from output\")\n self.cmd_dict = {\"Arch\": (\"pacman -Qu\", 0),\n \"Arch_checkupdates\": (\"checkupdates\", 0),\n \"Arch_Sup\": (\"pacman -Sup\", 1),\n \"Arch_yay\": (\"yay -Qu\", 0),\n \"Debian\": (\"apt-show-versions -u -b\", 0),\n \"Ubuntu\": (\"aptitude search ~U\", 0),\n \"Fedora\": (\"dnf list updates\", 3),\n \"FreeBSD\": (\"pkg_version -I -l '<'\", 0),\n \"Mandriva\": (\"urpmq --auto-select\", 0)\n }\n\n # Check if distro name is valid.\n try:\n self.cmd = self.cmd_dict[self.distro][0].split()\n self.subtr = self.cmd_dict[self.distro][1]\n except KeyError:\n distros = sorted(self.cmd_dict.keys())\n logger.error(self.distro + ' is not a valid distro name. ' +\n 'Use one of the list: ' + str(distros) + '.')\n self.cmd = None\n\n def _check_updates(self):\n # type: () -> str\n try:\n if self.custom_command is None:\n updates = self.call_process(self.cmd)\n else:\n updates = self.call_process(self.custom_command, shell=True)\n self.subtr = 0\n except CalledProcessError:\n updates = \"\"\n num_updates = str(len(updates.splitlines()) - self.subtr)\n\n if self.restart_indicator and os.path.exists('/var/run/reboot-required'):\n num_updates += self.restart_indicator\n\n self._set_colour(num_updates)\n return self.display_format.format(**{\"updates\": num_updates})\n\n def _set_colour(self, num_updates):\n # type: (str) -> None\n if not num_updates.startswith(\"0\"):\n self.layout.colour = self.colour_have_updates\n else:\n self.layout.colour = self.colour_no_updates\n\n def poll(self):\n # type: () -> str\n if not self.cmd:\n return \"N/A\"\n return self._check_updates()\n\n def button_press(self, x, y, button):\n # type: (int, int, int) -> None\n base.ThreadedPollText.button_press(self, x, y, button)\n if button == 1 and self.execute is not None:\n Popen(self.execute, shell=True)\n", "path": "libqtile/widget/check_updates.py"}]}
| 2,001 | 152 |
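The patch in this record changes `_check_updates()` to compute the count as an integer first and return an empty string when it is zero, so the bar draws nothing instead of a literal `0`. A standalone sketch of that early-return pattern — the function below is a simplified stand-in, not the real qtile widget method:

```python
# Format the display text only when updates exist; an empty string makes the
# widget effectively invisible, matching what the documentation implies.
def format_update_count(raw_output: str, lines_to_subtract: int = 0,
                        display_format: str = "Updates: {updates}") -> str:
    num_updates = len(raw_output.splitlines()) - lines_to_subtract
    if num_updates == 0:
        return ""  # nothing available -> render nothing
    return display_format.format(updates=num_updates)


if __name__ == "__main__":
    print(repr(format_update_count("")))                       # ''
    print(repr(format_update_count("linux 5.16 -> 5.17\n")))   # 'Updates: 1'
```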
gh_patches_debug_2136
|
rasdani/github-patches
|
git_diff
|
Kinto__kinto-1752
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Deleting a collection doesn't delete access_control_entries for its children
`buckets.py` has an event listener that ensures that when a bucket is deleted, everything underneath it is recursively deleted. `collections.py` has one too but it doesn't appear to be as robust -- it doesn't have a wildcard to match objects more than one level below it (which might be OK, since only records are below collections now), and `delete_object_permissions` is only called on the collection rather than its children.
</issue>
<code>
[start of kinto/views/collections.py]
1 import colander
2 from pyramid.events import subscriber
3
4 from kinto.core import resource, utils
5 from kinto.core.events import ResourceChanged, ACTIONS
6 from kinto.schema_validation import validate_from_bucket_schema_or_400, JSONSchemaMapping
7
8
9 class CollectionSchema(resource.ResourceSchema):
10 schema = JSONSchemaMapping(missing=colander.drop)
11 cache_expires = colander.SchemaNode(colander.Int(), missing=colander.drop)
12
13
14 @resource.register(name='collection',
15 collection_path='/buckets/{{bucket_id}}/collections',
16 record_path='/buckets/{{bucket_id}}/collections/{{id}}')
17 class Collection(resource.ShareableResource):
18 schema = CollectionSchema
19 permissions = ('read', 'write', 'record:create')
20
21 def get_parent_id(self, request):
22 bucket_id = request.matchdict['bucket_id']
23 parent_id = utils.instance_uri(request, 'bucket', id=bucket_id)
24 return parent_id
25
26 def process_record(self, new, old=None):
27 """Additional collection schema validation from bucket, if any."""
28 new = super().process_record(new, old)
29
30 # Remove internal and auto-assigned fields.
31 internal_fields = (self.model.id_field,
32 self.model.modified_field,
33 self.model.permissions_field)
34 validate_from_bucket_schema_or_400(new, resource_name="collection", request=self.request,
35 ignore_fields=internal_fields)
36 return new
37
38
39 @subscriber(ResourceChanged,
40 for_resources=('collection',),
41 for_actions=(ACTIONS.DELETE,))
42 def on_collections_deleted(event):
43 """Some collections were deleted, delete records.
44 """
45 storage = event.request.registry.storage
46 permission = event.request.registry.permission
47
48 for change in event.impacted_records:
49 collection = change['old']
50 bucket_id = event.payload['bucket_id']
51 parent_id = utils.instance_uri(event.request, 'collection',
52 bucket_id=bucket_id,
53 id=collection['id'])
54 storage.delete_all(collection_id=None,
55 parent_id=parent_id,
56 with_deleted=False)
57 storage.purge_deleted(collection_id=None,
58 parent_id=parent_id)
59 permission.delete_object_permissions(parent_id)
60
[end of kinto/views/collections.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/kinto/views/collections.py b/kinto/views/collections.py
--- a/kinto/views/collections.py
+++ b/kinto/views/collections.py
@@ -56,4 +56,4 @@
with_deleted=False)
storage.purge_deleted(collection_id=None,
parent_id=parent_id)
- permission.delete_object_permissions(parent_id)
+ permission.delete_object_permissions(parent_id + '/*')
|
{"golden_diff": "diff --git a/kinto/views/collections.py b/kinto/views/collections.py\n--- a/kinto/views/collections.py\n+++ b/kinto/views/collections.py\n@@ -56,4 +56,4 @@\n with_deleted=False)\n storage.purge_deleted(collection_id=None,\n parent_id=parent_id)\n- permission.delete_object_permissions(parent_id)\n+ permission.delete_object_permissions(parent_id + '/*')\n", "issue": "Deleting a collection doesn't delete access_control_entries for its children\n`buckets.py` has an event listener that ensures that when a bucket is deleted, everything underneath it is recursively deleted. `collections.py` has one too but it doesn't appear to be as robust -- it doesn't have a wildcard to match objects more than one level below it (which might be OK, since only records are below collections now), and `delete_object_permissions` is only called on the collection rather than its children.\n", "before_files": [{"content": "import colander\nfrom pyramid.events import subscriber\n\nfrom kinto.core import resource, utils\nfrom kinto.core.events import ResourceChanged, ACTIONS\nfrom kinto.schema_validation import validate_from_bucket_schema_or_400, JSONSchemaMapping\n\n\nclass CollectionSchema(resource.ResourceSchema):\n schema = JSONSchemaMapping(missing=colander.drop)\n cache_expires = colander.SchemaNode(colander.Int(), missing=colander.drop)\n\n\[email protected](name='collection',\n collection_path='/buckets/{{bucket_id}}/collections',\n record_path='/buckets/{{bucket_id}}/collections/{{id}}')\nclass Collection(resource.ShareableResource):\n schema = CollectionSchema\n permissions = ('read', 'write', 'record:create')\n\n def get_parent_id(self, request):\n bucket_id = request.matchdict['bucket_id']\n parent_id = utils.instance_uri(request, 'bucket', id=bucket_id)\n return parent_id\n\n def process_record(self, new, old=None):\n \"\"\"Additional collection schema validation from bucket, if any.\"\"\"\n new = super().process_record(new, old)\n\n # Remove internal and auto-assigned fields.\n internal_fields = (self.model.id_field,\n self.model.modified_field,\n self.model.permissions_field)\n validate_from_bucket_schema_or_400(new, resource_name=\"collection\", request=self.request,\n ignore_fields=internal_fields)\n return new\n\n\n@subscriber(ResourceChanged,\n for_resources=('collection',),\n for_actions=(ACTIONS.DELETE,))\ndef on_collections_deleted(event):\n \"\"\"Some collections were deleted, delete records.\n \"\"\"\n storage = event.request.registry.storage\n permission = event.request.registry.permission\n\n for change in event.impacted_records:\n collection = change['old']\n bucket_id = event.payload['bucket_id']\n parent_id = utils.instance_uri(event.request, 'collection',\n bucket_id=bucket_id,\n id=collection['id'])\n storage.delete_all(collection_id=None,\n parent_id=parent_id,\n with_deleted=False)\n storage.purge_deleted(collection_id=None,\n parent_id=parent_id)\n permission.delete_object_permissions(parent_id)\n", "path": "kinto/views/collections.py"}]}
| 1,200 | 90 |
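The fix above only appends `/*` to the URI handed to `delete_object_permissions`, so access-control entries belonging to objects underneath the deleted collection (its records) are purged along with it. The toy, dict-backed model below illustrates why the trailing wildcard matters; it is a simplification, not the real cliquet/kinto permission backend:

```python
# Toy permission store keyed by object URI, with glob-style deletion.
from fnmatch import fnmatchcase


def delete_object_permissions(store: dict, pattern: str) -> None:
    """Drop every entry whose URI matches the given glob pattern."""
    for object_uri in list(store):
        if fnmatchcase(object_uri, pattern):
            del store[object_uri]


perms = {
    "/buckets/b1/collections/c1": {"write": {"alice"}},
    "/buckets/b1/collections/c1/records/r1": {"read": {"bob"}},
}

delete_object_permissions(perms, "/buckets/b1/collections/c1")    # the collection itself
delete_object_permissions(perms, "/buckets/b1/collections/c1/*")  # everything underneath it
print(perms)  # {} – the record's entry is gone too, which the old code missed
```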
gh_patches_debug_37258
|
rasdani/github-patches
|
git_diff
|
PaddlePaddle__models-447
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Adapt tuning script to padding removing #444
</issue>
<code>
[start of deep_speech_2/tools/tune.py]
1 """Beam search parameters tuning for DeepSpeech2 model."""
2 from __future__ import absolute_import
3 from __future__ import division
4 from __future__ import print_function
5
6 import sys
7 import os
8 import numpy as np
9 import argparse
10 import functools
11 import gzip
12 import logging
13 import paddle.v2 as paddle
14 import _init_paths
15 from data_utils.data import DataGenerator
16 from decoders.swig_wrapper import Scorer
17 from decoders.swig_wrapper import ctc_beam_search_decoder_batch
18 from model_utils.model import deep_speech_v2_network
19 from utils.error_rate import wer, cer
20 from utils.utility import add_arguments, print_arguments
21
22 parser = argparse.ArgumentParser(description=__doc__)
23 add_arg = functools.partial(add_arguments, argparser=parser)
24 # yapf: disable
25 add_arg('num_batches', int, -1, "# of batches tuning on. "
26 "Default -1, on whole dev set.")
27 add_arg('batch_size', int, 256, "# of samples per batch.")
28 add_arg('trainer_count', int, 8, "# of Trainers (CPUs or GPUs).")
29 add_arg('beam_size', int, 500, "Beam search width.")
30 add_arg('num_proc_bsearch', int, 8, "# of CPUs for beam search.")
31 add_arg('num_proc_data', int, 8, "# of CPUs for data preprocessing.")
32 add_arg('num_conv_layers', int, 2, "# of convolution layers.")
33 add_arg('num_rnn_layers', int, 3, "# of recurrent layers.")
34 add_arg('rnn_layer_size', int, 2048, "# of recurrent cells per layer.")
35 add_arg('num_alphas', int, 45, "# of alpha candidates for tuning.")
36 add_arg('num_betas', int, 8, "# of beta candidates for tuning.")
37 add_arg('alpha_from', float, 1.0, "Where alpha starts tuning from.")
38 add_arg('alpha_to', float, 3.2, "Where alpha ends tuning with.")
39 add_arg('beta_from', float, 0.1, "Where beta starts tuning from.")
40 add_arg('beta_to', float, 0.45, "Where beta ends tuning with.")
41 add_arg('cutoff_prob', float, 1.0, "Cutoff probability for pruning.")
42 add_arg('cutoff_top_n', int, 40, "Cutoff number for pruning.")
43 add_arg('use_gru', bool, False, "Use GRUs instead of simple RNNs.")
44 add_arg('use_gpu', bool, True, "Use GPU or not.")
45 add_arg('share_rnn_weights',bool, True, "Share input-hidden weights across "
46 "bi-directional RNNs. Not for GRU.")
47 add_arg('tune_manifest', str,
48 'data/librispeech/manifest.dev-clean',
49 "Filepath of manifest to tune.")
50 add_arg('mean_std_path', str,
51 'data/librispeech/mean_std.npz',
52 "Filepath of normalizer's mean & std.")
53 add_arg('vocab_path', str,
54 'data/librispeech/vocab.txt',
55 "Filepath of vocabulary.")
56 add_arg('lang_model_path', str,
57 'models/lm/common_crawl_00.prune01111.trie.klm',
58 "Filepath for language model.")
59 add_arg('model_path', str,
60 './checkpoints/libri/params.latest.tar.gz',
61 "If None, the training starts from scratch, "
62 "otherwise, it resumes from the pre-trained model.")
63 add_arg('error_rate_type', str,
64 'wer',
65 "Error rate type for evaluation.",
66 choices=['wer', 'cer'])
67 add_arg('specgram_type', str,
68 'linear',
69 "Audio feature type. Options: linear, mfcc.",
70 choices=['linear', 'mfcc'])
71 # yapf: disable
72 args = parser.parse_args()
73
74
75 logging.basicConfig(
76 format='[%(levelname)s %(asctime)s %(filename)s:%(lineno)d] %(message)s')
77
78 def tune():
79 """Tune parameters alpha and beta incrementally."""
80 if not args.num_alphas >= 0:
81 raise ValueError("num_alphas must be non-negative!")
82 if not args.num_betas >= 0:
83 raise ValueError("num_betas must be non-negative!")
84
85 data_generator = DataGenerator(
86 vocab_filepath=args.vocab_path,
87 mean_std_filepath=args.mean_std_path,
88 augmentation_config='{}',
89 specgram_type=args.specgram_type,
90 num_threads=args.num_proc_data,
91 keep_transcription_text=True)
92
93 audio_data = paddle.layer.data(
94 name="audio_spectrogram",
95 type=paddle.data_type.dense_array(161 * 161))
96 text_data = paddle.layer.data(
97 name="transcript_text",
98 type=paddle.data_type.integer_value_sequence(data_generator.vocab_size))
99
100 output_probs, _ = deep_speech_v2_network(
101 audio_data=audio_data,
102 text_data=text_data,
103 dict_size=data_generator.vocab_size,
104 num_conv_layers=args.num_conv_layers,
105 num_rnn_layers=args.num_rnn_layers,
106 rnn_size=args.rnn_layer_size,
107 use_gru=args.use_gru,
108 share_rnn_weights=args.share_rnn_weights)
109
110 batch_reader = data_generator.batch_reader_creator(
111 manifest_path=args.tune_manifest,
112 batch_size=args.batch_size,
113 sortagrad=False,
114 shuffle_method=None)
115
116 # load parameters
117 if not os.path.isfile(args.model_path):
118 raise IOError("Invaid model path: %s" % args.model_path)
119 parameters = paddle.parameters.Parameters.from_tar(
120 gzip.open(args.model_path))
121
122 inferer = paddle.inference.Inference(
123 output_layer=output_probs, parameters=parameters)
124 # decoders only accept string encoded in utf-8
125 vocab_list = [chars.encode("utf-8") for chars in data_generator.vocab_list]
126
127 # init logger
128 logger = logging.getLogger("")
129 logger.setLevel(level=logging.INFO)
130 # init external scorer
131 logger.info("begin to initialize the external scorer for tuning")
132 if not os.path.isfile(args.lang_model_path):
133 raise IOError("Invaid language model path: %s" % args.lang_model_path)
134 ext_scorer = Scorer(
135 alpha=args.alpha_from,
136 beta=args.beta_from,
137 model_path=args.lang_model_path,
138 vocabulary=vocab_list)
139 logger.info("language model: "
140 "is_character_based = %d," % ext_scorer.is_character_based() +
141 " max_order = %d," % ext_scorer.get_max_order() +
142 " dict_size = %d" % ext_scorer.get_dict_size())
143 logger.info("end initializing scorer. Start tuning ...")
144
145 error_rate_func = cer if args.error_rate_type == 'cer' else wer
146 # create grid for search
147 cand_alphas = np.linspace(args.alpha_from, args.alpha_to, args.num_alphas)
148 cand_betas = np.linspace(args.beta_from, args.beta_to, args.num_betas)
149 params_grid = [(alpha, beta) for alpha in cand_alphas
150 for beta in cand_betas]
151
152 err_sum = [0.0 for i in xrange(len(params_grid))]
153 err_ave = [0.0 for i in xrange(len(params_grid))]
154 num_ins, cur_batch = 0, 0
155 ## incremental tuning parameters over multiple batches
156 for infer_data in batch_reader():
157 if (args.num_batches >= 0) and (cur_batch >= args.num_batches):
158 break
159 infer_results = inferer.infer(input=infer_data)
160
161 num_steps = len(infer_results) // len(infer_data)
162 probs_split = [
163 infer_results[i * num_steps:(i + 1) * num_steps]
164 for i in xrange(len(infer_data))
165 ]
166
167 target_transcripts = [transcript for _, transcript in infer_data]
168
169 num_ins += len(target_transcripts)
170 # grid search
171 for index, (alpha, beta) in enumerate(params_grid):
172 # reset alpha & beta
173 ext_scorer.reset_params(alpha, beta)
174 beam_search_results = ctc_beam_search_decoder_batch(
175 probs_split=probs_split,
176 vocabulary=vocab_list,
177 beam_size=args.beam_size,
178 num_processes=args.num_proc_bsearch,
179 cutoff_prob=args.cutoff_prob,
180 cutoff_top_n=args.cutoff_top_n,
181 ext_scoring_func=ext_scorer, )
182
183 result_transcripts = [res[0][1] for res in beam_search_results]
184 for target, result in zip(target_transcripts, result_transcripts):
185 err_sum[index] += error_rate_func(target, result)
186 err_ave[index] = err_sum[index] / num_ins
187 if index % 2 == 0:
188 sys.stdout.write('.')
189 sys.stdout.flush()
190
191 # output on-line tuning result at the end of current batch
192 err_ave_min = min(err_ave)
193 min_index = err_ave.index(err_ave_min)
194 print("\nBatch %d [%d/?], current opt (alpha, beta) = (%s, %s), "
195 " min [%s] = %f" %(cur_batch, num_ins,
196 "%.3f" % params_grid[min_index][0],
197 "%.3f" % params_grid[min_index][1],
198 args.error_rate_type, err_ave_min))
199 cur_batch += 1
200
201 # output WER/CER at every (alpha, beta)
202 print("\nFinal %s:\n" % args.error_rate_type)
203 for index in xrange(len(params_grid)):
204 print("(alpha, beta) = (%s, %s), [%s] = %f"
205 % ("%.3f" % params_grid[index][0], "%.3f" % params_grid[index][1],
206 args.error_rate_type, err_ave[index]))
207
208 err_ave_min = min(err_ave)
209 min_index = err_ave.index(err_ave_min)
210 print("\nFinish tuning on %d batches, final opt (alpha, beta) = (%s, %s)"
211 % (args.num_batches, "%.3f" % params_grid[min_index][0],
212 "%.3f" % params_grid[min_index][1]))
213
214 logger.info("finish tuning")
215
216
217 def main():
218 print_arguments(args)
219 paddle.init(use_gpu=args.use_gpu,
220 rnn_use_batch=True,
221 trainer_count=args.trainer_count)
222 tune()
223
224
225 if __name__ == '__main__':
226 main()
227
[end of deep_speech_2/tools/tune.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/deep_speech_2/tools/tune.py b/deep_speech_2/tools/tune.py
--- a/deep_speech_2/tools/tune.py
+++ b/deep_speech_2/tools/tune.py
@@ -88,7 +88,8 @@
augmentation_config='{}',
specgram_type=args.specgram_type,
num_threads=args.num_proc_data,
- keep_transcription_text=True)
+ keep_transcription_text=True,
+ num_conv_layers=args.num_conv_layers)
audio_data = paddle.layer.data(
name="audio_spectrogram",
@@ -96,10 +97,25 @@
text_data = paddle.layer.data(
name="transcript_text",
type=paddle.data_type.integer_value_sequence(data_generator.vocab_size))
+ seq_offset_data = paddle.layer.data(
+ name='sequence_offset',
+ type=paddle.data_type.integer_value_sequence(1))
+ seq_len_data = paddle.layer.data(
+ name='sequence_length',
+ type=paddle.data_type.integer_value_sequence(1))
+ index_range_datas = []
+ for i in xrange(args.num_rnn_layers):
+ index_range_datas.append(
+ paddle.layer.data(
+ name='conv%d_index_range' % i,
+ type=paddle.data_type.dense_vector(6)))
output_probs, _ = deep_speech_v2_network(
audio_data=audio_data,
text_data=text_data,
+ seq_offset_data=seq_offset_data,
+ seq_len_data=seq_len_data,
+ index_range_datas=index_range_datas,
dict_size=data_generator.vocab_size,
num_conv_layers=args.num_conv_layers,
num_rnn_layers=args.num_rnn_layers,
@@ -156,15 +172,17 @@
for infer_data in batch_reader():
if (args.num_batches >= 0) and (cur_batch >= args.num_batches):
break
- infer_results = inferer.infer(input=infer_data)
-
- num_steps = len(infer_results) // len(infer_data)
+ infer_results = inferer.infer(input=infer_data,
+ feeding=data_generator.feeding)
+ start_pos = [0] * (len(infer_data) + 1)
+ for i in xrange(len(infer_data)):
+ start_pos[i + 1] = start_pos[i] + infer_data[i][3][0]
probs_split = [
- infer_results[i * num_steps:(i + 1) * num_steps]
- for i in xrange(len(infer_data))
+ infer_results[start_pos[i]:start_pos[i + 1]]
+ for i in xrange(0, len(infer_data))
]
- target_transcripts = [transcript for _, transcript in infer_data]
+ target_transcripts = [ data[1] for data in infer_data ]
num_ins += len(target_transcripts)
# grid search
|
{"golden_diff": "diff --git a/deep_speech_2/tools/tune.py b/deep_speech_2/tools/tune.py\n--- a/deep_speech_2/tools/tune.py\n+++ b/deep_speech_2/tools/tune.py\n@@ -88,7 +88,8 @@\n augmentation_config='{}',\n specgram_type=args.specgram_type,\n num_threads=args.num_proc_data,\n- keep_transcription_text=True)\n+ keep_transcription_text=True,\n+ num_conv_layers=args.num_conv_layers)\n \n audio_data = paddle.layer.data(\n name=\"audio_spectrogram\",\n@@ -96,10 +97,25 @@\n text_data = paddle.layer.data(\n name=\"transcript_text\",\n type=paddle.data_type.integer_value_sequence(data_generator.vocab_size))\n+ seq_offset_data = paddle.layer.data(\n+ name='sequence_offset',\n+ type=paddle.data_type.integer_value_sequence(1))\n+ seq_len_data = paddle.layer.data(\n+ name='sequence_length',\n+ type=paddle.data_type.integer_value_sequence(1))\n+ index_range_datas = []\n+ for i in xrange(args.num_rnn_layers):\n+ index_range_datas.append(\n+ paddle.layer.data(\n+ name='conv%d_index_range' % i,\n+ type=paddle.data_type.dense_vector(6)))\n \n output_probs, _ = deep_speech_v2_network(\n audio_data=audio_data,\n text_data=text_data,\n+ seq_offset_data=seq_offset_data,\n+ seq_len_data=seq_len_data,\n+ index_range_datas=index_range_datas,\n dict_size=data_generator.vocab_size,\n num_conv_layers=args.num_conv_layers,\n num_rnn_layers=args.num_rnn_layers,\n@@ -156,15 +172,17 @@\n for infer_data in batch_reader():\n if (args.num_batches >= 0) and (cur_batch >= args.num_batches):\n break\n- infer_results = inferer.infer(input=infer_data)\n-\n- num_steps = len(infer_results) // len(infer_data)\n+ infer_results = inferer.infer(input=infer_data,\n+ feeding=data_generator.feeding)\n+ start_pos = [0] * (len(infer_data) + 1)\n+ for i in xrange(len(infer_data)):\n+ start_pos[i + 1] = start_pos[i] + infer_data[i][3][0]\n probs_split = [\n- infer_results[i * num_steps:(i + 1) * num_steps]\n- for i in xrange(len(infer_data))\n+ infer_results[start_pos[i]:start_pos[i + 1]]\n+ for i in xrange(0, len(infer_data))\n ]\n \n- target_transcripts = [transcript for _, transcript in infer_data]\n+ target_transcripts = [ data[1] for data in infer_data ]\n \n num_ins += len(target_transcripts)\n # grid search\n", "issue": "Adapt tuning script to padding removing #444\n\n", "before_files": [{"content": "\"\"\"Beam search parameters tuning for DeepSpeech2 model.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport sys\nimport os\nimport numpy as np\nimport argparse\nimport functools\nimport gzip\nimport logging\nimport paddle.v2 as paddle\nimport _init_paths\nfrom data_utils.data import DataGenerator\nfrom decoders.swig_wrapper import Scorer\nfrom decoders.swig_wrapper import ctc_beam_search_decoder_batch\nfrom model_utils.model import deep_speech_v2_network\nfrom utils.error_rate import wer, cer\nfrom utils.utility import add_arguments, print_arguments\n\nparser = argparse.ArgumentParser(description=__doc__)\nadd_arg = functools.partial(add_arguments, argparser=parser)\n# yapf: disable\nadd_arg('num_batches', int, -1, \"# of batches tuning on. 
\"\n \"Default -1, on whole dev set.\")\nadd_arg('batch_size', int, 256, \"# of samples per batch.\")\nadd_arg('trainer_count', int, 8, \"# of Trainers (CPUs or GPUs).\")\nadd_arg('beam_size', int, 500, \"Beam search width.\")\nadd_arg('num_proc_bsearch', int, 8, \"# of CPUs for beam search.\")\nadd_arg('num_proc_data', int, 8, \"# of CPUs for data preprocessing.\")\nadd_arg('num_conv_layers', int, 2, \"# of convolution layers.\")\nadd_arg('num_rnn_layers', int, 3, \"# of recurrent layers.\")\nadd_arg('rnn_layer_size', int, 2048, \"# of recurrent cells per layer.\")\nadd_arg('num_alphas', int, 45, \"# of alpha candidates for tuning.\")\nadd_arg('num_betas', int, 8, \"# of beta candidates for tuning.\")\nadd_arg('alpha_from', float, 1.0, \"Where alpha starts tuning from.\")\nadd_arg('alpha_to', float, 3.2, \"Where alpha ends tuning with.\")\nadd_arg('beta_from', float, 0.1, \"Where beta starts tuning from.\")\nadd_arg('beta_to', float, 0.45, \"Where beta ends tuning with.\")\nadd_arg('cutoff_prob', float, 1.0, \"Cutoff probability for pruning.\")\nadd_arg('cutoff_top_n', int, 40, \"Cutoff number for pruning.\")\nadd_arg('use_gru', bool, False, \"Use GRUs instead of simple RNNs.\")\nadd_arg('use_gpu', bool, True, \"Use GPU or not.\")\nadd_arg('share_rnn_weights',bool, True, \"Share input-hidden weights across \"\n \"bi-directional RNNs. Not for GRU.\")\nadd_arg('tune_manifest', str,\n 'data/librispeech/manifest.dev-clean',\n \"Filepath of manifest to tune.\")\nadd_arg('mean_std_path', str,\n 'data/librispeech/mean_std.npz',\n \"Filepath of normalizer's mean & std.\")\nadd_arg('vocab_path', str,\n 'data/librispeech/vocab.txt',\n \"Filepath of vocabulary.\")\nadd_arg('lang_model_path', str,\n 'models/lm/common_crawl_00.prune01111.trie.klm',\n \"Filepath for language model.\")\nadd_arg('model_path', str,\n './checkpoints/libri/params.latest.tar.gz',\n \"If None, the training starts from scratch, \"\n \"otherwise, it resumes from the pre-trained model.\")\nadd_arg('error_rate_type', str,\n 'wer',\n \"Error rate type for evaluation.\",\n choices=['wer', 'cer'])\nadd_arg('specgram_type', str,\n 'linear',\n \"Audio feature type. 
Options: linear, mfcc.\",\n choices=['linear', 'mfcc'])\n# yapf: disable\nargs = parser.parse_args()\n\n\nlogging.basicConfig(\n format='[%(levelname)s %(asctime)s %(filename)s:%(lineno)d] %(message)s')\n\ndef tune():\n \"\"\"Tune parameters alpha and beta incrementally.\"\"\"\n if not args.num_alphas >= 0:\n raise ValueError(\"num_alphas must be non-negative!\")\n if not args.num_betas >= 0:\n raise ValueError(\"num_betas must be non-negative!\")\n\n data_generator = DataGenerator(\n vocab_filepath=args.vocab_path,\n mean_std_filepath=args.mean_std_path,\n augmentation_config='{}',\n specgram_type=args.specgram_type,\n num_threads=args.num_proc_data,\n keep_transcription_text=True)\n\n audio_data = paddle.layer.data(\n name=\"audio_spectrogram\",\n type=paddle.data_type.dense_array(161 * 161))\n text_data = paddle.layer.data(\n name=\"transcript_text\",\n type=paddle.data_type.integer_value_sequence(data_generator.vocab_size))\n\n output_probs, _ = deep_speech_v2_network(\n audio_data=audio_data,\n text_data=text_data,\n dict_size=data_generator.vocab_size,\n num_conv_layers=args.num_conv_layers,\n num_rnn_layers=args.num_rnn_layers,\n rnn_size=args.rnn_layer_size,\n use_gru=args.use_gru,\n share_rnn_weights=args.share_rnn_weights)\n\n batch_reader = data_generator.batch_reader_creator(\n manifest_path=args.tune_manifest,\n batch_size=args.batch_size,\n sortagrad=False,\n shuffle_method=None)\n\n # load parameters\n if not os.path.isfile(args.model_path):\n raise IOError(\"Invaid model path: %s\" % args.model_path)\n parameters = paddle.parameters.Parameters.from_tar(\n gzip.open(args.model_path))\n\n inferer = paddle.inference.Inference(\n output_layer=output_probs, parameters=parameters)\n # decoders only accept string encoded in utf-8\n vocab_list = [chars.encode(\"utf-8\") for chars in data_generator.vocab_list]\n\n # init logger\n logger = logging.getLogger(\"\")\n logger.setLevel(level=logging.INFO)\n # init external scorer\n logger.info(\"begin to initialize the external scorer for tuning\")\n if not os.path.isfile(args.lang_model_path):\n raise IOError(\"Invaid language model path: %s\" % args.lang_model_path)\n ext_scorer = Scorer(\n alpha=args.alpha_from,\n beta=args.beta_from,\n model_path=args.lang_model_path,\n vocabulary=vocab_list)\n logger.info(\"language model: \"\n \"is_character_based = %d,\" % ext_scorer.is_character_based() +\n \" max_order = %d,\" % ext_scorer.get_max_order() +\n \" dict_size = %d\" % ext_scorer.get_dict_size())\n logger.info(\"end initializing scorer. 
Start tuning ...\")\n\n error_rate_func = cer if args.error_rate_type == 'cer' else wer\n # create grid for search\n cand_alphas = np.linspace(args.alpha_from, args.alpha_to, args.num_alphas)\n cand_betas = np.linspace(args.beta_from, args.beta_to, args.num_betas)\n params_grid = [(alpha, beta) for alpha in cand_alphas\n for beta in cand_betas]\n\n err_sum = [0.0 for i in xrange(len(params_grid))]\n err_ave = [0.0 for i in xrange(len(params_grid))]\n num_ins, cur_batch = 0, 0\n ## incremental tuning parameters over multiple batches\n for infer_data in batch_reader():\n if (args.num_batches >= 0) and (cur_batch >= args.num_batches):\n break\n infer_results = inferer.infer(input=infer_data)\n\n num_steps = len(infer_results) // len(infer_data)\n probs_split = [\n infer_results[i * num_steps:(i + 1) * num_steps]\n for i in xrange(len(infer_data))\n ]\n\n target_transcripts = [transcript for _, transcript in infer_data]\n\n num_ins += len(target_transcripts)\n # grid search\n for index, (alpha, beta) in enumerate(params_grid):\n # reset alpha & beta\n ext_scorer.reset_params(alpha, beta)\n beam_search_results = ctc_beam_search_decoder_batch(\n probs_split=probs_split,\n vocabulary=vocab_list,\n beam_size=args.beam_size,\n num_processes=args.num_proc_bsearch,\n cutoff_prob=args.cutoff_prob,\n cutoff_top_n=args.cutoff_top_n,\n ext_scoring_func=ext_scorer, )\n\n result_transcripts = [res[0][1] for res in beam_search_results]\n for target, result in zip(target_transcripts, result_transcripts):\n err_sum[index] += error_rate_func(target, result)\n err_ave[index] = err_sum[index] / num_ins\n if index % 2 == 0:\n sys.stdout.write('.')\n sys.stdout.flush()\n\n # output on-line tuning result at the end of current batch\n err_ave_min = min(err_ave)\n min_index = err_ave.index(err_ave_min)\n print(\"\\nBatch %d [%d/?], current opt (alpha, beta) = (%s, %s), \"\n \" min [%s] = %f\" %(cur_batch, num_ins,\n \"%.3f\" % params_grid[min_index][0],\n \"%.3f\" % params_grid[min_index][1],\n args.error_rate_type, err_ave_min))\n cur_batch += 1\n\n # output WER/CER at every (alpha, beta)\n print(\"\\nFinal %s:\\n\" % args.error_rate_type)\n for index in xrange(len(params_grid)):\n print(\"(alpha, beta) = (%s, %s), [%s] = %f\"\n % (\"%.3f\" % params_grid[index][0], \"%.3f\" % params_grid[index][1],\n args.error_rate_type, err_ave[index]))\n\n err_ave_min = min(err_ave)\n min_index = err_ave.index(err_ave_min)\n print(\"\\nFinish tuning on %d batches, final opt (alpha, beta) = (%s, %s)\"\n % (args.num_batches, \"%.3f\" % params_grid[min_index][0],\n \"%.3f\" % params_grid[min_index][1]))\n\n logger.info(\"finish tuning\")\n\n\ndef main():\n print_arguments(args)\n paddle.init(use_gpu=args.use_gpu,\n rnn_use_batch=True,\n trainer_count=args.trainer_count)\n tune()\n\n\nif __name__ == '__main__':\n main()\n", "path": "deep_speech_2/tools/tune.py"}]}
| 3,439 | 636 |
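Most of the patch above is plumbing for the padding-removed network inputs; the behavioural core is that the flat inference output can no longer be cut into equal `num_steps` chunks per utterance and must instead be sliced by each sample's own length via cumulative offsets. A small standalone illustration of that slicing — the lengths are invented for the example:

```python
# Split a flat result list into per-sample chunks using cumulative offsets,
# mirroring the start_pos bookkeeping introduced in the tuning script.
def split_by_lengths(flat_results, lengths):
    start_pos = [0] * (len(lengths) + 1)
    for i, length in enumerate(lengths):
        start_pos[i + 1] = start_pos[i] + length
    return [flat_results[start_pos[i]:start_pos[i + 1]]
            for i in range(len(lengths))]


print(split_by_lengths(list(range(10)), [3, 5, 2]))
# [[0, 1, 2], [3, 4, 5, 6, 7], [8, 9]]
```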
gh_patches_debug_19961
|
rasdani/github-patches
|
git_diff
|
Kinto__kinto-339
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Implicit collection creation doesn't validate the ID format
```
$ http GET http://localhost:8888/v1/buckets/default/collections/見/records --auth natim:
HTTP/1.1 200 OK
Access-Control-Expose-Headers: Content-Length, Expires, Alert, Retry-After, Last-Modified, Total-Records, ETag, Pragma, Cache-Control, Backoff, Next-Page
Cache-Control: no-cache
Content-Length: 11
Content-Type: application/json; charset=UTF-8
Date: Thu, 05 Nov 2015 14:19:38 GMT
Etag: "1446733178498"
Last-Modified: Thu, 05 Nov 2015 14:19:38 GMT
Server: waitress
Total-Records: 0
{
"data": []
}
```
</issue>
<code>
[start of kinto/views/buckets.py]
1 from pyramid import httpexceptions
2 from pyramid.settings import asbool
3 from pyramid.security import NO_PERMISSION_REQUIRED
4 from pyramid.view import view_config
5
6 from cliquet import resource
7 from cliquet.utils import build_request, reapply_cors
8 from cliquet.storage import exceptions as storage_exceptions
9
10 from kinto.authorization import RouteFactory
11 from kinto.views import NameGenerator
12 from kinto.views.collections import Collection
13
14
15 @resource.register(name='bucket',
16 collection_methods=('GET', 'POST'),
17 collection_path='/buckets',
18 record_path='/buckets/{{id}}')
19 class Bucket(resource.ProtectedResource):
20 permissions = ('read', 'write', 'collection:create', 'group:create')
21
22 def __init__(self, *args, **kwargs):
23 super(Bucket, self).__init__(*args, **kwargs)
24 self.model.id_generator = NameGenerator()
25
26 def get_parent_id(self, request):
27 # Buckets are not isolated by user, unlike Cliquet resources.
28 return ''
29
30 def delete(self):
31 result = super(Bucket, self).delete()
32
33 # Delete groups.
34 storage = self.model.storage
35 parent_id = '/buckets/%s' % self.record_id
36 storage.delete_all(collection_id='group',
37 parent_id=parent_id,
38 with_deleted=False)
39 storage.purge_deleted(collection_id='group',
40 parent_id=parent_id)
41
42 # Delete collections.
43 deleted = storage.delete_all(collection_id='collection',
44 parent_id=parent_id,
45 with_deleted=False)
46 storage.purge_deleted(collection_id='collection',
47 parent_id=parent_id)
48
49 # Delete records.
50 id_field = self.model.id_field
51 for collection in deleted:
52 parent_id = '/buckets/%s/collections/%s' % (self.record_id,
53 collection[id_field])
54 storage.delete_all(collection_id='record',
55 parent_id=parent_id,
56 with_deleted=False)
57 storage.purge_deleted(collection_id='record', parent_id=parent_id)
58
59 return result
60
61
62 def create_bucket(request, bucket_id):
63 """Create a bucket if it doesn't exists."""
64 bucket_put = (request.method.lower() == 'put' and
65 request.path.endswith('buckets/default'))
66 # Do nothing if current request will already create the bucket.
67 if bucket_put:
68 return
69
70 # Do not intent to create multiple times per request (e.g. in batch).
71 already_created = request.bound_data.setdefault('buckets', {})
72 if bucket_id in already_created:
73 return
74
75 # Fake context to instantiate a Bucket resource.
76 context = RouteFactory(request)
77 context.get_permission_object_id = lambda r, i: '/buckets/%s' % bucket_id
78 resource = Bucket(request, context)
79 try:
80 bucket = resource.model.create_record({'id': bucket_id})
81 except storage_exceptions.UnicityError as e:
82 bucket = e.record
83 already_created[bucket_id] = bucket
84
85
86 def create_collection(request, bucket_id):
87 # Do nothing if current request does not involve a collection.
88 subpath = request.matchdict.get('subpath')
89 if not (subpath and subpath.startswith('collections/')):
90 return
91
92 collection_id = subpath.split('/')[1]
93 collection_uri = '/buckets/%s/collections/%s' % (bucket_id, collection_id)
94
95 # Do not intent to create multiple times per request (e.g. in batch).
96 already_created = request.bound_data.setdefault('collections', {})
97 if collection_uri in already_created:
98 return
99
100 # Do nothing if current request will already create the collection.
101 collection_put = (request.method.lower() == 'put' and
102 request.path.endswith(collection_id))
103 if collection_put:
104 return
105
106 # Fake context to instantiate a Collection resource.
107 context = RouteFactory(request)
108 context.get_permission_object_id = lambda r, i: collection_uri
109
110 backup = request.matchdict
111 request.matchdict = dict(bucket_id=bucket_id,
112 id=collection_id,
113 **request.matchdict)
114 resource = Collection(request, context)
115 try:
116 collection = resource.model.create_record({'id': collection_id})
117 except storage_exceptions.UnicityError as e:
118 collection = e.record
119 already_created[collection_uri] = collection
120 request.matchdict = backup
121
122
123 @view_config(route_name='default_bucket', permission=NO_PERMISSION_REQUIRED)
124 @view_config(route_name='default_bucket_collection',
125 permission=NO_PERMISSION_REQUIRED)
126 def default_bucket(request):
127 if request.method.lower() == 'options':
128 path = request.path.replace('default', 'unknown')
129 subrequest = build_request(request, {
130 'method': 'OPTIONS',
131 'path': path
132 })
133 return request.invoke_subrequest(subrequest)
134
135 if getattr(request, 'prefixed_userid', None) is None:
136 # Pass through the forbidden_view_config
137 raise httpexceptions.HTTPForbidden()
138
139 settings = request.registry.settings
140
141 if asbool(settings['readonly']):
142 raise httpexceptions.HTTPMethodNotAllowed()
143
144 bucket_id = request.default_bucket_id
145 path = request.path.replace('/buckets/default', '/buckets/%s' % bucket_id)
146 querystring = request.url[(request.url.index(request.path) +
147 len(request.path)):]
148
149 # Make sure bucket exists
150 create_bucket(request, bucket_id)
151
152 # Make sure the collection exists
153 create_collection(request, bucket_id)
154
155 subrequest = build_request(request, {
156 'method': request.method,
157 'path': path + querystring,
158 'body': request.body
159 })
160 subrequest.bound_data = request.bound_data
161
162 try:
163 response = request.invoke_subrequest(subrequest)
164 except httpexceptions.HTTPException as error:
165 if error.content_type == 'application/json':
166 response = reapply_cors(subrequest, error)
167 else:
168 # Ask the upper level to format the error.
169 raise error
170 return response
171
[end of kinto/views/buckets.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/kinto/views/buckets.py b/kinto/views/buckets.py
--- a/kinto/views/buckets.py
+++ b/kinto/views/buckets.py
@@ -4,6 +4,7 @@
from pyramid.view import view_config
from cliquet import resource
+from cliquet.errors import raise_invalid
from cliquet.utils import build_request, reapply_cors
from cliquet.storage import exceptions as storage_exceptions
@@ -112,6 +113,12 @@
id=collection_id,
**request.matchdict)
resource = Collection(request, context)
+ if not resource.model.id_generator.match(collection_id):
+ error_details = {
+ 'location': 'path',
+ 'description': "Invalid collection_id id"
+ }
+ raise_invalid(request, **error_details)
try:
collection = resource.model.create_record({'id': collection_id})
except storage_exceptions.UnicityError as e:
|
{"golden_diff": "diff --git a/kinto/views/buckets.py b/kinto/views/buckets.py\n--- a/kinto/views/buckets.py\n+++ b/kinto/views/buckets.py\n@@ -4,6 +4,7 @@\n from pyramid.view import view_config\n \n from cliquet import resource\n+from cliquet.errors import raise_invalid\n from cliquet.utils import build_request, reapply_cors\n from cliquet.storage import exceptions as storage_exceptions\n \n@@ -112,6 +113,12 @@\n id=collection_id,\n **request.matchdict)\n resource = Collection(request, context)\n+ if not resource.model.id_generator.match(collection_id):\n+ error_details = {\n+ 'location': 'path',\n+ 'description': \"Invalid collection_id id\"\n+ }\n+ raise_invalid(request, **error_details)\n try:\n collection = resource.model.create_record({'id': collection_id})\n except storage_exceptions.UnicityError as e:\n", "issue": "Implicit collection creation doesn't validate the ID format\n```\n$ http GET http://localhost:8888/v1/buckets/default/collections/\u898b/records --auth natim:\nHTTP/1.1 200 OK\nAccess-Control-Expose-Headers: Content-Length, Expires, Alert, Retry-After, Last-Modified, Total-Records, ETag, Pragma, Cache-Control, Backoff, Next-Page\nCache-Control: no-cache\nContent-Length: 11\nContent-Type: application/json; charset=UTF-8\nDate: Thu, 05 Nov 2015 14:19:38 GMT\nEtag: \"1446733178498\"\nLast-Modified: Thu, 05 Nov 2015 14:19:38 GMT\nServer: waitress\nTotal-Records: 0\n\n{\n \"data\": []\n}\n```\n\n", "before_files": [{"content": "from pyramid import httpexceptions\nfrom pyramid.settings import asbool\nfrom pyramid.security import NO_PERMISSION_REQUIRED\nfrom pyramid.view import view_config\n\nfrom cliquet import resource\nfrom cliquet.utils import build_request, reapply_cors\nfrom cliquet.storage import exceptions as storage_exceptions\n\nfrom kinto.authorization import RouteFactory\nfrom kinto.views import NameGenerator\nfrom kinto.views.collections import Collection\n\n\[email protected](name='bucket',\n collection_methods=('GET', 'POST'),\n collection_path='/buckets',\n record_path='/buckets/{{id}}')\nclass Bucket(resource.ProtectedResource):\n permissions = ('read', 'write', 'collection:create', 'group:create')\n\n def __init__(self, *args, **kwargs):\n super(Bucket, self).__init__(*args, **kwargs)\n self.model.id_generator = NameGenerator()\n\n def get_parent_id(self, request):\n # Buckets are not isolated by user, unlike Cliquet resources.\n return ''\n\n def delete(self):\n result = super(Bucket, self).delete()\n\n # Delete groups.\n storage = self.model.storage\n parent_id = '/buckets/%s' % self.record_id\n storage.delete_all(collection_id='group',\n parent_id=parent_id,\n with_deleted=False)\n storage.purge_deleted(collection_id='group',\n parent_id=parent_id)\n\n # Delete collections.\n deleted = storage.delete_all(collection_id='collection',\n parent_id=parent_id,\n with_deleted=False)\n storage.purge_deleted(collection_id='collection',\n parent_id=parent_id)\n\n # Delete records.\n id_field = self.model.id_field\n for collection in deleted:\n parent_id = '/buckets/%s/collections/%s' % (self.record_id,\n collection[id_field])\n storage.delete_all(collection_id='record',\n parent_id=parent_id,\n with_deleted=False)\n storage.purge_deleted(collection_id='record', parent_id=parent_id)\n\n return result\n\n\ndef create_bucket(request, bucket_id):\n \"\"\"Create a bucket if it doesn't exists.\"\"\"\n bucket_put = (request.method.lower() == 'put' and\n request.path.endswith('buckets/default'))\n # Do nothing if current request will already create the bucket.\n if 
bucket_put:\n return\n\n # Do not intent to create multiple times per request (e.g. in batch).\n already_created = request.bound_data.setdefault('buckets', {})\n if bucket_id in already_created:\n return\n\n # Fake context to instantiate a Bucket resource.\n context = RouteFactory(request)\n context.get_permission_object_id = lambda r, i: '/buckets/%s' % bucket_id\n resource = Bucket(request, context)\n try:\n bucket = resource.model.create_record({'id': bucket_id})\n except storage_exceptions.UnicityError as e:\n bucket = e.record\n already_created[bucket_id] = bucket\n\n\ndef create_collection(request, bucket_id):\n # Do nothing if current request does not involve a collection.\n subpath = request.matchdict.get('subpath')\n if not (subpath and subpath.startswith('collections/')):\n return\n\n collection_id = subpath.split('/')[1]\n collection_uri = '/buckets/%s/collections/%s' % (bucket_id, collection_id)\n\n # Do not intent to create multiple times per request (e.g. in batch).\n already_created = request.bound_data.setdefault('collections', {})\n if collection_uri in already_created:\n return\n\n # Do nothing if current request will already create the collection.\n collection_put = (request.method.lower() == 'put' and\n request.path.endswith(collection_id))\n if collection_put:\n return\n\n # Fake context to instantiate a Collection resource.\n context = RouteFactory(request)\n context.get_permission_object_id = lambda r, i: collection_uri\n\n backup = request.matchdict\n request.matchdict = dict(bucket_id=bucket_id,\n id=collection_id,\n **request.matchdict)\n resource = Collection(request, context)\n try:\n collection = resource.model.create_record({'id': collection_id})\n except storage_exceptions.UnicityError as e:\n collection = e.record\n already_created[collection_uri] = collection\n request.matchdict = backup\n\n\n@view_config(route_name='default_bucket', permission=NO_PERMISSION_REQUIRED)\n@view_config(route_name='default_bucket_collection',\n permission=NO_PERMISSION_REQUIRED)\ndef default_bucket(request):\n if request.method.lower() == 'options':\n path = request.path.replace('default', 'unknown')\n subrequest = build_request(request, {\n 'method': 'OPTIONS',\n 'path': path\n })\n return request.invoke_subrequest(subrequest)\n\n if getattr(request, 'prefixed_userid', None) is None:\n # Pass through the forbidden_view_config\n raise httpexceptions.HTTPForbidden()\n\n settings = request.registry.settings\n\n if asbool(settings['readonly']):\n raise httpexceptions.HTTPMethodNotAllowed()\n\n bucket_id = request.default_bucket_id\n path = request.path.replace('/buckets/default', '/buckets/%s' % bucket_id)\n querystring = request.url[(request.url.index(request.path) +\n len(request.path)):]\n\n # Make sure bucket exists\n create_bucket(request, bucket_id)\n\n # Make sure the collection exists\n create_collection(request, bucket_id)\n\n subrequest = build_request(request, {\n 'method': request.method,\n 'path': path + querystring,\n 'body': request.body\n })\n subrequest.bound_data = request.bound_data\n\n try:\n response = request.invoke_subrequest(subrequest)\n except httpexceptions.HTTPException as error:\n if error.content_type == 'application/json':\n response = reapply_cors(subrequest, error)\n else:\n # Ask the upper level to format the error.\n raise error\n return response\n", "path": "kinto/views/buckets.py"}]}
| 2,402 | 202 |
gh_patches_debug_30317 | rasdani/github-patches | git_diff | qtile__qtile-3479 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
notify-send stops working when the Qtile config is reloaded instead of restarted, or if widget.Notify is disabled
### The issue:
notify-send does not work anymore if the Qtile config is reloaded, until I restart Qtile. It also stops working and hangs if I disable `widget.Notify`. I am not 100% sure if this is a problem caused by Qtile.
1. Whenever I reload the Qtile config instead of restarting, the next run of `notify-send` hangs and waits. I have to Ctrl+c to end the process. When I then restart Qtile, `notify-send` works as expected.
2. I also tested disabling `widget.Notify` and trying again. Now the program always hangs, reload or not; `notify-send` seems not to find anything to send to. When I enable the widget, it works as described in point 1 (a minimal illustration of this ordering problem follows below).
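
The log line quoted below ("Could not release org.freedesktop.Notifications") hints at an ordering problem: the old notification server may not be fully deregistered before a new one tries to claim the same D-Bus name. The following toy sketch only illustrates that kind of race with plain asyncio — it is not Qtile's actual D-Bus code, and `register`/`release` are made-up stand-ins:

```python
import asyncio

# Toy model of a well-known name that must be released before it can be
# claimed again.  The cleanup is scheduled with create_task() but never
# awaited, so the new registration runs while the old owner still holds
# the name.
OWNED = set()
NAME = "org.freedesktop.Notifications"

async def release(name):
    await asyncio.sleep(0.1)            # stands in for the D-Bus round trip
    OWNED.discard(name)

async def register(name):
    if name in OWNED:
        raise RuntimeError(f"{name} is still owned")
    OWNED.add(name)

async def reload_config():
    OWNED.add(NAME)                     # the old widget owns the name
    asyncio.create_task(release(NAME))  # fire-and-forget cleanup
    await register(NAME)                # new widget registers too early

asyncio.run(reload_config())            # raises RuntimeError: name never released
```

The patch further down in this entry addresses exactly this by making the reload path wait for deregistration to finish before the new `Notify` instance registers.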
Some system information:
- Your Qtile version (qtile --version): `0.21.1.dev8+gea4462b0`
- Relevant logs from ~/.local/share/qtile/qtile.log:
`2022-04-05 21:48:41,482 ERROR libqtile notify.py:_release():L183 Could not release org.freedesktop.Notifications.`
```
OS: Manjaro Linux [x86_64]
Kernel: 5.16.14-1-MANJARO
Uptime: 1 day, 17 hours, 36 mins
Packages: 1896 (pacman)[stable], 1 (flatpak)
Shell: zsh 5.8.1
Resolution: 2560x1440 @ 144Hz
DE: KDE Plasma 5.24.3
WM: Qtile 0.21.1.dev8+gea4462b0
Theme: breeze (GruvboxColors) [Plasma], Klaus [GTK2/3/4]
Icons: breeze [Plasma], Gruvbox [GTK2/3/4]
Font: Noto Sans (12pt) [Plasma], Noto Sans (12pt) [GTK2/3/4]
Cursor: default (24px)
Terminal: kitty
CPU: Intel Xeon E3-1230 v3 (8) @ 3.7GHz
GPU: Nvidia GeForce GTX 1070
Memory: 3225MiB / 15947MiB (20%)
Disk (/): 98GB / 219GB (44%)
Locale: de_DE.UTF-8
```
### Required:
- [X] I have searched past issues to see if this bug has already been reported.
</issue>
<code>
[start of libqtile/widget/notify.py]
1 # -*- coding: utf-8 -*-
2 # Copyright (c) 2011 Florian Mounier
3 # Copyright (c) 2011 Mounier Florian
4 # Copyright (c) 2012 roger
5 # Copyright (c) 2012-2014 Tycho Andersen
6 # Copyright (c) 2012-2013 Craig Barnes
7 # Copyright (c) 2013 Tao Sauvage
8 # Copyright (c) 2014 Sean Vig
9 # Copyright (c) 2014 Adi Sieker
10 # Copyright (c) 2020 elParaguayo
11 #
12 # Permission is hereby granted, free of charge, to any person obtaining a copy
13 # of this software and associated documentation files (the "Software"), to deal
14 # in the Software without restriction, including without limitation the rights
15 # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
16 # copies of the Software, and to permit persons to whom the Software is
17 # furnished to do so, subject to the following conditions:
18 #
19 # The above copyright notice and this permission notice shall be included in
20 # all copies or substantial portions of the Software.
21 #
22 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
23 # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
24 # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
25 # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
26 # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
27 # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
28 # SOFTWARE.
29 import asyncio
30 from os import path
31
32 from libqtile import bar, pangocffi, utils
33 from libqtile.log_utils import logger
34 from libqtile.notify import ClosedReason, notifier
35 from libqtile.widget import base
36
37
38 class Notify(base._TextBox):
39 """
40 A notify widget
41
42 This widget can handle actions provided by notification clients. However, only the
43 default action is supported, so if a client provides multiple actions then only the
44 default (first) action can be invoked. Some programs will provide their own
45 notification windows if the notification server does not support actions, so if you
46 want your notifications to handle more than one action then specify ``False`` for
47 the ``action`` option to disable all action handling. Unfortunately we cannot
48 specify the capability for exactly one action.
49 """
50
51 defaults = [
52 ("foreground_urgent", "ff0000", "Foreground urgent priority colour"),
53 ("foreground_low", "dddddd", "Foreground low priority colour"),
54 ("default_timeout", None, "Default timeout (seconds) for notifications"),
55 ("audiofile", None, "Audiofile played during notifications"),
56 ("action", True, "Enable handling of default action upon right click"),
57 (
58 "parse_text",
59 None,
60 "Function to parse and modify notifications. "
61 "e.g. function in config that removes line returns:"
62 "def my_func(text)"
63 " return text.replace('\n', '')"
64 "then set option parse_text=my_func",
65 ),
66 ]
67 capabilities = {"body", "actions"}
68
69 def __init__(self, width=bar.CALCULATED, **config):
70 base._TextBox.__init__(self, "", width, **config)
71 self.add_defaults(Notify.defaults)
72 self.current_id = 0
73
74 default_callbacks = {
75 "Button1": self.clear,
76 "Button4": self.prev,
77 "Button5": self.next,
78 }
79 if self.action:
80 default_callbacks["Button3"] = self.invoke
81 else:
82 self.capabilities = Notify.capabilities.difference({"actions"})
83 self.add_callbacks(default_callbacks)
84
85 def _configure(self, qtile, bar):
86 base._TextBox._configure(self, qtile, bar)
87 self.layout = self.drawer.textlayout(
88 self.text, self.foreground, self.font, self.fontsize, self.fontshadow, markup=True
89 )
90
91 async def _config_async(self):
92 await notifier.register(self.update, self.capabilities, on_close=self.on_close)
93
94 def set_notif_text(self, notif):
95 self.text = pangocffi.markup_escape_text(notif.summary)
96 urgency = getattr(notif.hints.get("urgency"), "value", 1)
97 if urgency != 1:
98 self.text = '<span color="%s">%s</span>' % (
99 utils.hex(self.foreground_urgent if urgency == 2 else self.foreground_low),
100 self.text,
101 )
102 if notif.body:
103 self.text = '<span weight="bold">%s</span> - %s' % (
104 self.text,
105 pangocffi.markup_escape_text(notif.body),
106 )
107 if callable(self.parse_text):
108 try:
109 self.text = self.parse_text(self.text)
110 except: # noqa: E722
111 logger.exception("parse_text function failed:")
112 if self.audiofile and path.exists(self.audiofile):
113 self.qtile.cmd_spawn("aplay -q '%s'" % self.audiofile)
114
115 def update(self, notif):
116 self.qtile.call_soon_threadsafe(self.real_update, notif)
117
118 def real_update(self, notif):
119 self.set_notif_text(notif)
120 self.current_id = notif.id - 1
121 if notif.timeout and notif.timeout > 0:
122 self.timeout_add(
123 notif.timeout / 1000, self.clear, method_args=(ClosedReason.expired,)
124 )
125 elif self.default_timeout:
126 self.timeout_add(
127 self.default_timeout, self.clear, method_args=(ClosedReason.expired,)
128 )
129 self.bar.draw()
130 return True
131
132 def display(self):
133 self.set_notif_text(notifier.notifications[self.current_id])
134 self.bar.draw()
135
136 def clear(self, reason=ClosedReason.dismissed):
137 notifier._service.NotificationClosed(notifier.notifications[self.current_id].id, reason)
138 self.text = ""
139 self.current_id = len(notifier.notifications) - 1
140 self.bar.draw()
141
142 def on_close(self, nid):
143 if self.current_id < len(notifier.notifications):
144 notif = notifier.notifications[self.current_id]
145 if notif.id == nid:
146 self.clear(ClosedReason.method)
147
148 def prev(self):
149 if self.current_id > 0:
150 self.current_id -= 1
151 self.display()
152
153 def next(self):
154 if self.current_id < len(notifier.notifications) - 1:
155 self.current_id += 1
156 self.display()
157
158 def invoke(self):
159 if self.current_id < len(notifier.notifications):
160 notif = notifier.notifications[self.current_id]
161 if notif.actions:
162 notifier._service.ActionInvoked(notif.id, notif.actions[0])
163 self.clear()
164
165 def cmd_display(self):
166 """Display the notifcication"""
167 self.display()
168
169 def cmd_clear(self):
170 """Clear the notification"""
171 self.clear()
172
173 def cmd_toggle(self):
174 """Toggle showing/clearing the notification"""
175 if self.text == "":
176 self.display()
177 else:
178 self.clear()
179
180 def cmd_prev(self):
181 """Show previous notification"""
182 self.prev()
183
184 def cmd_next(self):
185 """Show next notification"""
186 self.next()
187
188 def cmd_invoke(self):
189 """Invoke the notification's default action"""
190 if self.action:
191 self.invoke()
192
193 def finalize(self):
194 asyncio.create_task(self._finalize())
195
196 async def _finalize(self):
197 task = notifier.unregister(self.update)
198
199 # If the notifier has no more callbacks then it needs to be stopped.
200 # The returned task will handle the release of the service name from
201 # dbus. We await it here to make sure it's finished before we
202 # complete the finalisation of this widget.
203 if task:
204 await task
205 base._TextBox.finalize(self)
206
[end of libqtile/widget/notify.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/libqtile/widget/notify.py b/libqtile/widget/notify.py
--- a/libqtile/widget/notify.py
+++ b/libqtile/widget/notify.py
@@ -26,7 +26,6 @@
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
-import asyncio
from os import path
from libqtile import bar, pangocffi, utils
@@ -89,6 +88,7 @@
)
async def _config_async(self):
+ logger.debug("Registering notify widget")
await notifier.register(self.update, self.capabilities, on_close=self.on_close)
def set_notif_text(self, notif):
@@ -191,7 +191,14 @@
self.invoke()
def finalize(self):
- asyncio.create_task(self._finalize())
+ # We may need some async calls as part of the finalize call
+ # We run this with `call_soon_threadsafe` as this waits for
+ # the job to finish before continuing. This is important as,
+ # if the config is just reloading, we need to finish deregistering
+ # the notification server before the new Notify widget instance
+ # registers and creates a new server.
+ self.qtile.call_soon_threadsafe(self._finalize)
+ base._TextBox.finalize(self)
async def _finalize(self):
task = notifier.unregister(self.update)
@@ -202,4 +209,3 @@
# complete the finalisation of this widget.
if task:
await task
- base._TextBox.finalize(self)
|
{"golden_diff": "diff --git a/libqtile/widget/notify.py b/libqtile/widget/notify.py\n--- a/libqtile/widget/notify.py\n+++ b/libqtile/widget/notify.py\n@@ -26,7 +26,6 @@\n # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n # SOFTWARE.\n-import asyncio\n from os import path\n \n from libqtile import bar, pangocffi, utils\n@@ -89,6 +88,7 @@\n )\n \n async def _config_async(self):\n+ logger.debug(\"Registering notify widget\")\n await notifier.register(self.update, self.capabilities, on_close=self.on_close)\n \n def set_notif_text(self, notif):\n@@ -191,7 +191,14 @@\n self.invoke()\n \n def finalize(self):\n- asyncio.create_task(self._finalize())\n+ # We may need some async calls as part of the finalize call\n+ # We run this with `call_soon_threadsafe` as this waits for\n+ # the job to finish before continuing. This is important as,\n+ # if the config is just reloading, we need to finish deregistering\n+ # the notification server before the new Notify widget instance\n+ # registers and creates a new server.\n+ self.qtile.call_soon_threadsafe(self._finalize)\n+ base._TextBox.finalize(self)\n \n async def _finalize(self):\n task = notifier.unregister(self.update)\n@@ -202,4 +209,3 @@\n # complete the finalisation of this widget.\n if task:\n await task\n- base._TextBox.finalize(self)\n", "issue": "notify-send stops working when Qtile config is reload instead restart, or if widget.Notify is disabled\n### The issue:\r\n\r\nnotify-send does not work anymore if Qtile config is reload, until I restart Qtile. It also will not work anymore and wait if I disable the \"widget.Notify\". I am not 100% sure if this is a problem caused by Qtile.\r\n\r\n1. Whenever I reload Qtile config instead of restarting, the next run of the program `notify-send` will hang and wait. I have to Ctrl+c to end the process. When I then restart Qtile, then the program `notify-send` works as expected.\r\n2. And i just tested to disable the widget.Notify and try again. Now The program always hangs, reload or not. `notify-send` seems not to find anything to send to. 
When I enable the widget, then it works like described at point 1.\r\n\r\nSome system information:\r\n\r\n- Your Qtile version (qtile --version): `0.21.1.dev8+gea4462b0`\r\n- Relevant logs from ~/.local/share/qtile/qtile.log: \r\n`2022-04-05 21:48:41,482 ERROR libqtile notify.py:_release():L183 Could not release org.freedesktop.Notifications.`\r\n\r\n```\r\nOS: Manjaro Linux [x86_64]\r\nKernel: 5.16.14-1-MANJARO\r\nUptime: 1 day, 17 hours, 36 mins\r\nPackages: 1896 (pacman)[stable], 1 (flatpak)\r\nShell: zsh 5.8.1\r\nResolution: 2560x1440 @ 144Hz\r\nDE: KDE Plasma 5.24.3\r\nWM: Qtile 0.21.1.dev8+gea4462b0\r\nTheme: breeze (GruvboxColors) [Plasma], Klaus [GTK2/3/4]\r\nIcons: breeze [Plasma], Gruvbox [GTK2/3/4]\r\nFont: Noto Sans (12pt) [Plasma], Noto Sans (12pt) [GTK2/3/4]\r\nCursor: default (24px)\r\nTerminal: kitty\r\nCPU: Intel Xeon E3-1230 v3 (8) @ 3.7GHz\r\nGPU: Nvidia GeForce GTX 1070\r\nMemory: 3225MiB / 15947MiB (20%)\r\nDisk (/): 98GB / 219GB (44%)\r\nLocale: de_DE.UTF-8\r\n```\r\n\r\n### Required:\r\n\r\n- [X] I have searched past issues to see if this bug has already been reported.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# Copyright (c) 2011 Florian Mounier\n# Copyright (c) 2011 Mounier Florian\n# Copyright (c) 2012 roger\n# Copyright (c) 2012-2014 Tycho Andersen\n# Copyright (c) 2012-2013 Craig Barnes\n# Copyright (c) 2013 Tao Sauvage\n# Copyright (c) 2014 Sean Vig\n# Copyright (c) 2014 Adi Sieker\n# Copyright (c) 2020 elParaguayo\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\nimport asyncio\nfrom os import path\n\nfrom libqtile import bar, pangocffi, utils\nfrom libqtile.log_utils import logger\nfrom libqtile.notify import ClosedReason, notifier\nfrom libqtile.widget import base\n\n\nclass Notify(base._TextBox):\n \"\"\"\n A notify widget\n\n This widget can handle actions provided by notification clients. However, only the\n default action is supported, so if a client provides multiple actions then only the\n default (first) action can be invoked. Some programs will provide their own\n notification windows if the notification server does not support actions, so if you\n want your notifications to handle more than one action then specify ``False`` for\n the ``action`` option to disable all action handling. 
Unfortunately we cannot\n specify the capability for exactly one action.\n \"\"\"\n\n defaults = [\n (\"foreground_urgent\", \"ff0000\", \"Foreground urgent priority colour\"),\n (\"foreground_low\", \"dddddd\", \"Foreground low priority colour\"),\n (\"default_timeout\", None, \"Default timeout (seconds) for notifications\"),\n (\"audiofile\", None, \"Audiofile played during notifications\"),\n (\"action\", True, \"Enable handling of default action upon right click\"),\n (\n \"parse_text\",\n None,\n \"Function to parse and modify notifications. \"\n \"e.g. function in config that removes line returns:\"\n \"def my_func(text)\"\n \" return text.replace('\\n', '')\"\n \"then set option parse_text=my_func\",\n ),\n ]\n capabilities = {\"body\", \"actions\"}\n\n def __init__(self, width=bar.CALCULATED, **config):\n base._TextBox.__init__(self, \"\", width, **config)\n self.add_defaults(Notify.defaults)\n self.current_id = 0\n\n default_callbacks = {\n \"Button1\": self.clear,\n \"Button4\": self.prev,\n \"Button5\": self.next,\n }\n if self.action:\n default_callbacks[\"Button3\"] = self.invoke\n else:\n self.capabilities = Notify.capabilities.difference({\"actions\"})\n self.add_callbacks(default_callbacks)\n\n def _configure(self, qtile, bar):\n base._TextBox._configure(self, qtile, bar)\n self.layout = self.drawer.textlayout(\n self.text, self.foreground, self.font, self.fontsize, self.fontshadow, markup=True\n )\n\n async def _config_async(self):\n await notifier.register(self.update, self.capabilities, on_close=self.on_close)\n\n def set_notif_text(self, notif):\n self.text = pangocffi.markup_escape_text(notif.summary)\n urgency = getattr(notif.hints.get(\"urgency\"), \"value\", 1)\n if urgency != 1:\n self.text = '<span color=\"%s\">%s</span>' % (\n utils.hex(self.foreground_urgent if urgency == 2 else self.foreground_low),\n self.text,\n )\n if notif.body:\n self.text = '<span weight=\"bold\">%s</span> - %s' % (\n self.text,\n pangocffi.markup_escape_text(notif.body),\n )\n if callable(self.parse_text):\n try:\n self.text = self.parse_text(self.text)\n except: # noqa: E722\n logger.exception(\"parse_text function failed:\")\n if self.audiofile and path.exists(self.audiofile):\n self.qtile.cmd_spawn(\"aplay -q '%s'\" % self.audiofile)\n\n def update(self, notif):\n self.qtile.call_soon_threadsafe(self.real_update, notif)\n\n def real_update(self, notif):\n self.set_notif_text(notif)\n self.current_id = notif.id - 1\n if notif.timeout and notif.timeout > 0:\n self.timeout_add(\n notif.timeout / 1000, self.clear, method_args=(ClosedReason.expired,)\n )\n elif self.default_timeout:\n self.timeout_add(\n self.default_timeout, self.clear, method_args=(ClosedReason.expired,)\n )\n self.bar.draw()\n return True\n\n def display(self):\n self.set_notif_text(notifier.notifications[self.current_id])\n self.bar.draw()\n\n def clear(self, reason=ClosedReason.dismissed):\n notifier._service.NotificationClosed(notifier.notifications[self.current_id].id, reason)\n self.text = \"\"\n self.current_id = len(notifier.notifications) - 1\n self.bar.draw()\n\n def on_close(self, nid):\n if self.current_id < len(notifier.notifications):\n notif = notifier.notifications[self.current_id]\n if notif.id == nid:\n self.clear(ClosedReason.method)\n\n def prev(self):\n if self.current_id > 0:\n self.current_id -= 1\n self.display()\n\n def next(self):\n if self.current_id < len(notifier.notifications) - 1:\n self.current_id += 1\n self.display()\n\n def invoke(self):\n if self.current_id < 
len(notifier.notifications):\n notif = notifier.notifications[self.current_id]\n if notif.actions:\n notifier._service.ActionInvoked(notif.id, notif.actions[0])\n self.clear()\n\n def cmd_display(self):\n \"\"\"Display the notifcication\"\"\"\n self.display()\n\n def cmd_clear(self):\n \"\"\"Clear the notification\"\"\"\n self.clear()\n\n def cmd_toggle(self):\n \"\"\"Toggle showing/clearing the notification\"\"\"\n if self.text == \"\":\n self.display()\n else:\n self.clear()\n\n def cmd_prev(self):\n \"\"\"Show previous notification\"\"\"\n self.prev()\n\n def cmd_next(self):\n \"\"\"Show next notification\"\"\"\n self.next()\n\n def cmd_invoke(self):\n \"\"\"Invoke the notification's default action\"\"\"\n if self.action:\n self.invoke()\n\n def finalize(self):\n asyncio.create_task(self._finalize())\n\n async def _finalize(self):\n task = notifier.unregister(self.update)\n\n # If the notifier has no more callbacks then it needs to be stopped.\n # The returned task will handle the release of the service name from\n # dbus. We await it here to make sure it's finished before we\n # complete the finalisation of this widget.\n if task:\n await task\n base._TextBox.finalize(self)\n", "path": "libqtile/widget/notify.py"}]}
| 3,386 | 378 |
gh_patches_debug_15654 | rasdani/github-patches | git_diff | docker__docker-py-1285 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
NpipeSocket.settimeout - Blocking and non-blocking are mixed up
`settimeout(None)` puts the socket in non-blocking mode, `settimeout(0)` in blocking mode. Specification indicates it should be the other way around.
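
For reference, this is the behaviour the standard socket API specifies, shown with an ordinary stdlib socket (plain illustration, not docker-py code; `NpipeSocket` is documented as mirroring this API):

```python
import socket

# Stdlib semantics: settimeout(None) -> blocking, settimeout(0.0) -> non-blocking.
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)

s.settimeout(None)              # blocking mode
assert s.gettimeout() is None

s.settimeout(0.0)               # non-blocking mode
assert s.gettimeout() == 0.0

s.setblocking(True)             # shorthand for settimeout(None)
assert s.gettimeout() is None

s.setblocking(False)            # shorthand for settimeout(0.0)
assert s.gettimeout() == 0.0

s.close()
```

The `NpipeSocket.settimeout` implementation shown in the code below maps these two cases the other way around, which is what this report describes.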
</issue>
<code>
[start of docker/transport/npipesocket.py]
1 import functools
2 import io
3
4 import six
5 import win32file
6 import win32pipe
7
8 cERROR_PIPE_BUSY = 0xe7
9 cSECURITY_SQOS_PRESENT = 0x100000
10 cSECURITY_ANONYMOUS = 0
11
12 RETRY_WAIT_TIMEOUT = 10000
13
14
15 def check_closed(f):
16 @functools.wraps(f)
17 def wrapped(self, *args, **kwargs):
18 if self._closed:
19 raise RuntimeError(
20 'Can not reuse socket after connection was closed.'
21 )
22 return f(self, *args, **kwargs)
23 return wrapped
24
25
26 class NpipeSocket(object):
27 """ Partial implementation of the socket API over windows named pipes.
28 This implementation is only designed to be used as a client socket,
29 and server-specific methods (bind, listen, accept...) are not
30 implemented.
31 """
32
33 def __init__(self, handle=None):
34 self._timeout = win32pipe.NMPWAIT_USE_DEFAULT_WAIT
35 self._handle = handle
36 self._closed = False
37
38 def accept(self):
39 raise NotImplementedError()
40
41 def bind(self, address):
42 raise NotImplementedError()
43
44 def close(self):
45 self._handle.Close()
46 self._closed = True
47
48 @check_closed
49 def connect(self, address):
50 win32pipe.WaitNamedPipe(address, self._timeout)
51 try:
52 handle = win32file.CreateFile(
53 address,
54 win32file.GENERIC_READ | win32file.GENERIC_WRITE,
55 0,
56 None,
57 win32file.OPEN_EXISTING,
58 cSECURITY_ANONYMOUS | cSECURITY_SQOS_PRESENT,
59 0
60 )
61 except win32pipe.error as e:
62 # See Remarks:
63 # https://msdn.microsoft.com/en-us/library/aa365800.aspx
64 if e.winerror == cERROR_PIPE_BUSY:
65 # Another program or thread has grabbed our pipe instance
66 # before we got to it. Wait for availability and attempt to
67 # connect again.
68 win32pipe.WaitNamedPipe(address, RETRY_WAIT_TIMEOUT)
69 return self.connect(address)
70 raise e
71
72 self.flags = win32pipe.GetNamedPipeInfo(handle)[0]
73
74 self._handle = handle
75 self._address = address
76
77 @check_closed
78 def connect_ex(self, address):
79 return self.connect(address)
80
81 @check_closed
82 def detach(self):
83 self._closed = True
84 return self._handle
85
86 @check_closed
87 def dup(self):
88 return NpipeSocket(self._handle)
89
90 @check_closed
91 def fileno(self):
92 return int(self._handle)
93
94 def getpeername(self):
95 return self._address
96
97 def getsockname(self):
98 return self._address
99
100 def getsockopt(self, level, optname, buflen=None):
101 raise NotImplementedError()
102
103 def ioctl(self, control, option):
104 raise NotImplementedError()
105
106 def listen(self, backlog):
107 raise NotImplementedError()
108
109 def makefile(self, mode=None, bufsize=None):
110 if mode.strip('b') != 'r':
111 raise NotImplementedError()
112 rawio = NpipeFileIOBase(self)
113 if bufsize is None or bufsize <= 0:
114 bufsize = io.DEFAULT_BUFFER_SIZE
115 return io.BufferedReader(rawio, buffer_size=bufsize)
116
117 @check_closed
118 def recv(self, bufsize, flags=0):
119 err, data = win32file.ReadFile(self._handle, bufsize)
120 return data
121
122 @check_closed
123 def recvfrom(self, bufsize, flags=0):
124 data = self.recv(bufsize, flags)
125 return (data, self._address)
126
127 @check_closed
128 def recvfrom_into(self, buf, nbytes=0, flags=0):
129 return self.recv_into(buf, nbytes, flags), self._address
130
131 @check_closed
132 def recv_into(self, buf, nbytes=0):
133 if six.PY2:
134 return self._recv_into_py2(buf, nbytes)
135
136 readbuf = buf
137 if not isinstance(buf, memoryview):
138 readbuf = memoryview(buf)
139
140 err, data = win32file.ReadFile(
141 self._handle,
142 readbuf[:nbytes] if nbytes else readbuf
143 )
144 return len(data)
145
146 def _recv_into_py2(self, buf, nbytes):
147 err, data = win32file.ReadFile(self._handle, nbytes or len(buf))
148 n = len(data)
149 buf[:n] = data
150 return n
151
152 @check_closed
153 def send(self, string, flags=0):
154 err, nbytes = win32file.WriteFile(self._handle, string)
155 return nbytes
156
157 @check_closed
158 def sendall(self, string, flags=0):
159 return self.send(string, flags)
160
161 @check_closed
162 def sendto(self, string, address):
163 self.connect(address)
164 return self.send(string)
165
166 def setblocking(self, flag):
167 if flag:
168 return self.settimeout(None)
169 return self.settimeout(0)
170
171 def settimeout(self, value):
172 if value is None:
173 self._timeout = win32pipe.NMPWAIT_NOWAIT
174 elif not isinstance(value, (float, int)) or value < 0:
175 raise ValueError('Timeout value out of range')
176 elif value == 0:
177 self._timeout = win32pipe.NMPWAIT_USE_DEFAULT_WAIT
178 else:
179 self._timeout = value
180
181 def gettimeout(self):
182 return self._timeout
183
184 def setsockopt(self, level, optname, value):
185 raise NotImplementedError()
186
187 @check_closed
188 def shutdown(self, how):
189 return self.close()
190
191
192 class NpipeFileIOBase(io.RawIOBase):
193 def __init__(self, npipe_socket):
194 self.sock = npipe_socket
195
196 def close(self):
197 super(NpipeFileIOBase, self).close()
198 self.sock = None
199
200 def fileno(self):
201 return self.sock.fileno()
202
203 def isatty(self):
204 return False
205
206 def readable(self):
207 return True
208
209 def readinto(self, buf):
210 return self.sock.recv_into(buf)
211
212 def seekable(self):
213 return False
214
215 def writable(self):
216 return False
217
[end of docker/transport/npipesocket.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/docker/transport/npipesocket.py b/docker/transport/npipesocket.py
--- a/docker/transport/npipesocket.py
+++ b/docker/transport/npipesocket.py
@@ -170,13 +170,16 @@
def settimeout(self, value):
if value is None:
- self._timeout = win32pipe.NMPWAIT_NOWAIT
+ # Blocking mode
+ self._timeout = win32pipe.NMPWAIT_WAIT_FOREVER
elif not isinstance(value, (float, int)) or value < 0:
raise ValueError('Timeout value out of range')
elif value == 0:
- self._timeout = win32pipe.NMPWAIT_USE_DEFAULT_WAIT
+ # Non-blocking mode
+ self._timeout = win32pipe.NMPWAIT_NO_WAIT
else:
- self._timeout = value
+ # Timeout mode - Value converted to milliseconds
+ self._timeout = value * 1000
def gettimeout(self):
return self._timeout
|
{"golden_diff": "diff --git a/docker/transport/npipesocket.py b/docker/transport/npipesocket.py\n--- a/docker/transport/npipesocket.py\n+++ b/docker/transport/npipesocket.py\n@@ -170,13 +170,16 @@\n \n def settimeout(self, value):\n if value is None:\n- self._timeout = win32pipe.NMPWAIT_NOWAIT\n+ # Blocking mode\n+ self._timeout = win32pipe.NMPWAIT_WAIT_FOREVER\n elif not isinstance(value, (float, int)) or value < 0:\n raise ValueError('Timeout value out of range')\n elif value == 0:\n- self._timeout = win32pipe.NMPWAIT_USE_DEFAULT_WAIT\n+ # Non-blocking mode\n+ self._timeout = win32pipe.NMPWAIT_NO_WAIT\n else:\n- self._timeout = value\n+ # Timeout mode - Value converted to milliseconds\n+ self._timeout = value * 1000\n \n def gettimeout(self):\n return self._timeout\n", "issue": "NpipeSocket.settimeout - Blocking and non-blocking are mixed up\n`settimeout(None)` puts the socket in non-blocking mode, `settimeout(0)` in blocking mode. Specification indicates it should be the other way around.\n", "before_files": [{"content": "import functools\nimport io\n\nimport six\nimport win32file\nimport win32pipe\n\ncERROR_PIPE_BUSY = 0xe7\ncSECURITY_SQOS_PRESENT = 0x100000\ncSECURITY_ANONYMOUS = 0\n\nRETRY_WAIT_TIMEOUT = 10000\n\n\ndef check_closed(f):\n @functools.wraps(f)\n def wrapped(self, *args, **kwargs):\n if self._closed:\n raise RuntimeError(\n 'Can not reuse socket after connection was closed.'\n )\n return f(self, *args, **kwargs)\n return wrapped\n\n\nclass NpipeSocket(object):\n \"\"\" Partial implementation of the socket API over windows named pipes.\n This implementation is only designed to be used as a client socket,\n and server-specific methods (bind, listen, accept...) are not\n implemented.\n \"\"\"\n\n def __init__(self, handle=None):\n self._timeout = win32pipe.NMPWAIT_USE_DEFAULT_WAIT\n self._handle = handle\n self._closed = False\n\n def accept(self):\n raise NotImplementedError()\n\n def bind(self, address):\n raise NotImplementedError()\n\n def close(self):\n self._handle.Close()\n self._closed = True\n\n @check_closed\n def connect(self, address):\n win32pipe.WaitNamedPipe(address, self._timeout)\n try:\n handle = win32file.CreateFile(\n address,\n win32file.GENERIC_READ | win32file.GENERIC_WRITE,\n 0,\n None,\n win32file.OPEN_EXISTING,\n cSECURITY_ANONYMOUS | cSECURITY_SQOS_PRESENT,\n 0\n )\n except win32pipe.error as e:\n # See Remarks:\n # https://msdn.microsoft.com/en-us/library/aa365800.aspx\n if e.winerror == cERROR_PIPE_BUSY:\n # Another program or thread has grabbed our pipe instance\n # before we got to it. 
Wait for availability and attempt to\n # connect again.\n win32pipe.WaitNamedPipe(address, RETRY_WAIT_TIMEOUT)\n return self.connect(address)\n raise e\n\n self.flags = win32pipe.GetNamedPipeInfo(handle)[0]\n\n self._handle = handle\n self._address = address\n\n @check_closed\n def connect_ex(self, address):\n return self.connect(address)\n\n @check_closed\n def detach(self):\n self._closed = True\n return self._handle\n\n @check_closed\n def dup(self):\n return NpipeSocket(self._handle)\n\n @check_closed\n def fileno(self):\n return int(self._handle)\n\n def getpeername(self):\n return self._address\n\n def getsockname(self):\n return self._address\n\n def getsockopt(self, level, optname, buflen=None):\n raise NotImplementedError()\n\n def ioctl(self, control, option):\n raise NotImplementedError()\n\n def listen(self, backlog):\n raise NotImplementedError()\n\n def makefile(self, mode=None, bufsize=None):\n if mode.strip('b') != 'r':\n raise NotImplementedError()\n rawio = NpipeFileIOBase(self)\n if bufsize is None or bufsize <= 0:\n bufsize = io.DEFAULT_BUFFER_SIZE\n return io.BufferedReader(rawio, buffer_size=bufsize)\n\n @check_closed\n def recv(self, bufsize, flags=0):\n err, data = win32file.ReadFile(self._handle, bufsize)\n return data\n\n @check_closed\n def recvfrom(self, bufsize, flags=0):\n data = self.recv(bufsize, flags)\n return (data, self._address)\n\n @check_closed\n def recvfrom_into(self, buf, nbytes=0, flags=0):\n return self.recv_into(buf, nbytes, flags), self._address\n\n @check_closed\n def recv_into(self, buf, nbytes=0):\n if six.PY2:\n return self._recv_into_py2(buf, nbytes)\n\n readbuf = buf\n if not isinstance(buf, memoryview):\n readbuf = memoryview(buf)\n\n err, data = win32file.ReadFile(\n self._handle,\n readbuf[:nbytes] if nbytes else readbuf\n )\n return len(data)\n\n def _recv_into_py2(self, buf, nbytes):\n err, data = win32file.ReadFile(self._handle, nbytes or len(buf))\n n = len(data)\n buf[:n] = data\n return n\n\n @check_closed\n def send(self, string, flags=0):\n err, nbytes = win32file.WriteFile(self._handle, string)\n return nbytes\n\n @check_closed\n def sendall(self, string, flags=0):\n return self.send(string, flags)\n\n @check_closed\n def sendto(self, string, address):\n self.connect(address)\n return self.send(string)\n\n def setblocking(self, flag):\n if flag:\n return self.settimeout(None)\n return self.settimeout(0)\n\n def settimeout(self, value):\n if value is None:\n self._timeout = win32pipe.NMPWAIT_NOWAIT\n elif not isinstance(value, (float, int)) or value < 0:\n raise ValueError('Timeout value out of range')\n elif value == 0:\n self._timeout = win32pipe.NMPWAIT_USE_DEFAULT_WAIT\n else:\n self._timeout = value\n\n def gettimeout(self):\n return self._timeout\n\n def setsockopt(self, level, optname, value):\n raise NotImplementedError()\n\n @check_closed\n def shutdown(self, how):\n return self.close()\n\n\nclass NpipeFileIOBase(io.RawIOBase):\n def __init__(self, npipe_socket):\n self.sock = npipe_socket\n\n def close(self):\n super(NpipeFileIOBase, self).close()\n self.sock = None\n\n def fileno(self):\n return self.sock.fileno()\n\n def isatty(self):\n return False\n\n def readable(self):\n return True\n\n def readinto(self, buf):\n return self.sock.recv_into(buf)\n\n def seekable(self):\n return False\n\n def writable(self):\n return False\n", "path": "docker/transport/npipesocket.py"}]}
| 2,520 | 236 |
gh_patches_debug_40693 | rasdani/github-patches | git_diff | hpcaitech__ColossalAI-5232 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[tensor] fix some unittests
[tensor] fix some unittests
[tensor] fix some unittests
</issue>
<code>
[start of applications/ColossalQA/colossalqa/chain/retrieval_qa/base.py]
1 """
2 Chain for question-answering against a vector database.
3
4 Modified from Original Source
5
6 This code is based on LangChain Ai's langchain, which can be found at
7 https://github.com/langchain-ai/langchain
8 The original code is licensed under the MIT license.
9 """
10 from __future__ import annotations
11
12 import copy
13 import inspect
14 from typing import Any, Dict, List, Optional
15
16 from colossalqa.chain.retrieval_qa.load_chain import load_qa_chain
17 from colossalqa.chain.retrieval_qa.stuff import CustomStuffDocumentsChain
18 from langchain.callbacks.manager import AsyncCallbackManagerForChainRun, CallbackManagerForChainRun, Callbacks
19 from langchain.chains.llm import LLMChain
20 from langchain.chains.question_answering.stuff_prompt import PROMPT_SELECTOR
21 from langchain.chains.retrieval_qa.base import BaseRetrievalQA
22 from langchain.prompts import PromptTemplate
23 from langchain.pydantic_v1 import Field
24 from langchain.schema import BaseRetriever, Document
25 from langchain.schema.language_model import BaseLanguageModel
26
27 class CustomBaseRetrievalQA(BaseRetrievalQA):
28 """Base class for question-answering chains."""
29
30 @classmethod
31 def from_llm(
32 cls,
33 llm: BaseLanguageModel,
34 prompt: Optional[PromptTemplate] = None,
35 callbacks: Callbacks = None,
36 **kwargs: Any,
37 ) -> BaseRetrievalQA:
38 """Initialize from LLM."""
39 llm_kwargs = kwargs.pop("llm_kwargs", {})
40 _prompt = prompt or PROMPT_SELECTOR.get_prompt(llm)
41 llm_chain = LLMChain(llm=llm, prompt=_prompt, callbacks=callbacks, llm_kwargs=llm_kwargs)
42 document_prompt = kwargs.get(
43 "document_prompt", PromptTemplate(input_variables=["page_content"], template="Context:\n{page_content}")
44 )
45 combine_documents_chain = CustomStuffDocumentsChain(
46 llm_chain=llm_chain,
47 document_variable_name="context",
48 document_prompt=document_prompt,
49 callbacks=callbacks,
50 )
51
52 return cls(
53 combine_documents_chain=combine_documents_chain,
54 callbacks=callbacks,
55 **kwargs,
56 )
57
58 @classmethod
59 def from_chain_type(
60 cls,
61 llm: BaseLanguageModel,
62 chain_type: str = "stuff",
63 chain_type_kwargs: Optional[dict] = None,
64 **kwargs: Any,
65 ) -> BaseRetrievalQA:
66 """Load chain from chain type."""
67 llm_kwargs = kwargs.pop("llm_kwargs", {})
68 _chain_type_kwargs = chain_type_kwargs or {}
69 combine_documents_chain = load_qa_chain(llm, chain_type=chain_type, **_chain_type_kwargs, llm_kwargs=llm_kwargs)
70 return cls(combine_documents_chain=combine_documents_chain, **kwargs)
71
72 def _call(
73 self,
74 inputs: Dict[str, Any],
75 run_manager: Optional[CallbackManagerForChainRun] = None,
76 ) -> Dict[str, Any]:
77 """Run get_relevant_text and llm on input query.
78
79 If chain has 'return_source_documents' as 'True', returns
80 the retrieved documents as well under the key 'source_documents'.
81
82 Example:
83 .. code-block:: python
84
85 res = indexqa({'query': 'This is my query'})
86 answer, docs = res['result'], res['source_documents']
87 """
88 _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
89 question = inputs[self.input_key]
90 accepts_run_manager = "run_manager" in inspect.signature(self._get_docs).parameters
91 if accepts_run_manager:
92 docs = self._get_docs(question, run_manager=_run_manager)
93 else:
94 docs = self._get_docs(question) # type: ignore[call-arg]
95
96 kwargs = {
97 k: v
98 for k, v in inputs.items()
99 if k in ["stop", "temperature", "top_k", "top_p", "max_new_tokens", "doc_prefix"]
100 }
101 answers = []
102 if self.combine_documents_chain.memory is not None:
103 buffered_history_backup, summarized_history_temp_backup = copy.deepcopy(
104 self.combine_documents_chain.memory.buffered_history
105 ), copy.deepcopy(self.combine_documents_chain.memory.summarized_history_temp)
106 else:
107 buffered_history_backup = None
108 summarized_history_temp_backup = None
109
110 answer = self.combine_documents_chain.run(
111 input_documents=docs, question=question, callbacks=_run_manager.get_child(), **kwargs
112 )
113 if summarized_history_temp_backup is not None and buffered_history_backup is not None:
114 (
115 self.combine_documents_chain.memory.buffered_history,
116 self.combine_documents_chain.memory.summarized_history_temp,
117 ) = copy.deepcopy(buffered_history_backup), copy.deepcopy(summarized_history_temp_backup)
118
119 # if rejection_trigger_keywords is not given, return the response from LLM directly
120 rejection_trigger_keywrods = inputs.get('rejection_trigger_keywrods', [])
121 answer = answer if all([rej not in answer for rej in rejection_trigger_keywrods]) else None
122 if answer is None:
123 answer = inputs.get('rejection_answer', "抱歉,根据提供的信息无法回答该问题。")
124 if self.combine_documents_chain.memory is not None:
125 self.combine_documents_chain.memory.save_context({"question": question}, {"output": answer})
126
127 if self.return_source_documents:
128 return {self.output_key: answer, "source_documents": docs}
129 else:
130 return {self.output_key: answer}
131
132 async def _acall(
133 self,
134 inputs: Dict[str, Any],
135 run_manager: Optional[AsyncCallbackManagerForChainRun] = None,
136 ) -> Dict[str, Any]:
137 """Run get_relevant_text and llm on input query.
138
139 If chain has 'return_source_documents' as 'True', returns
140 the retrieved documents as well under the key 'source_documents'.
141
142 Example:
143 .. code-block:: python
144
145 res = indexqa({'query': 'This is my query'})
146 answer, docs = res['result'], res['source_documents']
147 """
148 _run_manager = run_manager or AsyncCallbackManagerForChainRun.get_noop_manager()
149 question = inputs[self.input_key]
150 accepts_run_manager = "run_manager" in inspect.signature(self._aget_docs).parameters
151 if accepts_run_manager:
152 docs = await self._aget_docs(question, run_manager=_run_manager)
153 else:
154 docs = await self._aget_docs(question) # type: ignore[call-arg]
155 kwargs = {
156 k: v
157 for k, v in inputs.items()
158 if k in ["stop", "temperature", "top_k", "top_p", "max_new_tokens", "doc_prefix"]
159 }
160 answer = await self.combine_documents_chain.arun(
161 input_documents=docs, question=question, callbacks=_run_manager.get_child(), **kwargs
162 )
163 # if rejection_trigger_keywords is not given, return the response from LLM directly
164 rejection_trigger_keywrods = inputs.get('rejection_trigger_keywrods', [])
165 answer = answer if all([rej not in answer for rej in rejection_trigger_keywrods]) or len(rejection_trigger_keywrods)==0 else None
166 if answer is None:
167 answer = inputs.get('rejection_answer', "抱歉,根据提供的信息无法回答该问题。")
168 self.combine_documents_chain.memory.save_context({"question": question}, {"output": answer})
169
170 if self.return_source_documents:
171 return {self.output_key: answer, "source_documents": docs}
172 else:
173 return {self.output_key: answer}
174
175
176 class RetrievalQA(CustomBaseRetrievalQA):
177 """Chain for question-answering against an index.
178
179 Example:
180 .. code-block:: python
181
182 from langchain.llms import OpenAI
183 from langchain.chains import RetrievalQA
184 from langchain.faiss import FAISS
185 from langchain.vectorstores.base import VectorStoreRetriever
186 retriever = VectorStoreRetriever(vectorstore=FAISS(...))
187 retrievalQA = RetrievalQA.from_llm(llm=OpenAI(), retriever=retriever)
188
189 """
190
191 retriever: BaseRetriever = Field(exclude=True)
192
193 def _get_docs(
194 self,
195 question: str,
196 *,
197 run_manager: CallbackManagerForChainRun,
198 ) -> List[Document]:
199 """Get docs."""
200 return self.retriever.get_relevant_documents(question, callbacks=run_manager.get_child())
201
202 async def _aget_docs(
203 self,
204 question: str,
205 *,
206 run_manager: AsyncCallbackManagerForChainRun,
207 ) -> List[Document]:
208 """Get docs."""
209 return await self.retriever.aget_relevant_documents(question, callbacks=run_manager.get_child())
210
211 @property
212 def _chain_type(self) -> str:
213 """Return the chain type."""
214 return "retrieval_qa"
215
[end of applications/ColossalQA/colossalqa/chain/retrieval_qa/base.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/applications/ColossalQA/colossalqa/chain/retrieval_qa/base.py b/applications/ColossalQA/colossalqa/chain/retrieval_qa/base.py
--- a/applications/ColossalQA/colossalqa/chain/retrieval_qa/base.py
+++ b/applications/ColossalQA/colossalqa/chain/retrieval_qa/base.py
@@ -24,6 +24,7 @@
from langchain.schema import BaseRetriever, Document
from langchain.schema.language_model import BaseLanguageModel
+
class CustomBaseRetrievalQA(BaseRetrievalQA):
"""Base class for question-answering chains."""
@@ -98,7 +99,6 @@
for k, v in inputs.items()
if k in ["stop", "temperature", "top_k", "top_p", "max_new_tokens", "doc_prefix"]
}
- answers = []
if self.combine_documents_chain.memory is not None:
buffered_history_backup, summarized_history_temp_backup = copy.deepcopy(
self.combine_documents_chain.memory.buffered_history
@@ -117,10 +117,10 @@
) = copy.deepcopy(buffered_history_backup), copy.deepcopy(summarized_history_temp_backup)
# if rejection_trigger_keywords is not given, return the response from LLM directly
- rejection_trigger_keywrods = inputs.get('rejection_trigger_keywrods', [])
+ rejection_trigger_keywrods = inputs.get("rejection_trigger_keywrods", [])
answer = answer if all([rej not in answer for rej in rejection_trigger_keywrods]) else None
- if answer is None:
- answer = inputs.get('rejection_answer', "抱歉,根据提供的信息无法回答该问题。")
+ if answer is None:
+ answer = inputs.get("rejection_answer", "抱歉,根据提供的信息无法回答该问题。")
if self.combine_documents_chain.memory is not None:
self.combine_documents_chain.memory.save_context({"question": question}, {"output": answer})
@@ -161,10 +161,14 @@
input_documents=docs, question=question, callbacks=_run_manager.get_child(), **kwargs
)
# if rejection_trigger_keywords is not given, return the response from LLM directly
- rejection_trigger_keywrods = inputs.get('rejection_trigger_keywrods', [])
- answer = answer if all([rej not in answer for rej in rejection_trigger_keywrods]) or len(rejection_trigger_keywrods)==0 else None
+ rejection_trigger_keywrods = inputs.get("rejection_trigger_keywrods", [])
+ answer = (
+ answer
+ if all([rej not in answer for rej in rejection_trigger_keywrods]) or len(rejection_trigger_keywrods) == 0
+ else None
+ )
if answer is None:
- answer = inputs.get('rejection_answer', "抱歉,根据提供的信息无法回答该问题。")
+ answer = inputs.get("rejection_answer", "抱歉,根据提供的信息无法回答该问题。")
self.combine_documents_chain.memory.save_context({"question": question}, {"output": answer})
if self.return_source_documents:
|
{"golden_diff": "diff --git a/applications/ColossalQA/colossalqa/chain/retrieval_qa/base.py b/applications/ColossalQA/colossalqa/chain/retrieval_qa/base.py\n--- a/applications/ColossalQA/colossalqa/chain/retrieval_qa/base.py\n+++ b/applications/ColossalQA/colossalqa/chain/retrieval_qa/base.py\n@@ -24,6 +24,7 @@\n from langchain.schema import BaseRetriever, Document\n from langchain.schema.language_model import BaseLanguageModel\n \n+\n class CustomBaseRetrievalQA(BaseRetrievalQA):\n \"\"\"Base class for question-answering chains.\"\"\"\n \n@@ -98,7 +99,6 @@\n for k, v in inputs.items()\n if k in [\"stop\", \"temperature\", \"top_k\", \"top_p\", \"max_new_tokens\", \"doc_prefix\"]\n }\n- answers = []\n if self.combine_documents_chain.memory is not None:\n buffered_history_backup, summarized_history_temp_backup = copy.deepcopy(\n self.combine_documents_chain.memory.buffered_history\n@@ -117,10 +117,10 @@\n ) = copy.deepcopy(buffered_history_backup), copy.deepcopy(summarized_history_temp_backup)\n \n # if rejection_trigger_keywords is not given, return the response from LLM directly\n- rejection_trigger_keywrods = inputs.get('rejection_trigger_keywrods', [])\n+ rejection_trigger_keywrods = inputs.get(\"rejection_trigger_keywrods\", [])\n answer = answer if all([rej not in answer for rej in rejection_trigger_keywrods]) else None\n- if answer is None: \n- answer = inputs.get('rejection_answer', \"\u62b1\u6b49\uff0c\u6839\u636e\u63d0\u4f9b\u7684\u4fe1\u606f\u65e0\u6cd5\u56de\u7b54\u8be5\u95ee\u9898\u3002\")\n+ if answer is None:\n+ answer = inputs.get(\"rejection_answer\", \"\u62b1\u6b49\uff0c\u6839\u636e\u63d0\u4f9b\u7684\u4fe1\u606f\u65e0\u6cd5\u56de\u7b54\u8be5\u95ee\u9898\u3002\")\n if self.combine_documents_chain.memory is not None:\n self.combine_documents_chain.memory.save_context({\"question\": question}, {\"output\": answer})\n \n@@ -161,10 +161,14 @@\n input_documents=docs, question=question, callbacks=_run_manager.get_child(), **kwargs\n )\n # if rejection_trigger_keywords is not given, return the response from LLM directly\n- rejection_trigger_keywrods = inputs.get('rejection_trigger_keywrods', [])\n- answer = answer if all([rej not in answer for rej in rejection_trigger_keywrods]) or len(rejection_trigger_keywrods)==0 else None\n+ rejection_trigger_keywrods = inputs.get(\"rejection_trigger_keywrods\", [])\n+ answer = (\n+ answer\n+ if all([rej not in answer for rej in rejection_trigger_keywrods]) or len(rejection_trigger_keywrods) == 0\n+ else None\n+ )\n if answer is None:\n- answer = inputs.get('rejection_answer', \"\u62b1\u6b49\uff0c\u6839\u636e\u63d0\u4f9b\u7684\u4fe1\u606f\u65e0\u6cd5\u56de\u7b54\u8be5\u95ee\u9898\u3002\")\n+ answer = inputs.get(\"rejection_answer\", \"\u62b1\u6b49\uff0c\u6839\u636e\u63d0\u4f9b\u7684\u4fe1\u606f\u65e0\u6cd5\u56de\u7b54\u8be5\u95ee\u9898\u3002\")\n self.combine_documents_chain.memory.save_context({\"question\": question}, {\"output\": answer})\n \n if self.return_source_documents:\n", "issue": "[tensor] fix some unittests\n\n[tensor] fix some unittests\n\n[tensor] fix some unittests\n\n", "before_files": [{"content": "\"\"\"\nChain for question-answering against a vector database.\n\nModified from Original Source\n\nThis code is based on LangChain Ai's langchain, which can be found at\nhttps://github.com/langchain-ai/langchain\nThe original code is licensed under the MIT license.\n\"\"\"\nfrom __future__ import annotations\n\nimport copy\nimport inspect\nfrom typing import Any, Dict, List, Optional\n\nfrom 
colossalqa.chain.retrieval_qa.load_chain import load_qa_chain\nfrom colossalqa.chain.retrieval_qa.stuff import CustomStuffDocumentsChain\nfrom langchain.callbacks.manager import AsyncCallbackManagerForChainRun, CallbackManagerForChainRun, Callbacks\nfrom langchain.chains.llm import LLMChain\nfrom langchain.chains.question_answering.stuff_prompt import PROMPT_SELECTOR\nfrom langchain.chains.retrieval_qa.base import BaseRetrievalQA\nfrom langchain.prompts import PromptTemplate\nfrom langchain.pydantic_v1 import Field\nfrom langchain.schema import BaseRetriever, Document\nfrom langchain.schema.language_model import BaseLanguageModel\n\nclass CustomBaseRetrievalQA(BaseRetrievalQA):\n \"\"\"Base class for question-answering chains.\"\"\"\n\n @classmethod\n def from_llm(\n cls,\n llm: BaseLanguageModel,\n prompt: Optional[PromptTemplate] = None,\n callbacks: Callbacks = None,\n **kwargs: Any,\n ) -> BaseRetrievalQA:\n \"\"\"Initialize from LLM.\"\"\"\n llm_kwargs = kwargs.pop(\"llm_kwargs\", {})\n _prompt = prompt or PROMPT_SELECTOR.get_prompt(llm)\n llm_chain = LLMChain(llm=llm, prompt=_prompt, callbacks=callbacks, llm_kwargs=llm_kwargs)\n document_prompt = kwargs.get(\n \"document_prompt\", PromptTemplate(input_variables=[\"page_content\"], template=\"Context:\\n{page_content}\")\n )\n combine_documents_chain = CustomStuffDocumentsChain(\n llm_chain=llm_chain,\n document_variable_name=\"context\",\n document_prompt=document_prompt,\n callbacks=callbacks,\n )\n\n return cls(\n combine_documents_chain=combine_documents_chain,\n callbacks=callbacks,\n **kwargs,\n )\n\n @classmethod\n def from_chain_type(\n cls,\n llm: BaseLanguageModel,\n chain_type: str = \"stuff\",\n chain_type_kwargs: Optional[dict] = None,\n **kwargs: Any,\n ) -> BaseRetrievalQA:\n \"\"\"Load chain from chain type.\"\"\"\n llm_kwargs = kwargs.pop(\"llm_kwargs\", {})\n _chain_type_kwargs = chain_type_kwargs or {}\n combine_documents_chain = load_qa_chain(llm, chain_type=chain_type, **_chain_type_kwargs, llm_kwargs=llm_kwargs)\n return cls(combine_documents_chain=combine_documents_chain, **kwargs)\n\n def _call(\n self,\n inputs: Dict[str, Any],\n run_manager: Optional[CallbackManagerForChainRun] = None,\n ) -> Dict[str, Any]:\n \"\"\"Run get_relevant_text and llm on input query.\n\n If chain has 'return_source_documents' as 'True', returns\n the retrieved documents as well under the key 'source_documents'.\n\n Example:\n .. 
code-block:: python\n\n res = indexqa({'query': 'This is my query'})\n answer, docs = res['result'], res['source_documents']\n \"\"\"\n _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()\n question = inputs[self.input_key]\n accepts_run_manager = \"run_manager\" in inspect.signature(self._get_docs).parameters\n if accepts_run_manager:\n docs = self._get_docs(question, run_manager=_run_manager)\n else:\n docs = self._get_docs(question) # type: ignore[call-arg]\n\n kwargs = {\n k: v\n for k, v in inputs.items()\n if k in [\"stop\", \"temperature\", \"top_k\", \"top_p\", \"max_new_tokens\", \"doc_prefix\"]\n }\n answers = []\n if self.combine_documents_chain.memory is not None:\n buffered_history_backup, summarized_history_temp_backup = copy.deepcopy(\n self.combine_documents_chain.memory.buffered_history\n ), copy.deepcopy(self.combine_documents_chain.memory.summarized_history_temp)\n else:\n buffered_history_backup = None\n summarized_history_temp_backup = None\n\n answer = self.combine_documents_chain.run(\n input_documents=docs, question=question, callbacks=_run_manager.get_child(), **kwargs\n )\n if summarized_history_temp_backup is not None and buffered_history_backup is not None:\n (\n self.combine_documents_chain.memory.buffered_history,\n self.combine_documents_chain.memory.summarized_history_temp,\n ) = copy.deepcopy(buffered_history_backup), copy.deepcopy(summarized_history_temp_backup)\n\n # if rejection_trigger_keywords is not given, return the response from LLM directly\n rejection_trigger_keywrods = inputs.get('rejection_trigger_keywrods', [])\n answer = answer if all([rej not in answer for rej in rejection_trigger_keywrods]) else None\n if answer is None: \n answer = inputs.get('rejection_answer', \"\u62b1\u6b49\uff0c\u6839\u636e\u63d0\u4f9b\u7684\u4fe1\u606f\u65e0\u6cd5\u56de\u7b54\u8be5\u95ee\u9898\u3002\")\n if self.combine_documents_chain.memory is not None:\n self.combine_documents_chain.memory.save_context({\"question\": question}, {\"output\": answer})\n\n if self.return_source_documents:\n return {self.output_key: answer, \"source_documents\": docs}\n else:\n return {self.output_key: answer}\n\n async def _acall(\n self,\n inputs: Dict[str, Any],\n run_manager: Optional[AsyncCallbackManagerForChainRun] = None,\n ) -> Dict[str, Any]:\n \"\"\"Run get_relevant_text and llm on input query.\n\n If chain has 'return_source_documents' as 'True', returns\n the retrieved documents as well under the key 'source_documents'.\n\n Example:\n .. 
code-block:: python\n\n res = indexqa({'query': 'This is my query'})\n answer, docs = res['result'], res['source_documents']\n \"\"\"\n _run_manager = run_manager or AsyncCallbackManagerForChainRun.get_noop_manager()\n question = inputs[self.input_key]\n accepts_run_manager = \"run_manager\" in inspect.signature(self._aget_docs).parameters\n if accepts_run_manager:\n docs = await self._aget_docs(question, run_manager=_run_manager)\n else:\n docs = await self._aget_docs(question) # type: ignore[call-arg]\n kwargs = {\n k: v\n for k, v in inputs.items()\n if k in [\"stop\", \"temperature\", \"top_k\", \"top_p\", \"max_new_tokens\", \"doc_prefix\"]\n }\n answer = await self.combine_documents_chain.arun(\n input_documents=docs, question=question, callbacks=_run_manager.get_child(), **kwargs\n )\n # if rejection_trigger_keywords is not given, return the response from LLM directly\n rejection_trigger_keywrods = inputs.get('rejection_trigger_keywrods', [])\n answer = answer if all([rej not in answer for rej in rejection_trigger_keywrods]) or len(rejection_trigger_keywrods)==0 else None\n if answer is None:\n answer = inputs.get('rejection_answer', \"\u62b1\u6b49\uff0c\u6839\u636e\u63d0\u4f9b\u7684\u4fe1\u606f\u65e0\u6cd5\u56de\u7b54\u8be5\u95ee\u9898\u3002\")\n self.combine_documents_chain.memory.save_context({\"question\": question}, {\"output\": answer})\n\n if self.return_source_documents:\n return {self.output_key: answer, \"source_documents\": docs}\n else:\n return {self.output_key: answer}\n\n\nclass RetrievalQA(CustomBaseRetrievalQA):\n \"\"\"Chain for question-answering against an index.\n\n Example:\n .. code-block:: python\n\n from langchain.llms import OpenAI\n from langchain.chains import RetrievalQA\n from langchain.faiss import FAISS\n from langchain.vectorstores.base import VectorStoreRetriever\n retriever = VectorStoreRetriever(vectorstore=FAISS(...))\n retrievalQA = RetrievalQA.from_llm(llm=OpenAI(), retriever=retriever)\n\n \"\"\"\n\n retriever: BaseRetriever = Field(exclude=True)\n\n def _get_docs(\n self,\n question: str,\n *,\n run_manager: CallbackManagerForChainRun,\n ) -> List[Document]:\n \"\"\"Get docs.\"\"\"\n return self.retriever.get_relevant_documents(question, callbacks=run_manager.get_child())\n\n async def _aget_docs(\n self,\n question: str,\n *,\n run_manager: AsyncCallbackManagerForChainRun,\n ) -> List[Document]:\n \"\"\"Get docs.\"\"\"\n return await self.retriever.aget_relevant_documents(question, callbacks=run_manager.get_child())\n\n @property\n def _chain_type(self) -> str:\n \"\"\"Return the chain type.\"\"\"\n return \"retrieval_qa\"\n", "path": "applications/ColossalQA/colossalqa/chain/retrieval_qa/base.py"}]}
| 3,055 | 715 |
gh_patches_debug_8469
|
rasdani/github-patches
|
git_diff
|
mdn__kuma-6263
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Site search searches on sidebar
Look at this search result: https://developer.mozilla.org/en-US/search?q=django
<img width="726" alt="Screen Shot 2019-12-18 at 8 33 39 AM" src="https://user-images.githubusercontent.com/26739/71090488-2096db80-2171-11ea-999b-a92d470687db.png">
It's showing pages that mention "Django" in an "unexpanded" sidebar.
That's not helping. It's misleading and if a particular string is in the sidebar, it'll exist on [its own page](https://developer.mozilla.org/en-US/docs/Learn/Server-side/Django).
</issue>
<code>
[start of kuma/wiki/search.py]
1
2
3 import logging
4 from math import ceil
5
6 from celery import chain
7 from django.conf import settings
8 from django.db.models import Q
9 from django.utils.html import strip_tags
10 from django.utils.translation import ugettext_lazy as _
11 from elasticsearch.helpers import bulk
12 from elasticsearch_dsl import document, field
13 from elasticsearch_dsl.connections import connections
14 from elasticsearch_dsl.mapping import Mapping
15 from elasticsearch_dsl.search import Search
16
17 from kuma.core.utils import chord_flow, chunked
18
19 from .constants import EXPERIMENT_TITLE_PREFIX
20
21
22 log = logging.getLogger('kuma.wiki.search')
23
24
25 class WikiDocumentType(document.Document):
26 excerpt_fields = ['summary', 'content']
27 exclude_slugs = ['Talk:', 'User:', 'User_talk:', 'Template_talk:',
28 'Project_talk:', EXPERIMENT_TITLE_PREFIX]
29
30 boost = field.Float(null_value=1.0)
31 content = field.Text(analyzer='kuma_content',
32 term_vector='with_positions_offsets')
33 css_classnames = field.Keyword()
34 html_attributes = field.Keyword()
35 id = field.Long()
36 kumascript_macros = field.Keyword()
37 locale = field.Keyword()
38 modified = field.Date()
39 parent = field.Object(properties={
40 'id': field.Long(),
41 'title': field.Text(analyzer='kuma_title'),
42 'slug': field.Keyword(),
43 'locale': field.Keyword(),
44 })
45 slug = field.Keyword()
46 summary = field.Text(analyzer='kuma_content',
47 term_vector='with_positions_offsets')
48 tags = field.Keyword()
49 title = field.Text(analyzer='kuma_title')
50
51 class Meta(object):
52 mapping = Mapping('wiki_document')
53 mapping.meta('_all', enabled=False)
54
55 @classmethod
56 def get_connection(cls, alias='default'):
57 return connections.get_connection(alias)
58
59 @classmethod
60 def get_doc_type(cls):
61 return cls._doc_type.name
62
63 @classmethod
64 def case_insensitive_keywords(cls, keywords):
65 '''Create a unique list of lowercased keywords.'''
66 return sorted({keyword.lower() for keyword in keywords})
67
68 @classmethod
69 def from_django(cls, obj):
70 is_root_document = obj.slug.count('/') == 1
71 doc = {
72 'id': obj.id,
73 'boost': 4.0 if is_root_document else 1.0,
74 'title': obj.title,
75 'slug': obj.slug,
76 'summary': obj.get_summary_text(),
77 'locale': obj.locale,
78 'modified': obj.modified,
79 'content': strip_tags(obj.rendered_html or ''),
80 'tags': [o.name for o in obj.tags.all()],
81 'kumascript_macros': cls.case_insensitive_keywords(
82 obj.extract.macro_names()),
83 'css_classnames': cls.case_insensitive_keywords(
84 obj.extract.css_classnames()),
85 'html_attributes': cls.case_insensitive_keywords(
86 obj.extract.html_attributes()),
87 }
88
89 if obj.parent:
90 doc['parent'] = {
91 'id': obj.parent.id,
92 'title': obj.parent.title,
93 'locale': obj.parent.locale,
94 'slug': obj.parent.slug,
95 }
96 else:
97 doc['parent'] = {}
98
99 return doc
100
101 @classmethod
102 def get_mapping(cls):
103 return cls._doc_type.mapping.to_dict()
104
105 @classmethod
106 def get_analysis(cls):
107 return {
108 'filter': {
109 'kuma_word_delimiter': {
110 'type': 'word_delimiter',
111 'preserve_original': True, # hi-fi -> hifi, hi-fi
112 'catenate_words': True, # hi-fi -> hifi
113 'catenate_numbers': True, # 90-210 -> 90210
114 }
115 },
116 'analyzer': {
117 'default': {
118 'tokenizer': 'standard',
119 'filter': ['standard', 'elision']
120 },
121 # a custom analyzer that strips html and uses our own
122 # word delimiter filter and the elision filter
123 # (e.g. L'attribut -> attribut). The rest is the same as
124 # the snowball analyzer
125 'kuma_content': {
126 'type': 'custom',
127 'tokenizer': 'standard',
128 'char_filter': ['html_strip'],
129 'filter': [
130 'elision',
131 'kuma_word_delimiter',
132 'lowercase',
133 'standard',
134 'stop',
135 'snowball',
136 ],
137 },
138 'kuma_title': {
139 'type': 'custom',
140 'tokenizer': 'standard',
141 'filter': [
142 'elision',
143 'kuma_word_delimiter',
144 'lowercase',
145 'standard',
146 'snowball',
147 ],
148 },
149 },
150 }
151
152 @classmethod
153 def get_settings(cls):
154 return {
155 'mappings': cls.get_mapping(),
156 'settings': {
157 'analysis': cls.get_analysis(),
158 'number_of_replicas': settings.ES_DEFAULT_NUM_REPLICAS,
159 'number_of_shards': settings.ES_DEFAULT_NUM_SHARDS,
160 }
161 }
162
163 @classmethod
164 def bulk_index(cls, documents, id_field='id', es=None, index=None):
165 """Index of a bunch of documents."""
166 es = es or cls.get_connection()
167 index = index or cls.get_index()
168 type = cls.get_doc_type()
169
170 actions = [
171 {'_index': index, '_type': type, '_id': d['id'], '_source': d}
172 for d in documents]
173
174 bulk(es, actions)
175
176 @classmethod
177 def bulk_delete(cls, ids, es=None, index=None):
178 """Index of a bunch of documents."""
179 es = es or cls.get_connection()
180 index = index or cls.get_index()
181 type = cls.get_doc_type()
182
183 actions = [
184 {'_op_type': 'delete', '_index': index, '_type': type, '_id': _id}
185 for _id in ids]
186
187 bulk(es, actions)
188
189 @classmethod
190 def get_index(cls):
191 from kuma.search.models import Index
192 return Index.objects.get_current().prefixed_name
193
194 @classmethod
195 def search(cls, **kwargs):
196 options = {
197 'using': connections.get_connection(),
198 'index': cls.get_index(),
199 'doc_type': {cls._doc_type.name: cls.from_es},
200 }
201 options.update(kwargs)
202 sq = Search(**options)
203
204 return sq
205
206 @classmethod
207 def get_model(cls):
208 from kuma.wiki.models import Document
209 return Document
210
211 @classmethod
212 def get_indexable(cls, percent=100):
213 """
214 For this mapping type return a list of model IDs that should be
215 indexed with the management command, in a full reindex.
216
217 WARNING: When changing this code make sure to update the
218 ``should_update`` method below, too!
219
220 """
221 model = cls.get_model()
222
223 excludes = Q()
224 for exclude in cls.exclude_slugs:
225 excludes |= Q(slug__startswith=exclude)
226
227 qs = model.objects.filter(is_redirect=False).exclude(excludes)
228
229 percent = percent / 100
230 if percent < 1:
231 qs = qs[:int(qs.count() * percent)]
232
233 return qs.values_list('id', flat=True)
234
235 @classmethod
236 def should_update(cls, obj):
237 """
238 Given a Document instance should return boolean value
239 whether the instance should be indexed or not.
240
241 WARNING: This *must* mirror the logic of the ``get_indexable``
242 method above!
243 """
244 return (not obj.is_redirect and not obj.deleted and
245 not any([exclude in obj.slug
246 for exclude in cls.exclude_slugs]))
247
248 def get_excerpt(self):
249 highlighted = getattr(self.meta, 'highlight', None)
250 if highlighted:
251 for excerpt_field in self.excerpt_fields:
252 if excerpt_field in highlighted:
253 return '…'.join(highlighted[excerpt_field])
254 return self.summary
255
256 @classmethod
257 def reindex_all(cls, chunk_size=500, index=None, percent=100):
258 """Rebuild ElasticSearch indexes.
259
260 :arg chunk_size: how many documents to bulk index as a single chunk.
261 :arg index: the `Index` object to reindex into. Uses the current
262 promoted index if none provided.
263 :arg percent: 1 to 100--the percentage of the db to index.
264
265 """
266 from kuma.search.models import Index
267 from kuma.search.tasks import prepare_index, finalize_index
268 from kuma.wiki.tasks import index_documents
269
270 index = index or Index.objects.get_current()
271
272 # Get the list of document IDs to index.
273 indexable = WikiDocumentType.get_indexable(percent)
274
275 total = len(indexable)
276 total_chunks = int(ceil(total / chunk_size))
277
278 pre_task = prepare_index.si(index.pk)
279 post_task = finalize_index.si(index.pk)
280
281 if not total:
282 # If there's no data we still create the index and finalize it.
283 chain(pre_task, post_task).apply_async()
284 else:
285 index_tasks = [index_documents.si(chunk, index.pk)
286 for chunk in chunked(indexable, chunk_size)]
287 chord_flow(pre_task, index_tasks, post_task).apply_async()
288
289 message = _(
290 'Indexing %(total)d documents into %(total_chunks)d chunks of '
291 'size %(size)d into index %(index)s.' % {
292 'total': total,
293 'total_chunks': total_chunks,
294 'size': chunk_size,
295 'index': index.prefixed_name
296 }
297 )
298 return message
299
[end of kuma/wiki/search.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/kuma/wiki/search.py b/kuma/wiki/search.py
--- a/kuma/wiki/search.py
+++ b/kuma/wiki/search.py
@@ -76,7 +76,7 @@
'summary': obj.get_summary_text(),
'locale': obj.locale,
'modified': obj.modified,
- 'content': strip_tags(obj.rendered_html or ''),
+ 'content': strip_tags(obj.get_body_html() or ''),
'tags': [o.name for o in obj.tags.all()],
'kumascript_macros': cls.case_insensitive_keywords(
obj.extract.macro_names()),
|
{"golden_diff": "diff --git a/kuma/wiki/search.py b/kuma/wiki/search.py\n--- a/kuma/wiki/search.py\n+++ b/kuma/wiki/search.py\n@@ -76,7 +76,7 @@\n 'summary': obj.get_summary_text(),\n 'locale': obj.locale,\n 'modified': obj.modified,\n- 'content': strip_tags(obj.rendered_html or ''),\n+ 'content': strip_tags(obj.get_body_html() or ''),\n 'tags': [o.name for o in obj.tags.all()],\n 'kumascript_macros': cls.case_insensitive_keywords(\n obj.extract.macro_names()),\n", "issue": "Site search searches on sidebar\nLook at this search result: https://developer.mozilla.org/en-US/search?q=django\r\n<img width=\"726\" alt=\"Screen Shot 2019-12-18 at 8 33 39 AM\" src=\"https://user-images.githubusercontent.com/26739/71090488-2096db80-2171-11ea-999b-a92d470687db.png\">\r\n\r\nIt's showing pages that mention \"Django\" in an \"unexpanded\" sidebar. \r\nThat's not helping. It's misleading and if a particular string is in the sidebar, it'll exist on [its own page](https://developer.mozilla.org/en-US/docs/Learn/Server-side/Django). \n", "before_files": [{"content": "\n\nimport logging\nfrom math import ceil\n\nfrom celery import chain\nfrom django.conf import settings\nfrom django.db.models import Q\nfrom django.utils.html import strip_tags\nfrom django.utils.translation import ugettext_lazy as _\nfrom elasticsearch.helpers import bulk\nfrom elasticsearch_dsl import document, field\nfrom elasticsearch_dsl.connections import connections\nfrom elasticsearch_dsl.mapping import Mapping\nfrom elasticsearch_dsl.search import Search\n\nfrom kuma.core.utils import chord_flow, chunked\n\nfrom .constants import EXPERIMENT_TITLE_PREFIX\n\n\nlog = logging.getLogger('kuma.wiki.search')\n\n\nclass WikiDocumentType(document.Document):\n excerpt_fields = ['summary', 'content']\n exclude_slugs = ['Talk:', 'User:', 'User_talk:', 'Template_talk:',\n 'Project_talk:', EXPERIMENT_TITLE_PREFIX]\n\n boost = field.Float(null_value=1.0)\n content = field.Text(analyzer='kuma_content',\n term_vector='with_positions_offsets')\n css_classnames = field.Keyword()\n html_attributes = field.Keyword()\n id = field.Long()\n kumascript_macros = field.Keyword()\n locale = field.Keyword()\n modified = field.Date()\n parent = field.Object(properties={\n 'id': field.Long(),\n 'title': field.Text(analyzer='kuma_title'),\n 'slug': field.Keyword(),\n 'locale': field.Keyword(),\n })\n slug = field.Keyword()\n summary = field.Text(analyzer='kuma_content',\n term_vector='with_positions_offsets')\n tags = field.Keyword()\n title = field.Text(analyzer='kuma_title')\n\n class Meta(object):\n mapping = Mapping('wiki_document')\n mapping.meta('_all', enabled=False)\n\n @classmethod\n def get_connection(cls, alias='default'):\n return connections.get_connection(alias)\n\n @classmethod\n def get_doc_type(cls):\n return cls._doc_type.name\n\n @classmethod\n def case_insensitive_keywords(cls, keywords):\n '''Create a unique list of lowercased keywords.'''\n return sorted({keyword.lower() for keyword in keywords})\n\n @classmethod\n def from_django(cls, obj):\n is_root_document = obj.slug.count('/') == 1\n doc = {\n 'id': obj.id,\n 'boost': 4.0 if is_root_document else 1.0,\n 'title': obj.title,\n 'slug': obj.slug,\n 'summary': obj.get_summary_text(),\n 'locale': obj.locale,\n 'modified': obj.modified,\n 'content': strip_tags(obj.rendered_html or ''),\n 'tags': [o.name for o in obj.tags.all()],\n 'kumascript_macros': cls.case_insensitive_keywords(\n obj.extract.macro_names()),\n 'css_classnames': cls.case_insensitive_keywords(\n obj.extract.css_classnames()),\n 
'html_attributes': cls.case_insensitive_keywords(\n obj.extract.html_attributes()),\n }\n\n if obj.parent:\n doc['parent'] = {\n 'id': obj.parent.id,\n 'title': obj.parent.title,\n 'locale': obj.parent.locale,\n 'slug': obj.parent.slug,\n }\n else:\n doc['parent'] = {}\n\n return doc\n\n @classmethod\n def get_mapping(cls):\n return cls._doc_type.mapping.to_dict()\n\n @classmethod\n def get_analysis(cls):\n return {\n 'filter': {\n 'kuma_word_delimiter': {\n 'type': 'word_delimiter',\n 'preserve_original': True, # hi-fi -> hifi, hi-fi\n 'catenate_words': True, # hi-fi -> hifi\n 'catenate_numbers': True, # 90-210 -> 90210\n }\n },\n 'analyzer': {\n 'default': {\n 'tokenizer': 'standard',\n 'filter': ['standard', 'elision']\n },\n # a custom analyzer that strips html and uses our own\n # word delimiter filter and the elision filter\n # (e.g. L'attribut -> attribut). The rest is the same as\n # the snowball analyzer\n 'kuma_content': {\n 'type': 'custom',\n 'tokenizer': 'standard',\n 'char_filter': ['html_strip'],\n 'filter': [\n 'elision',\n 'kuma_word_delimiter',\n 'lowercase',\n 'standard',\n 'stop',\n 'snowball',\n ],\n },\n 'kuma_title': {\n 'type': 'custom',\n 'tokenizer': 'standard',\n 'filter': [\n 'elision',\n 'kuma_word_delimiter',\n 'lowercase',\n 'standard',\n 'snowball',\n ],\n },\n },\n }\n\n @classmethod\n def get_settings(cls):\n return {\n 'mappings': cls.get_mapping(),\n 'settings': {\n 'analysis': cls.get_analysis(),\n 'number_of_replicas': settings.ES_DEFAULT_NUM_REPLICAS,\n 'number_of_shards': settings.ES_DEFAULT_NUM_SHARDS,\n }\n }\n\n @classmethod\n def bulk_index(cls, documents, id_field='id', es=None, index=None):\n \"\"\"Index of a bunch of documents.\"\"\"\n es = es or cls.get_connection()\n index = index or cls.get_index()\n type = cls.get_doc_type()\n\n actions = [\n {'_index': index, '_type': type, '_id': d['id'], '_source': d}\n for d in documents]\n\n bulk(es, actions)\n\n @classmethod\n def bulk_delete(cls, ids, es=None, index=None):\n \"\"\"Index of a bunch of documents.\"\"\"\n es = es or cls.get_connection()\n index = index or cls.get_index()\n type = cls.get_doc_type()\n\n actions = [\n {'_op_type': 'delete', '_index': index, '_type': type, '_id': _id}\n for _id in ids]\n\n bulk(es, actions)\n\n @classmethod\n def get_index(cls):\n from kuma.search.models import Index\n return Index.objects.get_current().prefixed_name\n\n @classmethod\n def search(cls, **kwargs):\n options = {\n 'using': connections.get_connection(),\n 'index': cls.get_index(),\n 'doc_type': {cls._doc_type.name: cls.from_es},\n }\n options.update(kwargs)\n sq = Search(**options)\n\n return sq\n\n @classmethod\n def get_model(cls):\n from kuma.wiki.models import Document\n return Document\n\n @classmethod\n def get_indexable(cls, percent=100):\n \"\"\"\n For this mapping type return a list of model IDs that should be\n indexed with the management command, in a full reindex.\n\n WARNING: When changing this code make sure to update the\n ``should_update`` method below, too!\n\n \"\"\"\n model = cls.get_model()\n\n excludes = Q()\n for exclude in cls.exclude_slugs:\n excludes |= Q(slug__startswith=exclude)\n\n qs = model.objects.filter(is_redirect=False).exclude(excludes)\n\n percent = percent / 100\n if percent < 1:\n qs = qs[:int(qs.count() * percent)]\n\n return qs.values_list('id', flat=True)\n\n @classmethod\n def should_update(cls, obj):\n \"\"\"\n Given a Document instance should return boolean value\n whether the instance should be indexed or not.\n\n WARNING: This *must* mirror the 
logic of the ``get_indexable``\n method above!\n \"\"\"\n return (not obj.is_redirect and not obj.deleted and\n not any([exclude in obj.slug\n for exclude in cls.exclude_slugs]))\n\n def get_excerpt(self):\n highlighted = getattr(self.meta, 'highlight', None)\n if highlighted:\n for excerpt_field in self.excerpt_fields:\n if excerpt_field in highlighted:\n return '\u2026'.join(highlighted[excerpt_field])\n return self.summary\n\n @classmethod\n def reindex_all(cls, chunk_size=500, index=None, percent=100):\n \"\"\"Rebuild ElasticSearch indexes.\n\n :arg chunk_size: how many documents to bulk index as a single chunk.\n :arg index: the `Index` object to reindex into. Uses the current\n promoted index if none provided.\n :arg percent: 1 to 100--the percentage of the db to index.\n\n \"\"\"\n from kuma.search.models import Index\n from kuma.search.tasks import prepare_index, finalize_index\n from kuma.wiki.tasks import index_documents\n\n index = index or Index.objects.get_current()\n\n # Get the list of document IDs to index.\n indexable = WikiDocumentType.get_indexable(percent)\n\n total = len(indexable)\n total_chunks = int(ceil(total / chunk_size))\n\n pre_task = prepare_index.si(index.pk)\n post_task = finalize_index.si(index.pk)\n\n if not total:\n # If there's no data we still create the index and finalize it.\n chain(pre_task, post_task).apply_async()\n else:\n index_tasks = [index_documents.si(chunk, index.pk)\n for chunk in chunked(indexable, chunk_size)]\n chord_flow(pre_task, index_tasks, post_task).apply_async()\n\n message = _(\n 'Indexing %(total)d documents into %(total_chunks)d chunks of '\n 'size %(size)d into index %(index)s.' % {\n 'total': total,\n 'total_chunks': total_chunks,\n 'size': chunk_size,\n 'index': index.prefixed_name\n }\n )\n return message\n", "path": "kuma/wiki/search.py"}]}
| 3,589 | 128 |
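Editor's note on the record above: the golden diff swaps `obj.rendered_html` for `obj.get_body_html()` when building the Elasticsearch document, so only the article body (not the rendered sidebar/navigation markup) is indexed and excerpted. Below is a minimal sketch of the underlying idea, assuming the sidebar sits inside a wrapper element with a known id; the `Quick_links` id and the `BodyTextExtractor` helper are illustrative assumptions, not Kuma's actual implementation.

```python
from html.parser import HTMLParser


class BodyTextExtractor(HTMLParser):
    """Collect page text while skipping everything inside the sidebar wrapper.

    Assumes the sidebar is wrapped in an element with id="Quick_links";
    void elements (<br>, <img>, ...) are ignored for brevity.
    """

    def __init__(self, skip_id="Quick_links"):
        super().__init__()
        self.skip_id = skip_id
        self._skip_depth = 0          # >0 while inside the sidebar subtree
        self._chunks = []

    def handle_starttag(self, tag, attrs):
        if self._skip_depth:
            self._skip_depth += 1
        elif dict(attrs).get("id") == self.skip_id:
            self._skip_depth = 1

    def handle_endtag(self, tag):
        if self._skip_depth:
            self._skip_depth -= 1

    def handle_data(self, data):
        if not self._skip_depth:
            self._chunks.append(data)

    def text(self):
        return " ".join(" ".join(self._chunks).split())


sample = ('<div id="Quick_links"><ol><li>Sidebar mentions Django</li></ol></div>'
          '<p>Body text about Django forms.</p>')
extractor = BodyTextExtractor()
extractor.feed(sample)
print(extractor.text())  # -> "Body text about Django forms."
```

Pushing the exclusion into indexing (rather than filtering at query time) keeps search hits and highlighted excerpts aligned with text the reader will actually see on the target page.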
gh_patches_debug_6111
|
rasdani/github-patches
|
git_diff
|
mkdocs__mkdocs-2464
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
GET /%E8%AE%A1%E7%AE%97/HTTP/1.1" code 404 does not support Chinese name md documents
Questions are as follows:
I have encountered a problem when using Chinese to set the navigation, click on the navigation bar to prompt the page to display: 404-Not found
The CLI error is as follows: "GET /%E8%AE%A1%E7%AE%97/ HTTP/1.1" code 404
The mkdocs.yml configuration file is as follows:
```
site_name: My Docs
site_url: https://example.com/
theme:
name: material
nav:
- 首页: index.md
- 测试1: 测试1.md
- 测试2: 测试2.md
```
I tried windows system, ubuntu system, mac system, and I can’t quote markdown documents with using Chinese to set the navigation but I see other people’s blogs support Chinese, but theirs is not the latest version, the author is in the latest version. Do you choose not to support Chinese markdown documents?
I checked the historical problem records, but did not find a clear instruction, if you know, please help me, thank you very much
</issue>
<code>
[start of mkdocs/livereload/__init__.py]
1 import functools
2 import io
3 import logging
4 import mimetypes
5 import os
6 import os.path
7 import pathlib
8 import re
9 import socketserver
10 import threading
11 import time
12 import warnings
13 import wsgiref.simple_server
14
15 import watchdog.events
16 import watchdog.observers
17
18
19 class _LoggerAdapter(logging.LoggerAdapter):
20 def process(self, msg, kwargs):
21 return time.strftime("[%H:%M:%S] ") + msg, kwargs
22
23
24 log = _LoggerAdapter(logging.getLogger(__name__), {})
25
26
27 class LiveReloadServer(socketserver.ThreadingMixIn, wsgiref.simple_server.WSGIServer):
28 daemon_threads = True
29 poll_response_timeout = 60
30
31 def __init__(
32 self,
33 builder,
34 host,
35 port,
36 root,
37 mount_path="/",
38 build_delay=0.25,
39 shutdown_delay=0.25,
40 **kwargs,
41 ):
42 self.builder = builder
43 self.server_name = host
44 self.server_port = port
45 self.root = os.path.abspath(root)
46 self.mount_path = ("/" + mount_path.lstrip("/")).rstrip("/") + "/"
47 self.url = f"http://{self.server_name}:{self.server_port}{self.mount_path}"
48 self.build_delay = build_delay
49 self.shutdown_delay = shutdown_delay
50 # To allow custom error pages.
51 self.error_handler = lambda code: None
52
53 super().__init__((host, port), _Handler, **kwargs)
54 self.set_app(self.serve_request)
55
56 self._wanted_epoch = _timestamp() # The version of the site that started building.
57 self._visible_epoch = self._wanted_epoch # Latest fully built version of the site.
58 self._epoch_cond = threading.Condition() # Must be held when accessing _visible_epoch.
59
60 self._to_rebuild = {} # Used as an ordered set of functions to call.
61 self._rebuild_cond = threading.Condition() # Must be held when accessing _to_rebuild.
62
63 self._shutdown = False
64 self.serve_thread = threading.Thread(target=lambda: self.serve_forever(shutdown_delay))
65 self.observer = watchdog.observers.Observer(timeout=shutdown_delay)
66
67 def watch(self, path, func=None, recursive=True):
68 """Add the 'path' to watched paths, call the function and reload when any file changes under it."""
69 path = os.path.abspath(path)
70 if func in (None, self.builder):
71 func = self.builder
72 else:
73 warnings.warn(
74 "Plugins should not pass the 'func' parameter of watch(). "
75 "The ability to execute custom callbacks will be removed soon.",
76 DeprecationWarning,
77 stacklevel=2,
78 )
79
80 def callback(event, allowed_path=None):
81 if isinstance(event, watchdog.events.DirCreatedEvent):
82 return
83 if allowed_path is not None and event.src_path != allowed_path:
84 return
85 # Text editors always cause a "file close" event in addition to "modified" when saving
86 # a file. Some editors also have "swap" functionality that keeps writing into another
87 # file that's never closed. Prevent such write events from causing a rebuild.
88 if isinstance(event, watchdog.events.FileModifiedEvent):
89 # But FileClosedEvent is implemented only on Linux, otherwise we mustn't skip this:
90 if type(self.observer).__name__ == "InotifyObserver":
91 return
92 log.debug(str(event))
93 with self._rebuild_cond:
94 self._to_rebuild[func] = True
95 self._rebuild_cond.notify_all()
96
97 dir_handler = watchdog.events.FileSystemEventHandler()
98 dir_handler.on_any_event = callback
99
100 seen = set()
101
102 def schedule(path):
103 seen.add(path)
104 if path.is_file():
105 # Watchdog doesn't support watching files, so watch its directory and filter by path
106 handler = watchdog.events.FileSystemEventHandler()
107 handler.on_any_event = lambda event: callback(event, allowed_path=os.fspath(path))
108
109 parent = path.parent
110 log.debug(f"Watching file '{path}' through directory '{parent}'")
111 self.observer.schedule(handler, parent)
112 else:
113 log.debug(f"Watching directory '{path}'")
114 self.observer.schedule(dir_handler, path, recursive=recursive)
115
116 schedule(pathlib.Path(path).resolve())
117
118 def watch_symlink_targets(path_obj): # path is os.DirEntry or pathlib.Path
119 if path_obj.is_symlink():
120 path_obj = pathlib.Path(path_obj).resolve()
121 if path_obj in seen or not path_obj.exists():
122 return
123 schedule(path_obj)
124
125 if path_obj.is_dir() and recursive:
126 with os.scandir(os.fspath(path_obj)) as scan:
127 for entry in scan:
128 watch_symlink_targets(entry)
129
130 watch_symlink_targets(pathlib.Path(path))
131
132 def serve(self):
133 self.observer.start()
134
135 log.info(f"Serving on {self.url}")
136 self.serve_thread.start()
137
138 self._build_loop()
139
140 def _build_loop(self):
141 while True:
142 with self._rebuild_cond:
143 while not self._rebuild_cond.wait_for(
144 lambda: self._to_rebuild or self._shutdown, timeout=self.shutdown_delay
145 ):
146 # We could have used just one wait instead of a loop + timeout, but we need
147 # occasional breaks, otherwise on Windows we can't receive KeyboardInterrupt.
148 pass
149 if self._shutdown:
150 break
151 log.info("Detected file changes")
152 while self._rebuild_cond.wait(timeout=self.build_delay):
153 log.debug("Waiting for file changes to stop happening")
154
155 self._wanted_epoch = _timestamp()
156 funcs = list(self._to_rebuild)
157 self._to_rebuild.clear()
158
159 for func in funcs:
160 func()
161
162 with self._epoch_cond:
163 log.info("Reloading browsers")
164 self._visible_epoch = self._wanted_epoch
165 self._epoch_cond.notify_all()
166
167 def shutdown(self):
168 self.observer.stop()
169 with self._rebuild_cond:
170 self._shutdown = True
171 self._rebuild_cond.notify_all()
172
173 if self.serve_thread.is_alive():
174 super().shutdown()
175 self.serve_thread.join()
176 self.observer.join()
177
178 def serve_request(self, environ, start_response):
179 try:
180 result = self._serve_request(environ, start_response)
181 except Exception:
182 code = 500
183 msg = "500 Internal Server Error"
184 log.exception(msg)
185 else:
186 if result is not None:
187 return result
188 code = 404
189 msg = "404 Not Found"
190
191 error_content = None
192 try:
193 error_content = self.error_handler(code)
194 except Exception:
195 log.exception("Failed to render an error message!")
196 if error_content is None:
197 error_content = msg.encode()
198
199 start_response(msg, [("Content-Type", "text/html")])
200 return [error_content]
201
202 def _serve_request(self, environ, start_response):
203 path = environ["PATH_INFO"]
204
205 m = re.fullmatch(r"/livereload/([0-9]+)/[0-9]+", path)
206 if m:
207 epoch = int(m[1])
208 start_response("200 OK", [("Content-Type", "text/plain")])
209
210 def condition():
211 return self._visible_epoch > epoch
212
213 with self._epoch_cond:
214 if not condition():
215 # Stall the browser, respond as soon as there's something new.
216 # If there's not, respond anyway after a minute.
217 self._log_poll_request(environ.get("HTTP_REFERER"), request_id=path)
218 self._epoch_cond.wait_for(condition, timeout=self.poll_response_timeout)
219 return [b"%d" % self._visible_epoch]
220
221 if path == "/js/livereload.js":
222 file_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), "livereload.js")
223 elif path.startswith(self.mount_path):
224 if path.endswith("/"):
225 path += "index.html"
226 path = path[len(self.mount_path):]
227 file_path = os.path.join(self.root, path.lstrip("/"))
228 elif path == "/":
229 start_response("302 Found", [("Location", self.mount_path)])
230 return []
231 else:
232 return None # Not found
233
234 # Wait until the ongoing rebuild (if any) finishes, so we're not serving a half-built site.
235 with self._epoch_cond:
236 self._epoch_cond.wait_for(lambda: self._visible_epoch == self._wanted_epoch)
237 epoch = self._visible_epoch
238
239 try:
240 file = open(file_path, "rb")
241 except OSError:
242 return None # Not found
243
244 if path.endswith(".html"):
245 with file:
246 content = file.read()
247 content = self._inject_js_into_html(content, epoch)
248 file = io.BytesIO(content)
249 content_length = len(content)
250 else:
251 content_length = os.path.getsize(file_path)
252
253 content_type = self._guess_type(file_path)
254 start_response(
255 "200 OK", [("Content-Type", content_type), ("Content-Length", str(content_length))]
256 )
257 return wsgiref.util.FileWrapper(file)
258
259 @classmethod
260 def _inject_js_into_html(cls, content, epoch):
261 try:
262 body_end = content.rindex(b"</body>")
263 except ValueError:
264 body_end = len(content)
265 # The page will reload if the livereload poller returns a newer epoch than what it knows.
266 # The other timestamp becomes just a unique identifier for the initiating page.
267 return (
268 b'%b<script src="/js/livereload.js"></script><script>livereload(%d, %d);</script>%b'
269 % (content[:body_end], epoch, _timestamp(), content[body_end:])
270 )
271
272 @classmethod
273 @functools.lru_cache() # "Cache" to not repeat the same message for the same browser tab.
274 def _log_poll_request(cls, url, request_id):
275 log.info(f"Browser connected: {url}")
276
277 def _guess_type(cls, path):
278 # MkDocs only ensures a few common types (as seen in livereload_tests.py::test_mime_types).
279 # Other uncommon types will not be accepted.
280 if path.endswith((".js", ".JS")):
281 return "application/javascript"
282 if path.endswith(".gz"):
283 return "application/gzip"
284
285 guess, _ = mimetypes.guess_type(path)
286 if guess:
287 return guess
288 return "application/octet-stream"
289
290
291 class _Handler(wsgiref.simple_server.WSGIRequestHandler):
292 def log_request(self, code="-", size="-"):
293 level = logging.DEBUG if str(code) == "200" else logging.WARNING
294 log.log(level, f'"{self.requestline}" code {code}')
295
296 def log_message(self, format, *args):
297 log.debug(format, *args)
298
299
300 def _timestamp():
301 return round(time.monotonic() * 1000)
302
[end of mkdocs/livereload/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/mkdocs/livereload/__init__.py b/mkdocs/livereload/__init__.py
--- a/mkdocs/livereload/__init__.py
+++ b/mkdocs/livereload/__init__.py
@@ -200,7 +200,9 @@
return [error_content]
def _serve_request(self, environ, start_response):
- path = environ["PATH_INFO"]
+ # https://bugs.python.org/issue16679
+ # https://github.com/bottlepy/bottle/blob/f9b1849db4/bottle.py#L984
+ path = environ["PATH_INFO"].encode("latin-1").decode("utf-8", "ignore")
m = re.fullmatch(r"/livereload/([0-9]+)/[0-9]+", path)
if m:
|
{"golden_diff": "diff --git a/mkdocs/livereload/__init__.py b/mkdocs/livereload/__init__.py\n--- a/mkdocs/livereload/__init__.py\n+++ b/mkdocs/livereload/__init__.py\n@@ -200,7 +200,9 @@\n return [error_content]\n \n def _serve_request(self, environ, start_response):\n- path = environ[\"PATH_INFO\"]\n+ # https://bugs.python.org/issue16679\n+ # https://github.com/bottlepy/bottle/blob/f9b1849db4/bottle.py#L984\n+ path = environ[\"PATH_INFO\"].encode(\"latin-1\").decode(\"utf-8\", \"ignore\")\n \n m = re.fullmatch(r\"/livereload/([0-9]+)/[0-9]+\", path)\n if m:\n", "issue": "GET /%E8%AE%A1%E7%AE%97/HTTP/1.1\" code 404 does not support Chinese name md documents\nQuestions are as follows:\r\nI have encountered a problem when using Chinese to set the navigation, click on the navigation bar to prompt the page to display: 404-Not found\r\n\r\nThe CLI error is as follows: \"GET /%E8%AE%A1%E7%AE%97/ HTTP/1.1\" code 404\r\n\r\nThe mkdocs.yml configuration file is as follows:\r\n```\r\nsite_name: My Docs\r\nsite_url: https://example.com/\r\ntheme:\r\nname: material\r\nnav:\r\n- \u9996\u9875: index.md\r\n- \u6d4b\u8bd51: \u6d4b\u8bd51.md\r\n- \u6d4b\u8bd52: \u6d4b\u8bd52.md\r\n```\r\n\r\nI tried windows system, ubuntu system, mac system, and I can\u2019t quote markdown documents with using Chinese to set the navigation but I see other people\u2019s blogs support Chinese, but theirs is not the latest version, the author is in the latest version. Do you choose not to support Chinese markdown documents? \r\n\r\nI checked the historical problem records, but did not find a clear instruction, if you know, please help me, thank you very much\r\n\n", "before_files": [{"content": "import functools\nimport io\nimport logging\nimport mimetypes\nimport os\nimport os.path\nimport pathlib\nimport re\nimport socketserver\nimport threading\nimport time\nimport warnings\nimport wsgiref.simple_server\n\nimport watchdog.events\nimport watchdog.observers\n\n\nclass _LoggerAdapter(logging.LoggerAdapter):\n def process(self, msg, kwargs):\n return time.strftime(\"[%H:%M:%S] \") + msg, kwargs\n\n\nlog = _LoggerAdapter(logging.getLogger(__name__), {})\n\n\nclass LiveReloadServer(socketserver.ThreadingMixIn, wsgiref.simple_server.WSGIServer):\n daemon_threads = True\n poll_response_timeout = 60\n\n def __init__(\n self,\n builder,\n host,\n port,\n root,\n mount_path=\"/\",\n build_delay=0.25,\n shutdown_delay=0.25,\n **kwargs,\n ):\n self.builder = builder\n self.server_name = host\n self.server_port = port\n self.root = os.path.abspath(root)\n self.mount_path = (\"/\" + mount_path.lstrip(\"/\")).rstrip(\"/\") + \"/\"\n self.url = f\"http://{self.server_name}:{self.server_port}{self.mount_path}\"\n self.build_delay = build_delay\n self.shutdown_delay = shutdown_delay\n # To allow custom error pages.\n self.error_handler = lambda code: None\n\n super().__init__((host, port), _Handler, **kwargs)\n self.set_app(self.serve_request)\n\n self._wanted_epoch = _timestamp() # The version of the site that started building.\n self._visible_epoch = self._wanted_epoch # Latest fully built version of the site.\n self._epoch_cond = threading.Condition() # Must be held when accessing _visible_epoch.\n\n self._to_rebuild = {} # Used as an ordered set of functions to call.\n self._rebuild_cond = threading.Condition() # Must be held when accessing _to_rebuild.\n\n self._shutdown = False\n self.serve_thread = threading.Thread(target=lambda: self.serve_forever(shutdown_delay))\n self.observer = 
watchdog.observers.Observer(timeout=shutdown_delay)\n\n def watch(self, path, func=None, recursive=True):\n \"\"\"Add the 'path' to watched paths, call the function and reload when any file changes under it.\"\"\"\n path = os.path.abspath(path)\n if func in (None, self.builder):\n func = self.builder\n else:\n warnings.warn(\n \"Plugins should not pass the 'func' parameter of watch(). \"\n \"The ability to execute custom callbacks will be removed soon.\",\n DeprecationWarning,\n stacklevel=2,\n )\n\n def callback(event, allowed_path=None):\n if isinstance(event, watchdog.events.DirCreatedEvent):\n return\n if allowed_path is not None and event.src_path != allowed_path:\n return\n # Text editors always cause a \"file close\" event in addition to \"modified\" when saving\n # a file. Some editors also have \"swap\" functionality that keeps writing into another\n # file that's never closed. Prevent such write events from causing a rebuild.\n if isinstance(event, watchdog.events.FileModifiedEvent):\n # But FileClosedEvent is implemented only on Linux, otherwise we mustn't skip this:\n if type(self.observer).__name__ == \"InotifyObserver\":\n return\n log.debug(str(event))\n with self._rebuild_cond:\n self._to_rebuild[func] = True\n self._rebuild_cond.notify_all()\n\n dir_handler = watchdog.events.FileSystemEventHandler()\n dir_handler.on_any_event = callback\n\n seen = set()\n\n def schedule(path):\n seen.add(path)\n if path.is_file():\n # Watchdog doesn't support watching files, so watch its directory and filter by path\n handler = watchdog.events.FileSystemEventHandler()\n handler.on_any_event = lambda event: callback(event, allowed_path=os.fspath(path))\n\n parent = path.parent\n log.debug(f\"Watching file '{path}' through directory '{parent}'\")\n self.observer.schedule(handler, parent)\n else:\n log.debug(f\"Watching directory '{path}'\")\n self.observer.schedule(dir_handler, path, recursive=recursive)\n\n schedule(pathlib.Path(path).resolve())\n\n def watch_symlink_targets(path_obj): # path is os.DirEntry or pathlib.Path\n if path_obj.is_symlink():\n path_obj = pathlib.Path(path_obj).resolve()\n if path_obj in seen or not path_obj.exists():\n return\n schedule(path_obj)\n\n if path_obj.is_dir() and recursive:\n with os.scandir(os.fspath(path_obj)) as scan:\n for entry in scan:\n watch_symlink_targets(entry)\n\n watch_symlink_targets(pathlib.Path(path))\n\n def serve(self):\n self.observer.start()\n\n log.info(f\"Serving on {self.url}\")\n self.serve_thread.start()\n\n self._build_loop()\n\n def _build_loop(self):\n while True:\n with self._rebuild_cond:\n while not self._rebuild_cond.wait_for(\n lambda: self._to_rebuild or self._shutdown, timeout=self.shutdown_delay\n ):\n # We could have used just one wait instead of a loop + timeout, but we need\n # occasional breaks, otherwise on Windows we can't receive KeyboardInterrupt.\n pass\n if self._shutdown:\n break\n log.info(\"Detected file changes\")\n while self._rebuild_cond.wait(timeout=self.build_delay):\n log.debug(\"Waiting for file changes to stop happening\")\n\n self._wanted_epoch = _timestamp()\n funcs = list(self._to_rebuild)\n self._to_rebuild.clear()\n\n for func in funcs:\n func()\n\n with self._epoch_cond:\n log.info(\"Reloading browsers\")\n self._visible_epoch = self._wanted_epoch\n self._epoch_cond.notify_all()\n\n def shutdown(self):\n self.observer.stop()\n with self._rebuild_cond:\n self._shutdown = True\n self._rebuild_cond.notify_all()\n\n if self.serve_thread.is_alive():\n super().shutdown()\n 
self.serve_thread.join()\n self.observer.join()\n\n def serve_request(self, environ, start_response):\n try:\n result = self._serve_request(environ, start_response)\n except Exception:\n code = 500\n msg = \"500 Internal Server Error\"\n log.exception(msg)\n else:\n if result is not None:\n return result\n code = 404\n msg = \"404 Not Found\"\n\n error_content = None\n try:\n error_content = self.error_handler(code)\n except Exception:\n log.exception(\"Failed to render an error message!\")\n if error_content is None:\n error_content = msg.encode()\n\n start_response(msg, [(\"Content-Type\", \"text/html\")])\n return [error_content]\n\n def _serve_request(self, environ, start_response):\n path = environ[\"PATH_INFO\"]\n\n m = re.fullmatch(r\"/livereload/([0-9]+)/[0-9]+\", path)\n if m:\n epoch = int(m[1])\n start_response(\"200 OK\", [(\"Content-Type\", \"text/plain\")])\n\n def condition():\n return self._visible_epoch > epoch\n\n with self._epoch_cond:\n if not condition():\n # Stall the browser, respond as soon as there's something new.\n # If there's not, respond anyway after a minute.\n self._log_poll_request(environ.get(\"HTTP_REFERER\"), request_id=path)\n self._epoch_cond.wait_for(condition, timeout=self.poll_response_timeout)\n return [b\"%d\" % self._visible_epoch]\n\n if path == \"/js/livereload.js\":\n file_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), \"livereload.js\")\n elif path.startswith(self.mount_path):\n if path.endswith(\"/\"):\n path += \"index.html\"\n path = path[len(self.mount_path):]\n file_path = os.path.join(self.root, path.lstrip(\"/\"))\n elif path == \"/\":\n start_response(\"302 Found\", [(\"Location\", self.mount_path)])\n return []\n else:\n return None # Not found\n\n # Wait until the ongoing rebuild (if any) finishes, so we're not serving a half-built site.\n with self._epoch_cond:\n self._epoch_cond.wait_for(lambda: self._visible_epoch == self._wanted_epoch)\n epoch = self._visible_epoch\n\n try:\n file = open(file_path, \"rb\")\n except OSError:\n return None # Not found\n\n if path.endswith(\".html\"):\n with file:\n content = file.read()\n content = self._inject_js_into_html(content, epoch)\n file = io.BytesIO(content)\n content_length = len(content)\n else:\n content_length = os.path.getsize(file_path)\n\n content_type = self._guess_type(file_path)\n start_response(\n \"200 OK\", [(\"Content-Type\", content_type), (\"Content-Length\", str(content_length))]\n )\n return wsgiref.util.FileWrapper(file)\n\n @classmethod\n def _inject_js_into_html(cls, content, epoch):\n try:\n body_end = content.rindex(b\"</body>\")\n except ValueError:\n body_end = len(content)\n # The page will reload if the livereload poller returns a newer epoch than what it knows.\n # The other timestamp becomes just a unique identifier for the initiating page.\n return (\n b'%b<script src=\"/js/livereload.js\"></script><script>livereload(%d, %d);</script>%b'\n % (content[:body_end], epoch, _timestamp(), content[body_end:])\n )\n\n @classmethod\n @functools.lru_cache() # \"Cache\" to not repeat the same message for the same browser tab.\n def _log_poll_request(cls, url, request_id):\n log.info(f\"Browser connected: {url}\")\n\n def _guess_type(cls, path):\n # MkDocs only ensures a few common types (as seen in livereload_tests.py::test_mime_types).\n # Other uncommon types will not be accepted.\n if path.endswith((\".js\", \".JS\")):\n return \"application/javascript\"\n if path.endswith(\".gz\"):\n return \"application/gzip\"\n\n guess, _ = 
mimetypes.guess_type(path)\n if guess:\n return guess\n return \"application/octet-stream\"\n\n\nclass _Handler(wsgiref.simple_server.WSGIRequestHandler):\n def log_request(self, code=\"-\", size=\"-\"):\n level = logging.DEBUG if str(code) == \"200\" else logging.WARNING\n log.log(level, f'\"{self.requestline}\" code {code}')\n\n def log_message(self, format, *args):\n log.debug(format, *args)\n\n\ndef _timestamp():\n return round(time.monotonic() * 1000)\n", "path": "mkdocs/livereload/__init__.py"}]}
| 3,984 | 195 |
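Editor's note on the record above: the one-line fix follows PEP 3333, which specifies that WSGI `environ` strings such as `PATH_INFO` are native `str` values carrying latin-1-decoded bytes, so a UTF-8 path like `计算` arrives as mojibake and never matches a file on disk. A self-contained sketch of the failure and the round-trip repair (the percent-encoded path is taken from the issue; this is an illustration, not mkdocs code):

```python
from urllib.parse import unquote_to_bytes

# What the browser sends for a page named "计算" (percent-encoded UTF-8).
raw_path = "/%E8%AE%A1%E7%AE%97/"

# A WSGI server percent-decodes to bytes and exposes them as a latin-1 str
# (PEP 3333), so every UTF-8 byte becomes one mojibake character.
path_info = unquote_to_bytes(raw_path).decode("latin-1")
print(repr(path_info))   # '/è®¡ç®\x97/' -- never matches a directory on disk

# The golden diff's repair: undo the latin-1 step, then decode as UTF-8.
fixed = path_info.encode("latin-1").decode("utf-8", "ignore")
print(repr(fixed))       # '/计算/' -- os.path.join can now locate the built page
```

The `"ignore"` error handler matches the patch and keeps the server from raising on request paths that are not valid UTF-8; such paths simply fail to match a file and fall into the existing 404 handling.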
gh_patches_debug_1396
|
rasdani/github-patches
|
git_diff
|
googleapis__google-cloud-python-5366
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
General: v0.33.0 pip install fails
In a fresh Python v2.7.12 virtualenv on linux:
```
pip install google-cloud
```
Results in:
```
Traceback (most recent call last):
File "<string>", line 1, in <module>
File "/tmp/pip-install-3_n60m/google-cloud/setup.py", line 22, in <module>
with open(os.path.join(PACKAGE_ROOT, 'setup-README.rst')) as file_obj:
IOError: [Errno 2] No such file or directory: '/tmp/pip-install-3_n60m/google-cloud/setup-README.rst'
```
Note:
```
pip install google-cloud==0.32.0
```
works fine.
I believe it has to do with recent changes: https://github.com/GoogleCloudPlatform/google-cloud-python/commit/71e5d4bf94745580834b86c3e92ac4186c3115c0
</issue>
<code>
[start of legacy/google-cloud/setup.py]
1 # Copyright 2016 Google LLC
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import os
16 import logging
17
18 from setuptools import setup
19
20 PACKAGE_ROOT = os.path.abspath(os.path.dirname(__file__))
21
22 with open(os.path.join(PACKAGE_ROOT, 'setup-README.rst')) as file_obj:
23 README = file_obj.read()
24
25 # NOTE: This is duplicated throughout and we should try to
26 # consolidate.
27 SETUP_BASE = {
28 'author': 'Google Cloud Platform',
29 'author_email': '[email protected]',
30 'scripts': [],
31 'url': 'https://github.com/GoogleCloudPlatform/google-cloud-python',
32 'license': 'Apache 2.0',
33 'platforms': 'Posix; MacOS X; Windows',
34 'include_package_data': True,
35 'zip_safe': False,
36 'classifiers': [
37 'Development Status :: 7 - Inactive',
38 'Intended Audience :: Developers',
39 'License :: OSI Approved :: Apache Software License',
40 'Operating System :: OS Independent',
41 'Programming Language :: Python :: 2',
42 'Programming Language :: Python :: 2.7',
43 'Programming Language :: Python :: 3',
44 'Programming Language :: Python :: 3.4',
45 'Programming Language :: Python :: 3.5',
46 'Programming Language :: Python :: 3.6',
47 'Topic :: Internet',
48 ],
49 }
50
51 REQUIREMENTS = [
52 'google-api-core >= 0.1.2, < 0.2.0dev',
53 'google-cloud-bigquery >= 0.28.0, < 0.29dev',
54 'google-cloud-bigquery-datatransfer >= 0.1.0, < 0.2dev',
55 'google-cloud-bigtable >= 0.28.1, < 0.29dev',
56 'google-cloud-container >= 0.1.0, < 0.2dev',
57 'google-cloud-core >= 0.28.0, < 0.29dev',
58 'google-cloud-datastore >= 1.4.0, < 1.5dev',
59 'google-cloud-dns >= 0.28.0, < 0.29dev',
60 'google-cloud-error-reporting >= 0.28.0, < 0.29dev',
61 'google-cloud-firestore >= 0.28.0, < 0.29dev',
62 'google-cloud-language >= 1.0.0, < 1.1dev',
63 'google-cloud-logging >= 1.4.0, < 1.5dev',
64 'google-cloud-monitoring >= 0.28.0, < 0.29dev',
65 'google-cloud-pubsub >= 0.30.0, < 0.31dev',
66 'google-cloud-resource-manager >= 0.28.0, < 0.29dev',
67 'google-cloud-runtimeconfig >= 0.28.0, < 0.29dev',
68 'google-cloud-spanner >= 0.29.0, < 0.30dev',
69 'google-cloud-speech >= 0.30.0, < 0.31dev',
70 'google-cloud-storage >= 1.6.0, < 1.7dev',
71 'google-cloud-trace >= 0.17.0, < 0.18dev',
72 'google-cloud-translate >= 1.3.0, < 1.4dev',
73 'google-cloud-videointelligence >= 1.0.0, < 1.1dev',
74 'google-cloud-vision >= 0.29.0, < 0.30dev',
75 ]
76
77 setup(
78 name='google-cloud',
79 version='0.33.0',
80 description='API Client library for Google Cloud',
81 long_description=README,
82 install_requires=REQUIREMENTS,
83 **SETUP_BASE
84 )
85
86 warning = "WARNING: The google-cloud Python package is deprecated. On " \
87 "June 18, 2018, this package will no longer install any other " \
88 "packages. Please install the product-specific google-cloud-* " \
89 "packages needed for your application. See " \
90 "https://github.com/GoogleCloudPlatform/google-cloud-python."
91
92 logging.warn(warning)
93
[end of legacy/google-cloud/setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/legacy/google-cloud/setup.py b/legacy/google-cloud/setup.py
--- a/legacy/google-cloud/setup.py
+++ b/legacy/google-cloud/setup.py
@@ -76,7 +76,7 @@
setup(
name='google-cloud',
- version='0.33.0',
+ version='0.33.1',
description='API Client library for Google Cloud',
long_description=README,
install_requires=REQUIREMENTS,
|
{"golden_diff": "diff --git a/legacy/google-cloud/setup.py b/legacy/google-cloud/setup.py\n--- a/legacy/google-cloud/setup.py\n+++ b/legacy/google-cloud/setup.py\n@@ -76,7 +76,7 @@\n \n setup(\n name='google-cloud',\n- version='0.33.0',\n+ version='0.33.1',\n description='API Client library for Google Cloud',\n long_description=README,\n install_requires=REQUIREMENTS,\n", "issue": "General: v0.33.0 pip install fails\nIn a fresh Python v2.7.12 virtualenv on linux:\r\n```\r\npip install google-cloud\r\n```\r\n\r\nResults in:\r\n```\r\n Traceback (most recent call last):\r\n File \"<string>\", line 1, in <module>\r\n File \"/tmp/pip-install-3_n60m/google-cloud/setup.py\", line 22, in <module>\r\n with open(os.path.join(PACKAGE_ROOT, 'setup-README.rst')) as file_obj:\r\n IOError: [Errno 2] No such file or directory: '/tmp/pip-install-3_n60m/google-cloud/setup-README.rst'\r\n```\r\n\r\nNote:\r\n```\r\npip install google-cloud==0.32.0\r\n```\r\nworks fine.\r\n\r\nI believe it has to do with recent changes: https://github.com/GoogleCloudPlatform/google-cloud-python/commit/71e5d4bf94745580834b86c3e92ac4186c3115c0\n", "before_files": [{"content": "# Copyright 2016 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport logging\n\nfrom setuptools import setup\n\nPACKAGE_ROOT = os.path.abspath(os.path.dirname(__file__))\n\nwith open(os.path.join(PACKAGE_ROOT, 'setup-README.rst')) as file_obj:\n README = file_obj.read()\n\n# NOTE: This is duplicated throughout and we should try to\n# consolidate.\nSETUP_BASE = {\n 'author': 'Google Cloud Platform',\n 'author_email': '[email protected]',\n 'scripts': [],\n 'url': 'https://github.com/GoogleCloudPlatform/google-cloud-python',\n 'license': 'Apache 2.0',\n 'platforms': 'Posix; MacOS X; Windows',\n 'include_package_data': True,\n 'zip_safe': False,\n 'classifiers': [\n 'Development Status :: 7 - Inactive',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: Apache Software License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Topic :: Internet',\n ],\n}\n\nREQUIREMENTS = [\n 'google-api-core >= 0.1.2, < 0.2.0dev',\n 'google-cloud-bigquery >= 0.28.0, < 0.29dev',\n 'google-cloud-bigquery-datatransfer >= 0.1.0, < 0.2dev',\n 'google-cloud-bigtable >= 0.28.1, < 0.29dev',\n 'google-cloud-container >= 0.1.0, < 0.2dev',\n 'google-cloud-core >= 0.28.0, < 0.29dev',\n 'google-cloud-datastore >= 1.4.0, < 1.5dev',\n 'google-cloud-dns >= 0.28.0, < 0.29dev',\n 'google-cloud-error-reporting >= 0.28.0, < 0.29dev',\n 'google-cloud-firestore >= 0.28.0, < 0.29dev',\n 'google-cloud-language >= 1.0.0, < 1.1dev',\n 'google-cloud-logging >= 1.4.0, < 1.5dev',\n 'google-cloud-monitoring >= 0.28.0, < 0.29dev',\n 'google-cloud-pubsub >= 0.30.0, < 0.31dev',\n 'google-cloud-resource-manager >= 
0.28.0, < 0.29dev',\n 'google-cloud-runtimeconfig >= 0.28.0, < 0.29dev',\n 'google-cloud-spanner >= 0.29.0, < 0.30dev',\n 'google-cloud-speech >= 0.30.0, < 0.31dev',\n 'google-cloud-storage >= 1.6.0, < 1.7dev',\n 'google-cloud-trace >= 0.17.0, < 0.18dev',\n 'google-cloud-translate >= 1.3.0, < 1.4dev',\n 'google-cloud-videointelligence >= 1.0.0, < 1.1dev',\n 'google-cloud-vision >= 0.29.0, < 0.30dev',\n]\n\nsetup(\n name='google-cloud',\n version='0.33.0',\n description='API Client library for Google Cloud',\n long_description=README,\n install_requires=REQUIREMENTS,\n **SETUP_BASE\n)\n\nwarning = \"WARNING: The google-cloud Python package is deprecated. On \" \\\n \"June 18, 2018, this package will no longer install any other \" \\\n \"packages. Please install the product-specific google-cloud-* \" \\\n \"packages needed for your application. See \" \\\n \"https://github.com/GoogleCloudPlatform/google-cloud-python.\"\n\nlogging.warn(warning)\n", "path": "legacy/google-cloud/setup.py"}]}
| 2,015 | 103 |
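Editor's note on the record above: the golden diff only bumps the version to 0.33.1, which implies the actual repair was re-releasing an sdist that really contains `setup-README.rst` (for example by listing it in `MANIFEST.in`). As a purely illustrative alternative, a defensive `setup.py` could fail soft when the file is missing instead of crashing `pip install`; this sketch is an assumption, not the shipped fix.

```python
# Hypothetical hardening of legacy/google-cloud/setup.py (illustration only --
# the released fix was a rebuilt 0.33.1 sdist that ships setup-README.rst).
import os

PACKAGE_ROOT = os.path.abspath(os.path.dirname(__file__))
readme_path = os.path.join(PACKAGE_ROOT, "setup-README.rst")

if os.path.exists(readme_path):
    with open(readme_path) as file_obj:
        README = file_obj.read()
else:
    # Fall back to a short description instead of raising IOError when the
    # file was accidentally left out of the distribution.
    README = "API Client library for Google Cloud."
```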
gh_patches_debug_38743
|
rasdani/github-patches
|
git_diff
|
matrix-org__synapse-8390
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Support config fragments for docker image
**Description:**
I'm using the very nice docker image of synapse and would like to follow the default configuration and only override specific parts of it. It seems that synapse supports config fragments in a directory (which the debian package seems to be [using](https://github.com/matrix-org/synapse/blob/master/debian/matrix-synapse.service#L10) ), but the docker image runs [start.py](https://github.com/matrix-org/synapse/blob/master/docker/start.py#L250) which doesn't have the relevant flags and doesn't allow passing them in.
</issue>
<code>
[start of docker/start.py]
1 #!/usr/local/bin/python
2
3 import codecs
4 import glob
5 import os
6 import subprocess
7 import sys
8
9 import jinja2
10
11
12 # Utility functions
13 def log(txt):
14 print(txt, file=sys.stderr)
15
16
17 def error(txt):
18 log(txt)
19 sys.exit(2)
20
21
22 def convert(src, dst, environ):
23 """Generate a file from a template
24
25 Args:
26 src (str): path to input file
27 dst (str): path to file to write
28 environ (dict): environment dictionary, for replacement mappings.
29 """
30 with open(src) as infile:
31 template = infile.read()
32 rendered = jinja2.Template(template).render(**environ)
33 with open(dst, "w") as outfile:
34 outfile.write(rendered)
35
36
37 def generate_config_from_template(config_dir, config_path, environ, ownership):
38 """Generate a homeserver.yaml from environment variables
39
40 Args:
41 config_dir (str): where to put generated config files
42 config_path (str): where to put the main config file
43 environ (dict): environment dictionary
44 ownership (str|None): "<user>:<group>" string which will be used to set
45 ownership of the generated configs. If None, ownership will not change.
46 """
47 for v in ("SYNAPSE_SERVER_NAME", "SYNAPSE_REPORT_STATS"):
48 if v not in environ:
49 error(
50 "Environment variable '%s' is mandatory when generating a config file."
51 % (v,)
52 )
53
54 # populate some params from data files (if they exist, else create new ones)
55 environ = environ.copy()
56 secrets = {
57 "registration": "SYNAPSE_REGISTRATION_SHARED_SECRET",
58 "macaroon": "SYNAPSE_MACAROON_SECRET_KEY",
59 }
60
61 for name, secret in secrets.items():
62 if secret not in environ:
63 filename = "/data/%s.%s.key" % (environ["SYNAPSE_SERVER_NAME"], name)
64
65 # if the file already exists, load in the existing value; otherwise,
66 # generate a new secret and write it to a file
67
68 if os.path.exists(filename):
69 log("Reading %s from %s" % (secret, filename))
70 with open(filename) as handle:
71 value = handle.read()
72 else:
73 log("Generating a random secret for {}".format(secret))
74 value = codecs.encode(os.urandom(32), "hex").decode()
75 with open(filename, "w") as handle:
76 handle.write(value)
77 environ[secret] = value
78
79 environ["SYNAPSE_APPSERVICES"] = glob.glob("/data/appservices/*.yaml")
80 if not os.path.exists(config_dir):
81 os.mkdir(config_dir)
82
83 # Convert SYNAPSE_NO_TLS to boolean if exists
84 if "SYNAPSE_NO_TLS" in environ:
85 tlsanswerstring = str.lower(environ["SYNAPSE_NO_TLS"])
86 if tlsanswerstring in ("true", "on", "1", "yes"):
87 environ["SYNAPSE_NO_TLS"] = True
88 else:
89 if tlsanswerstring in ("false", "off", "0", "no"):
90 environ["SYNAPSE_NO_TLS"] = False
91 else:
92 error(
93 'Environment variable "SYNAPSE_NO_TLS" found but value "'
94 + tlsanswerstring
95 + '" unrecognized; exiting.'
96 )
97
98 if "SYNAPSE_LOG_CONFIG" not in environ:
99 environ["SYNAPSE_LOG_CONFIG"] = config_dir + "/log.config"
100
101 log("Generating synapse config file " + config_path)
102 convert("/conf/homeserver.yaml", config_path, environ)
103
104 log_config_file = environ["SYNAPSE_LOG_CONFIG"]
105 log("Generating log config file " + log_config_file)
106 convert("/conf/log.config", log_config_file, environ)
107
108 # Hopefully we already have a signing key, but generate one if not.
109 args = [
110 "python",
111 "-m",
112 "synapse.app.homeserver",
113 "--config-path",
114 config_path,
115 # tell synapse to put generated keys in /data rather than /compiled
116 "--keys-directory",
117 config_dir,
118 "--generate-keys",
119 ]
120
121 if ownership is not None:
122 subprocess.check_output(["chown", "-R", ownership, "/data"])
123 args = ["gosu", ownership] + args
124
125 subprocess.check_output(args)
126
127
128 def run_generate_config(environ, ownership):
129 """Run synapse with a --generate-config param to generate a template config file
130
131 Args:
132 environ (dict): env var dict
133 ownership (str|None): "userid:groupid" arg for chmod. If None, ownership will not change.
134
135 Never returns.
136 """
137 for v in ("SYNAPSE_SERVER_NAME", "SYNAPSE_REPORT_STATS"):
138 if v not in environ:
139 error("Environment variable '%s' is mandatory in `generate` mode." % (v,))
140
141 server_name = environ["SYNAPSE_SERVER_NAME"]
142 config_dir = environ.get("SYNAPSE_CONFIG_DIR", "/data")
143 config_path = environ.get("SYNAPSE_CONFIG_PATH", config_dir + "/homeserver.yaml")
144 data_dir = environ.get("SYNAPSE_DATA_DIR", "/data")
145
146 # create a suitable log config from our template
147 log_config_file = "%s/%s.log.config" % (config_dir, server_name)
148 if not os.path.exists(log_config_file):
149 log("Creating log config %s" % (log_config_file,))
150 convert("/conf/log.config", log_config_file, environ)
151
152 args = [
153 "python",
154 "-m",
155 "synapse.app.homeserver",
156 "--server-name",
157 server_name,
158 "--report-stats",
159 environ["SYNAPSE_REPORT_STATS"],
160 "--config-path",
161 config_path,
162 "--config-directory",
163 config_dir,
164 "--data-directory",
165 data_dir,
166 "--generate-config",
167 "--open-private-ports",
168 ]
169 # log("running %s" % (args, ))
170
171 if ownership is not None:
172 # make sure that synapse has perms to write to the data dir.
173 subprocess.check_output(["chown", ownership, data_dir])
174
175 args = ["gosu", ownership] + args
176 os.execv("/usr/sbin/gosu", args)
177 else:
178 os.execv("/usr/local/bin/python", args)
179
180
181 def main(args, environ):
182 mode = args[1] if len(args) > 1 else None
183 desired_uid = int(environ.get("UID", "991"))
184 desired_gid = int(environ.get("GID", "991"))
185 synapse_worker = environ.get("SYNAPSE_WORKER", "synapse.app.homeserver")
186 if (desired_uid == os.getuid()) and (desired_gid == os.getgid()):
187 ownership = None
188 else:
189 ownership = "{}:{}".format(desired_uid, desired_gid)
190
191 if ownership is None:
192 log("Will not perform chmod/gosu as UserID already matches request")
193
194 # In generate mode, generate a configuration and missing keys, then exit
195 if mode == "generate":
196 return run_generate_config(environ, ownership)
197
198 if mode == "migrate_config":
199 # generate a config based on environment vars.
200 config_dir = environ.get("SYNAPSE_CONFIG_DIR", "/data")
201 config_path = environ.get(
202 "SYNAPSE_CONFIG_PATH", config_dir + "/homeserver.yaml"
203 )
204 return generate_config_from_template(
205 config_dir, config_path, environ, ownership
206 )
207
208 if mode is not None:
209 error("Unknown execution mode '%s'" % (mode,))
210
211 config_dir = environ.get("SYNAPSE_CONFIG_DIR", "/data")
212 config_path = environ.get("SYNAPSE_CONFIG_PATH", config_dir + "/homeserver.yaml")
213
214 if not os.path.exists(config_path):
215 if "SYNAPSE_SERVER_NAME" in environ:
216 error(
217 """\
218 Config file '%s' does not exist.
219
220 The synapse docker image no longer supports generating a config file on-the-fly
221 based on environment variables. You can migrate to a static config file by
222 running with 'migrate_config'. See the README for more details.
223 """
224 % (config_path,)
225 )
226
227 error(
228 "Config file '%s' does not exist. You should either create a new "
229 "config file by running with the `generate` argument (and then edit "
230 "the resulting file before restarting) or specify the path to an "
231 "existing config file with the SYNAPSE_CONFIG_PATH variable."
232 % (config_path,)
233 )
234
235 log("Starting synapse with config file " + config_path)
236
237 args = ["python", "-m", synapse_worker, "--config-path", config_path]
238 if ownership is not None:
239 args = ["gosu", ownership] + args
240 os.execv("/usr/sbin/gosu", args)
241 else:
242 os.execv("/usr/local/bin/python", args)
243
244
245 if __name__ == "__main__":
246 main(sys.argv, os.environ)
247
[end of docker/start.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/docker/start.py b/docker/start.py
--- a/docker/start.py
+++ b/docker/start.py
@@ -179,7 +179,7 @@
def main(args, environ):
- mode = args[1] if len(args) > 1 else None
+ mode = args[1] if len(args) > 1 else "run"
desired_uid = int(environ.get("UID", "991"))
desired_gid = int(environ.get("GID", "991"))
synapse_worker = environ.get("SYNAPSE_WORKER", "synapse.app.homeserver")
@@ -205,36 +205,47 @@
config_dir, config_path, environ, ownership
)
- if mode is not None:
+ if mode != "run":
error("Unknown execution mode '%s'" % (mode,))
- config_dir = environ.get("SYNAPSE_CONFIG_DIR", "/data")
- config_path = environ.get("SYNAPSE_CONFIG_PATH", config_dir + "/homeserver.yaml")
+ args = args[2:]
- if not os.path.exists(config_path):
- if "SYNAPSE_SERVER_NAME" in environ:
- error(
- """\
+ if "-m" not in args:
+ args = ["-m", synapse_worker] + args
+
+ # if there are no config files passed to synapse, try adding the default file
+ if not any(p.startswith("--config-path") or p.startswith("-c") for p in args):
+ config_dir = environ.get("SYNAPSE_CONFIG_DIR", "/data")
+ config_path = environ.get(
+ "SYNAPSE_CONFIG_PATH", config_dir + "/homeserver.yaml"
+ )
+
+ if not os.path.exists(config_path):
+ if "SYNAPSE_SERVER_NAME" in environ:
+ error(
+ """\
Config file '%s' does not exist.
The synapse docker image no longer supports generating a config file on-the-fly
based on environment variables. You can migrate to a static config file by
running with 'migrate_config'. See the README for more details.
"""
+ % (config_path,)
+ )
+
+ error(
+ "Config file '%s' does not exist. You should either create a new "
+ "config file by running with the `generate` argument (and then edit "
+ "the resulting file before restarting) or specify the path to an "
+ "existing config file with the SYNAPSE_CONFIG_PATH variable."
% (config_path,)
)
- error(
- "Config file '%s' does not exist. You should either create a new "
- "config file by running with the `generate` argument (and then edit "
- "the resulting file before restarting) or specify the path to an "
- "existing config file with the SYNAPSE_CONFIG_PATH variable."
- % (config_path,)
- )
+ args += ["--config-path", config_path]
- log("Starting synapse with config file " + config_path)
+ log("Starting synapse with args " + " ".join(args))
- args = ["python", "-m", synapse_worker, "--config-path", config_path]
+ args = ["python"] + args
if ownership is not None:
args = ["gosu", ownership] + args
os.execv("/usr/sbin/gosu", args)
|
{"golden_diff": "diff --git a/docker/start.py b/docker/start.py\n--- a/docker/start.py\n+++ b/docker/start.py\n@@ -179,7 +179,7 @@\n \n \n def main(args, environ):\n- mode = args[1] if len(args) > 1 else None\n+ mode = args[1] if len(args) > 1 else \"run\"\n desired_uid = int(environ.get(\"UID\", \"991\"))\n desired_gid = int(environ.get(\"GID\", \"991\"))\n synapse_worker = environ.get(\"SYNAPSE_WORKER\", \"synapse.app.homeserver\")\n@@ -205,36 +205,47 @@\n config_dir, config_path, environ, ownership\n )\n \n- if mode is not None:\n+ if mode != \"run\":\n error(\"Unknown execution mode '%s'\" % (mode,))\n \n- config_dir = environ.get(\"SYNAPSE_CONFIG_DIR\", \"/data\")\n- config_path = environ.get(\"SYNAPSE_CONFIG_PATH\", config_dir + \"/homeserver.yaml\")\n+ args = args[2:]\n \n- if not os.path.exists(config_path):\n- if \"SYNAPSE_SERVER_NAME\" in environ:\n- error(\n- \"\"\"\\\n+ if \"-m\" not in args:\n+ args = [\"-m\", synapse_worker] + args\n+\n+ # if there are no config files passed to synapse, try adding the default file\n+ if not any(p.startswith(\"--config-path\") or p.startswith(\"-c\") for p in args):\n+ config_dir = environ.get(\"SYNAPSE_CONFIG_DIR\", \"/data\")\n+ config_path = environ.get(\n+ \"SYNAPSE_CONFIG_PATH\", config_dir + \"/homeserver.yaml\"\n+ )\n+\n+ if not os.path.exists(config_path):\n+ if \"SYNAPSE_SERVER_NAME\" in environ:\n+ error(\n+ \"\"\"\\\n Config file '%s' does not exist.\n \n The synapse docker image no longer supports generating a config file on-the-fly\n based on environment variables. You can migrate to a static config file by\n running with 'migrate_config'. See the README for more details.\n \"\"\"\n+ % (config_path,)\n+ )\n+\n+ error(\n+ \"Config file '%s' does not exist. You should either create a new \"\n+ \"config file by running with the `generate` argument (and then edit \"\n+ \"the resulting file before restarting) or specify the path to an \"\n+ \"existing config file with the SYNAPSE_CONFIG_PATH variable.\"\n % (config_path,)\n )\n \n- error(\n- \"Config file '%s' does not exist. You should either create a new \"\n- \"config file by running with the `generate` argument (and then edit \"\n- \"the resulting file before restarting) or specify the path to an \"\n- \"existing config file with the SYNAPSE_CONFIG_PATH variable.\"\n- % (config_path,)\n- )\n+ args += [\"--config-path\", config_path]\n \n- log(\"Starting synapse with config file \" + config_path)\n+ log(\"Starting synapse with args \" + \" \".join(args))\n \n- args = [\"python\", \"-m\", synapse_worker, \"--config-path\", config_path]\n+ args = [\"python\"] + args\n if ownership is not None:\n args = [\"gosu\", ownership] + args\n os.execv(\"/usr/sbin/gosu\", args)\n", "issue": "Support config fragments for docker image\n**Description:**\r\n\r\nI'm using the very nice docker image of synapse and would like to follow the default configuration and only override specific parts of it. 
It seems that synapse supports config fragments in a directory (which the debian package seems to be [using](https://github.com/matrix-org/synapse/blob/master/debian/matrix-synapse.service#L10) ), but the docker image runs [start.py](https://github.com/matrix-org/synapse/blob/master/docker/start.py#L250) which doesn't have the relevant flags and doesn't allow passing them in.\n", "before_files": [{"content": "#!/usr/local/bin/python\n\nimport codecs\nimport glob\nimport os\nimport subprocess\nimport sys\n\nimport jinja2\n\n\n# Utility functions\ndef log(txt):\n print(txt, file=sys.stderr)\n\n\ndef error(txt):\n log(txt)\n sys.exit(2)\n\n\ndef convert(src, dst, environ):\n \"\"\"Generate a file from a template\n\n Args:\n src (str): path to input file\n dst (str): path to file to write\n environ (dict): environment dictionary, for replacement mappings.\n \"\"\"\n with open(src) as infile:\n template = infile.read()\n rendered = jinja2.Template(template).render(**environ)\n with open(dst, \"w\") as outfile:\n outfile.write(rendered)\n\n\ndef generate_config_from_template(config_dir, config_path, environ, ownership):\n \"\"\"Generate a homeserver.yaml from environment variables\n\n Args:\n config_dir (str): where to put generated config files\n config_path (str): where to put the main config file\n environ (dict): environment dictionary\n ownership (str|None): \"<user>:<group>\" string which will be used to set\n ownership of the generated configs. If None, ownership will not change.\n \"\"\"\n for v in (\"SYNAPSE_SERVER_NAME\", \"SYNAPSE_REPORT_STATS\"):\n if v not in environ:\n error(\n \"Environment variable '%s' is mandatory when generating a config file.\"\n % (v,)\n )\n\n # populate some params from data files (if they exist, else create new ones)\n environ = environ.copy()\n secrets = {\n \"registration\": \"SYNAPSE_REGISTRATION_SHARED_SECRET\",\n \"macaroon\": \"SYNAPSE_MACAROON_SECRET_KEY\",\n }\n\n for name, secret in secrets.items():\n if secret not in environ:\n filename = \"/data/%s.%s.key\" % (environ[\"SYNAPSE_SERVER_NAME\"], name)\n\n # if the file already exists, load in the existing value; otherwise,\n # generate a new secret and write it to a file\n\n if os.path.exists(filename):\n log(\"Reading %s from %s\" % (secret, filename))\n with open(filename) as handle:\n value = handle.read()\n else:\n log(\"Generating a random secret for {}\".format(secret))\n value = codecs.encode(os.urandom(32), \"hex\").decode()\n with open(filename, \"w\") as handle:\n handle.write(value)\n environ[secret] = value\n\n environ[\"SYNAPSE_APPSERVICES\"] = glob.glob(\"/data/appservices/*.yaml\")\n if not os.path.exists(config_dir):\n os.mkdir(config_dir)\n\n # Convert SYNAPSE_NO_TLS to boolean if exists\n if \"SYNAPSE_NO_TLS\" in environ:\n tlsanswerstring = str.lower(environ[\"SYNAPSE_NO_TLS\"])\n if tlsanswerstring in (\"true\", \"on\", \"1\", \"yes\"):\n environ[\"SYNAPSE_NO_TLS\"] = True\n else:\n if tlsanswerstring in (\"false\", \"off\", \"0\", \"no\"):\n environ[\"SYNAPSE_NO_TLS\"] = False\n else:\n error(\n 'Environment variable \"SYNAPSE_NO_TLS\" found but value \"'\n + tlsanswerstring\n + '\" unrecognized; exiting.'\n )\n\n if \"SYNAPSE_LOG_CONFIG\" not in environ:\n environ[\"SYNAPSE_LOG_CONFIG\"] = config_dir + \"/log.config\"\n\n log(\"Generating synapse config file \" + config_path)\n convert(\"/conf/homeserver.yaml\", config_path, environ)\n\n log_config_file = environ[\"SYNAPSE_LOG_CONFIG\"]\n log(\"Generating log config file \" + log_config_file)\n convert(\"/conf/log.config\", 
log_config_file, environ)\n\n # Hopefully we already have a signing key, but generate one if not.\n args = [\n \"python\",\n \"-m\",\n \"synapse.app.homeserver\",\n \"--config-path\",\n config_path,\n # tell synapse to put generated keys in /data rather than /compiled\n \"--keys-directory\",\n config_dir,\n \"--generate-keys\",\n ]\n\n if ownership is not None:\n subprocess.check_output([\"chown\", \"-R\", ownership, \"/data\"])\n args = [\"gosu\", ownership] + args\n\n subprocess.check_output(args)\n\n\ndef run_generate_config(environ, ownership):\n \"\"\"Run synapse with a --generate-config param to generate a template config file\n\n Args:\n environ (dict): env var dict\n ownership (str|None): \"userid:groupid\" arg for chmod. If None, ownership will not change.\n\n Never returns.\n \"\"\"\n for v in (\"SYNAPSE_SERVER_NAME\", \"SYNAPSE_REPORT_STATS\"):\n if v not in environ:\n error(\"Environment variable '%s' is mandatory in `generate` mode.\" % (v,))\n\n server_name = environ[\"SYNAPSE_SERVER_NAME\"]\n config_dir = environ.get(\"SYNAPSE_CONFIG_DIR\", \"/data\")\n config_path = environ.get(\"SYNAPSE_CONFIG_PATH\", config_dir + \"/homeserver.yaml\")\n data_dir = environ.get(\"SYNAPSE_DATA_DIR\", \"/data\")\n\n # create a suitable log config from our template\n log_config_file = \"%s/%s.log.config\" % (config_dir, server_name)\n if not os.path.exists(log_config_file):\n log(\"Creating log config %s\" % (log_config_file,))\n convert(\"/conf/log.config\", log_config_file, environ)\n\n args = [\n \"python\",\n \"-m\",\n \"synapse.app.homeserver\",\n \"--server-name\",\n server_name,\n \"--report-stats\",\n environ[\"SYNAPSE_REPORT_STATS\"],\n \"--config-path\",\n config_path,\n \"--config-directory\",\n config_dir,\n \"--data-directory\",\n data_dir,\n \"--generate-config\",\n \"--open-private-ports\",\n ]\n # log(\"running %s\" % (args, ))\n\n if ownership is not None:\n # make sure that synapse has perms to write to the data dir.\n subprocess.check_output([\"chown\", ownership, data_dir])\n\n args = [\"gosu\", ownership] + args\n os.execv(\"/usr/sbin/gosu\", args)\n else:\n os.execv(\"/usr/local/bin/python\", args)\n\n\ndef main(args, environ):\n mode = args[1] if len(args) > 1 else None\n desired_uid = int(environ.get(\"UID\", \"991\"))\n desired_gid = int(environ.get(\"GID\", \"991\"))\n synapse_worker = environ.get(\"SYNAPSE_WORKER\", \"synapse.app.homeserver\")\n if (desired_uid == os.getuid()) and (desired_gid == os.getgid()):\n ownership = None\n else:\n ownership = \"{}:{}\".format(desired_uid, desired_gid)\n\n if ownership is None:\n log(\"Will not perform chmod/gosu as UserID already matches request\")\n\n # In generate mode, generate a configuration and missing keys, then exit\n if mode == \"generate\":\n return run_generate_config(environ, ownership)\n\n if mode == \"migrate_config\":\n # generate a config based on environment vars.\n config_dir = environ.get(\"SYNAPSE_CONFIG_DIR\", \"/data\")\n config_path = environ.get(\n \"SYNAPSE_CONFIG_PATH\", config_dir + \"/homeserver.yaml\"\n )\n return generate_config_from_template(\n config_dir, config_path, environ, ownership\n )\n\n if mode is not None:\n error(\"Unknown execution mode '%s'\" % (mode,))\n\n config_dir = environ.get(\"SYNAPSE_CONFIG_DIR\", \"/data\")\n config_path = environ.get(\"SYNAPSE_CONFIG_PATH\", config_dir + \"/homeserver.yaml\")\n\n if not os.path.exists(config_path):\n if \"SYNAPSE_SERVER_NAME\" in environ:\n error(\n \"\"\"\\\nConfig file '%s' does not exist.\n\nThe synapse docker image no longer supports 
generating a config file on-the-fly\nbased on environment variables. You can migrate to a static config file by\nrunning with 'migrate_config'. See the README for more details.\n\"\"\"\n % (config_path,)\n )\n\n error(\n \"Config file '%s' does not exist. You should either create a new \"\n \"config file by running with the `generate` argument (and then edit \"\n \"the resulting file before restarting) or specify the path to an \"\n \"existing config file with the SYNAPSE_CONFIG_PATH variable.\"\n % (config_path,)\n )\n\n log(\"Starting synapse with config file \" + config_path)\n\n args = [\"python\", \"-m\", synapse_worker, \"--config-path\", config_path]\n if ownership is not None:\n args = [\"gosu\", ownership] + args\n os.execv(\"/usr/sbin/gosu\", args)\n else:\n os.execv(\"/usr/local/bin/python\", args)\n\n\nif __name__ == \"__main__\":\n main(sys.argv, os.environ)\n", "path": "docker/start.py"}]}
| 3,287 | 766 |
gh_patches_debug_1664
|
rasdani/github-patches
|
git_diff
|
pytorch__ignite-2852
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[CI] Doc test is failing
Somehow the doctest CI job is failing right now. The failure happens with the following code snippet from our docs:
- https://pytorch.org/ignite/generated/ignite.contrib.metrics.PrecisionRecallCurve.html
```
**********************************************************************
File "../../ignite/contrib/metrics/precision_recall_curve.py", line ?, in default
Failed example:
y_pred = torch.tensor([0.0474, 0.5987, 0.7109, 0.9997])
y_true = torch.tensor([0, 0, 1, 1])
prec_recall_curve = PrecisionRecallCurve()
prec_recall_curve.attach(default_evaluator, 'prec_recall_curve')
state = default_evaluator.run([[y_pred, y_true]])
print("Precision", [round(i, 4) for i in state.metrics['prec_recall_curve'][0].tolist()])
print("Recall", [round(i, 4) for i in state.metrics['prec_recall_curve'][1].tolist()])
print("Thresholds", [round(i, 4) for i in state.metrics['prec_recall_curve'][2].tolist()])
Expected:
Precision [1.0, 1.0, 1.0]
Recall [1.0, 0.5, 0.0]
Thresholds [0.7109, 0.9997]
Got:
Precision [0.5, 0.6667, 1.0, 1.0, 1.0]
Recall [1.0, 1.0, 1.0, 0.5, 0.0]
Thresholds [0.0474, 0.5987, 0.7109, 0.9997]
```
- https://github.com/pytorch/ignite/actions/runs/4099985910/jobs/7074343114
### How to help with this issue
You need to do some detective work:
- Reproduce the issue locally
- Try to figure out which result is correct: "Expected" or "Got"
- Try to figure out why it started to happen: maybe the sklearn version was updated? Previously (for example on Jan 18) the doctest was passing: https://github.com/pytorch/ignite/actions/runs/3894024421/jobs/6647420435
- Report your findings here and propose a way to solve the issue
</issue>
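One quick piece of that detective work can be done without ignite at all: run scikit-learn's own `precision_recall_curve` on the snippet's inputs and compare. A minimal sketch (the outputs noted in the comments are what a recent scikit-learn release produces; older releases may differ, which is the suspected cause):

```python
# Reproduce the metric outside ignite to see which docs output matches sklearn.
from sklearn.metrics import precision_recall_curve

y_true = [0, 0, 1, 1]
y_score = [0.0474, 0.5987, 0.7109, 0.9997]

precision, recall, thresholds = precision_recall_curve(y_true, y_score)
print(precision)   # e.g. [0.5    0.6667 1.     1.     1.    ]
print(recall)      # e.g. [1.  1.  1.  0.5 0. ]
print(thresholds)  # e.g. [0.0474 0.5987 0.7109 0.9997]
```

If this matches the "Got" block above, the documented expected output is the part that needs updating.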
<code>
[start of ignite/contrib/metrics/precision_recall_curve.py]
1 from typing import Any, Callable, cast, Tuple, Union
2
3 import torch
4
5 import ignite.distributed as idist
6 from ignite.exceptions import NotComputableError
7 from ignite.metrics import EpochMetric
8
9
10 def precision_recall_curve_compute_fn(y_preds: torch.Tensor, y_targets: torch.Tensor) -> Tuple[Any, Any, Any]:
11 try:
12 from sklearn.metrics import precision_recall_curve
13 except ImportError:
14 raise ModuleNotFoundError("This contrib module requires scikit-learn to be installed.")
15
16 y_true = y_targets.cpu().numpy()
17 y_pred = y_preds.cpu().numpy()
18 return precision_recall_curve(y_true, y_pred)
19
20
21 class PrecisionRecallCurve(EpochMetric):
22 """Compute precision-recall pairs for different probability thresholds for binary classification task
23 by accumulating predictions and the ground-truth during an epoch and applying
24 `sklearn.metrics.precision_recall_curve <https://scikit-learn.org/stable/modules/generated/
25 sklearn.metrics.precision_recall_curve.html#sklearn.metrics.precision_recall_curve>`_ .
26
27 Args:
28 output_transform: a callable that is used to transform the
29 :class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the
30 form expected by the metric. This can be useful if, for example, you have a multi-output model and
31 you want to compute the metric with respect to one of the outputs.
32 check_compute_fn: Default False. If True, `precision_recall_curve
33 <https://scikit-learn.org/stable/modules/generated/sklearn.metrics.precision_recall_curve.html
34 #sklearn.metrics.precision_recall_curve>`_ is run on the first batch of data to ensure there are
35 no issues. User will be warned in case there are any issues computing the function.
36
37 Note:
38 PrecisionRecallCurve expects y to be comprised of 0's and 1's. y_pred must either be probability estimates
39 or confidence values. To apply an activation to y_pred, use output_transform as shown below:
40
41 .. code-block:: python
42
43 def sigmoid_output_transform(output):
44 y_pred, y = output
45 y_pred = torch.sigmoid(y_pred)
46 return y_pred, y
47 avg_precision = PrecisionRecallCurve(sigmoid_output_transform)
48
49 Examples:
50
51 .. include:: defaults.rst
52 :start-after: :orphan:
53
54 .. testcode::
55
56 y_pred = torch.tensor([0.0474, 0.5987, 0.7109, 0.9997])
57 y_true = torch.tensor([0, 0, 1, 1])
58 prec_recall_curve = PrecisionRecallCurve()
59 prec_recall_curve.attach(default_evaluator, 'prec_recall_curve')
60 state = default_evaluator.run([[y_pred, y_true]])
61
62 print("Precision", [round(i, 4) for i in state.metrics['prec_recall_curve'][0].tolist()])
63 print("Recall", [round(i, 4) for i in state.metrics['prec_recall_curve'][1].tolist()])
64 print("Thresholds", [round(i, 4) for i in state.metrics['prec_recall_curve'][2].tolist()])
65
66 .. testoutput::
67
68 Precision [1.0, 1.0, 1.0]
69 Recall [1.0, 0.5, 0.0]
70 Thresholds [0.7109, 0.9997]
71
72 """
73
74 def __init__(
75 self,
76 output_transform: Callable = lambda x: x,
77 check_compute_fn: bool = False,
78 device: Union[str, torch.device] = torch.device("cpu"),
79 ) -> None:
80 super(PrecisionRecallCurve, self).__init__(
81 precision_recall_curve_compute_fn,
82 output_transform=output_transform,
83 check_compute_fn=check_compute_fn,
84 device=device,
85 )
86
87 def compute(self) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
88 if len(self._predictions) < 1 or len(self._targets) < 1:
89 raise NotComputableError("PrecisionRecallCurve must have at least one example before it can be computed.")
90
91 _prediction_tensor = torch.cat(self._predictions, dim=0)
92 _target_tensor = torch.cat(self._targets, dim=0)
93
94 ws = idist.get_world_size()
95 if ws > 1 and not self._is_reduced:
96 # All gather across all processes
97 _prediction_tensor = cast(torch.Tensor, idist.all_gather(_prediction_tensor))
98 _target_tensor = cast(torch.Tensor, idist.all_gather(_target_tensor))
99 self._is_reduced = True
100
101 if idist.get_rank() == 0:
102 # Run compute_fn on zero rank only
103 precision, recall, thresholds = self.compute_fn(_prediction_tensor, _target_tensor)
104 precision = torch.tensor(precision)
105 recall = torch.tensor(recall)
106 # thresholds can have negative strides, not compatible with torch tensors
107 # https://discuss.pytorch.org/t/negative-strides-in-tensor-error/134287/2
108 thresholds = torch.tensor(thresholds.copy())
109 else:
110 precision, recall, thresholds = None, None, None
111
112 if ws > 1:
113 # broadcast result to all processes
114 precision = idist.broadcast(precision, src=0, safe_mode=True)
115 recall = idist.broadcast(recall, src=0, safe_mode=True)
116 thresholds = idist.broadcast(thresholds, src=0, safe_mode=True)
117
118 return precision, recall, thresholds
119
[end of ignite/contrib/metrics/precision_recall_curve.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/ignite/contrib/metrics/precision_recall_curve.py b/ignite/contrib/metrics/precision_recall_curve.py
--- a/ignite/contrib/metrics/precision_recall_curve.py
+++ b/ignite/contrib/metrics/precision_recall_curve.py
@@ -65,9 +65,9 @@
.. testoutput::
- Precision [1.0, 1.0, 1.0]
- Recall [1.0, 0.5, 0.0]
- Thresholds [0.7109, 0.9997]
+ Precision [0.5, 0.6667, 1.0, 1.0, 1.0]
+ Recall [1.0, 1.0, 1.0, 0.5, 0.0]
+ Thresholds [0.0474, 0.5987, 0.7109, 0.9997]
"""
|
{"golden_diff": "diff --git a/ignite/contrib/metrics/precision_recall_curve.py b/ignite/contrib/metrics/precision_recall_curve.py\n--- a/ignite/contrib/metrics/precision_recall_curve.py\n+++ b/ignite/contrib/metrics/precision_recall_curve.py\n@@ -65,9 +65,9 @@\n \n .. testoutput::\n \n- Precision [1.0, 1.0, 1.0]\n- Recall [1.0, 0.5, 0.0]\n- Thresholds [0.7109, 0.9997]\n+ Precision [0.5, 0.6667, 1.0, 1.0, 1.0]\n+ Recall [1.0, 1.0, 1.0, 0.5, 0.0]\n+ Thresholds [0.0474, 0.5987, 0.7109, 0.9997]\n \n \"\"\"\n", "issue": "[CI] Doc test is failing\nWe have somehow doctest ci job failing right now. The failure happens with the following code snippet from our docs: \r\n- https://pytorch.org/ignite/generated/ignite.contrib.metrics.PrecisionRecallCurve.html\r\n\r\n```\r\n **********************************************************************\r\nFile \"../../ignite/contrib/metrics/precision_recall_curve.py\", line ?, in default\r\nFailed example:\r\n y_pred = torch.tensor([0.0474, 0.5987, 0.7109, 0.9997])\r\n y_true = torch.tensor([0, 0, 1, 1])\r\n prec_recall_curve = PrecisionRecallCurve()\r\n prec_recall_curve.attach(default_evaluator, 'prec_recall_curve')\r\n state = default_evaluator.run([[y_pred, y_true]])\r\n\r\n print(\"Precision\", [round(i, 4) for i in state.metrics['prec_recall_curve'][0].tolist()])\r\n print(\"Recall\", [round(i, 4) for i in state.metrics['prec_recall_curve'][1].tolist()])\r\n print(\"Thresholds\", [round(i, 4) for i in state.metrics['prec_recall_curve'][2].tolist()])\r\nExpected:\r\n Precision [1.0, 1.0, 1.0]\r\n Recall [1.0, 0.5, 0.0]\r\n Thresholds [0.7109, 0.9997]\r\nGot:\r\n Precision [0.5, 0.6667, 1.0, 1.0, 1.0]\r\n Recall [1.0, 1.0, 1.0, 0.5, 0.0]\r\n Thresholds [0.0474, 0.5987, 0.7109, 0.9997]\r\n```\r\n- https://github.com/pytorch/ignite/actions/runs/4099985910/jobs/7074343114\r\n\r\n### How to help with this issue\r\n\r\nYou need to do some detective work:\r\n- Reproduce the issue locally\r\n- Try to figure out which result is correct: \"Expected\" or \"Got\"\r\n- Try to figure out why it started to happen: maybe sklearn version updated ? Previously, for example Jan 18, doctest was passing: https://github.com/pytorch/ignite/actions/runs/3894024421/jobs/6647420435\r\n- Report here your findings and propose a way to solve the issue\r\n\n", "before_files": [{"content": "from typing import Any, Callable, cast, Tuple, Union\n\nimport torch\n\nimport ignite.distributed as idist\nfrom ignite.exceptions import NotComputableError\nfrom ignite.metrics import EpochMetric\n\n\ndef precision_recall_curve_compute_fn(y_preds: torch.Tensor, y_targets: torch.Tensor) -> Tuple[Any, Any, Any]:\n try:\n from sklearn.metrics import precision_recall_curve\n except ImportError:\n raise ModuleNotFoundError(\"This contrib module requires scikit-learn to be installed.\")\n\n y_true = y_targets.cpu().numpy()\n y_pred = y_preds.cpu().numpy()\n return precision_recall_curve(y_true, y_pred)\n\n\nclass PrecisionRecallCurve(EpochMetric):\n \"\"\"Compute precision-recall pairs for different probability thresholds for binary classification task\n by accumulating predictions and the ground-truth during an epoch and applying\n `sklearn.metrics.precision_recall_curve <https://scikit-learn.org/stable/modules/generated/\n sklearn.metrics.precision_recall_curve.html#sklearn.metrics.precision_recall_curve>`_ .\n\n Args:\n output_transform: a callable that is used to transform the\n :class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the\n form expected by the metric. 
This can be useful if, for example, you have a multi-output model and\n you want to compute the metric with respect to one of the outputs.\n check_compute_fn: Default False. If True, `precision_recall_curve\n <https://scikit-learn.org/stable/modules/generated/sklearn.metrics.precision_recall_curve.html\n #sklearn.metrics.precision_recall_curve>`_ is run on the first batch of data to ensure there are\n no issues. User will be warned in case there are any issues computing the function.\n\n Note:\n PrecisionRecallCurve expects y to be comprised of 0's and 1's. y_pred must either be probability estimates\n or confidence values. To apply an activation to y_pred, use output_transform as shown below:\n\n .. code-block:: python\n\n def sigmoid_output_transform(output):\n y_pred, y = output\n y_pred = torch.sigmoid(y_pred)\n return y_pred, y\n avg_precision = PrecisionRecallCurve(sigmoid_output_transform)\n\n Examples:\n\n .. include:: defaults.rst\n :start-after: :orphan:\n\n .. testcode::\n\n y_pred = torch.tensor([0.0474, 0.5987, 0.7109, 0.9997])\n y_true = torch.tensor([0, 0, 1, 1])\n prec_recall_curve = PrecisionRecallCurve()\n prec_recall_curve.attach(default_evaluator, 'prec_recall_curve')\n state = default_evaluator.run([[y_pred, y_true]])\n\n print(\"Precision\", [round(i, 4) for i in state.metrics['prec_recall_curve'][0].tolist()])\n print(\"Recall\", [round(i, 4) for i in state.metrics['prec_recall_curve'][1].tolist()])\n print(\"Thresholds\", [round(i, 4) for i in state.metrics['prec_recall_curve'][2].tolist()])\n\n .. testoutput::\n\n Precision [1.0, 1.0, 1.0]\n Recall [1.0, 0.5, 0.0]\n Thresholds [0.7109, 0.9997]\n\n \"\"\"\n\n def __init__(\n self,\n output_transform: Callable = lambda x: x,\n check_compute_fn: bool = False,\n device: Union[str, torch.device] = torch.device(\"cpu\"),\n ) -> None:\n super(PrecisionRecallCurve, self).__init__(\n precision_recall_curve_compute_fn,\n output_transform=output_transform,\n check_compute_fn=check_compute_fn,\n device=device,\n )\n\n def compute(self) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:\n if len(self._predictions) < 1 or len(self._targets) < 1:\n raise NotComputableError(\"PrecisionRecallCurve must have at least one example before it can be computed.\")\n\n _prediction_tensor = torch.cat(self._predictions, dim=0)\n _target_tensor = torch.cat(self._targets, dim=0)\n\n ws = idist.get_world_size()\n if ws > 1 and not self._is_reduced:\n # All gather across all processes\n _prediction_tensor = cast(torch.Tensor, idist.all_gather(_prediction_tensor))\n _target_tensor = cast(torch.Tensor, idist.all_gather(_target_tensor))\n self._is_reduced = True\n\n if idist.get_rank() == 0:\n # Run compute_fn on zero rank only\n precision, recall, thresholds = self.compute_fn(_prediction_tensor, _target_tensor)\n precision = torch.tensor(precision)\n recall = torch.tensor(recall)\n # thresholds can have negative strides, not compatible with torch tensors\n # https://discuss.pytorch.org/t/negative-strides-in-tensor-error/134287/2\n thresholds = torch.tensor(thresholds.copy())\n else:\n precision, recall, thresholds = None, None, None\n\n if ws > 1:\n # broadcast result to all processes\n precision = idist.broadcast(precision, src=0, safe_mode=True)\n recall = idist.broadcast(recall, src=0, safe_mode=True)\n thresholds = idist.broadcast(thresholds, src=0, safe_mode=True)\n\n return precision, recall, thresholds\n", "path": "ignite/contrib/metrics/precision_recall_curve.py"}]}
| 2,560 | 229 |
gh_patches_debug_30505
|
rasdani/github-patches
|
git_diff
|
keras-team__autokeras-459
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
improve code quality using Codacy
<!---
**If you are reporting a bug:**
* Verify that your issue is not being currently addressed by other issues or pull requests.
* Please note that Auto-Keras is only compatible with **Python 3.6**.
* Tag the issue with the `bug report` tag.
-->
### Bug Description
<!---
A clear and concise description of what the bug is.
-->
We are now using Codacy and CodeClimate to improve our code quality.
Please try to solve anything reported by Codacy by following this [link](https://app.codacy.com/manual/jhfjhfj1/autokeras/issues/index).
On the page, you can see a list of **issues**.
You can click each issue to see its details and suggestions for fixing it.
Thanks.
</issue>
<code>
[start of autokeras/pretrained/voice_generator/deepvoice3_pytorch/conv.py]
1 # coding: utf-8
2 from torch import nn
3 from torch.nn import functional as F
4
5
6 class Conv1d(nn.Conv1d):
7 """Extended nn.Conv1d for incremental dilated convolutions
8 """
9
10 def __init__(self, *args, **kwargs):
11 super().__init__(*args, **kwargs)
12 self.clear_buffer()
13 self._linearized_weight = None
14
15 def incremental_forward(self, input):
16
17 # reshape weight
18 weight = self._get_linearized_weight()
19 kw = self.kernel_size[0]
20 dilation = self.dilation[0]
21
22 bsz = input.size(0) # input: bsz x len x dim
23 if kw > 1:
24 input = input.data
25 if self.input_buffer is None:
26 self.input_buffer = input.new(bsz, kw + (kw - 1) * (dilation - 1), input.size(2))
27 self.input_buffer.zero_()
28 else:
29 # shift buffer
30 self.input_buffer[:, :-1, :] = self.input_buffer[:, 1:, :].clone()
31 # append next input
32 self.input_buffer[:, -1, :] = input[:, -1, :]
33 input = self.input_buffer
34 if dilation > 1:
35 input = input[:, 0::dilation, :].contiguous()
36 output = F.linear(input.view(bsz, -1), weight, self.bias)
37 return output.view(bsz, 1, -1)
38
39 def clear_buffer(self):
40 self.input_buffer = None
41
42 def _get_linearized_weight(self):
43 if self._linearized_weight is None:
44 kw = self.kernel_size[0]
45 # nn.Conv1d
46 weight = self.weight.transpose(1, 2).contiguous()
47
48 assert weight.size() == (self.out_channels, kw, self.in_channels)
49 self._linearized_weight = weight.view(self.out_channels, -1)
50 return self._linearized_weight
51
[end of autokeras/pretrained/voice_generator/deepvoice3_pytorch/conv.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/autokeras/pretrained/voice_generator/deepvoice3_pytorch/conv.py b/autokeras/pretrained/voice_generator/deepvoice3_pytorch/conv.py
--- a/autokeras/pretrained/voice_generator/deepvoice3_pytorch/conv.py
+++ b/autokeras/pretrained/voice_generator/deepvoice3_pytorch/conv.py
@@ -12,29 +12,29 @@
self.clear_buffer()
self._linearized_weight = None
- def incremental_forward(self, input):
+ def incremental_forward(self, input_data):
# reshape weight
weight = self._get_linearized_weight()
kw = self.kernel_size[0]
dilation = self.dilation[0]
- bsz = input.size(0) # input: bsz x len x dim
+ bsz = input_data.size(0) # conv_input: bsz x len x dim
if kw > 1:
- input = input.data
+ input_data = input_data.data
if self.input_buffer is None:
- self.input_buffer = input.new(bsz, kw + (kw - 1) * (dilation - 1), input.size(2))
+ self.input_buffer = input_data.new(bsz, kw + (kw - 1) * (dilation - 1), input_data.size(2))
self.input_buffer.zero_()
else:
# shift buffer
self.input_buffer[:, :-1, :] = self.input_buffer[:, 1:, :].clone()
# append next input
- self.input_buffer[:, -1, :] = input[:, -1, :]
- input = self.input_buffer
+ self.input_buffer[:, -1, :] = input_data[:, -1, :]
+ input_data = self.input_buffer
if dilation > 1:
- input = input[:, 0::dilation, :].contiguous()
- output = F.linear(input.view(bsz, -1), weight, self.bias)
- return output.view(bsz, 1, -1)
+ input_data = input_data[:, 0::dilation, :].contiguous()
+ input_data = F.linear(input_data.view(bsz, -1), weight, self.bias)
+ return input_data.view(bsz, 1, -1)
def clear_buffer(self):
self.input_buffer = None
|
{"golden_diff": "diff --git a/autokeras/pretrained/voice_generator/deepvoice3_pytorch/conv.py b/autokeras/pretrained/voice_generator/deepvoice3_pytorch/conv.py\n--- a/autokeras/pretrained/voice_generator/deepvoice3_pytorch/conv.py\n+++ b/autokeras/pretrained/voice_generator/deepvoice3_pytorch/conv.py\n@@ -12,29 +12,29 @@\n self.clear_buffer()\n self._linearized_weight = None\n \n- def incremental_forward(self, input):\n+ def incremental_forward(self, input_data):\n \n # reshape weight\n weight = self._get_linearized_weight()\n kw = self.kernel_size[0]\n dilation = self.dilation[0]\n \n- bsz = input.size(0) # input: bsz x len x dim\n+ bsz = input_data.size(0) # conv_input: bsz x len x dim\n if kw > 1:\n- input = input.data\n+ input_data = input_data.data\n if self.input_buffer is None:\n- self.input_buffer = input.new(bsz, kw + (kw - 1) * (dilation - 1), input.size(2))\n+ self.input_buffer = input_data.new(bsz, kw + (kw - 1) * (dilation - 1), input_data.size(2))\n self.input_buffer.zero_()\n else:\n # shift buffer\n self.input_buffer[:, :-1, :] = self.input_buffer[:, 1:, :].clone()\n # append next input\n- self.input_buffer[:, -1, :] = input[:, -1, :]\n- input = self.input_buffer\n+ self.input_buffer[:, -1, :] = input_data[:, -1, :]\n+ input_data = self.input_buffer\n if dilation > 1:\n- input = input[:, 0::dilation, :].contiguous()\n- output = F.linear(input.view(bsz, -1), weight, self.bias)\n- return output.view(bsz, 1, -1)\n+ input_data = input_data[:, 0::dilation, :].contiguous()\n+ input_data = F.linear(input_data.view(bsz, -1), weight, self.bias)\n+ return input_data.view(bsz, 1, -1)\n \n def clear_buffer(self):\n self.input_buffer = None\n", "issue": "improve code quality using Codacy\n<!---\r\n**If you are reporting a bug:**\r\n* Verify that your issue is not being currently addressed by other issues or pull requests.\r\n* Please note that Auto-Keras is only compatible with **Python 3.6**.\r\n* Tag the issue with the `bug report` tag.\r\n-->\r\n\r\n### Bug Description\r\n<!---\r\nA clear and concise description of what the bug is.\r\n-->\r\nWe are now using Codacy and CodeClimate to improve our code quality.\r\n\r\nPlease try to solve anything reported by Codacy by following this [link](https://app.codacy.com/manual/jhfjhfj1/autokeras/issues/index).\r\nOn the page, you can see a list of **issues**.\r\nYou can click to see the details and suggestions to fix the issue.\r\n\r\nThanks.\n", "before_files": [{"content": "# coding: utf-8\nfrom torch import nn\nfrom torch.nn import functional as F\n\n\nclass Conv1d(nn.Conv1d):\n \"\"\"Extended nn.Conv1d for incremental dilated convolutions\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.clear_buffer()\n self._linearized_weight = None\n\n def incremental_forward(self, input):\n\n # reshape weight\n weight = self._get_linearized_weight()\n kw = self.kernel_size[0]\n dilation = self.dilation[0]\n\n bsz = input.size(0) # input: bsz x len x dim\n if kw > 1:\n input = input.data\n if self.input_buffer is None:\n self.input_buffer = input.new(bsz, kw + (kw - 1) * (dilation - 1), input.size(2))\n self.input_buffer.zero_()\n else:\n # shift buffer\n self.input_buffer[:, :-1, :] = self.input_buffer[:, 1:, :].clone()\n # append next input\n self.input_buffer[:, -1, :] = input[:, -1, :]\n input = self.input_buffer\n if dilation > 1:\n input = input[:, 0::dilation, :].contiguous()\n output = F.linear(input.view(bsz, -1), weight, self.bias)\n return output.view(bsz, 1, -1)\n\n def clear_buffer(self):\n 
self.input_buffer = None\n\n def _get_linearized_weight(self):\n if self._linearized_weight is None:\n kw = self.kernel_size[0]\n # nn.Conv1d\n weight = self.weight.transpose(1, 2).contiguous()\n\n assert weight.size() == (self.out_channels, kw, self.in_channels)\n self._linearized_weight = weight.view(self.out_channels, -1)\n return self._linearized_weight\n", "path": "autokeras/pretrained/voice_generator/deepvoice3_pytorch/conv.py"}]}
| 1,248 | 526 |
gh_patches_debug_8653
|
rasdani/github-patches
|
git_diff
|
Health-Informatics-UoN__Carrot-Mapper-732
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Scan Report Edit Table
</issue>
<code>
[start of app/api/proxy/urls.py]
1 from config import settings
2 from django.urls import re_path
3 from revproxy.views import ProxyView
4
5 # A set of urls that will override any root paths requested, and proxy them to the Next.js app.
6 urlpatterns = [
7 # /scanreports/ and escape any further paths
8 re_path(
9 r"^scanreports/(?P<path>(?!create))$",
10 ProxyView.as_view(upstream=f"{settings.NEXTJS_URL}/scanreports"),
11 name="scan-report-list",
12 ),
13 re_path(
14 r"^scanreports/(?P<path>\d+)/?$",
15 ProxyView.as_view(upstream=f"{settings.NEXTJS_URL}/scanreports/"),
16 name="scan-report-tables",
17 ),
18 re_path(
19 r"^scanreports/(?P<path>\d+/tables/\d+)/$",
20 ProxyView.as_view(upstream=f"{settings.NEXTJS_URL}/scanreports/"),
21 name="scan-report-fields",
22 ),
23 re_path(
24 r"^scanreports/(?P<path>\d+/tables/\d+/fields/\d+)/$",
25 ProxyView.as_view(upstream=f"{settings.NEXTJS_URL}/scanreports/"),
26 name="scan-report-values",
27 ),
28 re_path(
29 r"^datasets/(?P<path>(?![\d/]).*)$",
30 ProxyView.as_view(upstream=f"{settings.NEXTJS_URL}/datasets"),
31 name="datasets-list",
32 ),
33 re_path(
34 r"^datasets/(?P<path>\d+)/?$",
35 ProxyView.as_view(upstream=f"{settings.NEXTJS_URL}/datasets"),
36 name="datasets-scanreports-list",
37 ),
38 re_path(
39 "_next/(?P<path>.*)$",
40 ProxyView.as_view(upstream=f"{settings.NEXTJS_URL}/_next"),
41 ),
42 ]
43
[end of app/api/proxy/urls.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/app/api/proxy/urls.py b/app/api/proxy/urls.py
--- a/app/api/proxy/urls.py
+++ b/app/api/proxy/urls.py
@@ -25,6 +25,11 @@
ProxyView.as_view(upstream=f"{settings.NEXTJS_URL}/scanreports/"),
name="scan-report-values",
),
+ re_path(
+ r"^scanreports/(?P<path>\d+/tables/\d+/update)/$",
+ ProxyView.as_view(upstream=f"{settings.NEXTJS_URL}/scanreports/"),
+ name="scan-report-edit-table",
+ ),
re_path(
r"^datasets/(?P<path>(?![\d/]).*)$",
ProxyView.as_view(upstream=f"{settings.NEXTJS_URL}/datasets"),
|
{"golden_diff": "diff --git a/app/api/proxy/urls.py b/app/api/proxy/urls.py\n--- a/app/api/proxy/urls.py\n+++ b/app/api/proxy/urls.py\n@@ -25,6 +25,11 @@\n ProxyView.as_view(upstream=f\"{settings.NEXTJS_URL}/scanreports/\"),\n name=\"scan-report-values\",\n ),\n+ re_path(\n+ r\"^scanreports/(?P<path>\\d+/tables/\\d+/update)/$\",\n+ ProxyView.as_view(upstream=f\"{settings.NEXTJS_URL}/scanreports/\"),\n+ name=\"scan-report-edit-table\",\n+ ),\n re_path(\n r\"^datasets/(?P<path>(?![\\d/]).*)$\",\n ProxyView.as_view(upstream=f\"{settings.NEXTJS_URL}/datasets\"),\n", "issue": "Scan Report Edit Table\n\n", "before_files": [{"content": "from config import settings\nfrom django.urls import re_path\nfrom revproxy.views import ProxyView\n\n# A set of urls that will override any root paths requested, and proxy them to the Next.js app.\nurlpatterns = [\n # /scanreports/ and escape any further paths\n re_path(\n r\"^scanreports/(?P<path>(?!create))$\",\n ProxyView.as_view(upstream=f\"{settings.NEXTJS_URL}/scanreports\"),\n name=\"scan-report-list\",\n ),\n re_path(\n r\"^scanreports/(?P<path>\\d+)/?$\",\n ProxyView.as_view(upstream=f\"{settings.NEXTJS_URL}/scanreports/\"),\n name=\"scan-report-tables\",\n ),\n re_path(\n r\"^scanreports/(?P<path>\\d+/tables/\\d+)/$\",\n ProxyView.as_view(upstream=f\"{settings.NEXTJS_URL}/scanreports/\"),\n name=\"scan-report-fields\",\n ),\n re_path(\n r\"^scanreports/(?P<path>\\d+/tables/\\d+/fields/\\d+)/$\",\n ProxyView.as_view(upstream=f\"{settings.NEXTJS_URL}/scanreports/\"),\n name=\"scan-report-values\",\n ),\n re_path(\n r\"^datasets/(?P<path>(?![\\d/]).*)$\",\n ProxyView.as_view(upstream=f\"{settings.NEXTJS_URL}/datasets\"),\n name=\"datasets-list\",\n ),\n re_path(\n r\"^datasets/(?P<path>\\d+)/?$\",\n ProxyView.as_view(upstream=f\"{settings.NEXTJS_URL}/datasets\"),\n name=\"datasets-scanreports-list\",\n ),\n re_path(\n \"_next/(?P<path>.*)$\",\n ProxyView.as_view(upstream=f\"{settings.NEXTJS_URL}/_next\"),\n ),\n]\n", "path": "app/api/proxy/urls.py"}]}
| 997 | 176 |
gh_patches_debug_17188
|
rasdani/github-patches
|
git_diff
|
WordPress__openverse-api-934
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Oembed endpoint raises uncaught error on non-existent identifier
## Sentry link
<!-- The public (aka "share") Sentry issue link. -->
https://sentry.io/share/issue/797fb240716d49efaf6f0b762e3484dd/
## Description
<!-- Example: We are trying to access property foo of ImportantClass but the instance is null. -->
<!-- Include any additional information you may have, including potential remedies if any come to mind, and the general context of the code (what causes it to run in the app). -->
When the `/v1/images/oembed/` URL receives an invalid identifier or URL for the `url` query parameter, an `AssertionError` is raised. The endpoint should probably return a 4XX error instead.
<!-- Mention whether this is a known regression, i.e., the feature used to work and now does not. -->
## Reproduction
<!-- Share the steps to reproduce the issue, if you were able to, OR a note sharing that you tried to reproduce but weren’t able to. -->
1. `just up`
2. Visit: http://localhost:50280/v1/images/oembed/?url=00000000-0000-0000-0000-000000000000
</issue>
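For context on why an uncaught error surfaces here: in Django REST Framework an `APIException` only turns into a proper error response if it is raised — returning the exception object from a view is not handled. A minimal, generic sketch of the pattern (not the project's actual helper):

```python
# Generic DRF pattern: raise the exception so DRF's handler renders a 404,
# instead of returning the exception object from the view.
from rest_framework.exceptions import NotFound

def get_image_or_404(queryset, identifier):
    try:
        return queryset.get(identifier=identifier)
    except queryset.model.DoesNotExist:
        raise NotFound("Could not find image.")
```

With the reproduction URL above, the request should then come back as a 4XX instead of an uncaught server error.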
<code>
[start of api/catalog/api/views/image_views.py]
1 import io
2 import struct
3
4 from django.conf import settings
5 from django.http.response import FileResponse, HttpResponse
6 from django.utils.decorators import method_decorator
7 from rest_framework.decorators import action
8 from rest_framework.exceptions import NotFound
9 from rest_framework.response import Response
10
11 import piexif
12 import requests
13 from drf_yasg.utils import swagger_auto_schema
14 from PIL import Image as PILImage
15
16 from catalog.api.constants.media_types import IMAGE_TYPE
17 from catalog.api.docs.image_docs import (
18 ImageComplain,
19 ImageDetail,
20 ImageOembed,
21 ImageRelated,
22 ImageSearch,
23 ImageStats,
24 ImageThumbnail,
25 )
26 from catalog.api.models import Image
27 from catalog.api.serializers.image_serializers import (
28 ImageReportRequestSerializer,
29 ImageSearchRequestSerializer,
30 ImageSerializer,
31 OembedRequestSerializer,
32 OembedSerializer,
33 WatermarkRequestSerializer,
34 )
35 from catalog.api.serializers.media_serializers import MediaThumbnailRequestSerializer
36 from catalog.api.utils.exceptions import get_api_exception
37 from catalog.api.utils.throttle import (
38 AnonThumbnailRateThrottle,
39 OAuth2IdThumbnailRateThrottle,
40 )
41 from catalog.api.utils.watermark import watermark
42 from catalog.api.views.media_views import MediaViewSet
43
44
45 @method_decorator(swagger_auto_schema(**ImageSearch.swagger_setup), "list")
46 @method_decorator(swagger_auto_schema(**ImageStats.swagger_setup), "stats")
47 @method_decorator(swagger_auto_schema(**ImageDetail.swagger_setup), "retrieve")
48 @method_decorator(swagger_auto_schema(**ImageRelated.swagger_setup), "related")
49 @method_decorator(swagger_auto_schema(**ImageComplain.swagger_setup), "report")
50 @method_decorator(swagger_auto_schema(**ImageOembed.swagger_setup), "oembed")
51 @method_decorator(swagger_auto_schema(**ImageThumbnail.swagger_setup), "thumbnail")
52 @method_decorator(swagger_auto_schema(auto_schema=None), "watermark")
53 class ImageViewSet(MediaViewSet):
54 """
55 Viewset for all endpoints pertaining to images.
56 """
57
58 model_class = Image
59 query_serializer_class = ImageSearchRequestSerializer
60 default_index = settings.MEDIA_INDEX_MAPPING[IMAGE_TYPE]
61 qa_index = "search-qa-image"
62
63 serializer_class = ImageSerializer
64
65 OEMBED_HEADERS = {
66 "User-Agent": settings.OUTBOUND_USER_AGENT_TEMPLATE.format(purpose="OEmbed"),
67 }
68
69 # Extra actions
70
71 @action(
72 detail=False,
73 url_path="oembed",
74 url_name="oembed",
75 serializer_class=OembedSerializer,
76 )
77 def oembed(self, request, *_, **__):
78 params = OembedRequestSerializer(data=request.query_params)
79 params.is_valid(raise_exception=True)
80
81 context = self.get_serializer_context()
82
83 url = params.validated_data["url"]
84 identifier = url.rsplit("/", 1)[1]
85 try:
86 image = self.get_queryset().get(identifier=identifier)
87 except Image.DoesNotExist:
88 return get_api_exception("Could not find image.", 404)
89 if not (image.height and image.width):
90 image_file = requests.get(image.url, headers=self.OEMBED_HEADERS)
91 width, height = PILImage.open(io.BytesIO(image_file.content)).size
92 context |= {
93 "width": width,
94 "height": height,
95 }
96
97 serializer = self.get_serializer(image, context=context)
98 return Response(data=serializer.data)
99
100 @action(
101 detail=True,
102 url_path="thumb",
103 url_name="thumb",
104 serializer_class=MediaThumbnailRequestSerializer,
105 throttle_classes=[AnonThumbnailRateThrottle, OAuth2IdThumbnailRateThrottle],
106 )
107 def thumbnail(self, request, *_, **__):
108 image = self.get_object()
109
110 image_url = image.url
111 if not image_url:
112 raise get_api_exception("Could not find image.", 404)
113
114 return super().thumbnail(image_url, request)
115
116 @action(detail=True, url_path="watermark", url_name="watermark")
117 def watermark(self, request, *_, **__):
118 if not settings.WATERMARK_ENABLED:
119 raise NotFound("The watermark feature is currently disabled.")
120
121 params = WatermarkRequestSerializer(data=request.query_params)
122 params.is_valid(raise_exception=True)
123
124 image = self.get_object()
125 image_url = image.url
126 image_info = {
127 attr: getattr(image, attr)
128 for attr in ["title", "creator", "license", "license_version"]
129 }
130
131 # Create the actual watermarked image.
132 watermarked, exif = watermark(image_url, image_info, params.data["watermark"])
133 # Re-insert EXIF metadata.
134 if exif:
135 # piexif dump raises InvalidImageDataError which is a child class
136 # of ValueError, and a struct error when the value is not
137 # between -2147483648 and 2147483647
138 # https://github.com/WordPress/openverse-api/issues/849
139 try:
140 exif_bytes = piexif.dump(exif)
141 except (struct.error, ValueError):
142 exif_bytes = None
143 else:
144 exif_bytes = None
145 img_bytes = io.BytesIO()
146 self._save_wrapper(watermarked, exif_bytes, img_bytes)
147
148 if params.data["embed_metadata"]:
149 # Embed ccREL metadata with XMP.
150 work_properties = {
151 "creator": image.creator,
152 "license_url": image.license_url,
153 "attribution": image.attribution,
154 "work_landing_page": image.foreign_landing_url,
155 "identifier": str(image.identifier),
156 }
157
158 # Import inside a function to allow server run without Exempi library
159 import libxmp
160
161 from catalog.api.utils import ccrel
162
163 try:
164 with_xmp = ccrel.embed_xmp_bytes(img_bytes, work_properties)
165 return FileResponse(with_xmp, content_type="image/jpeg")
166 except (libxmp.XMPError, AttributeError):
167 # Just send the EXIF-ified file if libxmp fails to add metadata
168 response = HttpResponse(content_type="image/jpeg")
169 self._save_wrapper(watermarked, exif_bytes, response)
170 return response
171 else:
172 response = HttpResponse(img_bytes, content_type="image/jpeg")
173 self._save_wrapper(watermarked, exif_bytes, response)
174 return response
175
176 @action(
177 detail=True,
178 methods=["post"],
179 serializer_class=ImageReportRequestSerializer,
180 )
181 def report(self, *args, **kwargs):
182 return super().report(*args, **kwargs)
183
184 # Helper functions
185
186 @staticmethod
187 def _save_wrapper(pil_img, exif_bytes, destination):
188 """
189 PIL crashes if exif_bytes=None, so we have to wrap it to avoid littering
190 the code with branches.
191 """
192 if exif_bytes:
193 pil_img.save(destination, "jpeg", exif=exif_bytes)
194 else:
195 pil_img.save(destination, "jpeg")
196
[end of api/catalog/api/views/image_views.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/api/catalog/api/views/image_views.py b/api/catalog/api/views/image_views.py
--- a/api/catalog/api/views/image_views.py
+++ b/api/catalog/api/views/image_views.py
@@ -81,11 +81,13 @@
context = self.get_serializer_context()
url = params.validated_data["url"]
+ if url.endswith("/"):
+ url = url[:-1]
identifier = url.rsplit("/", 1)[1]
try:
image = self.get_queryset().get(identifier=identifier)
except Image.DoesNotExist:
- return get_api_exception("Could not find image.", 404)
+ raise get_api_exception("Could not find image.", 404)
if not (image.height and image.width):
image_file = requests.get(image.url, headers=self.OEMBED_HEADERS)
width, height = PILImage.open(io.BytesIO(image_file.content)).size
|
{"golden_diff": "diff --git a/api/catalog/api/views/image_views.py b/api/catalog/api/views/image_views.py\n--- a/api/catalog/api/views/image_views.py\n+++ b/api/catalog/api/views/image_views.py\n@@ -81,11 +81,13 @@\n context = self.get_serializer_context()\n \n url = params.validated_data[\"url\"]\n+ if url.endswith(\"/\"):\n+ url = url[:-1]\n identifier = url.rsplit(\"/\", 1)[1]\n try:\n image = self.get_queryset().get(identifier=identifier)\n except Image.DoesNotExist:\n- return get_api_exception(\"Could not find image.\", 404)\n+ raise get_api_exception(\"Could not find image.\", 404)\n if not (image.height and image.width):\n image_file = requests.get(image.url, headers=self.OEMBED_HEADERS)\n width, height = PILImage.open(io.BytesIO(image_file.content)).size\n", "issue": "Oembed endpoint raises uncaught error on non-existent identifier\n## Sentry link\n\n<!-- The public (aka \"share\") Sentry issue link. -->\nhttps://sentry.io/share/issue/797fb240716d49efaf6f0b762e3484dd/\n\n## Description\n\n<!-- Example: We are trying to access property foo of ImportantClass but the instance is null. -->\n<!-- Include any additional information you may have, including potential remedies if any come to mind, and the general context of the code (what causes it to run in the app). -->\nWhen the `/v1/images/oembed/` URL receives an invalid identifier or URL for the `url` query parameter, an `AssertionError` is raised. This should probably return a 4XX error instead.\n\n<!-- Mention whether this is a known regression, i.e., the feature used to work and now does not. -->\n\n## Reproduction\n\n<!-- Share the steps to reproduce the issue, if you were able to, OR a note sharing that you tried to reproduce but weren\u2019t able to. -->\n1. `just up`\n2. Visit: http://localhost:50280/v1/images/oembed/?url=00000000-0000-0000-0000-000000000000\n\n", "before_files": [{"content": "import io\nimport struct\n\nfrom django.conf import settings\nfrom django.http.response import FileResponse, HttpResponse\nfrom django.utils.decorators import method_decorator\nfrom rest_framework.decorators import action\nfrom rest_framework.exceptions import NotFound\nfrom rest_framework.response import Response\n\nimport piexif\nimport requests\nfrom drf_yasg.utils import swagger_auto_schema\nfrom PIL import Image as PILImage\n\nfrom catalog.api.constants.media_types import IMAGE_TYPE\nfrom catalog.api.docs.image_docs import (\n ImageComplain,\n ImageDetail,\n ImageOembed,\n ImageRelated,\n ImageSearch,\n ImageStats,\n ImageThumbnail,\n)\nfrom catalog.api.models import Image\nfrom catalog.api.serializers.image_serializers import (\n ImageReportRequestSerializer,\n ImageSearchRequestSerializer,\n ImageSerializer,\n OembedRequestSerializer,\n OembedSerializer,\n WatermarkRequestSerializer,\n)\nfrom catalog.api.serializers.media_serializers import MediaThumbnailRequestSerializer\nfrom catalog.api.utils.exceptions import get_api_exception\nfrom catalog.api.utils.throttle import (\n AnonThumbnailRateThrottle,\n OAuth2IdThumbnailRateThrottle,\n)\nfrom catalog.api.utils.watermark import watermark\nfrom catalog.api.views.media_views import MediaViewSet\n\n\n@method_decorator(swagger_auto_schema(**ImageSearch.swagger_setup), \"list\")\n@method_decorator(swagger_auto_schema(**ImageStats.swagger_setup), \"stats\")\n@method_decorator(swagger_auto_schema(**ImageDetail.swagger_setup), \"retrieve\")\n@method_decorator(swagger_auto_schema(**ImageRelated.swagger_setup), \"related\")\n@method_decorator(swagger_auto_schema(**ImageComplain.swagger_setup), 
\"report\")\n@method_decorator(swagger_auto_schema(**ImageOembed.swagger_setup), \"oembed\")\n@method_decorator(swagger_auto_schema(**ImageThumbnail.swagger_setup), \"thumbnail\")\n@method_decorator(swagger_auto_schema(auto_schema=None), \"watermark\")\nclass ImageViewSet(MediaViewSet):\n \"\"\"\n Viewset for all endpoints pertaining to images.\n \"\"\"\n\n model_class = Image\n query_serializer_class = ImageSearchRequestSerializer\n default_index = settings.MEDIA_INDEX_MAPPING[IMAGE_TYPE]\n qa_index = \"search-qa-image\"\n\n serializer_class = ImageSerializer\n\n OEMBED_HEADERS = {\n \"User-Agent\": settings.OUTBOUND_USER_AGENT_TEMPLATE.format(purpose=\"OEmbed\"),\n }\n\n # Extra actions\n\n @action(\n detail=False,\n url_path=\"oembed\",\n url_name=\"oembed\",\n serializer_class=OembedSerializer,\n )\n def oembed(self, request, *_, **__):\n params = OembedRequestSerializer(data=request.query_params)\n params.is_valid(raise_exception=True)\n\n context = self.get_serializer_context()\n\n url = params.validated_data[\"url\"]\n identifier = url.rsplit(\"/\", 1)[1]\n try:\n image = self.get_queryset().get(identifier=identifier)\n except Image.DoesNotExist:\n return get_api_exception(\"Could not find image.\", 404)\n if not (image.height and image.width):\n image_file = requests.get(image.url, headers=self.OEMBED_HEADERS)\n width, height = PILImage.open(io.BytesIO(image_file.content)).size\n context |= {\n \"width\": width,\n \"height\": height,\n }\n\n serializer = self.get_serializer(image, context=context)\n return Response(data=serializer.data)\n\n @action(\n detail=True,\n url_path=\"thumb\",\n url_name=\"thumb\",\n serializer_class=MediaThumbnailRequestSerializer,\n throttle_classes=[AnonThumbnailRateThrottle, OAuth2IdThumbnailRateThrottle],\n )\n def thumbnail(self, request, *_, **__):\n image = self.get_object()\n\n image_url = image.url\n if not image_url:\n raise get_api_exception(\"Could not find image.\", 404)\n\n return super().thumbnail(image_url, request)\n\n @action(detail=True, url_path=\"watermark\", url_name=\"watermark\")\n def watermark(self, request, *_, **__):\n if not settings.WATERMARK_ENABLED:\n raise NotFound(\"The watermark feature is currently disabled.\")\n\n params = WatermarkRequestSerializer(data=request.query_params)\n params.is_valid(raise_exception=True)\n\n image = self.get_object()\n image_url = image.url\n image_info = {\n attr: getattr(image, attr)\n for attr in [\"title\", \"creator\", \"license\", \"license_version\"]\n }\n\n # Create the actual watermarked image.\n watermarked, exif = watermark(image_url, image_info, params.data[\"watermark\"])\n # Re-insert EXIF metadata.\n if exif:\n # piexif dump raises InvalidImageDataError which is a child class\n # of ValueError, and a struct error when the value is not\n # between -2147483648 and 2147483647\n # https://github.com/WordPress/openverse-api/issues/849\n try:\n exif_bytes = piexif.dump(exif)\n except (struct.error, ValueError):\n exif_bytes = None\n else:\n exif_bytes = None\n img_bytes = io.BytesIO()\n self._save_wrapper(watermarked, exif_bytes, img_bytes)\n\n if params.data[\"embed_metadata\"]:\n # Embed ccREL metadata with XMP.\n work_properties = {\n \"creator\": image.creator,\n \"license_url\": image.license_url,\n \"attribution\": image.attribution,\n \"work_landing_page\": image.foreign_landing_url,\n \"identifier\": str(image.identifier),\n }\n\n # Import inside a function to allow server run without Exempi library\n import libxmp\n\n from catalog.api.utils import ccrel\n\n try:\n with_xmp 
= ccrel.embed_xmp_bytes(img_bytes, work_properties)\n return FileResponse(with_xmp, content_type=\"image/jpeg\")\n except (libxmp.XMPError, AttributeError):\n # Just send the EXIF-ified file if libxmp fails to add metadata\n response = HttpResponse(content_type=\"image/jpeg\")\n self._save_wrapper(watermarked, exif_bytes, response)\n return response\n else:\n response = HttpResponse(img_bytes, content_type=\"image/jpeg\")\n self._save_wrapper(watermarked, exif_bytes, response)\n return response\n\n @action(\n detail=True,\n methods=[\"post\"],\n serializer_class=ImageReportRequestSerializer,\n )\n def report(self, *args, **kwargs):\n return super().report(*args, **kwargs)\n\n # Helper functions\n\n @staticmethod\n def _save_wrapper(pil_img, exif_bytes, destination):\n \"\"\"\n PIL crashes if exif_bytes=None, so we have to wrap it to avoid littering\n the code with branches.\n \"\"\"\n if exif_bytes:\n pil_img.save(destination, \"jpeg\", exif=exif_bytes)\n else:\n pil_img.save(destination, \"jpeg\")\n", "path": "api/catalog/api/views/image_views.py"}]}
| 2,801 | 197 |
gh_patches_debug_8969
|
rasdani/github-patches
|
git_diff
|
aws__serverless-application-model-610
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Rendering on PyPI is broken
**Description:**
Rendering of the README on PyPI is broken because PyPI expects RST by default and Markdown is being uploaded.
**Steps to reproduce the issue:**
1. Go to https://pypi.org/project/aws-sam-translator/
**Observed result:**
Raw markdown
**Expected result:**
Links and images working
</issue>
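A minimal sketch of how a Markdown long description is typically declared so PyPI renders it (assuming a setuptools/twine version that understands `long_description_content_type`; the package name below is hypothetical):

```python
from setuptools import setup

# Read the Markdown README and tell PyPI its content type, so the index
# renders it as Markdown instead of the default reStructuredText.
with open("README.md", encoding="utf-8") as f:
    long_description = f.read()

setup(
    name="example-package",  # hypothetical name
    version="0.1.0",
    long_description=long_description,
    long_description_content_type="text/markdown",
)
```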
<code>
[start of setup.py]
1 # -*- coding: utf-8 -*-
2 #
3 # setup.py
4 #
5 # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
6 #
7 # Licensed under the Apache License, Version 2.0 (the "License");
8 # you may not use this file except in compliance with the License.
9 # You may obtain a copy of the License at
10 #
11 # http://www.apache.org/licenses/LICENSE-2.0
12 #
13 # Unless required by applicable law or agreed to in writing, software
14 # distributed under the License is distributed on an "AS IS" BASIS,
15 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16 # See the License for the specific language governing permissions and
17 # limitations under the License.
18 #
19
20 """
21 AWS SAM Serverless Application Model
22 """
23 import io
24 import os
25 import re
26
27 from setuptools import setup, find_packages
28
29
30 def read(*filenames, **kwargs):
31 encoding = kwargs.get('encoding', 'utf-8')
32 sep = kwargs.get('sep', os.linesep)
33 buf = []
34 for filename in filenames:
35 with io.open(filename, encoding=encoding) as f:
36 buf.append(f.read())
37 return sep.join(buf)
38
39
40 def read_version():
41 content = read(os.path.join(
42 os.path.dirname(__file__), 'samtranslator', '__init__.py'))
43 return re.search(r"__version__ = '([^']+)'", content).group(1)
44
45
46 def read_requirements(req='base.txt'):
47 content = read(os.path.join('requirements', req))
48 return [line for line in content.split(os.linesep)
49 if not line.strip().startswith('#')]
50
51
52 setup(
53 name='aws-sam-translator',
54 version=read_version(),
55 description='AWS SAM Translator is a library that transform SAM templates into AWS CloudFormation templates',
56 long_description=read('README.md'),
57 author='Amazon Web Services',
58 author_email='[email protected]',
59 url='https://github.com/awslabs/serverless-application-model',
60 license='Apache License 2.0',
61 # Exclude all but the code folders
62 packages=find_packages(exclude=('tests', 'docs', 'examples', 'versions')),
63 install_requires=read_requirements('base.txt'),
64 include_package_data=True,
65 extras_require={
66 'dev': read_requirements('dev.txt')
67 },
68 keywords="AWS SAM Serverless Application Model",
69 classifiers=[
70 'Development Status :: 4 - Beta',
71 'Environment :: Console',
72 'Environment :: Other Environment',
73 'Intended Audience :: Developers',
74 'Intended Audience :: Information Technology',
75 'License :: OSI Approved :: Apache Software License',
76 'Operating System :: OS Independent',
77 'Programming Language :: Python',
78 'Programming Language :: Python :: 2.7',
79 'Topic :: Internet',
80 'Topic :: Software Development :: Build Tools',
81 'Topic :: Utilities'
82 ]
83 )
84
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -54,6 +54,7 @@
version=read_version(),
description='AWS SAM Translator is a library that transform SAM templates into AWS CloudFormation templates',
long_description=read('README.md'),
+ long_description_content_type='text/markdown',
author='Amazon Web Services',
author_email='[email protected]',
url='https://github.com/awslabs/serverless-application-model',
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -54,6 +54,7 @@\n version=read_version(),\n description='AWS SAM Translator is a library that transform SAM templates into AWS CloudFormation templates',\n long_description=read('README.md'),\n+ long_description_content_type='text/markdown',\n author='Amazon Web Services',\n author_email='[email protected]',\n url='https://github.com/awslabs/serverless-application-model',\n", "issue": "Rendering on PyPI is broken\n**Description:**\r\nRendering of the README on PyPI is broken because it expects RST by default and Markdown is being uploaded.\r\n\r\n**Steps to reproduce the issue:**\r\n\r\n1. Go to https://pypi.org/project/aws-sam-translator/\r\n\r\n**Observed result:**\r\n\r\nRaw markdown\r\n\r\n**Expected result:**\r\n\r\nLinks and images working\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# setup.py\n#\n# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n\"\"\"\nAWS SAM Serverless Application Model\n\"\"\"\nimport io\nimport os\nimport re\n\nfrom setuptools import setup, find_packages\n\n\ndef read(*filenames, **kwargs):\n encoding = kwargs.get('encoding', 'utf-8')\n sep = kwargs.get('sep', os.linesep)\n buf = []\n for filename in filenames:\n with io.open(filename, encoding=encoding) as f:\n buf.append(f.read())\n return sep.join(buf)\n\n\ndef read_version():\n content = read(os.path.join(\n os.path.dirname(__file__), 'samtranslator', '__init__.py'))\n return re.search(r\"__version__ = '([^']+)'\", content).group(1)\n\n\ndef read_requirements(req='base.txt'):\n content = read(os.path.join('requirements', req))\n return [line for line in content.split(os.linesep)\n if not line.strip().startswith('#')]\n\n\nsetup(\n name='aws-sam-translator',\n version=read_version(),\n description='AWS SAM Translator is a library that transform SAM templates into AWS CloudFormation templates',\n long_description=read('README.md'),\n author='Amazon Web Services',\n author_email='[email protected]',\n url='https://github.com/awslabs/serverless-application-model',\n license='Apache License 2.0',\n # Exclude all but the code folders\n packages=find_packages(exclude=('tests', 'docs', 'examples', 'versions')),\n install_requires=read_requirements('base.txt'),\n include_package_data=True,\n extras_require={\n 'dev': read_requirements('dev.txt')\n },\n keywords=\"AWS SAM Serverless Application Model\",\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Environment :: Console',\n 'Environment :: Other Environment',\n 'Intended Audience :: Developers',\n 'Intended Audience :: Information Technology',\n 'License :: OSI Approved :: Apache Software License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2.7',\n 'Topic :: Internet',\n 'Topic :: Software Development :: Build Tools',\n 'Topic :: Utilities'\n ]\n)\n", "path": "setup.py"}]}
| 1,379 | 111 |
gh_patches_debug_142
|
rasdani/github-patches
|
git_diff
|
python__mypy-4770
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
__ne__ doesn't allow returning NotImplemented
It seems that mypy doesn't allow `__ne__` returning `NotImplemented` (this is allowed with `__eq__`).
And when I try explicitly adding `NotImplemented` as a possible return type, I get a run-time error:
```
File ".../pod.py", line 65, in PlainOldData
def __ne__(self, other: Any) -> Union[bool, NotImplemented]:
File "/usr/lib/python3.6/typing.py", line 682, in inner
return func(*args, **kwds)
File "/usr/lib/python3.6/typing.py", line 800, in __getitem__
parameters = tuple(_type_check(p, msg) for p in parameters)
File "/usr/lib/python3.6/typing.py", line 800, in <genexpr>
parameters = tuple(_type_check(p, msg) for p in parameters)
File "/usr/lib/python3.6/typing.py", line 374, in _type_check
raise TypeError(msg + " Got %.100r." % (arg,))
TypeError: Union[arg, ...]: each arg must be a type. Got NotImplemented.
```
If I change this to `def __ne__(self, other: Any) -> Union[bool, type(NotImplemented)]:`, then mypy complains:
```
error: invalid type comment or annotation
note: Suggestion: use type[...] instead of type(...)
```
and when I try using this suggestion, I get a runtime error:
```
TypeError: 'type' object is not subscriptable
```
@gvanrossum suggested (in [issue 1101](https://github.com/python/mypy/issues/1101#issuecomment-374685779)) that this was due to `__ne__` not being in `sharedparse.__MAGICMETHODS`, but it does appear to be there.
</issue>
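A minimal sketch of the annotation pattern type checkers conventionally accept for rich comparisons (assuming the checker special-cases `NotImplemented` returns in comparison dunders, as mypy does for `__eq__`; the class below is a hypothetical stand-in for the reporter's `PlainOldData`):

```python
from typing import Any


class PlainOldData:
    def __eq__(self, other: Any) -> bool:
        if not isinstance(other, PlainOldData):
            # NotImplemented is returned despite the bool annotation; comparison
            # dunders are special-cased, so no Union[...] return type is needed.
            return NotImplemented
        return True

    def __ne__(self, other: Any) -> bool:
        result = self.__eq__(other)
        if result is NotImplemented:
            return result
        return not result
```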
<code>
[start of mypy/sharedparse.py]
1 from typing import Optional
2
3 """Shared logic between our three mypy parser files."""
4
5
6 MAGIC_METHODS = {
7 "__abs__",
8 "__add__",
9 "__and__",
10 "__call__",
11 "__cmp__",
12 "__complex__",
13 "__contains__",
14 "__del__",
15 "__delattr__",
16 "__delitem__",
17 "__divmod__",
18 "__div__",
19 "__enter__",
20 "__exit__",
21 "__eq__",
22 "__floordiv__",
23 "__float__",
24 "__ge__",
25 "__getattr__",
26 "__getattribute__",
27 "__getitem__",
28 "__gt__",
29 "__hex__",
30 "__iadd__",
31 "__iand__",
32 "__idiv__",
33 "__ifloordiv__",
34 "__ilshift__",
35 "__imod__",
36 "__imul__",
37 "__init__",
38 "__init_subclass__",
39 "__int__",
40 "__invert__",
41 "__ior__",
42 "__ipow__",
43 "__irshift__",
44 "__isub__",
45 "__iter__",
46 "__ixor__",
47 "__le__",
48 "__len__",
49 "__long__",
50 "__lshift__",
51 "__lt__",
52 "__mod__",
53 "__mul__",
54 "__ne__",
55 "__neg__",
56 "__new__",
57 "__nonzero__",
58 "__oct__",
59 "__or__",
60 "__pos__",
61 "__pow__",
62 "__radd__",
63 "__rand__",
64 "__rdiv__",
65 "__repr__",
66 "__reversed__",
67 "__rfloordiv__",
68 "__rlshift__",
69 "__rmod__",
70 "__rmul__",
71 "__ror__",
72 "__rpow__",
73 "__rrshift__",
74 "__rshift__",
75 "__rsub__",
76 "__rxor__",
77 "__setattr__",
78 "__setitem__",
79 "__str__",
80 "__sub__",
81 "__unicode__",
82 "__xor__",
83 }
84
85 MAGIC_METHODS_ALLOWING_KWARGS = {
86 "__init__",
87 "__init_subclass__",
88 "__new__",
89 "__call__",
90 }
91
92 MAGIC_METHODS_POS_ARGS_ONLY = MAGIC_METHODS - MAGIC_METHODS_ALLOWING_KWARGS
93
94 BINARY_MAGIC_METHODS = {
95 "__add__",
96 "__and__",
97 "__cmp__",
98 "__divmod__",
99 "__div__",
100 "__eq__",
101 "__floordiv__",
102 "__ge__",
103 "__gt__",
104 "__iadd__",
105 "__iand__",
106 "__idiv__",
107 "__ifloordiv__",
108 "__ilshift__",
109 "__imod__",
110 "__imul__",
111 "__ior__",
112 "__ipow__",
113 "__irshift__",
114 "__isub__",
115 "__ixor__",
116 "__le__",
117 "__lshift__",
118 "__lt__",
119 "__mod__",
120 "__mul__",
121 "__or__",
122 "__pow__",
123 "__radd__",
124 "__rand__",
125 "__rdiv__",
126 "__rfloordiv__",
127 "__rlshift__",
128 "__rmod__",
129 "__rmul__",
130 "__ror__",
131 "__rpow__",
132 "__rrshift__",
133 "__rshift__",
134 "__rsub__",
135 "__rxor__",
136 "__sub__",
137 "__xor__",
138 }
139
140
141 def special_function_elide_names(name: str) -> bool:
142 return name in MAGIC_METHODS_POS_ARGS_ONLY
143
144
145 def argument_elide_name(name: Optional[str]) -> bool:
146 return name is not None and name.startswith("__")
147
[end of mypy/sharedparse.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/mypy/sharedparse.py b/mypy/sharedparse.py
--- a/mypy/sharedparse.py
+++ b/mypy/sharedparse.py
@@ -118,6 +118,7 @@
"__lt__",
"__mod__",
"__mul__",
+ "__ne__",
"__or__",
"__pow__",
"__radd__",
|
{"golden_diff": "diff --git a/mypy/sharedparse.py b/mypy/sharedparse.py\n--- a/mypy/sharedparse.py\n+++ b/mypy/sharedparse.py\n@@ -118,6 +118,7 @@\n \"__lt__\",\n \"__mod__\",\n \"__mul__\",\n+ \"__ne__\",\n \"__or__\",\n \"__pow__\",\n \"__radd__\",\n", "issue": "__ne__ doesn't allow returning NotImplemented\nIt seems that mypy doesn't allow `__ne__` returning `NotImplemented`, (this is allowed with `__eq__`).\r\nAnd when I try explicitly adding `NotImplemented` as a possible return type, I get a run-time error:\r\n```\r\n File \".../pod.py\", line 65, in PlainOldData\r\n def __ne__(self, other: Any) -> Union[bool, NotImplemented]:\r\n File \"/usr/lib/python3.6/typing.py\", line 682, in inner\r\n return func(*args, **kwds)\r\n File \"/usr/lib/python3.6/typing.py\", line 800, in __getitem__\r\n parameters = tuple(_type_check(p, msg) for p in parameters)\r\n File \"/usr/lib/python3.6/typing.py\", line 800, in <genexpr>\r\n parameters = tuple(_type_check(p, msg) for p in parameters)\r\n File \"/usr/lib/python3.6/typing.py\", line 374, in _type_check\r\n raise TypeError(msg + \" Got %.100r.\" % (arg,))\r\nTypeError: Union[arg, ...]: each arg must be a type. Got NotImplemented.\r\n```\r\n\r\nIf I change this to `def __ne__(self, other: Any) -> Union[bool, type(NotImplemented)]:`, then mypy complains: \r\n```\r\nerror: invalid type comment or annotation\r\nnote: Suggestion: use type[...] instead of type(...)\r\n```\r\n\r\nand when I try using this suggestion, I get a runtime error:\r\n```\r\nTypeError: 'type' object is not subscriptable\r\n```\r\n\r\n@gvanrossum suggested (in [issue 1101](https://github.com/python/mypy/issues/1101#issuecomment-374685779)) that this was due to `__ne__` not being in `sharedparse.__MAGICMETHODS`; but it does appear to be there.\r\n\n", "before_files": [{"content": "from typing import Optional\n\n\"\"\"Shared logic between our three mypy parser files.\"\"\"\n\n\nMAGIC_METHODS = {\n \"__abs__\",\n \"__add__\",\n \"__and__\",\n \"__call__\",\n \"__cmp__\",\n \"__complex__\",\n \"__contains__\",\n \"__del__\",\n \"__delattr__\",\n \"__delitem__\",\n \"__divmod__\",\n \"__div__\",\n \"__enter__\",\n \"__exit__\",\n \"__eq__\",\n \"__floordiv__\",\n \"__float__\",\n \"__ge__\",\n \"__getattr__\",\n \"__getattribute__\",\n \"__getitem__\",\n \"__gt__\",\n \"__hex__\",\n \"__iadd__\",\n \"__iand__\",\n \"__idiv__\",\n \"__ifloordiv__\",\n \"__ilshift__\",\n \"__imod__\",\n \"__imul__\",\n \"__init__\",\n \"__init_subclass__\",\n \"__int__\",\n \"__invert__\",\n \"__ior__\",\n \"__ipow__\",\n \"__irshift__\",\n \"__isub__\",\n \"__iter__\",\n \"__ixor__\",\n \"__le__\",\n \"__len__\",\n \"__long__\",\n \"__lshift__\",\n \"__lt__\",\n \"__mod__\",\n \"__mul__\",\n \"__ne__\",\n \"__neg__\",\n \"__new__\",\n \"__nonzero__\",\n \"__oct__\",\n \"__or__\",\n \"__pos__\",\n \"__pow__\",\n \"__radd__\",\n \"__rand__\",\n \"__rdiv__\",\n \"__repr__\",\n \"__reversed__\",\n \"__rfloordiv__\",\n \"__rlshift__\",\n \"__rmod__\",\n \"__rmul__\",\n \"__ror__\",\n \"__rpow__\",\n \"__rrshift__\",\n \"__rshift__\",\n \"__rsub__\",\n \"__rxor__\",\n \"__setattr__\",\n \"__setitem__\",\n \"__str__\",\n \"__sub__\",\n \"__unicode__\",\n \"__xor__\",\n}\n\nMAGIC_METHODS_ALLOWING_KWARGS = {\n \"__init__\",\n \"__init_subclass__\",\n \"__new__\",\n \"__call__\",\n}\n\nMAGIC_METHODS_POS_ARGS_ONLY = MAGIC_METHODS - MAGIC_METHODS_ALLOWING_KWARGS\n\nBINARY_MAGIC_METHODS = {\n \"__add__\",\n \"__and__\",\n \"__cmp__\",\n \"__divmod__\",\n \"__div__\",\n \"__eq__\",\n \"__floordiv__\",\n 
\"__ge__\",\n \"__gt__\",\n \"__iadd__\",\n \"__iand__\",\n \"__idiv__\",\n \"__ifloordiv__\",\n \"__ilshift__\",\n \"__imod__\",\n \"__imul__\",\n \"__ior__\",\n \"__ipow__\",\n \"__irshift__\",\n \"__isub__\",\n \"__ixor__\",\n \"__le__\",\n \"__lshift__\",\n \"__lt__\",\n \"__mod__\",\n \"__mul__\",\n \"__or__\",\n \"__pow__\",\n \"__radd__\",\n \"__rand__\",\n \"__rdiv__\",\n \"__rfloordiv__\",\n \"__rlshift__\",\n \"__rmod__\",\n \"__rmul__\",\n \"__ror__\",\n \"__rpow__\",\n \"__rrshift__\",\n \"__rshift__\",\n \"__rsub__\",\n \"__rxor__\",\n \"__sub__\",\n \"__xor__\",\n}\n\n\ndef special_function_elide_names(name: str) -> bool:\n return name in MAGIC_METHODS_POS_ARGS_ONLY\n\n\ndef argument_elide_name(name: Optional[str]) -> bool:\n return name is not None and name.startswith(\"__\")\n", "path": "mypy/sharedparse.py"}]}
| 2,105 | 83 |
gh_patches_debug_5713
|
rasdani/github-patches
|
git_diff
|
huggingface__diffusers-550
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Question about flax model output classes
For example, here it is using `from dataclasses import dataclass`:
https://github.com/huggingface/diffusers/blob/d8b0e4f433778f9d4555cfab642e43db3aff4833/src/diffusers/models/unet_2d_condition_flax.py#L22-L23
But transformers equivalents use `@flax.struct.dataclass`. For example [here](https://github.com/huggingface/transformers/blob/21f6f58721dd9154357576be6de54eefef1f1818/src/transformers/models/bert/modeling_flax_bert.py#L63-L64)
```py
@flax.struct.dataclass
class FlaxBertForPreTrainingOutput(ModelOutput):
```
The benefit of using `@flax.struct.dataclass` over a plain Python `dataclass` is that [`jax.jit` can consume @flax.struct.dataclass](https://github.com/huggingface/transformers/pull/11886).

So the question is: should we use `@flax.struct.dataclass` in `diffusers` as well?
</issue>
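A minimal sketch of the difference in question (assuming only that `flax.struct.dataclass` registers the class as a JAX pytree; the class and function names are illustrative):

```python
import flax
import jax
import jax.numpy as jnp


@flax.struct.dataclass
class SampleOutput:
    sample: jnp.ndarray


@jax.jit
def double(x: jnp.ndarray) -> SampleOutput:
    # Works because @flax.struct.dataclass makes SampleOutput a pytree,
    # so jit knows how to flatten and unflatten the returned container.
    return SampleOutput(sample=x * 2)


out = double(jnp.ones((2, 2)))
# With a plain dataclasses.dataclass, the jitted call would generally fail,
# since JAX would not know how to handle the unregistered output container.
```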
<code>
[start of src/diffusers/models/unet_2d_condition_flax.py]
1 from dataclasses import dataclass
2 from typing import Tuple, Union
3
4 import flax.linen as nn
5 import jax
6 import jax.numpy as jnp
7 from flax.core.frozen_dict import FrozenDict
8
9 from ..configuration_utils import ConfigMixin, flax_register_to_config
10 from ..modeling_flax_utils import FlaxModelMixin
11 from ..utils import BaseOutput
12 from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
13 from .unet_blocks_flax import (
14 FlaxCrossAttnDownBlock2D,
15 FlaxCrossAttnUpBlock2D,
16 FlaxDownBlock2D,
17 FlaxUNetMidBlock2DCrossAttn,
18 FlaxUpBlock2D,
19 )
20
21
22 @dataclass
23 class FlaxUNet2DConditionOutput(BaseOutput):
24 """
25 Args:
26 sample (`jnp.ndarray` of shape `(batch_size, num_channels, height, width)`):
27 Hidden states conditioned on `encoder_hidden_states` input. Output of last layer of model.
28 """
29
30 sample: jnp.ndarray
31
32
33 @flax_register_to_config
34 class FlaxUNet2DConditionModel(nn.Module, FlaxModelMixin, ConfigMixin):
35 r"""
36 FlaxUNet2DConditionModel is a conditional 2D UNet model that takes in a noisy sample, conditional state, and a
37 timestep and returns sample shaped output.
38
39 This model inherits from [`FlaxModelMixin`]. Check the superclass documentation for the generic methods the library
40 implements for all the models (such as downloading or saving, etc.)
41
42 Parameters:
43 sample_size (`int`, *optional*): The size of the input sample.
44 in_channels (`int`, *optional*, defaults to 4): The number of channels in the input sample.
45 out_channels (`int`, *optional*, defaults to 4): The number of channels in the output.
46 down_block_types (`Tuple[str]`, *optional*, defaults to `("CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "DownBlock2D")`):
47 The tuple of downsample blocks to use. The corresponding class names will be: "FlaxCrossAttnDownBlock2D",
48 "FlaxCrossAttnDownBlock2D", "FlaxCrossAttnDownBlock2D", "FlaxDownBlock2D"
49 up_block_types (`Tuple[str]`, *optional*, defaults to `("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D",)`):
50 The tuple of upsample blocks to use. The corresponding class names will be: "FlaxUpBlock2D",
51 "FlaxCrossAttnUpBlock2D", "FlaxCrossAttnUpBlock2D", "FlaxCrossAttnUpBlock2D"
52 block_out_channels (`Tuple[int]`, *optional*, defaults to `(320, 640, 1280, 1280)`):
53 The tuple of output channels for each block.
54 layers_per_block (`int`, *optional*, defaults to 2): The number of layers per block.
55 attention_head_dim (`int`, *optional*, defaults to 8): The dimension of the attention heads.
56 cross_attention_dim (`int`, *optional*, defaults to 768): The dimension of the cross attention features.
57 dropout (`float`, *optional*, defaults to 0): Dropout probability for down, up and bottleneck blocks.
58 """
59
60 sample_size: int = 32
61 in_channels: int = 4
62 out_channels: int = 4
63 down_block_types: Tuple[str] = (
64 "CrossAttnDownBlock2D",
65 "CrossAttnDownBlock2D",
66 "CrossAttnDownBlock2D",
67 "DownBlock2D",
68 )
69 up_block_types: Tuple[str] = ("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D")
70 block_out_channels: Tuple[int] = (320, 640, 1280, 1280)
71 layers_per_block: int = 2
72 attention_head_dim: int = 8
73 cross_attention_dim: int = 1280
74 dropout: float = 0.0
75 dtype: jnp.dtype = jnp.float32
76
77 def init_weights(self, rng: jax.random.PRNGKey) -> FrozenDict:
78 # init input tensors
79 sample_shape = (1, self.sample_size, self.sample_size, self.in_channels)
80 sample = jnp.zeros(sample_shape, dtype=jnp.float32)
81 timesteps = jnp.ones((1,), dtype=jnp.int32)
82 encoder_hidden_states = jnp.zeros((1, 1, self.cross_attention_dim), dtype=jnp.float32)
83
84 params_rng, dropout_rng = jax.random.split(rng)
85 rngs = {"params": params_rng, "dropout": dropout_rng}
86
87 return self.init(rngs, sample, timesteps, encoder_hidden_states)["params"]
88
89 def setup(self):
90 block_out_channels = self.block_out_channels
91 time_embed_dim = block_out_channels[0] * 4
92
93 # input
94 self.conv_in = nn.Conv(
95 block_out_channels[0],
96 kernel_size=(3, 3),
97 strides=(1, 1),
98 padding=((1, 1), (1, 1)),
99 dtype=self.dtype,
100 )
101
102 # time
103 self.time_proj = FlaxTimesteps(block_out_channels[0])
104 self.time_embedding = FlaxTimestepEmbedding(time_embed_dim, dtype=self.dtype)
105
106 # down
107 down_blocks = []
108 output_channel = block_out_channels[0]
109 for i, down_block_type in enumerate(self.down_block_types):
110 input_channel = output_channel
111 output_channel = block_out_channels[i]
112 is_final_block = i == len(block_out_channels) - 1
113
114 if down_block_type == "CrossAttnDownBlock2D":
115 down_block = FlaxCrossAttnDownBlock2D(
116 in_channels=input_channel,
117 out_channels=output_channel,
118 dropout=self.dropout,
119 num_layers=self.layers_per_block,
120 attn_num_head_channels=self.attention_head_dim,
121 add_downsample=not is_final_block,
122 dtype=self.dtype,
123 )
124 else:
125 down_block = FlaxDownBlock2D(
126 in_channels=input_channel,
127 out_channels=output_channel,
128 dropout=self.dropout,
129 num_layers=self.layers_per_block,
130 add_downsample=not is_final_block,
131 dtype=self.dtype,
132 )
133
134 down_blocks.append(down_block)
135 self.down_blocks = down_blocks
136
137 # mid
138 self.mid_block = FlaxUNetMidBlock2DCrossAttn(
139 in_channels=block_out_channels[-1],
140 dropout=self.dropout,
141 attn_num_head_channels=self.attention_head_dim,
142 dtype=self.dtype,
143 )
144
145 # up
146 up_blocks = []
147 reversed_block_out_channels = list(reversed(block_out_channels))
148 output_channel = reversed_block_out_channels[0]
149 for i, up_block_type in enumerate(self.up_block_types):
150 prev_output_channel = output_channel
151 output_channel = reversed_block_out_channels[i]
152 input_channel = reversed_block_out_channels[min(i + 1, len(block_out_channels) - 1)]
153
154 is_final_block = i == len(block_out_channels) - 1
155
156 if up_block_type == "CrossAttnUpBlock2D":
157 up_block = FlaxCrossAttnUpBlock2D(
158 in_channels=input_channel,
159 out_channels=output_channel,
160 prev_output_channel=prev_output_channel,
161 num_layers=self.layers_per_block + 1,
162 attn_num_head_channels=self.attention_head_dim,
163 add_upsample=not is_final_block,
164 dropout=self.dropout,
165 dtype=self.dtype,
166 )
167 else:
168 up_block = FlaxUpBlock2D(
169 in_channels=input_channel,
170 out_channels=output_channel,
171 prev_output_channel=prev_output_channel,
172 num_layers=self.layers_per_block + 1,
173 add_upsample=not is_final_block,
174 dropout=self.dropout,
175 dtype=self.dtype,
176 )
177
178 up_blocks.append(up_block)
179 prev_output_channel = output_channel
180 self.up_blocks = up_blocks
181
182 # out
183 self.conv_norm_out = nn.GroupNorm(num_groups=32, epsilon=1e-5)
184 self.conv_out = nn.Conv(
185 self.out_channels,
186 kernel_size=(3, 3),
187 strides=(1, 1),
188 padding=((1, 1), (1, 1)),
189 dtype=self.dtype,
190 )
191
192 def __call__(
193 self,
194 sample,
195 timesteps,
196 encoder_hidden_states,
197 return_dict: bool = True,
198 train: bool = False,
199 ) -> Union[FlaxUNet2DConditionOutput, Tuple]:
200 """r
201 Args:
202 sample (`jnp.ndarray`): (channel, height, width) noisy inputs tensor
203 timestep (`jnp.ndarray` or `float` or `int`): timesteps
204 encoder_hidden_states (`jnp.ndarray`): (channel, height, width) encoder hidden states
205 return_dict (`bool`, *optional*, defaults to `True`):
206 Whether or not to return a [`models.unet_2d_condition_flax.FlaxUNet2DConditionOutput`] instead of a
207 plain tuple.
208 train (`bool`, *optional*, defaults to `False`):
209 Use deterministic functions and disable dropout when not training.
210
211 Returns:
212 [`~models.unet_2d_condition_flax.FlaxUNet2DConditionOutput`] or `tuple`:
213 [`~models.unet_2d_condition_flax.FlaxUNet2DConditionOutput`] if `return_dict` is True, otherwise a `tuple`.
214 When returning a tuple, the first element is the sample tensor.
215 """
216 # 1. time
217 t_emb = self.time_proj(timesteps)
218 t_emb = self.time_embedding(t_emb)
219
220 # 2. pre-process
221 sample = self.conv_in(sample)
222
223 # 3. down
224 down_block_res_samples = (sample,)
225 for down_block in self.down_blocks:
226 if isinstance(down_block, FlaxCrossAttnDownBlock2D):
227 sample, res_samples = down_block(sample, t_emb, encoder_hidden_states, deterministic=not train)
228 else:
229 sample, res_samples = down_block(sample, t_emb, deterministic=not train)
230 down_block_res_samples += res_samples
231
232 # 4. mid
233 sample = self.mid_block(sample, t_emb, encoder_hidden_states, deterministic=not train)
234
235 # 5. up
236 for up_block in self.up_blocks:
237 res_samples = down_block_res_samples[-(self.layers_per_block + 1) :]
238 down_block_res_samples = down_block_res_samples[: -(self.layers_per_block + 1)]
239 if isinstance(up_block, FlaxCrossAttnUpBlock2D):
240 sample = up_block(
241 sample,
242 temb=t_emb,
243 encoder_hidden_states=encoder_hidden_states,
244 res_hidden_states_tuple=res_samples,
245 deterministic=not train,
246 )
247 else:
248 sample = up_block(sample, temb=t_emb, res_hidden_states_tuple=res_samples, deterministic=not train)
249
250 # 6. post-process
251 sample = self.conv_norm_out(sample)
252 sample = nn.silu(sample)
253 sample = self.conv_out(sample)
254
255 if not return_dict:
256 return (sample,)
257
258 return FlaxUNet2DConditionOutput(sample=sample)
259
[end of src/diffusers/models/unet_2d_condition_flax.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/src/diffusers/models/unet_2d_condition_flax.py b/src/diffusers/models/unet_2d_condition_flax.py
--- a/src/diffusers/models/unet_2d_condition_flax.py
+++ b/src/diffusers/models/unet_2d_condition_flax.py
@@ -1,6 +1,6 @@
-from dataclasses import dataclass
from typing import Tuple, Union
+import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
@@ -19,7 +19,7 @@
)
-@dataclass
[email protected]
class FlaxUNet2DConditionOutput(BaseOutput):
"""
Args:
|
{"golden_diff": "diff --git a/src/diffusers/models/unet_2d_condition_flax.py b/src/diffusers/models/unet_2d_condition_flax.py\n--- a/src/diffusers/models/unet_2d_condition_flax.py\n+++ b/src/diffusers/models/unet_2d_condition_flax.py\n@@ -1,6 +1,6 @@\n-from dataclasses import dataclass\n from typing import Tuple, Union\n \n+import flax\n import flax.linen as nn\n import jax\n import jax.numpy as jnp\n@@ -19,7 +19,7 @@\n )\n \n \n-@dataclass\[email protected]\n class FlaxUNet2DConditionOutput(BaseOutput):\n \"\"\"\n Args:\n", "issue": "Question about flax model output classes\nFor example here, it is using `from dataclasses import dataclass`\r\nhttps://github.com/huggingface/diffusers/blob/d8b0e4f433778f9d4555cfab642e43db3aff4833/src/diffusers/models/unet_2d_condition_flax.py#L22-L23\r\n\r\nBut transformers equivalents use `@flax.struct.dataclass`. For example [here](https://github.com/huggingface/transformers/blob/21f6f58721dd9154357576be6de54eefef1f1818/src/transformers/models/bert/modeling_flax_bert.py#L63-L64)\r\n```py\r\[email protected]\r\nclass FlaxBertForPreTrainingOutput(ModelOutput):\r\n```\r\n\r\nThe benefit of using `@flax.struct.dataclass` over naive `python dataclass` is that: [`jax.jit` can consume @flax.struct.dataclass](https://github.com/huggingface/transformers/pull/11886)\r\n\r\nSo the question is: should we use `@flax.struct.dataclass` on `diffusers` as well ?\n", "before_files": [{"content": "from dataclasses import dataclass\nfrom typing import Tuple, Union\n\nimport flax.linen as nn\nimport jax\nimport jax.numpy as jnp\nfrom flax.core.frozen_dict import FrozenDict\n\nfrom ..configuration_utils import ConfigMixin, flax_register_to_config\nfrom ..modeling_flax_utils import FlaxModelMixin\nfrom ..utils import BaseOutput\nfrom .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps\nfrom .unet_blocks_flax import (\n FlaxCrossAttnDownBlock2D,\n FlaxCrossAttnUpBlock2D,\n FlaxDownBlock2D,\n FlaxUNetMidBlock2DCrossAttn,\n FlaxUpBlock2D,\n)\n\n\n@dataclass\nclass FlaxUNet2DConditionOutput(BaseOutput):\n \"\"\"\n Args:\n sample (`jnp.ndarray` of shape `(batch_size, num_channels, height, width)`):\n Hidden states conditioned on `encoder_hidden_states` input. Output of last layer of model.\n \"\"\"\n\n sample: jnp.ndarray\n\n\n@flax_register_to_config\nclass FlaxUNet2DConditionModel(nn.Module, FlaxModelMixin, ConfigMixin):\n r\"\"\"\n FlaxUNet2DConditionModel is a conditional 2D UNet model that takes in a noisy sample, conditional state, and a\n timestep and returns sample shaped output.\n\n This model inherits from [`FlaxModelMixin`]. Check the superclass documentation for the generic methods the library\n implements for all the models (such as downloading or saving, etc.)\n\n Parameters:\n sample_size (`int`, *optional*): The size of the input sample.\n in_channels (`int`, *optional*, defaults to 4): The number of channels in the input sample.\n out_channels (`int`, *optional*, defaults to 4): The number of channels in the output.\n down_block_types (`Tuple[str]`, *optional*, defaults to `(\"CrossAttnDownBlock2D\", \"CrossAttnDownBlock2D\", \"CrossAttnDownBlock2D\", \"DownBlock2D\")`):\n The tuple of downsample blocks to use. The corresponding class names will be: \"FlaxCrossAttnDownBlock2D\",\n \"FlaxCrossAttnDownBlock2D\", \"FlaxCrossAttnDownBlock2D\", \"FlaxDownBlock2D\"\n up_block_types (`Tuple[str]`, *optional*, defaults to `(\"UpBlock2D\", \"CrossAttnUpBlock2D\", \"CrossAttnUpBlock2D\", \"CrossAttnUpBlock2D\",)`):\n The tuple of upsample blocks to use. 
The corresponding class names will be: \"FlaxUpBlock2D\",\n \"FlaxCrossAttnUpBlock2D\", \"FlaxCrossAttnUpBlock2D\", \"FlaxCrossAttnUpBlock2D\"\n block_out_channels (`Tuple[int]`, *optional*, defaults to `(320, 640, 1280, 1280)`):\n The tuple of output channels for each block.\n layers_per_block (`int`, *optional*, defaults to 2): The number of layers per block.\n attention_head_dim (`int`, *optional*, defaults to 8): The dimension of the attention heads.\n cross_attention_dim (`int`, *optional*, defaults to 768): The dimension of the cross attention features.\n dropout (`float`, *optional*, defaults to 0): Dropout probability for down, up and bottleneck blocks.\n \"\"\"\n\n sample_size: int = 32\n in_channels: int = 4\n out_channels: int = 4\n down_block_types: Tuple[str] = (\n \"CrossAttnDownBlock2D\",\n \"CrossAttnDownBlock2D\",\n \"CrossAttnDownBlock2D\",\n \"DownBlock2D\",\n )\n up_block_types: Tuple[str] = (\"UpBlock2D\", \"CrossAttnUpBlock2D\", \"CrossAttnUpBlock2D\", \"CrossAttnUpBlock2D\")\n block_out_channels: Tuple[int] = (320, 640, 1280, 1280)\n layers_per_block: int = 2\n attention_head_dim: int = 8\n cross_attention_dim: int = 1280\n dropout: float = 0.0\n dtype: jnp.dtype = jnp.float32\n\n def init_weights(self, rng: jax.random.PRNGKey) -> FrozenDict:\n # init input tensors\n sample_shape = (1, self.sample_size, self.sample_size, self.in_channels)\n sample = jnp.zeros(sample_shape, dtype=jnp.float32)\n timesteps = jnp.ones((1,), dtype=jnp.int32)\n encoder_hidden_states = jnp.zeros((1, 1, self.cross_attention_dim), dtype=jnp.float32)\n\n params_rng, dropout_rng = jax.random.split(rng)\n rngs = {\"params\": params_rng, \"dropout\": dropout_rng}\n\n return self.init(rngs, sample, timesteps, encoder_hidden_states)[\"params\"]\n\n def setup(self):\n block_out_channels = self.block_out_channels\n time_embed_dim = block_out_channels[0] * 4\n\n # input\n self.conv_in = nn.Conv(\n block_out_channels[0],\n kernel_size=(3, 3),\n strides=(1, 1),\n padding=((1, 1), (1, 1)),\n dtype=self.dtype,\n )\n\n # time\n self.time_proj = FlaxTimesteps(block_out_channels[0])\n self.time_embedding = FlaxTimestepEmbedding(time_embed_dim, dtype=self.dtype)\n\n # down\n down_blocks = []\n output_channel = block_out_channels[0]\n for i, down_block_type in enumerate(self.down_block_types):\n input_channel = output_channel\n output_channel = block_out_channels[i]\n is_final_block = i == len(block_out_channels) - 1\n\n if down_block_type == \"CrossAttnDownBlock2D\":\n down_block = FlaxCrossAttnDownBlock2D(\n in_channels=input_channel,\n out_channels=output_channel,\n dropout=self.dropout,\n num_layers=self.layers_per_block,\n attn_num_head_channels=self.attention_head_dim,\n add_downsample=not is_final_block,\n dtype=self.dtype,\n )\n else:\n down_block = FlaxDownBlock2D(\n in_channels=input_channel,\n out_channels=output_channel,\n dropout=self.dropout,\n num_layers=self.layers_per_block,\n add_downsample=not is_final_block,\n dtype=self.dtype,\n )\n\n down_blocks.append(down_block)\n self.down_blocks = down_blocks\n\n # mid\n self.mid_block = FlaxUNetMidBlock2DCrossAttn(\n in_channels=block_out_channels[-1],\n dropout=self.dropout,\n attn_num_head_channels=self.attention_head_dim,\n dtype=self.dtype,\n )\n\n # up\n up_blocks = []\n reversed_block_out_channels = list(reversed(block_out_channels))\n output_channel = reversed_block_out_channels[0]\n for i, up_block_type in enumerate(self.up_block_types):\n prev_output_channel = output_channel\n output_channel = reversed_block_out_channels[i]\n input_channel = 
reversed_block_out_channels[min(i + 1, len(block_out_channels) - 1)]\n\n is_final_block = i == len(block_out_channels) - 1\n\n if up_block_type == \"CrossAttnUpBlock2D\":\n up_block = FlaxCrossAttnUpBlock2D(\n in_channels=input_channel,\n out_channels=output_channel,\n prev_output_channel=prev_output_channel,\n num_layers=self.layers_per_block + 1,\n attn_num_head_channels=self.attention_head_dim,\n add_upsample=not is_final_block,\n dropout=self.dropout,\n dtype=self.dtype,\n )\n else:\n up_block = FlaxUpBlock2D(\n in_channels=input_channel,\n out_channels=output_channel,\n prev_output_channel=prev_output_channel,\n num_layers=self.layers_per_block + 1,\n add_upsample=not is_final_block,\n dropout=self.dropout,\n dtype=self.dtype,\n )\n\n up_blocks.append(up_block)\n prev_output_channel = output_channel\n self.up_blocks = up_blocks\n\n # out\n self.conv_norm_out = nn.GroupNorm(num_groups=32, epsilon=1e-5)\n self.conv_out = nn.Conv(\n self.out_channels,\n kernel_size=(3, 3),\n strides=(1, 1),\n padding=((1, 1), (1, 1)),\n dtype=self.dtype,\n )\n\n def __call__(\n self,\n sample,\n timesteps,\n encoder_hidden_states,\n return_dict: bool = True,\n train: bool = False,\n ) -> Union[FlaxUNet2DConditionOutput, Tuple]:\n \"\"\"r\n Args:\n sample (`jnp.ndarray`): (channel, height, width) noisy inputs tensor\n timestep (`jnp.ndarray` or `float` or `int`): timesteps\n encoder_hidden_states (`jnp.ndarray`): (channel, height, width) encoder hidden states\n return_dict (`bool`, *optional*, defaults to `True`):\n Whether or not to return a [`models.unet_2d_condition_flax.FlaxUNet2DConditionOutput`] instead of a\n plain tuple.\n train (`bool`, *optional*, defaults to `False`):\n Use deterministic functions and disable dropout when not training.\n\n Returns:\n [`~models.unet_2d_condition_flax.FlaxUNet2DConditionOutput`] or `tuple`:\n [`~models.unet_2d_condition_flax.FlaxUNet2DConditionOutput`] if `return_dict` is True, otherwise a `tuple`.\n When returning a tuple, the first element is the sample tensor.\n \"\"\"\n # 1. time\n t_emb = self.time_proj(timesteps)\n t_emb = self.time_embedding(t_emb)\n\n # 2. pre-process\n sample = self.conv_in(sample)\n\n # 3. down\n down_block_res_samples = (sample,)\n for down_block in self.down_blocks:\n if isinstance(down_block, FlaxCrossAttnDownBlock2D):\n sample, res_samples = down_block(sample, t_emb, encoder_hidden_states, deterministic=not train)\n else:\n sample, res_samples = down_block(sample, t_emb, deterministic=not train)\n down_block_res_samples += res_samples\n\n # 4. mid\n sample = self.mid_block(sample, t_emb, encoder_hidden_states, deterministic=not train)\n\n # 5. up\n for up_block in self.up_blocks:\n res_samples = down_block_res_samples[-(self.layers_per_block + 1) :]\n down_block_res_samples = down_block_res_samples[: -(self.layers_per_block + 1)]\n if isinstance(up_block, FlaxCrossAttnUpBlock2D):\n sample = up_block(\n sample,\n temb=t_emb,\n encoder_hidden_states=encoder_hidden_states,\n res_hidden_states_tuple=res_samples,\n deterministic=not train,\n )\n else:\n sample = up_block(sample, temb=t_emb, res_hidden_states_tuple=res_samples, deterministic=not train)\n\n # 6. post-process\n sample = self.conv_norm_out(sample)\n sample = nn.silu(sample)\n sample = self.conv_out(sample)\n\n if not return_dict:\n return (sample,)\n\n return FlaxUNet2DConditionOutput(sample=sample)\n", "path": "src/diffusers/models/unet_2d_condition_flax.py"}]}
| 4,046 | 161 |
gh_patches_debug_18843
|
rasdani/github-patches
|
git_diff
|
iterative__dvc-2366
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
update `url` argument description in `get` and `import` help output
to `URL of Git repository with DVC project to download from.`
per https://github.com/iterative/dvc.org/pull/464#pullrequestreview-259776737
</issue>
<code>
[start of dvc/command/imp.py]
1 from __future__ import unicode_literals
2
3 import argparse
4 import logging
5
6 from dvc.exceptions import DvcException
7 from dvc.command.base import CmdBase, append_doc_link
8
9
10 logger = logging.getLogger(__name__)
11
12
13 class CmdImport(CmdBase):
14 def run(self):
15 try:
16 self.repo.imp(
17 self.args.url,
18 self.args.path,
19 out=self.args.out,
20 rev=self.args.rev,
21 )
22 except DvcException:
23 logger.exception(
24 "failed to import '{}' from '{}'.".format(
25 self.args.path, self.args.url
26 )
27 )
28 return 1
29 return 0
30
31
32 def add_parser(subparsers, parent_parser):
33 IMPORT_HELP = (
34 "Download data from DVC repository and take it under DVC control."
35 )
36
37 import_parser = subparsers.add_parser(
38 "import",
39 parents=[parent_parser],
40 description=append_doc_link(IMPORT_HELP, "import"),
41 help=IMPORT_HELP,
42 formatter_class=argparse.RawTextHelpFormatter,
43 )
44 import_parser.add_argument("url", help="DVC repository URL.")
45 import_parser.add_argument(
46 "path", help="Path to data within DVC repository."
47 )
48 import_parser.add_argument(
49 "-o", "--out", nargs="?", help="Destination path to put data to."
50 )
51 import_parser.add_argument(
52 "--rev", nargs="?", help="DVC repository git revision."
53 )
54 import_parser.set_defaults(func=CmdImport)
55
[end of dvc/command/imp.py]
[start of dvc/command/get.py]
1 from __future__ import unicode_literals
2
3 import argparse
4 import logging
5
6 from dvc.repo import Repo
7 from dvc.exceptions import DvcException
8 from .base import CmdBaseNoRepo, append_doc_link
9
10
11 logger = logging.getLogger(__name__)
12
13
14 class CmdGet(CmdBaseNoRepo):
15 def run(self):
16 try:
17 Repo.get(
18 self.args.url,
19 path=self.args.path,
20 out=self.args.out,
21 rev=self.args.rev,
22 )
23 return 0
24 except DvcException:
25 logger.exception(
26 "failed to get '{}' from '{}'".format(
27 self.args.path, self.args.url
28 )
29 )
30 return 1
31
32
33 def add_parser(subparsers, parent_parser):
34 GET_HELP = "Download data from DVC repository."
35 get_parser = subparsers.add_parser(
36 "get",
37 parents=[parent_parser],
38 description=append_doc_link(GET_HELP, "get"),
39 help=GET_HELP,
40 formatter_class=argparse.RawDescriptionHelpFormatter,
41 )
42 get_parser.add_argument(
43 "url", help="DVC repository URL to download data from."
44 )
45 get_parser.add_argument("path", help="Path to data within DVC repository.")
46 get_parser.add_argument(
47 "-o", "--out", nargs="?", help="Destination path to put data to."
48 )
49 get_parser.add_argument(
50 "--rev", nargs="?", help="DVC repository git revision."
51 )
52 get_parser.set_defaults(func=CmdGet)
53
[end of dvc/command/get.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/dvc/command/get.py b/dvc/command/get.py
--- a/dvc/command/get.py
+++ b/dvc/command/get.py
@@ -40,7 +40,7 @@
formatter_class=argparse.RawDescriptionHelpFormatter,
)
get_parser.add_argument(
- "url", help="DVC repository URL to download data from."
+ "url", help="URL of Git repository with DVC project to download from."
)
get_parser.add_argument("path", help="Path to data within DVC repository.")
get_parser.add_argument(
diff --git a/dvc/command/imp.py b/dvc/command/imp.py
--- a/dvc/command/imp.py
+++ b/dvc/command/imp.py
@@ -41,7 +41,9 @@
help=IMPORT_HELP,
formatter_class=argparse.RawTextHelpFormatter,
)
- import_parser.add_argument("url", help="DVC repository URL.")
+ import_parser.add_argument(
+ "url", help="URL of Git repository with DVC project to download from."
+ )
import_parser.add_argument(
"path", help="Path to data within DVC repository."
)
|
{"golden_diff": "diff --git a/dvc/command/get.py b/dvc/command/get.py\n--- a/dvc/command/get.py\n+++ b/dvc/command/get.py\n@@ -40,7 +40,7 @@\n formatter_class=argparse.RawDescriptionHelpFormatter,\n )\n get_parser.add_argument(\n- \"url\", help=\"DVC repository URL to download data from.\"\n+ \"url\", help=\"URL of Git repository with DVC project to download from.\"\n )\n get_parser.add_argument(\"path\", help=\"Path to data within DVC repository.\")\n get_parser.add_argument(\ndiff --git a/dvc/command/imp.py b/dvc/command/imp.py\n--- a/dvc/command/imp.py\n+++ b/dvc/command/imp.py\n@@ -41,7 +41,9 @@\n help=IMPORT_HELP,\n formatter_class=argparse.RawTextHelpFormatter,\n )\n- import_parser.add_argument(\"url\", help=\"DVC repository URL.\")\n+ import_parser.add_argument(\n+ \"url\", help=\"URL of Git repository with DVC project to download from.\"\n+ )\n import_parser.add_argument(\n \"path\", help=\"Path to data within DVC repository.\"\n )\n", "issue": "update `url` argument description in `get` and `import` help output\nto `URL of Git repository with DVC project to download from.`\r\n\r\nper https://github.com/iterative/dvc.org/pull/464#pullrequestreview-259776737\n", "before_files": [{"content": "from __future__ import unicode_literals\n\nimport argparse\nimport logging\n\nfrom dvc.exceptions import DvcException\nfrom dvc.command.base import CmdBase, append_doc_link\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass CmdImport(CmdBase):\n def run(self):\n try:\n self.repo.imp(\n self.args.url,\n self.args.path,\n out=self.args.out,\n rev=self.args.rev,\n )\n except DvcException:\n logger.exception(\n \"failed to import '{}' from '{}'.\".format(\n self.args.path, self.args.url\n )\n )\n return 1\n return 0\n\n\ndef add_parser(subparsers, parent_parser):\n IMPORT_HELP = (\n \"Download data from DVC repository and take it under DVC control.\"\n )\n\n import_parser = subparsers.add_parser(\n \"import\",\n parents=[parent_parser],\n description=append_doc_link(IMPORT_HELP, \"import\"),\n help=IMPORT_HELP,\n formatter_class=argparse.RawTextHelpFormatter,\n )\n import_parser.add_argument(\"url\", help=\"DVC repository URL.\")\n import_parser.add_argument(\n \"path\", help=\"Path to data within DVC repository.\"\n )\n import_parser.add_argument(\n \"-o\", \"--out\", nargs=\"?\", help=\"Destination path to put data to.\"\n )\n import_parser.add_argument(\n \"--rev\", nargs=\"?\", help=\"DVC repository git revision.\"\n )\n import_parser.set_defaults(func=CmdImport)\n", "path": "dvc/command/imp.py"}, {"content": "from __future__ import unicode_literals\n\nimport argparse\nimport logging\n\nfrom dvc.repo import Repo\nfrom dvc.exceptions import DvcException\nfrom .base import CmdBaseNoRepo, append_doc_link\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass CmdGet(CmdBaseNoRepo):\n def run(self):\n try:\n Repo.get(\n self.args.url,\n path=self.args.path,\n out=self.args.out,\n rev=self.args.rev,\n )\n return 0\n except DvcException:\n logger.exception(\n \"failed to get '{}' from '{}'\".format(\n self.args.path, self.args.url\n )\n )\n return 1\n\n\ndef add_parser(subparsers, parent_parser):\n GET_HELP = \"Download data from DVC repository.\"\n get_parser = subparsers.add_parser(\n \"get\",\n parents=[parent_parser],\n description=append_doc_link(GET_HELP, \"get\"),\n help=GET_HELP,\n formatter_class=argparse.RawDescriptionHelpFormatter,\n )\n get_parser.add_argument(\n \"url\", help=\"DVC repository URL to download data from.\"\n )\n get_parser.add_argument(\"path\", help=\"Path to data within 
DVC repository.\")\n get_parser.add_argument(\n \"-o\", \"--out\", nargs=\"?\", help=\"Destination path to put data to.\"\n )\n get_parser.add_argument(\n \"--rev\", nargs=\"?\", help=\"DVC repository git revision.\"\n )\n get_parser.set_defaults(func=CmdGet)\n", "path": "dvc/command/get.py"}]}
| 1,456 | 258 |
gh_patches_debug_27443
|
rasdani/github-patches
|
git_diff
|
sopel-irc__sopel-1292
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
wikipedia: may trigger other Sopel instances
Reported by `sloopy` in IRC. bot2-. uses prefix `.` and bot1-# uses `#`
```
<sloopy> .w Star Trek
<bot2-.> [WIKIPEDIA] Star Trek | "Star Trek is an American science fiction entertainment franchise created by Gene Roddenberry and owned by CBS and Paramount Pictures. The television series Star Trek: The Original Series, Star Trek: The Animated Series, Star Trek: The Next Generation, Star Trek: Deep Space Nine, Star Trek: Voyager, Star..." | https://en.wikipedia.org/wiki/Star_Trek
<bot1-#> [WIKIPEDIA] Star Trek | "Star Trek is an American science fiction entertainment franchise created by Gene Roddenberry and owned by CBS and Paramount Pictures. The television series Star Trek: The Original Series, Star Trek: The Animated Series, Star Trek: The Next Generation, Star Trek: Deep Space Nine, Star Trek: Voyager, Star..."
```
</issue>
<code>
[start of sopel/modules/wikipedia.py]
1 # coding=utf-8
2 # Copyright 2013 Elsie Powell - embolalia.com
3 # Licensed under the Eiffel Forum License 2.
4 from __future__ import unicode_literals, absolute_import, print_function, division
5
6 from sopel import tools
7 from sopel.config.types import StaticSection, ValidatedAttribute
8 from sopel.module import NOLIMIT, commands, example, rule
9 from requests import get
10 import re
11
12 import sys
13 if sys.version_info.major < 3:
14 from urllib import quote as _quote
15 from urlparse import unquote as _unquote
16 quote = lambda s: _quote(s.encode('utf-8')).decode('utf-8')
17 unquote = lambda s: _unquote(s.encode('utf-8')).decode('utf-8')
18 else:
19 from urllib.parse import quote, unquote
20
21 REDIRECT = re.compile(r'^REDIRECT (.*)')
22
23
24 class WikipediaSection(StaticSection):
25 default_lang = ValidatedAttribute('default_lang', default='en')
26 """The default language to find articles from."""
27 lang_per_channel = ValidatedAttribute('lang_per_channel')
28
29
30 def setup(bot):
31 bot.config.define_section('wikipedia', WikipediaSection)
32
33 regex = re.compile('([a-z]+).(wikipedia.org/wiki/)([^ ]+)')
34 if not bot.memory.contains('url_callbacks'):
35 bot.memory['url_callbacks'] = tools.SopelMemory()
36 bot.memory['url_callbacks'][regex] = mw_info
37
38
39 def configure(config):
40 config.define_section('wikipedia', WikipediaSection)
41 config.wikipedia.configure_setting(
42 'default_lang',
43 "Enter the default language to find articles from."
44 )
45
46
47 def mw_search(server, query, num):
48 """
49 Searches the specified MediaWiki server for the given query, and returns
50 the specified number of results.
51 """
52 search_url = ('http://%s/w/api.php?format=json&action=query'
53 '&list=search&srlimit=%d&srprop=timestamp&srwhat=text'
54 '&srsearch=') % (server, num)
55 search_url += query
56 query = get(search_url).json()
57 if 'query' in query:
58 query = query['query']['search']
59 return [r['title'] for r in query]
60 else:
61 return None
62
63
64 def say_snippet(bot, server, query, show_url=True):
65 page_name = query.replace('_', ' ')
66 query = quote(query.replace(' ', '_'))
67 try:
68 snippet = mw_snippet(server, query)
69 except KeyError:
70 if show_url:
71 bot.say("[WIKIPEDIA] Error fetching snippet for \"{}\".".format(page_name))
72 return
73 msg = '[WIKIPEDIA] {} | "{}"'.format(page_name, snippet)
74 if show_url:
75 msg = msg + ' | https://{}/wiki/{}'.format(server, query)
76 bot.say(msg)
77
78
79 def mw_snippet(server, query):
80 """
81 Retrives a snippet of the specified length from the given page on the given
82 server.
83 """
84 snippet_url = ('https://' + server + '/w/api.php?format=json'
85 '&action=query&prop=extracts&exintro&explaintext'
86 '&exchars=300&redirects&titles=')
87 snippet_url += query
88 snippet = get(snippet_url).json()
89 snippet = snippet['query']['pages']
90
91 # For some reason, the API gives the page *number* as the key, so we just
92 # grab the first page number in the results.
93 snippet = snippet[list(snippet.keys())[0]]
94
95 return snippet['extract']
96
97
98 @rule('.*\/([a-z]+\.wikipedia.org)\/wiki\/((?!File\:)[^ ]+).*')
99 def mw_info(bot, trigger, found_match=None):
100 """
101 Retrives a snippet of the specified length from the given page on the given
102 server.
103 """
104 match = found_match or trigger
105 say_snippet(bot, match.group(1), unquote(match.group(2)), show_url=False)
106
107
108 @commands('w', 'wiki', 'wik')
109 @example('.w San Francisco')
110 def wikipedia(bot, trigger):
111 lang = bot.config.wikipedia.default_lang
112
113 # change lang if channel has custom language set
114 if (trigger.sender and not trigger.sender.is_nick() and
115 bot.config.wikipedia.lang_per_channel):
116 customlang = re.search('(' + trigger.sender + '):(\w+)',
117 bot.config.wikipedia.lang_per_channel)
118 if customlang is not None:
119 lang = customlang.group(2)
120
121 if trigger.group(2) is None:
122 bot.reply("What do you want me to look up?")
123 return NOLIMIT
124
125 query = trigger.group(2)
126 args = re.search(r'^-([a-z]{2,12})\s(.*)', query)
127 if args is not None:
128 lang = args.group(1)
129 query = args.group(2)
130
131 if not query:
132 bot.reply('What do you want me to look up?')
133 return NOLIMIT
134 server = lang + '.wikipedia.org'
135 query = mw_search(server, query, 1)
136 if not query:
137 bot.reply("I can't find any results for that.")
138 return NOLIMIT
139 else:
140 query = query[0]
141 say_snippet(bot, server, query)
142
[end of sopel/modules/wikipedia.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/sopel/modules/wikipedia.py b/sopel/modules/wikipedia.py
--- a/sopel/modules/wikipedia.py
+++ b/sopel/modules/wikipedia.py
@@ -61,7 +61,7 @@
return None
-def say_snippet(bot, server, query, show_url=True):
+def say_snippet(bot, trigger, server, query, show_url=True):
page_name = query.replace('_', ' ')
query = quote(query.replace(' ', '_'))
try:
@@ -71,8 +71,11 @@
bot.say("[WIKIPEDIA] Error fetching snippet for \"{}\".".format(page_name))
return
msg = '[WIKIPEDIA] {} | "{}"'.format(page_name, snippet)
+ msg_url = msg + ' | https://{}/wiki/{}'.format(server, query)
+ if msg_url == trigger: # prevents triggering on another instance of Sopel
+ return
if show_url:
- msg = msg + ' | https://{}/wiki/{}'.format(server, query)
+ msg = msg_url
bot.say(msg)
@@ -102,7 +105,7 @@
server.
"""
match = found_match or trigger
- say_snippet(bot, match.group(1), unquote(match.group(2)), show_url=False)
+ say_snippet(bot, trigger, match.group(1), unquote(match.group(2)), show_url=False)
@commands('w', 'wiki', 'wik')
@@ -138,4 +141,4 @@
return NOLIMIT
else:
query = query[0]
- say_snippet(bot, server, query)
+ say_snippet(bot, trigger, server, query)
|
{"golden_diff": "diff --git a/sopel/modules/wikipedia.py b/sopel/modules/wikipedia.py\n--- a/sopel/modules/wikipedia.py\n+++ b/sopel/modules/wikipedia.py\n@@ -61,7 +61,7 @@\n return None\n \n \n-def say_snippet(bot, server, query, show_url=True):\n+def say_snippet(bot, trigger, server, query, show_url=True):\n page_name = query.replace('_', ' ')\n query = quote(query.replace(' ', '_'))\n try:\n@@ -71,8 +71,11 @@\n bot.say(\"[WIKIPEDIA] Error fetching snippet for \\\"{}\\\".\".format(page_name))\n return\n msg = '[WIKIPEDIA] {} | \"{}\"'.format(page_name, snippet)\n+ msg_url = msg + ' | https://{}/wiki/{}'.format(server, query)\n+ if msg_url == trigger: # prevents triggering on another instance of Sopel\n+ return\n if show_url:\n- msg = msg + ' | https://{}/wiki/{}'.format(server, query)\n+ msg = msg_url\n bot.say(msg)\n \n \n@@ -102,7 +105,7 @@\n server.\n \"\"\"\n match = found_match or trigger\n- say_snippet(bot, match.group(1), unquote(match.group(2)), show_url=False)\n+ say_snippet(bot, trigger, match.group(1), unquote(match.group(2)), show_url=False)\n \n \n @commands('w', 'wiki', 'wik')\n@@ -138,4 +141,4 @@\n return NOLIMIT\n else:\n query = query[0]\n- say_snippet(bot, server, query)\n+ say_snippet(bot, trigger, server, query)\n", "issue": "wikipedia: may trigger other Sopel instances\nReported by `sloopy` in IRC. bot2-. uses prefix `.` and bot1-# uses `#`\n\n```\n<sloopy> .w Star Trek\n<bot2-.> [WIKIPEDIA] Star Trek | \"Star Trek is an American science fiction entertainment franchise created by Gene Roddenberry and owned by CBS and Paramount Pictures. The television series Star Trek: The Original Series, Star Trek: The Animated Series, Star Trek: The Next Generation, Star Trek: Deep Space Nine, Star Trek: Voyager, Star...\" | https://en.wikipedia.org/wiki/Star_Trek\n<bot1-#> [WIKIPEDIA] Star Trek | \"Star Trek is an American science fiction entertainment franchise created by Gene Roddenberry and owned by CBS and Paramount Pictures. 
The television series Star Trek: The Original Series, Star Trek: The Animated Series, Star Trek: The Next Generation, Star Trek: Deep Space Nine, Star Trek: Voyager, Star...\"\n```\n\n", "before_files": [{"content": "# coding=utf-8\n# Copyright 2013 Elsie Powell - embolalia.com\n# Licensed under the Eiffel Forum License 2.\nfrom __future__ import unicode_literals, absolute_import, print_function, division\n\nfrom sopel import tools\nfrom sopel.config.types import StaticSection, ValidatedAttribute\nfrom sopel.module import NOLIMIT, commands, example, rule\nfrom requests import get\nimport re\n\nimport sys\nif sys.version_info.major < 3:\n from urllib import quote as _quote\n from urlparse import unquote as _unquote\n quote = lambda s: _quote(s.encode('utf-8')).decode('utf-8')\n unquote = lambda s: _unquote(s.encode('utf-8')).decode('utf-8')\nelse:\n from urllib.parse import quote, unquote\n\nREDIRECT = re.compile(r'^REDIRECT (.*)')\n\n\nclass WikipediaSection(StaticSection):\n default_lang = ValidatedAttribute('default_lang', default='en')\n \"\"\"The default language to find articles from.\"\"\"\n lang_per_channel = ValidatedAttribute('lang_per_channel')\n\n\ndef setup(bot):\n bot.config.define_section('wikipedia', WikipediaSection)\n\n regex = re.compile('([a-z]+).(wikipedia.org/wiki/)([^ ]+)')\n if not bot.memory.contains('url_callbacks'):\n bot.memory['url_callbacks'] = tools.SopelMemory()\n bot.memory['url_callbacks'][regex] = mw_info\n\n\ndef configure(config):\n config.define_section('wikipedia', WikipediaSection)\n config.wikipedia.configure_setting(\n 'default_lang',\n \"Enter the default language to find articles from.\"\n )\n\n\ndef mw_search(server, query, num):\n \"\"\"\n Searches the specified MediaWiki server for the given query, and returns\n the specified number of results.\n \"\"\"\n search_url = ('http://%s/w/api.php?format=json&action=query'\n '&list=search&srlimit=%d&srprop=timestamp&srwhat=text'\n '&srsearch=') % (server, num)\n search_url += query\n query = get(search_url).json()\n if 'query' in query:\n query = query['query']['search']\n return [r['title'] for r in query]\n else:\n return None\n\n\ndef say_snippet(bot, server, query, show_url=True):\n page_name = query.replace('_', ' ')\n query = quote(query.replace(' ', '_'))\n try:\n snippet = mw_snippet(server, query)\n except KeyError:\n if show_url:\n bot.say(\"[WIKIPEDIA] Error fetching snippet for \\\"{}\\\".\".format(page_name))\n return\n msg = '[WIKIPEDIA] {} | \"{}\"'.format(page_name, snippet)\n if show_url:\n msg = msg + ' | https://{}/wiki/{}'.format(server, query)\n bot.say(msg)\n\n\ndef mw_snippet(server, query):\n \"\"\"\n Retrives a snippet of the specified length from the given page on the given\n server.\n \"\"\"\n snippet_url = ('https://' + server + '/w/api.php?format=json'\n '&action=query&prop=extracts&exintro&explaintext'\n '&exchars=300&redirects&titles=')\n snippet_url += query\n snippet = get(snippet_url).json()\n snippet = snippet['query']['pages']\n\n # For some reason, the API gives the page *number* as the key, so we just\n # grab the first page number in the results.\n snippet = snippet[list(snippet.keys())[0]]\n\n return snippet['extract']\n\n\n@rule('.*\\/([a-z]+\\.wikipedia.org)\\/wiki\\/((?!File\\:)[^ ]+).*')\ndef mw_info(bot, trigger, found_match=None):\n \"\"\"\n Retrives a snippet of the specified length from the given page on the given\n server.\n \"\"\"\n match = found_match or trigger\n say_snippet(bot, match.group(1), unquote(match.group(2)), 
show_url=False)\n\n\n@commands('w', 'wiki', 'wik')\n@example('.w San Francisco')\ndef wikipedia(bot, trigger):\n lang = bot.config.wikipedia.default_lang\n\n # change lang if channel has custom language set\n if (trigger.sender and not trigger.sender.is_nick() and\n bot.config.wikipedia.lang_per_channel):\n customlang = re.search('(' + trigger.sender + '):(\\w+)',\n bot.config.wikipedia.lang_per_channel)\n if customlang is not None:\n lang = customlang.group(2)\n\n if trigger.group(2) is None:\n bot.reply(\"What do you want me to look up?\")\n return NOLIMIT\n\n query = trigger.group(2)\n args = re.search(r'^-([a-z]{2,12})\\s(.*)', query)\n if args is not None:\n lang = args.group(1)\n query = args.group(2)\n\n if not query:\n bot.reply('What do you want me to look up?')\n return NOLIMIT\n server = lang + '.wikipedia.org'\n query = mw_search(server, query, 1)\n if not query:\n bot.reply(\"I can't find any results for that.\")\n return NOLIMIT\n else:\n query = query[0]\n say_snippet(bot, server, query)\n", "path": "sopel/modules/wikipedia.py"}]}
| 2,242 | 389 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.