Dataset schema (column name, dtype, observed value range):

  problem_id           string   length 18-22
  source               string   1 distinct value
  task_type            string   1 distinct value
  in_source_id         string   length 13-58
  prompt               string   length 1.71k-18.9k
  golden_diff          string   length 145-5.13k
  verification_info    string   length 465-23.6k
  num_tokens_prompt    int64    556-4.1k
  num_tokens_diff      int64    47-1.02k

Each record below lists these nine fields in this order.
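For context, a minimal sketch of how these records could be loaded and inspected with the Hugging Face `datasets` library. The repo id is taken from the `source` column and the split name is a guess; both are assumptions, as is whether a config name is required. The `verification_info` field is a JSON string (visible in the rows below) that carries the issue text, the golden diff, and the pre-patch file contents.

```python
# Sketch only: repo id, split name, and the absence of a config name are assumptions.
import json

from datasets import load_dataset

ds = load_dataset("rasdani/github-patches", split="train")  # assumed Hub path and split

row = ds[0]  # each row is a dict keyed by the nine columns above
print(row["problem_id"], row["in_source_id"])
print(row["num_tokens_prompt"], row["num_tokens_diff"])

# verification_info is a JSON string; parse it to reach the issue text,
# the golden diff, and the "before_files" (pre-patch source files).
info = json.loads(row["verification_info"])
print(info["issue"][:200])
print(info["golden_diff"].splitlines()[0])
```

Parsing `verification_info` gives programmatic access to the same patch that appears in the `golden_diff` column, which is useful when verifying a model-generated patch against the reference.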
gh_patches_debug_2024
rasdani/github-patches
git_diff
facebookresearch__fairseq-214
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Size Mismatch in AdaptiveSoftmax when targets are not specified Following up on #212 , I'm updating `sequence_generator.py` to generate text from a pre-trained language model (initially trained with adaptive softmax). When computing log probabilities, and the targets are set to none, I receive a size mismatch exception in the line below, possibly because the dictionary size is smaller than the adaptive softmax cut-off: https://github.com/pytorch/fairseq/blob/388c520be21752cacb9fe3b1712038f32e0e9a5f/fairseq/modules/adaptive_softmax.py#L126 I imagine this could be solved by some sort of truncation to the output of tail[i].input </issue> <code> [start of fairseq/modules/adaptive_softmax.py] 1 # Copyright (c) 2017-present, Facebook, Inc. 2 # All rights reserved. 3 # 4 # This source code is licensed under the license found in the LICENSE file in 5 # the root directory of this source tree. An additional grant of patent rights 6 # can be found in the PATENTS file in the same directory. 7 8 9 import torch.nn.functional as F 10 from torch import nn 11 12 13 class AdaptiveSoftmax(nn.Module): 14 """ 15 This is an implementation of the efficient softmax approximation for 16 graphical processing units (GPU), described in the paper "Efficient softmax 17 approximation for GPUs" (http://arxiv.org/abs/1609.04309). 18 """ 19 20 def __init__(self, vocab_size, input_dim, cutoff, dropout): 21 super().__init__() 22 23 if vocab_size > cutoff[-1]: 24 cutoff = cutoff + [vocab_size] 25 26 output_dim = cutoff[0] + len(cutoff) - 1 27 28 self.vocab_size = vocab_size 29 self.cutoff = cutoff 30 self.dropout = dropout 31 32 self.lsm = nn.LogSoftmax(dim=1) 33 self.head = nn.Linear(input_dim, output_dim, bias=False) 34 self.tail = nn.ModuleList() 35 36 for i in range(len(cutoff) - 1): 37 self.tail.append( 38 nn.Sequential( 39 nn.Linear(input_dim, input_dim // 4 ** i, bias=False), 40 nn.Dropout(dropout), 41 nn.Linear(input_dim // 4 ** i, cutoff[i + 1] - cutoff[i], bias=False) 42 ) 43 ) 44 45 def init_weights(m): 46 if hasattr(m, 'weight'): 47 nn.init.xavier_uniform_(m.weight) 48 49 self.apply(init_weights) 50 51 def adapt_target(self, target): 52 """ 53 In order to be efficient, the AdaptiveSoftMax does not compute the 54 scores for all the word of the vocabulary for all the examples. It is 55 thus necessary to call the method adapt_target of the AdaptiveSoftMax 56 layer inside each forward pass. 
57 """ 58 59 target = target.view(-1) 60 new_target = [target.clone()] 61 target_idxs = [] 62 63 for i in range(len(self.cutoff) - 1): 64 mask = target.ge(self.cutoff[i]).mul(target.lt(self.cutoff[i + 1])) 65 new_target[0][mask] = self.cutoff[0] + i - 1 66 67 if mask.any(): 68 target_idxs.append(mask.nonzero().squeeze(1)) 69 new_target.append(target[mask].add(-self.cutoff[i])) 70 else: 71 target_idxs.append(None) 72 new_target.append(None) 73 74 return new_target, target_idxs 75 76 def forward(self, input, target): 77 """ 78 Args: 79 input: (b x t x d) 80 target: (b x t) 81 Returns: 82 2 lists: output for each cutoff section and new targets by cut off 83 """ 84 85 input = input.contiguous().view(-1, input.size(-1)) 86 input = F.dropout(input, p=self.dropout, training=self.training) 87 88 new_target, target_idxs = self.adapt_target(target) 89 output = [self.head(input)] 90 91 for i in range(len(target_idxs)): 92 if target_idxs[i] is not None: 93 output.append(self.tail[i](input.index_select(0, target_idxs[i]))) 94 else: 95 output.append(None) 96 97 return output, new_target 98 99 def get_log_prob(self, input, target): 100 """ 101 Computes the log probabilities for all the words of the vocabulary, 102 given a 2D tensor of hidden vectors. 103 """ 104 105 bsz, length, dim = input.size() 106 input = input.contiguous().view(-1, dim) 107 108 if target is not None: 109 _, target_idxs = self.adapt_target(target) 110 else: 111 target_idxs = None 112 113 head_y = self.head(input) 114 log_probs = head_y.new_zeros(input.size(0), self.vocab_size) 115 116 head_sz = self.cutoff[0] + len(self.tail) 117 log_probs[:, :head_sz] = self.lsm(head_y) 118 tail_priors = log_probs[:, self.cutoff[0] - 1: head_sz - 1].clone() 119 120 for i in range(len(self.tail)): 121 start = self.cutoff[i] 122 end = self.cutoff[i + 1] 123 124 if target_idxs is None: 125 tail_out = log_probs[:, start:end] 126 tail_out.copy_(self.tail[i](input)) 127 log_probs[:, start:end] = self.lsm(tail_out).add_(tail_priors[:, i, None]) 128 elif target_idxs[i] is not None: 129 idxs = target_idxs[i] 130 tail_out = log_probs[idxs, start:end] 131 tail_out.copy_(self.tail[i](input[idxs])) 132 log_probs[idxs, start:end] = self.lsm(tail_out).add_(tail_priors[idxs, i, None]) 133 134 log_probs = log_probs.view(bsz, length, -1) 135 return log_probs 136 [end of fairseq/modules/adaptive_softmax.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/fairseq/modules/adaptive_softmax.py b/fairseq/modules/adaptive_softmax.py --- a/fairseq/modules/adaptive_softmax.py +++ b/fairseq/modules/adaptive_softmax.py @@ -22,6 +22,9 @@ if vocab_size > cutoff[-1]: cutoff = cutoff + [vocab_size] + else: + assert vocab_size == cutoff[ + -1], 'cannot specify cutoff smaller than vocab size' output_dim = cutoff[0] + len(cutoff) - 1
{"golden_diff": "diff --git a/fairseq/modules/adaptive_softmax.py b/fairseq/modules/adaptive_softmax.py\n--- a/fairseq/modules/adaptive_softmax.py\n+++ b/fairseq/modules/adaptive_softmax.py\n@@ -22,6 +22,9 @@\n \n if vocab_size > cutoff[-1]:\n cutoff = cutoff + [vocab_size]\n+ else:\n+ assert vocab_size == cutoff[\n+ -1], 'cannot specify cutoff smaller than vocab size'\n \n output_dim = cutoff[0] + len(cutoff) - 1\n", "issue": "Size Mismatch in AdaptiveSoftmax when targets are not specified\nFollowing up on #212 , I'm updating `sequence_generator.py` to generate text from a pre-trained language model (initially trained with adaptive softmax). When computing log probabilities, and the targets are set to none, I receive a size mismatch exception in the line below, possibly because the dictionary size is smaller than the adaptive softmax cut-off: \r\n\r\nhttps://github.com/pytorch/fairseq/blob/388c520be21752cacb9fe3b1712038f32e0e9a5f/fairseq/modules/adaptive_softmax.py#L126\r\n\r\nI imagine this could be solved by some sort of truncation to the output of tail[i].input\n", "before_files": [{"content": "# Copyright (c) 2017-present, Facebook, Inc.\n# All rights reserved.\n#\n# This source code is licensed under the license found in the LICENSE file in\n# the root directory of this source tree. An additional grant of patent rights\n# can be found in the PATENTS file in the same directory.\n\n\nimport torch.nn.functional as F\nfrom torch import nn\n\n\nclass AdaptiveSoftmax(nn.Module):\n \"\"\"\n This is an implementation of the efficient softmax approximation for\n graphical processing units (GPU), described in the paper \"Efficient softmax\n approximation for GPUs\" (http://arxiv.org/abs/1609.04309).\n \"\"\"\n\n def __init__(self, vocab_size, input_dim, cutoff, dropout):\n super().__init__()\n\n if vocab_size > cutoff[-1]:\n cutoff = cutoff + [vocab_size]\n\n output_dim = cutoff[0] + len(cutoff) - 1\n\n self.vocab_size = vocab_size\n self.cutoff = cutoff\n self.dropout = dropout\n\n self.lsm = nn.LogSoftmax(dim=1)\n self.head = nn.Linear(input_dim, output_dim, bias=False)\n self.tail = nn.ModuleList()\n\n for i in range(len(cutoff) - 1):\n self.tail.append(\n nn.Sequential(\n nn.Linear(input_dim, input_dim // 4 ** i, bias=False),\n nn.Dropout(dropout),\n nn.Linear(input_dim // 4 ** i, cutoff[i + 1] - cutoff[i], bias=False)\n )\n )\n\n def init_weights(m):\n if hasattr(m, 'weight'):\n nn.init.xavier_uniform_(m.weight)\n\n self.apply(init_weights)\n\n def adapt_target(self, target):\n \"\"\"\n In order to be efficient, the AdaptiveSoftMax does not compute the\n scores for all the word of the vocabulary for all the examples. 
It is\n thus necessary to call the method adapt_target of the AdaptiveSoftMax\n layer inside each forward pass.\n \"\"\"\n\n target = target.view(-1)\n new_target = [target.clone()]\n target_idxs = []\n\n for i in range(len(self.cutoff) - 1):\n mask = target.ge(self.cutoff[i]).mul(target.lt(self.cutoff[i + 1]))\n new_target[0][mask] = self.cutoff[0] + i - 1\n\n if mask.any():\n target_idxs.append(mask.nonzero().squeeze(1))\n new_target.append(target[mask].add(-self.cutoff[i]))\n else:\n target_idxs.append(None)\n new_target.append(None)\n\n return new_target, target_idxs\n\n def forward(self, input, target):\n \"\"\"\n Args:\n input: (b x t x d)\n target: (b x t)\n Returns:\n 2 lists: output for each cutoff section and new targets by cut off\n \"\"\"\n\n input = input.contiguous().view(-1, input.size(-1))\n input = F.dropout(input, p=self.dropout, training=self.training)\n\n new_target, target_idxs = self.adapt_target(target)\n output = [self.head(input)]\n\n for i in range(len(target_idxs)):\n if target_idxs[i] is not None:\n output.append(self.tail[i](input.index_select(0, target_idxs[i])))\n else:\n output.append(None)\n\n return output, new_target\n\n def get_log_prob(self, input, target):\n \"\"\"\n Computes the log probabilities for all the words of the vocabulary,\n given a 2D tensor of hidden vectors.\n \"\"\"\n\n bsz, length, dim = input.size()\n input = input.contiguous().view(-1, dim)\n\n if target is not None:\n _, target_idxs = self.adapt_target(target)\n else:\n target_idxs = None\n\n head_y = self.head(input)\n log_probs = head_y.new_zeros(input.size(0), self.vocab_size)\n\n head_sz = self.cutoff[0] + len(self.tail)\n log_probs[:, :head_sz] = self.lsm(head_y)\n tail_priors = log_probs[:, self.cutoff[0] - 1: head_sz - 1].clone()\n\n for i in range(len(self.tail)):\n start = self.cutoff[i]\n end = self.cutoff[i + 1]\n\n if target_idxs is None:\n tail_out = log_probs[:, start:end]\n tail_out.copy_(self.tail[i](input))\n log_probs[:, start:end] = self.lsm(tail_out).add_(tail_priors[:, i, None])\n elif target_idxs[i] is not None:\n idxs = target_idxs[i]\n tail_out = log_probs[idxs, start:end]\n tail_out.copy_(self.tail[i](input[idxs]))\n log_probs[idxs, start:end] = self.lsm(tail_out).add_(tail_priors[idxs, i, None])\n\n log_probs = log_probs.view(bsz, length, -1)\n return log_probs\n", "path": "fairseq/modules/adaptive_softmax.py"}]}
2,096
119
gh_patches_debug_34863
rasdani/github-patches
git_diff
microsoft__lisa-836
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Ubuntu 20.04 - platform.dist() is deprecated since Python 3.5 and removed in Python 3.8 Affected distro - ubuntu 20.04 (use python 3.8) Affected case - WALA-VERIFY-VERBOSE-ENABLED-LOGS Use distro.linux_distribution(full_distribution_name=False) instead </issue> <code> [start of Testscripts/Linux/WALA-VERIFY-VERBOSE-ENABLED-LOGS.py] 1 #!/usr/bin/env python 2 # Copyright (c) Microsoft Corporation. All rights reserved. 3 # Licensed under the Apache License. 4 from azuremodules import * 5 6 import argparse 7 import os 8 import platform 9 import time 10 11 parser = argparse.ArgumentParser() 12 13 file_path = os.path.dirname(os.path.realpath(__file__)) 14 constants_path = os.path.join(file_path, "constants.sh") 15 params = GetParams(constants_path) 16 passwd = params["PASSWORD"] 17 18 distro = platform.dist() 19 20 21 def RunTest(): 22 UpdateState("TestRunning") 23 if(distro[0] == "CoreOS"): 24 versionOutPut = Run("waagent --version") 25 else: 26 output = Run("pgrep -fa python3.*waagent") 27 if ("python3" in output) : 28 versionOutPut = Run("/usr/bin/python3 /usr/sbin/waagent --version") 29 else : 30 versionOutPut = Run("/usr/sbin/waagent --version") 31 32 RunLog.info("Checking log waagent.log...") 33 if("2.0." in versionOutPut): 34 output = Run("grep -i 'iptables -I INPUT -p udp --dport' /var/log/waagent* | wc -l | tr -d '\n'") 35 RunLog.info("agent version is 2.0") 36 else: 37 output = Run("grep -i 'VERBOSE' /var/log/waagent* | wc -l | tr -d '\n'") 38 RunLog.info("agent version > 2.0") 39 40 if not (output == "0") : 41 RunLog.info('The log file contains the verbose logs') 42 ResultLog.info('PASS') 43 UpdateState("TestCompleted") 44 else : 45 RunLog.error('Verify waagent.log fail, the log file does not contain the verbose logs') 46 ResultLog.error('FAIL') 47 UpdateState("TestCompleted") 48 49 50 def Restartwaagent(): 51 if (distro[0] == "CoreOS"): 52 Run("echo '"+passwd+"' | sudo -S sed -i s/Logs.Verbose=n/Logs.Verbose=y/g /usr/share/oem/waagent.conf") 53 elif (DetectDistro()[0] == 'clear-linux-os'): 54 Run("echo '"+passwd+"' | sudo -S sed -i s/Logs.Verbose=n/Logs.Verbose=y/g \ 55 /usr/share/defaults/waagent/waagent.conf") 56 else: 57 Run("echo '"+passwd+"' | sudo -S sed -i s/Logs.Verbose=n/Logs.Verbose=y/g /etc/waagent.conf") 58 RunLog.info("Restart waagent service...") 59 result = Run("echo '"+passwd+"' | sudo -S find / -name systemctl |wc -l | tr -d '\n'") 60 if (distro[0] == "Ubuntu") or (distro[0] == "debian"): 61 Run("echo '"+passwd+"' | sudo -S service walinuxagent restart") 62 else: 63 if (result == "0") : 64 os.system("echo '"+passwd+"' | sudo -S service waagent restart") 65 else: 66 os.system("echo '"+passwd+"' | sudo -S systemctl restart waagent") 67 time.sleep(60) 68 69 Restartwaagent() 70 RunTest() 71 [end of Testscripts/Linux/WALA-VERIFY-VERBOSE-ENABLED-LOGS.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/Testscripts/Linux/WALA-VERIFY-VERBOSE-ENABLED-LOGS.py b/Testscripts/Linux/WALA-VERIFY-VERBOSE-ENABLED-LOGS.py --- a/Testscripts/Linux/WALA-VERIFY-VERBOSE-ENABLED-LOGS.py +++ b/Testscripts/Linux/WALA-VERIFY-VERBOSE-ENABLED-LOGS.py @@ -7,6 +7,7 @@ import os import platform import time +import sys parser = argparse.ArgumentParser() @@ -14,13 +15,16 @@ constants_path = os.path.join(file_path, "constants.sh") params = GetParams(constants_path) passwd = params["PASSWORD"] - -distro = platform.dist() +if sys.version_info[0] >= 3: + import distro + distro = distro.linux_distribution(full_distribution_name=False) +else: + distro = platform.dist() def RunTest(): UpdateState("TestRunning") - if(distro[0] == "CoreOS"): + if(distro[0].upper() == "COREOS"): versionOutPut = Run("waagent --version") else: output = Run("pgrep -fa python3.*waagent") @@ -48,7 +52,7 @@ def Restartwaagent(): - if (distro[0] == "CoreOS"): + if (distro[0].upper() == "COREOS"): Run("echo '"+passwd+"' | sudo -S sed -i s/Logs.Verbose=n/Logs.Verbose=y/g /usr/share/oem/waagent.conf") elif (DetectDistro()[0] == 'clear-linux-os'): Run("echo '"+passwd+"' | sudo -S sed -i s/Logs.Verbose=n/Logs.Verbose=y/g \ @@ -57,7 +61,7 @@ Run("echo '"+passwd+"' | sudo -S sed -i s/Logs.Verbose=n/Logs.Verbose=y/g /etc/waagent.conf") RunLog.info("Restart waagent service...") result = Run("echo '"+passwd+"' | sudo -S find / -name systemctl |wc -l | tr -d '\n'") - if (distro[0] == "Ubuntu") or (distro[0] == "debian"): + if (distro[0].upper() == "UBUNTU") or (distro[0].upper() == "DEBIAN"): Run("echo '"+passwd+"' | sudo -S service walinuxagent restart") else: if (result == "0") :
{"golden_diff": "diff --git a/Testscripts/Linux/WALA-VERIFY-VERBOSE-ENABLED-LOGS.py b/Testscripts/Linux/WALA-VERIFY-VERBOSE-ENABLED-LOGS.py\n--- a/Testscripts/Linux/WALA-VERIFY-VERBOSE-ENABLED-LOGS.py\n+++ b/Testscripts/Linux/WALA-VERIFY-VERBOSE-ENABLED-LOGS.py\n@@ -7,6 +7,7 @@\n import os\n import platform\n import time\n+import sys\n \n parser = argparse.ArgumentParser()\n \n@@ -14,13 +15,16 @@\n constants_path = os.path.join(file_path, \"constants.sh\")\n params = GetParams(constants_path)\n passwd = params[\"PASSWORD\"]\n-\n-distro = platform.dist()\n+if sys.version_info[0] >= 3:\n+ import distro\n+ distro = distro.linux_distribution(full_distribution_name=False)\n+else:\n+ distro = platform.dist()\n \n \n def RunTest():\n UpdateState(\"TestRunning\")\n- if(distro[0] == \"CoreOS\"):\n+ if(distro[0].upper() == \"COREOS\"):\n versionOutPut = Run(\"waagent --version\")\n else:\n output = Run(\"pgrep -fa python3.*waagent\")\n@@ -48,7 +52,7 @@\n \n \n def Restartwaagent():\n- if (distro[0] == \"CoreOS\"):\n+ if (distro[0].upper() == \"COREOS\"):\n Run(\"echo '\"+passwd+\"' | sudo -S sed -i s/Logs.Verbose=n/Logs.Verbose=y/g /usr/share/oem/waagent.conf\")\n elif (DetectDistro()[0] == 'clear-linux-os'):\n Run(\"echo '\"+passwd+\"' | sudo -S sed -i s/Logs.Verbose=n/Logs.Verbose=y/g \\\n@@ -57,7 +61,7 @@\n Run(\"echo '\"+passwd+\"' | sudo -S sed -i s/Logs.Verbose=n/Logs.Verbose=y/g /etc/waagent.conf\")\n RunLog.info(\"Restart waagent service...\")\n result = Run(\"echo '\"+passwd+\"' | sudo -S find / -name systemctl |wc -l | tr -d '\\n'\")\n- if (distro[0] == \"Ubuntu\") or (distro[0] == \"debian\"):\n+ if (distro[0].upper() == \"UBUNTU\") or (distro[0].upper() == \"DEBIAN\"):\n Run(\"echo '\"+passwd+\"' | sudo -S service walinuxagent restart\")\n else:\n if (result == \"0\") :\n", "issue": "Ubuntu 20.04 - platform.dist() is deprecated since Python 3.5 and removed in Python 3.8\nAffected distro - ubuntu 20.04 (use python 3.8)\r\nAffected case - WALA-VERIFY-VERBOSE-ENABLED-LOGS\r\nUse distro.linux_distribution(full_distribution_name=False) instead\n", "before_files": [{"content": "#!/usr/bin/env python\n# Copyright (c) Microsoft Corporation. 
All rights reserved.\n# Licensed under the Apache License.\nfrom azuremodules import *\n\nimport argparse\nimport os\nimport platform\nimport time\n\nparser = argparse.ArgumentParser()\n\nfile_path = os.path.dirname(os.path.realpath(__file__))\nconstants_path = os.path.join(file_path, \"constants.sh\")\nparams = GetParams(constants_path)\npasswd = params[\"PASSWORD\"]\n\ndistro = platform.dist()\n\n\ndef RunTest():\n UpdateState(\"TestRunning\")\n if(distro[0] == \"CoreOS\"):\n versionOutPut = Run(\"waagent --version\")\n else:\n output = Run(\"pgrep -fa python3.*waagent\")\n if (\"python3\" in output) :\n versionOutPut = Run(\"/usr/bin/python3 /usr/sbin/waagent --version\")\n else :\n versionOutPut = Run(\"/usr/sbin/waagent --version\")\n\n RunLog.info(\"Checking log waagent.log...\")\n if(\"2.0.\" in versionOutPut):\n output = Run(\"grep -i 'iptables -I INPUT -p udp --dport' /var/log/waagent* | wc -l | tr -d '\\n'\")\n RunLog.info(\"agent version is 2.0\")\n else:\n output = Run(\"grep -i 'VERBOSE' /var/log/waagent* | wc -l | tr -d '\\n'\")\n RunLog.info(\"agent version > 2.0\")\n\n if not (output == \"0\") :\n RunLog.info('The log file contains the verbose logs')\n ResultLog.info('PASS')\n UpdateState(\"TestCompleted\")\n else :\n RunLog.error('Verify waagent.log fail, the log file does not contain the verbose logs')\n ResultLog.error('FAIL')\n UpdateState(\"TestCompleted\")\n\n\ndef Restartwaagent():\n if (distro[0] == \"CoreOS\"):\n Run(\"echo '\"+passwd+\"' | sudo -S sed -i s/Logs.Verbose=n/Logs.Verbose=y/g /usr/share/oem/waagent.conf\")\n elif (DetectDistro()[0] == 'clear-linux-os'):\n Run(\"echo '\"+passwd+\"' | sudo -S sed -i s/Logs.Verbose=n/Logs.Verbose=y/g \\\n /usr/share/defaults/waagent/waagent.conf\")\n else:\n Run(\"echo '\"+passwd+\"' | sudo -S sed -i s/Logs.Verbose=n/Logs.Verbose=y/g /etc/waagent.conf\")\n RunLog.info(\"Restart waagent service...\")\n result = Run(\"echo '\"+passwd+\"' | sudo -S find / -name systemctl |wc -l | tr -d '\\n'\")\n if (distro[0] == \"Ubuntu\") or (distro[0] == \"debian\"):\n Run(\"echo '\"+passwd+\"' | sudo -S service walinuxagent restart\")\n else:\n if (result == \"0\") :\n os.system(\"echo '\"+passwd+\"' | sudo -S service waagent restart\")\n else:\n os.system(\"echo '\"+passwd+\"' | sudo -S systemctl restart waagent\")\n time.sleep(60)\n\nRestartwaagent()\nRunTest()\n", "path": "Testscripts/Linux/WALA-VERIFY-VERBOSE-ENABLED-LOGS.py"}]}
1,456
564
gh_patches_debug_40646
rasdani/github-patches
git_diff
CONP-PCNO__conp-dataset-675
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Auto-archive script not running on crawled dataset? All OSF or Zenodo datasets are not being archived when running the script `auto_archive.py` with option `--all`. The following information systematically shows: ``` SKIPPED: archive not needed for projects/<dataset_name> ``` but the dataset size is definitely below 20GB. List of affected datasets: - CFMM_7T__MP2RAGE_T1_mapping_version-2020-10-02T12:55:03.086599.tar.gz - Comparing_Perturbation_Modes_for_Evaluating_Instabilities_in_Neuroimaging__Processed_NKI_RS_Subset__08_2019__version-None.tar.gz - Intracellular_Recordings_of_Murine_Neocortical_Neurons_version-2020-04-18T19:34:10.458118.tar.gz - Learning_Naturalistic_Structure__Processed_fMRI_dataset_version-0.1.0.tar.gz - MRI_and_unbiased_averages_of_wild_muskrats__Ondatra_zibethicus__and_red_squirrels__Tamiasciurus_hudsonicus__version-1.0.tar.gz: - Multi-model_functionalization_of_disease-associated_PTEN_missense_mutations_version-1.0.tar.gz - Quantifying_Neural_Cognitive_Relationships_Across_the_Brain_version-2020-11-11T02:30:59.429493.tar.gz - Synthetic_Animated_Mouse__SAM___University_of_British_Columbia__Datasets_and_3D_models_version-2021-04-09T14:12:32.548954.tar.gz: - Systematic_phenomics_analysis_of_autism-associated_genes_version-3.2.tar.gz - VFA_T1_mapping___RTHawk__open__vs_Siemens__commercial__version-v1.0.tar.gz Auto-archive script not running on crawled dataset? All OSF or Zenodo datasets are not being archived when running the script `auto_archive.py` with option `--all`. The following information systematically shows: ``` SKIPPED: archive not needed for projects/<dataset_name> ``` but the dataset size is definitely below 20GB. 
List of affected datasets: - CFMM_7T__MP2RAGE_T1_mapping_version-2020-10-02T12:55:03.086599.tar.gz - Comparing_Perturbation_Modes_for_Evaluating_Instabilities_in_Neuroimaging__Processed_NKI_RS_Subset__08_2019__version-None.tar.gz - Intracellular_Recordings_of_Murine_Neocortical_Neurons_version-2020-04-18T19:34:10.458118.tar.gz - Learning_Naturalistic_Structure__Processed_fMRI_dataset_version-0.1.0.tar.gz - MRI_and_unbiased_averages_of_wild_muskrats__Ondatra_zibethicus__and_red_squirrels__Tamiasciurus_hudsonicus__version-1.0.tar.gz: - Multi-model_functionalization_of_disease-associated_PTEN_missense_mutations_version-1.0.tar.gz - Quantifying_Neural_Cognitive_Relationships_Across_the_Brain_version-2020-11-11T02:30:59.429493.tar.gz - Synthetic_Animated_Mouse__SAM___University_of_British_Columbia__Datasets_and_3D_models_version-2021-04-09T14:12:32.548954.tar.gz: - Systematic_phenomics_analysis_of_autism-associated_genes_version-3.2.tar.gz - VFA_T1_mapping___RTHawk__open__vs_Siemens__commercial__version-v1.0.tar.gz </issue> <code> [start of scripts/auto_archive.py] 1 from __future__ import annotations 2 3 import argparse 4 import json 5 import logging 6 import os 7 from datetime import datetime 8 from datetime import timedelta 9 10 import git 11 import humanfriendly 12 from datalad.plugin import export_archive 13 from github import Github 14 15 from scripts.datalad_utils import get_dataset 16 from scripts.datalad_utils import install_dataset 17 from scripts.log import get_logger 18 from tests.functions import get_proper_submodules 19 20 21 logger = get_logger( 22 "CONP-Archive", filename="conp-archive.log", file_level=logging.DEBUG 23 ) 24 25 26 class ArchiveFailed(Exception): 27 pass 28 29 30 def parse_args(): 31 example_text = """Example: 32 PYTHONPATH=$PWD python scripts/auto_archive.py <out_dir> 33 """ 34 35 parser = argparse.ArgumentParser( 36 description="Archiver for the CONP-datasets.", 37 epilog=example_text, 38 formatter_class=argparse.RawDescriptionHelpFormatter, 39 ) 40 41 parser.add_argument( 42 "--out_dir", "-o", type=str, help="Path to store the archived datasets." 43 ) 44 parser.add_argument( 45 "--max-size", 46 type=float, 47 default=20.0, 48 help="Maximum size of dataset to archive in GB.", 49 ) 50 group = parser.add_mutually_exclusive_group() 51 group.add_argument( 52 "--all", 53 action="store_true", 54 help="Archive all the datasets rather than those modified since the last time.", 55 ) 56 group.add_argument( 57 "--dataset", 58 "-d", 59 type=str, 60 nargs="+", 61 help="Restrict the archive to the specified dataset paths.", 62 ) 63 64 return parser.parse_args() 65 66 67 def get_all_datasets(): 68 return {os.path.basename(submodule.path) for submodule in git.Repo().submodules} 69 70 71 def get_modified_datasets( 72 *, 73 since: datetime | None = None, 74 until: datetime | None = None, 75 ) -> set[str]: 76 """Retrieve the modified datasets. 77 78 Requires to set GITHUB_ACCESS_TOKEN as an environment variable. 79 80 Parameters 81 ---------- 82 since : Optional[datetime], optional 83 Start date from which commits are retrieved, by default date of the previous crawl, if never crawled set to 84 one week ago. 85 until : Optional[datetime], optional 86 Latest date at which commit are retrieved, by default `now` 87 88 Returns 89 ------- 90 set[str] 91 Path of the dataset folders. 
92 """ 93 now = datetime.now().astimezone() 94 95 if since is None: 96 if os.path.exists(".conp-archive"): 97 with open(".conp-archive") as fin: 98 since = datetime.fromisoformat(fin.read()) 99 else: 100 since = now - timedelta(weeks=1) 101 102 if until is None: 103 until = now 104 105 try: 106 gh_access_token = os.environ.get("GITHUB_ACCESS_TOKEN", None) 107 if gh_access_token is None: 108 raise OSError("GITHUB_ACCESS_TOKEN is not defined.") 109 110 except OSError as e: 111 # The program is not stopped since GitHub allows 60 query per hours with 112 # authentication. However the program will most likely fail. 113 logger.critical(e) 114 115 logger.info(f"Retrieving modified datasets since {since}") 116 repo = Github(gh_access_token).get_repo("CONP-PCNO/conp-dataset") 117 commits = repo.get_commits(since=since, until=until) 118 119 with open(".conp-archive", "w") as fout: 120 fout.write(now.isoformat()) 121 122 modified_datasets: set[str] = { 123 os.path.basename(file_.filename) 124 for commit in commits 125 for file_ in commit.files 126 if file_.filename.startswith("projects/") 127 } 128 129 return modified_datasets 130 131 132 def archive_dataset(dataset_path: str, out_dir: str, version: str) -> None: 133 os.makedirs(os.path.dirname(out_dir), mode=0o755, exist_ok=True) 134 out_filename = f"{out_dir}_version-{version}.tar.gz" 135 logger.info(f"Archiving dataset: {dataset_path} to {out_filename}") 136 137 cwd = os.getcwd() 138 try: 139 datalad_archiver = export_archive.ExportArchive() 140 dataset_repo = git.Repo(dataset_path) 141 142 with open(os.path.join(dataset_path, ".git.log"), "w") as fout: 143 fout.write(dataset_repo.git.log(pretty="format:%H %s")) 144 145 # Export is performed from the dataset root. 146 # This is to avoid failure when a submodule is not downloaded; e.g. for parent 147 # dataset in dataset derivative. 148 os.chdir(os.path.join(cwd, dataset_path)) 149 datalad_archiver(".", filename=out_filename) 150 151 except Exception as e: 152 raise ArchiveFailed( 153 f"FAILURE: could not archive dataset: {dataset_path} to {out_filename}\n{e}" 154 ) 155 finally: 156 os.chdir(cwd) 157 158 159 if __name__ == "__main__": 160 args = parse_args() 161 162 # Only archive the datasets available locally. 
163 datasets = get_all_datasets() 164 if args.dataset: 165 target_datasets = {os.path.basename(os.path.normpath(d)) for d in args.dataset} 166 logger.warning( 167 f"The following dataset were not found locally: {target_datasets - datasets}" 168 ) 169 datasets &= target_datasets 170 171 elif not args.all: 172 modified_datasets = get_modified_datasets() 173 logger.warning( 174 f"The following dataset were not found locally: {modified_datasets - datasets}" 175 ) 176 datasets &= modified_datasets 177 178 for dataset_name in datasets: 179 dataset = "projects/" + dataset_name 180 181 try: 182 logger.info(f"Installing dataset: {dataset}") 183 install_dataset(dataset) 184 185 is_public = False 186 version = "" 187 dataset_size = 0.0 188 189 with open(os.path.join(dataset, "DATS.json")) as fin: 190 metadata = json.load(fin) 191 192 is_public = metadata.get("privacy") == "open" 193 version = metadata.get("version") 194 195 for distribution in metadata.get("distributions", list()): 196 dataset_size += humanfriendly.parse_size( 197 f"{distribution['size']} {distribution['unit']['value']}", 198 ) 199 dataset_size //= 1024 ** 3 # Convert to GB 200 201 # Only archive public dataset less than 20GB 202 if dataset_size <= args.max_size and is_public: 203 logger.info(f"Downloading dataset: {dataset}") 204 get_dataset(dataset) 205 for submodule in get_proper_submodules(dataset): 206 get_dataset(submodule) 207 208 archive_dataset( 209 dataset, 210 out_dir=os.path.join(args.out_dir, dataset_name), 211 version=version, 212 ) 213 logger.info(f"SUCCESS: archive created for {dataset}") 214 215 else: 216 logger.info(f"SKIPPED: archive not needed for {dataset}") 217 218 except Exception as e: 219 # TODO implement notification system. 220 # This will alert when a dataset fails the archiving process. 221 logger.exception( 222 f"FAILURE: could not archive dataset: {dataset} to {args.out_dir}.tar.gz\n{e}" 223 ) 224 225 logger.info("Done archiving the datasets.") 226 [end of scripts/auto_archive.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/scripts/auto_archive.py b/scripts/auto_archive.py --- a/scripts/auto_archive.py +++ b/scripts/auto_archive.py @@ -64,8 +64,11 @@ return parser.parse_args() -def get_all_datasets(): - return {os.path.basename(submodule.path) for submodule in git.Repo().submodules} +def get_datasets_path(): + return { + os.path.basename(submodule.path): submodule.path + for submodule in git.Repo().submodules + } def get_modified_datasets( @@ -129,9 +132,11 @@ return modified_datasets -def archive_dataset(dataset_path: str, out_dir: str, version: str) -> None: +def archive_dataset( + dataset_path: str, out_dir: str, archive_name: str, version: str +) -> None: os.makedirs(os.path.dirname(out_dir), mode=0o755, exist_ok=True) - out_filename = f"{out_dir}_version-{version}.tar.gz" + out_filename = f"{archive_name}_version-{version}.tar.gz" logger.info(f"Archiving dataset: {dataset_path} to {out_filename}") cwd = os.getcwd() @@ -160,7 +165,8 @@ args = parse_args() # Only archive the datasets available locally. - datasets = get_all_datasets() + datasets_path = get_datasets_path() + datasets = datasets_path.keys() if args.dataset: target_datasets = {os.path.basename(os.path.normpath(d)) for d in args.dataset} logger.warning( @@ -176,7 +182,7 @@ datasets &= modified_datasets for dataset_name in datasets: - dataset = "projects/" + dataset_name + dataset = datasets_path[dataset_name] try: logger.info(f"Installing dataset: {dataset}") @@ -189,7 +195,13 @@ with open(os.path.join(dataset, "DATS.json")) as fin: metadata = json.load(fin) - is_public = metadata.get("privacy") == "open" + is_public = ( + metadata.get("distributions", [{}])[0] + .get("access", {}) + .get("authorizations", [{}])[0] + .get("value") + == "public" + ) version = metadata.get("version") for distribution in metadata.get("distributions", list()): @@ -205,9 +217,13 @@ for submodule in get_proper_submodules(dataset): get_dataset(submodule) + archive_name = "__".join( + os.path.relpath(dataset, "projects").split("/") + ) archive_dataset( dataset, out_dir=os.path.join(args.out_dir, dataset_name), + archive_name=archive_name, version=version, ) logger.info(f"SUCCESS: archive created for {dataset}")
{"golden_diff": "diff --git a/scripts/auto_archive.py b/scripts/auto_archive.py\n--- a/scripts/auto_archive.py\n+++ b/scripts/auto_archive.py\n@@ -64,8 +64,11 @@\n return parser.parse_args()\n \n \n-def get_all_datasets():\n- return {os.path.basename(submodule.path) for submodule in git.Repo().submodules}\n+def get_datasets_path():\n+ return {\n+ os.path.basename(submodule.path): submodule.path\n+ for submodule in git.Repo().submodules\n+ }\n \n \n def get_modified_datasets(\n@@ -129,9 +132,11 @@\n return modified_datasets\n \n \n-def archive_dataset(dataset_path: str, out_dir: str, version: str) -> None:\n+def archive_dataset(\n+ dataset_path: str, out_dir: str, archive_name: str, version: str\n+) -> None:\n os.makedirs(os.path.dirname(out_dir), mode=0o755, exist_ok=True)\n- out_filename = f\"{out_dir}_version-{version}.tar.gz\"\n+ out_filename = f\"{archive_name}_version-{version}.tar.gz\"\n logger.info(f\"Archiving dataset: {dataset_path} to {out_filename}\")\n \n cwd = os.getcwd()\n@@ -160,7 +165,8 @@\n args = parse_args()\n \n # Only archive the datasets available locally.\n- datasets = get_all_datasets()\n+ datasets_path = get_datasets_path()\n+ datasets = datasets_path.keys()\n if args.dataset:\n target_datasets = {os.path.basename(os.path.normpath(d)) for d in args.dataset}\n logger.warning(\n@@ -176,7 +182,7 @@\n datasets &= modified_datasets\n \n for dataset_name in datasets:\n- dataset = \"projects/\" + dataset_name\n+ dataset = datasets_path[dataset_name]\n \n try:\n logger.info(f\"Installing dataset: {dataset}\")\n@@ -189,7 +195,13 @@\n with open(os.path.join(dataset, \"DATS.json\")) as fin:\n metadata = json.load(fin)\n \n- is_public = metadata.get(\"privacy\") == \"open\"\n+ is_public = (\n+ metadata.get(\"distributions\", [{}])[0]\n+ .get(\"access\", {})\n+ .get(\"authorizations\", [{}])[0]\n+ .get(\"value\")\n+ == \"public\"\n+ )\n version = metadata.get(\"version\")\n \n for distribution in metadata.get(\"distributions\", list()):\n@@ -205,9 +217,13 @@\n for submodule in get_proper_submodules(dataset):\n get_dataset(submodule)\n \n+ archive_name = \"__\".join(\n+ os.path.relpath(dataset, \"projects\").split(\"/\")\n+ )\n archive_dataset(\n dataset,\n out_dir=os.path.join(args.out_dir, dataset_name),\n+ archive_name=archive_name,\n version=version,\n )\n logger.info(f\"SUCCESS: archive created for {dataset}\")\n", "issue": "Auto-archive script not running on crawled dataset?\nAll OSF or Zenodo datasets are not being archived when running the script `auto_archive.py` with option `--all`.\r\n\r\nThe following information systematically shows:\r\n```\r\nSKIPPED: archive not needed for projects/<dataset_name>\r\n```\r\n but the dataset size is definitely below 20GB.\r\n\r\nList of affected datasets:\r\n\r\n- CFMM_7T__MP2RAGE_T1_mapping_version-2020-10-02T12:55:03.086599.tar.gz\r\n- Comparing_Perturbation_Modes_for_Evaluating_Instabilities_in_Neuroimaging__Processed_NKI_RS_Subset__08_2019__version-None.tar.gz\r\n- Intracellular_Recordings_of_Murine_Neocortical_Neurons_version-2020-04-18T19:34:10.458118.tar.gz\r\n- Learning_Naturalistic_Structure__Processed_fMRI_dataset_version-0.1.0.tar.gz\r\n- MRI_and_unbiased_averages_of_wild_muskrats__Ondatra_zibethicus__and_red_squirrels__Tamiasciurus_hudsonicus__version-1.0.tar.gz:\r\n- Multi-model_functionalization_of_disease-associated_PTEN_missense_mutations_version-1.0.tar.gz\r\n- Quantifying_Neural_Cognitive_Relationships_Across_the_Brain_version-2020-11-11T02:30:59.429493.tar.gz\r\n- 
Synthetic_Animated_Mouse__SAM___University_of_British_Columbia__Datasets_and_3D_models_version-2021-04-09T14:12:32.548954.tar.gz:\r\n- Systematic_phenomics_analysis_of_autism-associated_genes_version-3.2.tar.gz\r\n- VFA_T1_mapping___RTHawk__open__vs_Siemens__commercial__version-v1.0.tar.gz\r\n\r\n\r\n\nAuto-archive script not running on crawled dataset?\nAll OSF or Zenodo datasets are not being archived when running the script `auto_archive.py` with option `--all`.\r\n\r\nThe following information systematically shows:\r\n```\r\nSKIPPED: archive not needed for projects/<dataset_name>\r\n```\r\n but the dataset size is definitely below 20GB.\r\n\r\nList of affected datasets:\r\n\r\n- CFMM_7T__MP2RAGE_T1_mapping_version-2020-10-02T12:55:03.086599.tar.gz\r\n- Comparing_Perturbation_Modes_for_Evaluating_Instabilities_in_Neuroimaging__Processed_NKI_RS_Subset__08_2019__version-None.tar.gz\r\n- Intracellular_Recordings_of_Murine_Neocortical_Neurons_version-2020-04-18T19:34:10.458118.tar.gz\r\n- Learning_Naturalistic_Structure__Processed_fMRI_dataset_version-0.1.0.tar.gz\r\n- MRI_and_unbiased_averages_of_wild_muskrats__Ondatra_zibethicus__and_red_squirrels__Tamiasciurus_hudsonicus__version-1.0.tar.gz:\r\n- Multi-model_functionalization_of_disease-associated_PTEN_missense_mutations_version-1.0.tar.gz\r\n- Quantifying_Neural_Cognitive_Relationships_Across_the_Brain_version-2020-11-11T02:30:59.429493.tar.gz\r\n- Synthetic_Animated_Mouse__SAM___University_of_British_Columbia__Datasets_and_3D_models_version-2021-04-09T14:12:32.548954.tar.gz:\r\n- Systematic_phenomics_analysis_of_autism-associated_genes_version-3.2.tar.gz\r\n- VFA_T1_mapping___RTHawk__open__vs_Siemens__commercial__version-v1.0.tar.gz\r\n\r\n\r\n\n", "before_files": [{"content": "from __future__ import annotations\n\nimport argparse\nimport json\nimport logging\nimport os\nfrom datetime import datetime\nfrom datetime import timedelta\n\nimport git\nimport humanfriendly\nfrom datalad.plugin import export_archive\nfrom github import Github\n\nfrom scripts.datalad_utils import get_dataset\nfrom scripts.datalad_utils import install_dataset\nfrom scripts.log import get_logger\nfrom tests.functions import get_proper_submodules\n\n\nlogger = get_logger(\n \"CONP-Archive\", filename=\"conp-archive.log\", file_level=logging.DEBUG\n)\n\n\nclass ArchiveFailed(Exception):\n pass\n\n\ndef parse_args():\n example_text = \"\"\"Example:\n PYTHONPATH=$PWD python scripts/auto_archive.py <out_dir>\n \"\"\"\n\n parser = argparse.ArgumentParser(\n description=\"Archiver for the CONP-datasets.\",\n epilog=example_text,\n formatter_class=argparse.RawDescriptionHelpFormatter,\n )\n\n parser.add_argument(\n \"--out_dir\", \"-o\", type=str, help=\"Path to store the archived datasets.\"\n )\n parser.add_argument(\n \"--max-size\",\n type=float,\n default=20.0,\n help=\"Maximum size of dataset to archive in GB.\",\n )\n group = parser.add_mutually_exclusive_group()\n group.add_argument(\n \"--all\",\n action=\"store_true\",\n help=\"Archive all the datasets rather than those modified since the last time.\",\n )\n group.add_argument(\n \"--dataset\",\n \"-d\",\n type=str,\n nargs=\"+\",\n help=\"Restrict the archive to the specified dataset paths.\",\n )\n\n return parser.parse_args()\n\n\ndef get_all_datasets():\n return {os.path.basename(submodule.path) for submodule in git.Repo().submodules}\n\n\ndef get_modified_datasets(\n *,\n since: datetime | None = None,\n until: datetime | None = None,\n) -> set[str]:\n \"\"\"Retrieve the modified datasets.\n\n Requires to 
set GITHUB_ACCESS_TOKEN as an environment variable.\n\n Parameters\n ----------\n since : Optional[datetime], optional\n Start date from which commits are retrieved, by default date of the previous crawl, if never crawled set to\n one week ago.\n until : Optional[datetime], optional\n Latest date at which commit are retrieved, by default `now`\n\n Returns\n -------\n set[str]\n Path of the dataset folders.\n \"\"\"\n now = datetime.now().astimezone()\n\n if since is None:\n if os.path.exists(\".conp-archive\"):\n with open(\".conp-archive\") as fin:\n since = datetime.fromisoformat(fin.read())\n else:\n since = now - timedelta(weeks=1)\n\n if until is None:\n until = now\n\n try:\n gh_access_token = os.environ.get(\"GITHUB_ACCESS_TOKEN\", None)\n if gh_access_token is None:\n raise OSError(\"GITHUB_ACCESS_TOKEN is not defined.\")\n\n except OSError as e:\n # The program is not stopped since GitHub allows 60 query per hours with\n # authentication. However the program will most likely fail.\n logger.critical(e)\n\n logger.info(f\"Retrieving modified datasets since {since}\")\n repo = Github(gh_access_token).get_repo(\"CONP-PCNO/conp-dataset\")\n commits = repo.get_commits(since=since, until=until)\n\n with open(\".conp-archive\", \"w\") as fout:\n fout.write(now.isoformat())\n\n modified_datasets: set[str] = {\n os.path.basename(file_.filename)\n for commit in commits\n for file_ in commit.files\n if file_.filename.startswith(\"projects/\")\n }\n\n return modified_datasets\n\n\ndef archive_dataset(dataset_path: str, out_dir: str, version: str) -> None:\n os.makedirs(os.path.dirname(out_dir), mode=0o755, exist_ok=True)\n out_filename = f\"{out_dir}_version-{version}.tar.gz\"\n logger.info(f\"Archiving dataset: {dataset_path} to {out_filename}\")\n\n cwd = os.getcwd()\n try:\n datalad_archiver = export_archive.ExportArchive()\n dataset_repo = git.Repo(dataset_path)\n\n with open(os.path.join(dataset_path, \".git.log\"), \"w\") as fout:\n fout.write(dataset_repo.git.log(pretty=\"format:%H %s\"))\n\n # Export is performed from the dataset root.\n # This is to avoid failure when a submodule is not downloaded; e.g. 
for parent\n # dataset in dataset derivative.\n os.chdir(os.path.join(cwd, dataset_path))\n datalad_archiver(\".\", filename=out_filename)\n\n except Exception as e:\n raise ArchiveFailed(\n f\"FAILURE: could not archive dataset: {dataset_path} to {out_filename}\\n{e}\"\n )\n finally:\n os.chdir(cwd)\n\n\nif __name__ == \"__main__\":\n args = parse_args()\n\n # Only archive the datasets available locally.\n datasets = get_all_datasets()\n if args.dataset:\n target_datasets = {os.path.basename(os.path.normpath(d)) for d in args.dataset}\n logger.warning(\n f\"The following dataset were not found locally: {target_datasets - datasets}\"\n )\n datasets &= target_datasets\n\n elif not args.all:\n modified_datasets = get_modified_datasets()\n logger.warning(\n f\"The following dataset were not found locally: {modified_datasets - datasets}\"\n )\n datasets &= modified_datasets\n\n for dataset_name in datasets:\n dataset = \"projects/\" + dataset_name\n\n try:\n logger.info(f\"Installing dataset: {dataset}\")\n install_dataset(dataset)\n\n is_public = False\n version = \"\"\n dataset_size = 0.0\n\n with open(os.path.join(dataset, \"DATS.json\")) as fin:\n metadata = json.load(fin)\n\n is_public = metadata.get(\"privacy\") == \"open\"\n version = metadata.get(\"version\")\n\n for distribution in metadata.get(\"distributions\", list()):\n dataset_size += humanfriendly.parse_size(\n f\"{distribution['size']} {distribution['unit']['value']}\",\n )\n dataset_size //= 1024 ** 3 # Convert to GB\n\n # Only archive public dataset less than 20GB\n if dataset_size <= args.max_size and is_public:\n logger.info(f\"Downloading dataset: {dataset}\")\n get_dataset(dataset)\n for submodule in get_proper_submodules(dataset):\n get_dataset(submodule)\n\n archive_dataset(\n dataset,\n out_dir=os.path.join(args.out_dir, dataset_name),\n version=version,\n )\n logger.info(f\"SUCCESS: archive created for {dataset}\")\n\n else:\n logger.info(f\"SKIPPED: archive not needed for {dataset}\")\n\n except Exception as e:\n # TODO implement notification system.\n # This will alert when a dataset fails the archiving process.\n logger.exception(\n f\"FAILURE: could not archive dataset: {dataset} to {args.out_dir}.tar.gz\\n{e}\"\n )\n\n logger.info(\"Done archiving the datasets.\")\n", "path": "scripts/auto_archive.py"}]}
3,535
651
gh_patches_debug_9987
rasdani/github-patches
git_diff
cltk__cltk-906
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Fix broken NER doctest Open to anyone. This simple NER doctest: https://github.com/cltk/cltk/blob/9b9cdb42dcc1c707ab3db3ef8214837bb7c262b5/cltk/tag/ner.py#L78 is all of a sudden failing (example: https://travis-ci.org/cltk/cltk/jobs/525125856#L1935 ). The test expects 4 padded spaces on the left. I have no idea why this would break all of a sudden. </issue> <code> [start of cltk/tag/ner.py] 1 """Named entity recognition (NER).""" 2 3 from cltk.corpus.utils.importer import CorpusImporter 4 from nltk.tokenize.punkt import PunktLanguageVars 5 from cltk.tokenize.word import WordTokenizer 6 import os 7 import importlib.machinery 8 9 __author__ = ['Natasha Voake <[email protected]>'] 10 __license__ = 'MIT License. See LICENSE.' 11 12 NER_DICT = {'greek': '~/cltk_data/greek/model/greek_models_cltk/ner/proper_names.txt', 13 'latin': '~/cltk_data/latin/model/latin_models_cltk/ner/proper_names.txt'} 14 15 16 class NamedEntityReplacer(object): 17 18 def __init__(self): 19 20 self.entities = self._load_necessary_data() 21 22 23 def _load_necessary_data(self): 24 rel_path = os.path.join('~', 'cltk_data', 25 'french', 26 'text', 'french_data_cltk', 27 'named_entities_fr.py') 28 path = os.path.expanduser(rel_path) 29 # logger.info('Loading entries. This may take a minute.') 30 loader = importlib.machinery.SourceFileLoader('entities', path) 31 module = loader.load_module() 32 entities = module.entities 33 return entities 34 35 """tags named entities in a string and outputs a list of tuples in the following format: 36 (name, "entity", kind_of_entity)""" 37 38 def tag_ner_fr(self, input_text, output_type=list): 39 40 entities = self.entities 41 42 for entity in entities: 43 (name, kind) = entity 44 45 word_tokenizer = WordTokenizer('french') 46 tokenized_text = word_tokenizer.tokenize(input_text) 47 ner_tuple_list = [] 48 49 match = False 50 for word in tokenized_text: 51 for name, kind in entities: 52 if word == name: 53 named_things = ([(name, 'entity', kind)]) 54 ner_tuple_list.append(named_things) 55 match = True 56 break 57 else: 58 ner_tuple_list.append((word,)) 59 return ner_tuple_list 60 61 62 def _check_latest_data(lang): 63 """Check for presence of proper names dir, clone if not.""" 64 65 assert lang in NER_DICT.keys(), \ 66 'Invalid language. Choose from: {}'.format(', '.join(NER_DICT.keys())) 67 68 ner_file_path = os.path.expanduser(NER_DICT[lang]) 69 70 if not os.path.isfile(ner_file_path): 71 corpus_importer = CorpusImporter(lang) 72 corpus_importer.import_corpus('{}_models_cltk'.format(lang)) 73 74 75 def tag_ner(lang, input_text, output_type=list): 76 """Run NER for chosen language. 77 Choosing output_type=list, returns a list of tuples: 78 79 >>> tag_ner('latin', input_text='ut Venus, ut Sirius, ut Spica', output_type=list) 80 [('ut',), ('Venus',), (',',), ('ut',), ('Sirius', 'Entity'), (',',), ('ut',), ('Spica', 'Entity')] 81 """ 82 83 _check_latest_data(lang) 84 85 assert lang in NER_DICT.keys(), \ 86 'Invalid language. 
Choose from: {}'.format(', '.join(NER_DICT.keys())) 87 types = [str, list] 88 assert type(input_text) in types, 'Input must be: {}.'.format(', '.join(types)) 89 assert output_type in types, 'Output must be a {}.'.format(', '.join(types)) 90 91 if type(input_text) == str: 92 punkt = PunktLanguageVars() 93 tokens = punkt.word_tokenize(input_text) 94 new_tokens = [] 95 for word in tokens: 96 if word.endswith('.'): 97 new_tokens.append(word[:-1]) 98 new_tokens.append('.') 99 else: 100 new_tokens.append(word) 101 input_text = new_tokens 102 103 ner_file_path = os.path.expanduser(NER_DICT[lang]) 104 with open(ner_file_path) as file_open: 105 ner_str = file_open.read() 106 ner_list = ner_str.split('\n') 107 108 ner_tuple_list = [] 109 for count, word_token in enumerate(input_text): 110 match = False 111 for ner_word in ner_list: 112 # the replacer slows things down, but is necessary 113 if word_token == ner_word: 114 ner_tuple = (word_token, 'Entity') 115 ner_tuple_list.append(ner_tuple) 116 match = True 117 break 118 if not match: 119 ner_tuple_list.append((word_token,)) 120 121 if output_type is str: 122 string = '' 123 for tup in ner_tuple_list: 124 start_space = ' ' 125 final_space = '' 126 # this is some mediocre string reconstitution 127 # maybe not worth the effort 128 if tup[0] in [',', '.', ';', ':', '?', '!']: 129 start_space = '' 130 if len(tup) == 2: 131 string += start_space + tup[0] + '/' + tup[1] + final_space 132 else: 133 string += start_space + tup[0] + final_space 134 return string 135 136 return ner_tuple_list 137 138 [end of cltk/tag/ner.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/cltk/tag/ner.py b/cltk/tag/ner.py --- a/cltk/tag/ner.py +++ b/cltk/tag/ner.py @@ -74,10 +74,6 @@ def tag_ner(lang, input_text, output_type=list): """Run NER for chosen language. - Choosing output_type=list, returns a list of tuples: - - >>> tag_ner('latin', input_text='ut Venus, ut Sirius, ut Spica', output_type=list) - [('ut',), ('Venus',), (',',), ('ut',), ('Sirius', 'Entity'), (',',), ('ut',), ('Spica', 'Entity')] """ _check_latest_data(lang) @@ -134,4 +130,3 @@ return string return ner_tuple_list -
{"golden_diff": "diff --git a/cltk/tag/ner.py b/cltk/tag/ner.py\n--- a/cltk/tag/ner.py\n+++ b/cltk/tag/ner.py\n@@ -74,10 +74,6 @@\n \n def tag_ner(lang, input_text, output_type=list):\n \"\"\"Run NER for chosen language.\n- Choosing output_type=list, returns a list of tuples:\n- \n- >>> tag_ner('latin', input_text='ut Venus, ut Sirius, ut Spica', output_type=list)\n- [('ut',), ('Venus',), (',',), ('ut',), ('Sirius', 'Entity'), (',',), ('ut',), ('Spica', 'Entity')]\n \"\"\"\n \n _check_latest_data(lang)\n@@ -134,4 +130,3 @@\n return string\n \n return ner_tuple_list\n-\n", "issue": "Fix broken NER doctest\nOpen to anyone.\r\n\r\nThis simple NER doctest: https://github.com/cltk/cltk/blob/9b9cdb42dcc1c707ab3db3ef8214837bb7c262b5/cltk/tag/ner.py#L78\r\n\r\nis all of a sudden failing (example: https://travis-ci.org/cltk/cltk/jobs/525125856#L1935 ).\r\n\r\nThe test expects 4 padded spaces on the left.\r\n\r\nI have no idea why this would break all of a sudden.\r\n\r\n\n", "before_files": [{"content": "\"\"\"Named entity recognition (NER).\"\"\"\n\nfrom cltk.corpus.utils.importer import CorpusImporter\nfrom nltk.tokenize.punkt import PunktLanguageVars\nfrom cltk.tokenize.word import WordTokenizer\nimport os\nimport importlib.machinery\n\n__author__ = ['Natasha Voake <[email protected]>']\n__license__ = 'MIT License. See LICENSE.'\n\nNER_DICT = {'greek': '~/cltk_data/greek/model/greek_models_cltk/ner/proper_names.txt',\n 'latin': '~/cltk_data/latin/model/latin_models_cltk/ner/proper_names.txt'}\n\n\nclass NamedEntityReplacer(object):\n\n def __init__(self):\n\n self.entities = self._load_necessary_data()\n\n\n def _load_necessary_data(self):\n rel_path = os.path.join('~', 'cltk_data',\n 'french',\n 'text', 'french_data_cltk',\n 'named_entities_fr.py')\n path = os.path.expanduser(rel_path)\n # logger.info('Loading entries. This may take a minute.')\n loader = importlib.machinery.SourceFileLoader('entities', path)\n module = loader.load_module()\n entities = module.entities\n return entities\n\n \"\"\"tags named entities in a string and outputs a list of tuples in the following format:\n (name, \"entity\", kind_of_entity)\"\"\"\n\n def tag_ner_fr(self, input_text, output_type=list):\n\n entities = self.entities\n\n for entity in entities:\n (name, kind) = entity\n\n word_tokenizer = WordTokenizer('french')\n tokenized_text = word_tokenizer.tokenize(input_text)\n ner_tuple_list = []\n\n match = False\n for word in tokenized_text:\n for name, kind in entities:\n if word == name:\n named_things = ([(name, 'entity', kind)])\n ner_tuple_list.append(named_things)\n match = True\n break\n else:\n ner_tuple_list.append((word,))\n return ner_tuple_list\n\n\ndef _check_latest_data(lang):\n \"\"\"Check for presence of proper names dir, clone if not.\"\"\"\n\n assert lang in NER_DICT.keys(), \\\n 'Invalid language. Choose from: {}'.format(', '.join(NER_DICT.keys()))\n\n ner_file_path = os.path.expanduser(NER_DICT[lang])\n\n if not os.path.isfile(ner_file_path):\n corpus_importer = CorpusImporter(lang)\n corpus_importer.import_corpus('{}_models_cltk'.format(lang))\n\n\ndef tag_ner(lang, input_text, output_type=list):\n \"\"\"Run NER for chosen language.\n Choosing output_type=list, returns a list of tuples:\n \n >>> tag_ner('latin', input_text='ut Venus, ut Sirius, ut Spica', output_type=list)\n [('ut',), ('Venus',), (',',), ('ut',), ('Sirius', 'Entity'), (',',), ('ut',), ('Spica', 'Entity')]\n \"\"\"\n\n _check_latest_data(lang)\n\n assert lang in NER_DICT.keys(), \\\n 'Invalid language. 
Choose from: {}'.format(', '.join(NER_DICT.keys()))\n types = [str, list]\n assert type(input_text) in types, 'Input must be: {}.'.format(', '.join(types))\n assert output_type in types, 'Output must be a {}.'.format(', '.join(types))\n\n if type(input_text) == str:\n punkt = PunktLanguageVars()\n tokens = punkt.word_tokenize(input_text)\n new_tokens = []\n for word in tokens:\n if word.endswith('.'):\n new_tokens.append(word[:-1])\n new_tokens.append('.')\n else:\n new_tokens.append(word)\n input_text = new_tokens\n\n ner_file_path = os.path.expanduser(NER_DICT[lang])\n with open(ner_file_path) as file_open:\n ner_str = file_open.read()\n ner_list = ner_str.split('\\n')\n\n ner_tuple_list = []\n for count, word_token in enumerate(input_text):\n match = False\n for ner_word in ner_list:\n # the replacer slows things down, but is necessary\n if word_token == ner_word:\n ner_tuple = (word_token, 'Entity')\n ner_tuple_list.append(ner_tuple)\n match = True\n break\n if not match:\n ner_tuple_list.append((word_token,))\n\n if output_type is str:\n string = ''\n for tup in ner_tuple_list:\n start_space = ' '\n final_space = ''\n # this is some mediocre string reconstitution\n # maybe not worth the effort\n if tup[0] in [',', '.', ';', ':', '?', '!']:\n start_space = ''\n if len(tup) == 2:\n string += start_space + tup[0] + '/' + tup[1] + final_space\n else:\n string += start_space + tup[0] + final_space\n return string\n\n return ner_tuple_list\n\n", "path": "cltk/tag/ner.py"}]}
num_tokens_prompt: 2,063
num_tokens_diff: 189
problem_id: gh_patches_debug_10497
source: rasdani/github-patches
task_type: git_diff
in_source_id: lhotse-speech__lhotse-138
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Bug: broken import from augmentations Hi I installed the latest pip version of lhotse and I am getting an import error when using the lhotse CLI: Setup: ``` python3.7.0 lhotse==0.2.0 ``` To reproduce, try either from the following lines: ``` lhotse convert-kaldi <data-dir> 16000 <other-data-dir> python -c "from lhotse.augmentation import available_wav_augmentations" ``` </issue> <code> [start of lhotse/augmentation/__init__.py] 1 from .common import AugmentFn 2 from .torchaudio import * 3 from .wavaugment import WavAugmenter, is_wav_augment_available 4 [end of lhotse/augmentation/__init__.py] [start of setup.py] 1 # coding=utf-8 2 import os 3 from pathlib import Path 4 5 from setuptools import find_packages, setup 6 7 project_root = Path(__file__).parent 8 9 install_requires = (project_root / 'requirements.txt').read_text().splitlines() 10 docs_require = (project_root / 'docs' / 'requirements.txt').read_text().splitlines() 11 tests_require = ['pytest==5.4.3', 'flake8==3.8.3', 'coverage==5.1', 'hypothesis==5.41.2'] 12 dev_requires = docs_require + tests_require + ['jupyterlab', 'matplotlib', 'isort'] 13 14 if os.environ.get('READTHEDOCS', False): 15 # When building documentation, omit torchaudio installation and mock it instead. 16 # This works around the inability to install libsoundfile1 in read-the-docs env, 17 # which caused the documentation builds to silently crash. 18 install_requires = [req for req in install_requires if not req.startswith('torchaudio')] 19 20 setup( 21 name='lhotse', 22 version='0.2.0', 23 python_requires='>=3.7.0', 24 description='Data preparation for speech processing models training.', 25 author='The Lhotse Development Team', 26 author_email="[email protected]", 27 long_description=(project_root / 'README.md').read_text(), 28 long_description_content_type="text/markdown", 29 license='Apache-2.0 License', 30 packages=find_packages(), 31 # The line below makes every script in the list an executable that's inserted in PATH 32 # as long as the virtualenv/conda env is active; they can be used like any other shell program 33 scripts=['lhotse/bin/lhotse'], 34 install_requires=install_requires, 35 extras_require={ 36 'docs': docs_require, 37 'tests': tests_require, 38 'dev': docs_require + tests_require 39 }, 40 classifiers=[ 41 "Development Status :: 3 - Alpha", 42 "Programming Language :: Python :: 3.7", 43 "Programming Language :: Python :: 3.8", 44 "Intended Audience :: Science/Research", 45 "Operating System :: POSIX :: Linux", 46 "Operating System :: MacOS :: MacOS X", 47 "License :: OSI Approved :: Apache Software License", 48 "Topic :: Multimedia :: Sound/Audio :: Speech", 49 "Topic :: Scientific/Engineering :: Artificial Intelligence", 50 "Topic :: Software Development :: Libraries :: Python Modules", 51 "Typing :: Typed" 52 ], 53 ) 54 [end of setup.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/lhotse/augmentation/__init__.py b/lhotse/augmentation/__init__.py --- a/lhotse/augmentation/__init__.py +++ b/lhotse/augmentation/__init__.py @@ -1,3 +1,3 @@ from .common import AugmentFn from .torchaudio import * -from .wavaugment import WavAugmenter, is_wav_augment_available +from .wavaugment import WavAugmenter, is_wav_augment_available, available_wav_augmentations diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -19,7 +19,7 @@ setup( name='lhotse', - version='0.2.0', + version='0.2.1', python_requires='>=3.7.0', description='Data preparation for speech processing models training.', author='The Lhotse Development Team',
{"golden_diff": "diff --git a/lhotse/augmentation/__init__.py b/lhotse/augmentation/__init__.py\n--- a/lhotse/augmentation/__init__.py\n+++ b/lhotse/augmentation/__init__.py\n@@ -1,3 +1,3 @@\n from .common import AugmentFn\n from .torchaudio import *\n-from .wavaugment import WavAugmenter, is_wav_augment_available\n+from .wavaugment import WavAugmenter, is_wav_augment_available, available_wav_augmentations\ndiff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -19,7 +19,7 @@\n \n setup(\n name='lhotse',\n- version='0.2.0',\n+ version='0.2.1',\n python_requires='>=3.7.0',\n description='Data preparation for speech processing models training.',\n author='The Lhotse Development Team',\n", "issue": "Bug: broken import from augmentations\nHi I installed the latest pip version of lhotse and I am getting an import error when using the lhotse CLI:\r\n\r\nSetup:\r\n```\r\npython3.7.0 \r\nlhotse==0.2.0\r\n```\r\n\r\nTo reproduce, try either from the following lines:\r\n```\r\nlhotse convert-kaldi <data-dir> 16000 <other-data-dir>\r\npython -c \"from lhotse.augmentation import available_wav_augmentations\"\r\n```\n", "before_files": [{"content": "from .common import AugmentFn\nfrom .torchaudio import *\nfrom .wavaugment import WavAugmenter, is_wav_augment_available\n", "path": "lhotse/augmentation/__init__.py"}, {"content": "# coding=utf-8\nimport os\nfrom pathlib import Path\n\nfrom setuptools import find_packages, setup\n\nproject_root = Path(__file__).parent\n\ninstall_requires = (project_root / 'requirements.txt').read_text().splitlines()\ndocs_require = (project_root / 'docs' / 'requirements.txt').read_text().splitlines()\ntests_require = ['pytest==5.4.3', 'flake8==3.8.3', 'coverage==5.1', 'hypothesis==5.41.2']\ndev_requires = docs_require + tests_require + ['jupyterlab', 'matplotlib', 'isort']\n\nif os.environ.get('READTHEDOCS', False):\n # When building documentation, omit torchaudio installation and mock it instead.\n # This works around the inability to install libsoundfile1 in read-the-docs env,\n # which caused the documentation builds to silently crash.\n install_requires = [req for req in install_requires if not req.startswith('torchaudio')]\n\nsetup(\n name='lhotse',\n version='0.2.0',\n python_requires='>=3.7.0',\n description='Data preparation for speech processing models training.',\n author='The Lhotse Development Team',\n author_email=\"[email protected]\",\n long_description=(project_root / 'README.md').read_text(),\n long_description_content_type=\"text/markdown\",\n license='Apache-2.0 License',\n packages=find_packages(),\n # The line below makes every script in the list an executable that's inserted in PATH\n # as long as the virtualenv/conda env is active; they can be used like any other shell program\n scripts=['lhotse/bin/lhotse'],\n install_requires=install_requires,\n extras_require={\n 'docs': docs_require,\n 'tests': tests_require,\n 'dev': docs_require + tests_require\n },\n classifiers=[\n \"Development Status :: 3 - Alpha\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Intended Audience :: Science/Research\",\n \"Operating System :: POSIX :: Linux\",\n \"Operating System :: MacOS :: MacOS X\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Topic :: Multimedia :: Sound/Audio :: Speech\",\n \"Topic :: Scientific/Engineering :: Artificial Intelligence\",\n \"Topic :: Software Development :: Libraries :: Python Modules\",\n \"Typing :: Typed\"\n ],\n)\n", "path": "setup.py"}]}
num_tokens_prompt: 1,336
num_tokens_diff: 219
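A side note on the lhotse record above: the golden diff only widens the re-export list in `lhotse/augmentation/__init__.py` and bumps the version, so the one-liner from the issue doubles as a smoke test. The sketch below assumes the patched 0.2.1 package is installed and that `available_wav_augmentations` and `is_wav_augment_available` are plain zero-argument helpers defined in `lhotse.augmentation.wavaugment`; it is an illustration, not part of the record.

```
# Reproduction from the issue, reused as a post-fix smoke test.
from lhotse.augmentation import (
    WavAugmenter,
    available_wav_augmentations,
    is_wav_augment_available,
)

# The import above used to raise ImportError; the probe call below is optional.
print(is_wav_augment_available())
```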
problem_id: gh_patches_debug_15590
source: rasdani/github-patches
task_type: git_diff
in_source_id: plone__Products.CMFPlone-3688
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Update package metadata in PyPi </issue> <code> [start of setup.py] 1 from setuptools import find_packages 2 from setuptools import setup 3 4 5 version = '6.0.0rc2.dev0' 6 7 8 setup( 9 name='Products.CMFPlone', 10 version=version, 11 description="The Plone Content Management System (core)", 12 long_description=open("README.rst").read() + "\n" + 13 open("CHANGES.rst").read(), 14 classifiers=[ 15 "Development Status :: 5 - Production/Stable", 16 "Environment :: Web Environment", 17 "Framework :: Plone", 18 "Framework :: Plone :: 6.0", 19 "Framework :: Plone :: Core", 20 "Framework :: Zope :: 5", 21 "License :: OSI Approved :: GNU General Public License v2 (GPLv2)", 22 "Operating System :: OS Independent", 23 "Programming Language :: Python", 24 "Programming Language :: Python :: 3.8", 25 "Programming Language :: Python :: 3.9", 26 "Programming Language :: Python :: 3.10", 27 "Programming Language :: Python :: 3.11", 28 ], 29 python_requires='>=3.8', 30 keywords='Plone CMF Python Zope CMS Webapplication', 31 author='Plone Foundation', 32 author_email='[email protected]', 33 url='https://plone.org', 34 license='GPL version 2', 35 packages=find_packages(), 36 namespace_packages=['Products'], 37 include_package_data=True, 38 zip_safe=False, 39 install_requires=[ 40 'borg.localrole', 41 'five.customerize', 42 'lxml', 43 'plone.api >= 1.4.4', 44 'plone.app.content', 45 'plone.app.contentlisting', 46 'plone.app.contentmenu >= 2.0.1', 47 'plone.app.contentrules', 48 'plone.app.contenttypes', 49 'plone.app.customerize', 50 'plone.app.dexterity', 51 'plone.app.discussion', 52 'plone.app.i18n', 53 'plone.app.layout >= 2.5.15', 54 'plone.app.linkintegrity >=1.0.3', 55 'plone.app.locales', 56 'plone.app.multilingual', 57 'plone.app.portlets', 58 'plone.app.redirector', 59 'plone.app.registry', 60 'plone.app.theming', 61 'plone.app.users', 62 'plone.app.uuid', 63 'plone.app.viewletmanager', 64 'plone.app.vocabularies', 65 'plone.app.workflow', 66 'plone.base', 67 'plone.browserlayer >= 2.1.5', 68 'plone.contentrules', 69 'plone.folder', 70 'plone.i18n >= 4.0.5', 71 'plone.indexer', 72 'plone.intelligenttext', 73 'plone.locking', 74 'plone.memoize', 75 'plone.outputfilters', 76 'plone.portlet.collection', 77 'plone.portlet.static', 78 'plone.portlets', 79 'plone.protect >= 3.0.0', 80 'plone.resource', 81 'plone.schema', 82 'plone.session', 83 'plone.staticresources', 84 'plone.theme', 85 'plonetheme.barceloneta', 86 'Products.CMFEditions', 87 'Products.DCWorkflow', 88 'Products.ExtendedPathIndex', 89 'Products.isurlinportal', 90 'Products.MimetypesRegistry', 91 'Products.PlonePAS', 92 'Products.PortalTransforms', 93 'Products.SiteErrorLog', 94 'Products.statusmessages', 95 'setuptools>=36.2', 96 'plone.autoinclude', 97 'webresource>=1.1', 98 'Zope[wsgi] >= 5.0', 99 'zope.app.locales >= 3.6.0', 100 'zope.cachedescriptors', 101 'zope.deferredimport', 102 'zope.deprecation', 103 'zope.dottedname', 104 'zope.i18n', 105 'zope.i18nmessageid', 106 'zope.structuredtext', 107 ], 108 extras_require={ 109 'test': [ 110 'lxml', 111 'mock', 112 'plone.app.robotframework>=1.0', 113 'robotframework-debuglibrary', 114 'plone.app.testing', 115 'zope.globalrequest', 116 'zope.testing', 117 'gunicorn', 118 ] 119 }, 120 ) 121 [end of setup.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. 
Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -32,6 +32,19 @@ author_email='[email protected]', url='https://plone.org', license='GPL version 2', + project_urls={ + "Homepage": "https://plone.org", + "Documentation": "https://docs.plone.org", + "PyPI": "https://pypi.python.org/pypi/Products.CMFPlone", + "Source": "https://github.com/plone/Products.CMFPlone", + "Issues": "https://github.com/plone/plone.org/Products.CMFPlone", + "Forum": "https://community.plone.org/", + "Chat": "https://discord.gg/zFY3EBbjaj", + "Mastodon": "https://plone.social/@plone", + "Twitter": "https://twitter.com/plone", + "Videos": "https://youtube.com/@plonecms", + "Sponsor": "https://github.com/sponsors/plone", + }, packages=find_packages(), namespace_packages=['Products'], include_package_data=True,
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -32,6 +32,19 @@\n author_email='[email protected]',\n url='https://plone.org',\n license='GPL version 2',\n+ project_urls={\n+ \"Homepage\": \"https://plone.org\",\n+ \"Documentation\": \"https://docs.plone.org\",\n+ \"PyPI\": \"https://pypi.python.org/pypi/Products.CMFPlone\",\n+ \"Source\": \"https://github.com/plone/Products.CMFPlone\",\n+ \"Issues\": \"https://github.com/plone/plone.org/Products.CMFPlone\",\n+ \"Forum\": \"https://community.plone.org/\",\n+ \"Chat\": \"https://discord.gg/zFY3EBbjaj\",\n+ \"Mastodon\": \"https://plone.social/@plone\",\n+ \"Twitter\": \"https://twitter.com/plone\",\n+ \"Videos\": \"https://youtube.com/@plonecms\",\n+ \"Sponsor\": \"https://github.com/sponsors/plone\",\n+ },\n packages=find_packages(),\n namespace_packages=['Products'],\n include_package_data=True,\n", "issue": "Update package metadata in PyPi\n\n", "before_files": [{"content": "from setuptools import find_packages\nfrom setuptools import setup\n\n\nversion = '6.0.0rc2.dev0'\n\n\nsetup(\n name='Products.CMFPlone',\n version=version,\n description=\"The Plone Content Management System (core)\",\n long_description=open(\"README.rst\").read() + \"\\n\" +\n open(\"CHANGES.rst\").read(),\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Environment :: Web Environment\",\n \"Framework :: Plone\",\n \"Framework :: Plone :: 6.0\",\n \"Framework :: Plone :: Core\",\n \"Framework :: Zope :: 5\",\n \"License :: OSI Approved :: GNU General Public License v2 (GPLv2)\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Programming Language :: Python :: 3.11\",\n ],\n python_requires='>=3.8',\n keywords='Plone CMF Python Zope CMS Webapplication',\n author='Plone Foundation',\n author_email='[email protected]',\n url='https://plone.org',\n license='GPL version 2',\n packages=find_packages(),\n namespace_packages=['Products'],\n include_package_data=True,\n zip_safe=False,\n install_requires=[\n 'borg.localrole',\n 'five.customerize',\n 'lxml',\n 'plone.api >= 1.4.4',\n 'plone.app.content',\n 'plone.app.contentlisting',\n 'plone.app.contentmenu >= 2.0.1',\n 'plone.app.contentrules',\n 'plone.app.contenttypes',\n 'plone.app.customerize',\n 'plone.app.dexterity',\n 'plone.app.discussion',\n 'plone.app.i18n',\n 'plone.app.layout >= 2.5.15',\n 'plone.app.linkintegrity >=1.0.3',\n 'plone.app.locales',\n 'plone.app.multilingual',\n 'plone.app.portlets',\n 'plone.app.redirector',\n 'plone.app.registry',\n 'plone.app.theming',\n 'plone.app.users',\n 'plone.app.uuid',\n 'plone.app.viewletmanager',\n 'plone.app.vocabularies',\n 'plone.app.workflow',\n 'plone.base',\n 'plone.browserlayer >= 2.1.5',\n 'plone.contentrules',\n 'plone.folder',\n 'plone.i18n >= 4.0.5',\n 'plone.indexer',\n 'plone.intelligenttext',\n 'plone.locking',\n 'plone.memoize',\n 'plone.outputfilters',\n 'plone.portlet.collection',\n 'plone.portlet.static',\n 'plone.portlets',\n 'plone.protect >= 3.0.0',\n 'plone.resource',\n 'plone.schema',\n 'plone.session',\n 'plone.staticresources',\n 'plone.theme',\n 'plonetheme.barceloneta',\n 'Products.CMFEditions',\n 'Products.DCWorkflow',\n 'Products.ExtendedPathIndex',\n 'Products.isurlinportal',\n 'Products.MimetypesRegistry',\n 'Products.PlonePAS',\n 'Products.PortalTransforms',\n 'Products.SiteErrorLog',\n 
'Products.statusmessages',\n 'setuptools>=36.2',\n 'plone.autoinclude',\n 'webresource>=1.1',\n 'Zope[wsgi] >= 5.0',\n 'zope.app.locales >= 3.6.0',\n 'zope.cachedescriptors',\n 'zope.deferredimport',\n 'zope.deprecation',\n 'zope.dottedname',\n 'zope.i18n',\n 'zope.i18nmessageid',\n 'zope.structuredtext',\n ],\n extras_require={\n 'test': [\n 'lxml',\n 'mock',\n 'plone.app.robotframework>=1.0',\n 'robotframework-debuglibrary',\n 'plone.app.testing',\n 'zope.globalrequest',\n 'zope.testing',\n 'gunicorn',\n ]\n },\n)\n", "path": "setup.py"}]}
num_tokens_prompt: 1,747
num_tokens_diff: 268
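A side note on the Products.CMFPlone record above: the `project_urls` dictionary added to `setup()` is emitted as `Project-URL` headers in the built distribution's metadata, which is what PyPI renders as the sidebar links the issue asks for. The check below is hypothetical — it assumes a build of the patched package is installed — and only reads standard metadata via `importlib.metadata`.

```
# Hypothetical check: list the Project-URL entries of an installed build.
from importlib.metadata import metadata

meta = metadata("Products.CMFPlone")
for entry in meta.get_all("Project-URL") or []:
    # Entries look like "Documentation, https://docs.plone.org"
    print(entry)
```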
problem_id: gh_patches_debug_14795
source: rasdani/github-patches
task_type: git_diff
in_source_id: open-mmlab__mmpretrain-1251
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> [Bug] The precision are not aligned between val during train and test ### Branch master branch (0.24 or other 0.x version) ### Describe the bug The precision of val in the train is 73, and the precision of test 76, they are not aligned ### Environment A100 or NPU ### Other information _No response_ </issue> <code> [start of mmcls/models/heads/multi_label_head.py] 1 # Copyright (c) OpenMMLab. All rights reserved. 2 import torch 3 4 from ..builder import HEADS, build_loss 5 from ..utils import is_tracing 6 from .base_head import BaseHead 7 8 9 @HEADS.register_module() 10 class MultiLabelClsHead(BaseHead): 11 """Classification head for multilabel task. 12 13 Args: 14 loss (dict): Config of classification loss. 15 """ 16 17 def __init__(self, 18 loss=dict( 19 type='CrossEntropyLoss', 20 use_sigmoid=True, 21 reduction='mean', 22 loss_weight=1.0), 23 init_cfg=None): 24 super(MultiLabelClsHead, self).__init__(init_cfg=init_cfg) 25 26 assert isinstance(loss, dict) 27 28 self.compute_loss = build_loss(loss) 29 30 def loss(self, cls_score, gt_label): 31 gt_label = gt_label.type_as(cls_score) 32 num_samples = len(cls_score) 33 losses = dict() 34 35 # map difficult examples to positive ones 36 _gt_label = torch.abs(gt_label) 37 # compute loss 38 loss = self.compute_loss(cls_score, _gt_label, avg_factor=num_samples) 39 losses['loss'] = loss 40 return losses 41 42 def forward_train(self, cls_score, gt_label, **kwargs): 43 if isinstance(cls_score, tuple): 44 cls_score = cls_score[-1] 45 gt_label = gt_label.type_as(cls_score) 46 losses = self.loss(cls_score, gt_label, **kwargs) 47 return losses 48 49 def pre_logits(self, x): 50 if isinstance(x, tuple): 51 x = x[-1] 52 53 from mmcls.utils import get_root_logger 54 logger = get_root_logger() 55 logger.warning( 56 'The input of MultiLabelClsHead should be already logits. ' 57 'Please modify the backbone if you want to get pre-logits feature.' 58 ) 59 return x 60 61 def simple_test(self, x, sigmoid=True, post_process=True): 62 """Inference without augmentation. 63 64 Args: 65 cls_score (tuple[Tensor]): The input classification score logits. 66 Multi-stage inputs are acceptable but only the last stage will 67 be used to classify. The shape of every item should be 68 ``(num_samples, num_classes)``. 69 sigmoid (bool): Whether to sigmoid the classification score. 70 post_process (bool): Whether to do post processing the 71 inference results. It will convert the output to a list. 72 73 Returns: 74 Tensor | list: The inference results. 75 76 - If no post processing, the output is a tensor with shape 77 ``(num_samples, num_classes)``. 78 - If post processing, the output is a multi-dimentional list of 79 float and the dimensions are ``(num_samples, num_classes)``. 80 """ 81 if isinstance(x, tuple): 82 x = x[-1] 83 84 if sigmoid: 85 pred = torch.sigmoid(x) if x is not None else None 86 else: 87 pred = x 88 89 if post_process: 90 return self.post_process(pred) 91 else: 92 return pred 93 94 def post_process(self, pred): 95 on_trace = is_tracing() 96 if torch.onnx.is_in_onnx_export() or on_trace: 97 return pred 98 pred = list(pred.detach().cpu().numpy()) 99 return pred 100 [end of mmcls/models/heads/multi_label_head.py] [start of mmcls/models/heads/multi_label_linear_head.py] 1 # Copyright (c) OpenMMLab. All rights reserved. 
2 import torch 3 import torch.nn as nn 4 5 from ..builder import HEADS 6 from .multi_label_head import MultiLabelClsHead 7 8 9 @HEADS.register_module() 10 class MultiLabelLinearClsHead(MultiLabelClsHead): 11 """Linear classification head for multilabel task. 12 13 Args: 14 num_classes (int): Number of categories. 15 in_channels (int): Number of channels in the input feature map. 16 loss (dict): Config of classification loss. 17 init_cfg (dict | optional): The extra init config of layers. 18 Defaults to use dict(type='Normal', layer='Linear', std=0.01). 19 """ 20 21 def __init__(self, 22 num_classes, 23 in_channels, 24 loss=dict( 25 type='CrossEntropyLoss', 26 use_sigmoid=True, 27 reduction='mean', 28 loss_weight=1.0), 29 init_cfg=dict(type='Normal', layer='Linear', std=0.01)): 30 super(MultiLabelLinearClsHead, self).__init__( 31 loss=loss, init_cfg=init_cfg) 32 33 if num_classes <= 0: 34 raise ValueError( 35 f'num_classes={num_classes} must be a positive integer') 36 37 self.in_channels = in_channels 38 self.num_classes = num_classes 39 40 self.fc = nn.Linear(self.in_channels, self.num_classes) 41 42 def pre_logits(self, x): 43 if isinstance(x, tuple): 44 x = x[-1] 45 return x 46 47 def forward_train(self, x, gt_label, **kwargs): 48 x = self.pre_logits(x) 49 gt_label = gt_label.type_as(x) 50 cls_score = self.fc(x) 51 losses = self.loss(cls_score, gt_label, **kwargs) 52 return losses 53 54 def simple_test(self, x, sigmoid=True, post_process=True): 55 """Inference without augmentation. 56 57 Args: 58 x (tuple[Tensor]): The input features. 59 Multi-stage inputs are acceptable but only the last stage will 60 be used to classify. The shape of every item should be 61 ``(num_samples, in_channels)``. 62 sigmoid (bool): Whether to sigmoid the classification score. 63 post_process (bool): Whether to do post processing the 64 inference results. It will convert the output to a list. 65 66 Returns: 67 Tensor | list: The inference results. 68 69 - If no post processing, the output is a tensor with shape 70 ``(num_samples, num_classes)``. 71 - If post processing, the output is a multi-dimentional list of 72 float and the dimensions are ``(num_samples, num_classes)``. 73 """ 74 x = self.pre_logits(x) 75 cls_score = self.fc(x) 76 77 if sigmoid: 78 pred = torch.sigmoid(cls_score) if cls_score is not None else None 79 else: 80 pred = cls_score 81 82 if post_process: 83 return self.post_process(pred) 84 else: 85 return pred 86 [end of mmcls/models/heads/multi_label_linear_head.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/mmcls/models/heads/multi_label_head.py b/mmcls/models/heads/multi_label_head.py --- a/mmcls/models/heads/multi_label_head.py +++ b/mmcls/models/heads/multi_label_head.py @@ -82,7 +82,8 @@ x = x[-1] if sigmoid: - pred = torch.sigmoid(x) if x is not None else None + # Convert to full precision because sigmoid is sensitive. + pred = torch.sigmoid(x.float()) if x is not None else None else: pred = x diff --git a/mmcls/models/heads/multi_label_linear_head.py b/mmcls/models/heads/multi_label_linear_head.py --- a/mmcls/models/heads/multi_label_linear_head.py +++ b/mmcls/models/heads/multi_label_linear_head.py @@ -75,7 +75,9 @@ cls_score = self.fc(x) if sigmoid: - pred = torch.sigmoid(cls_score) if cls_score is not None else None + # Convert to full precision because sigmoid is sensitive. + pred = torch.sigmoid( + cls_score.float()) if cls_score is not None else None else: pred = cls_score
{"golden_diff": "diff --git a/mmcls/models/heads/multi_label_head.py b/mmcls/models/heads/multi_label_head.py\n--- a/mmcls/models/heads/multi_label_head.py\n+++ b/mmcls/models/heads/multi_label_head.py\n@@ -82,7 +82,8 @@\n x = x[-1]\n \n if sigmoid:\n- pred = torch.sigmoid(x) if x is not None else None\n+ # Convert to full precision because sigmoid is sensitive.\n+ pred = torch.sigmoid(x.float()) if x is not None else None\n else:\n pred = x\n \ndiff --git a/mmcls/models/heads/multi_label_linear_head.py b/mmcls/models/heads/multi_label_linear_head.py\n--- a/mmcls/models/heads/multi_label_linear_head.py\n+++ b/mmcls/models/heads/multi_label_linear_head.py\n@@ -75,7 +75,9 @@\n cls_score = self.fc(x)\n \n if sigmoid:\n- pred = torch.sigmoid(cls_score) if cls_score is not None else None\n+ # Convert to full precision because sigmoid is sensitive.\n+ pred = torch.sigmoid(\n+ cls_score.float()) if cls_score is not None else None\n else:\n pred = cls_score\n", "issue": "[Bug] The precision are not aligned between val during train and test \n### Branch\n\nmaster branch (0.24 or other 0.x version)\n\n### Describe the bug\n\nThe precision of val in the train is 73, and the precision of test 76, they are not aligned\r\n\r\n\n\n### Environment\n\nA100 or NPU\n\n### Other information\n\n_No response_\n", "before_files": [{"content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport torch\n\nfrom ..builder import HEADS, build_loss\nfrom ..utils import is_tracing\nfrom .base_head import BaseHead\n\n\[email protected]_module()\nclass MultiLabelClsHead(BaseHead):\n \"\"\"Classification head for multilabel task.\n\n Args:\n loss (dict): Config of classification loss.\n \"\"\"\n\n def __init__(self,\n loss=dict(\n type='CrossEntropyLoss',\n use_sigmoid=True,\n reduction='mean',\n loss_weight=1.0),\n init_cfg=None):\n super(MultiLabelClsHead, self).__init__(init_cfg=init_cfg)\n\n assert isinstance(loss, dict)\n\n self.compute_loss = build_loss(loss)\n\n def loss(self, cls_score, gt_label):\n gt_label = gt_label.type_as(cls_score)\n num_samples = len(cls_score)\n losses = dict()\n\n # map difficult examples to positive ones\n _gt_label = torch.abs(gt_label)\n # compute loss\n loss = self.compute_loss(cls_score, _gt_label, avg_factor=num_samples)\n losses['loss'] = loss\n return losses\n\n def forward_train(self, cls_score, gt_label, **kwargs):\n if isinstance(cls_score, tuple):\n cls_score = cls_score[-1]\n gt_label = gt_label.type_as(cls_score)\n losses = self.loss(cls_score, gt_label, **kwargs)\n return losses\n\n def pre_logits(self, x):\n if isinstance(x, tuple):\n x = x[-1]\n\n from mmcls.utils import get_root_logger\n logger = get_root_logger()\n logger.warning(\n 'The input of MultiLabelClsHead should be already logits. '\n 'Please modify the backbone if you want to get pre-logits feature.'\n )\n return x\n\n def simple_test(self, x, sigmoid=True, post_process=True):\n \"\"\"Inference without augmentation.\n\n Args:\n cls_score (tuple[Tensor]): The input classification score logits.\n Multi-stage inputs are acceptable but only the last stage will\n be used to classify. The shape of every item should be\n ``(num_samples, num_classes)``.\n sigmoid (bool): Whether to sigmoid the classification score.\n post_process (bool): Whether to do post processing the\n inference results. 
It will convert the output to a list.\n\n Returns:\n Tensor | list: The inference results.\n\n - If no post processing, the output is a tensor with shape\n ``(num_samples, num_classes)``.\n - If post processing, the output is a multi-dimentional list of\n float and the dimensions are ``(num_samples, num_classes)``.\n \"\"\"\n if isinstance(x, tuple):\n x = x[-1]\n\n if sigmoid:\n pred = torch.sigmoid(x) if x is not None else None\n else:\n pred = x\n\n if post_process:\n return self.post_process(pred)\n else:\n return pred\n\n def post_process(self, pred):\n on_trace = is_tracing()\n if torch.onnx.is_in_onnx_export() or on_trace:\n return pred\n pred = list(pred.detach().cpu().numpy())\n return pred\n", "path": "mmcls/models/heads/multi_label_head.py"}, {"content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport torch\nimport torch.nn as nn\n\nfrom ..builder import HEADS\nfrom .multi_label_head import MultiLabelClsHead\n\n\[email protected]_module()\nclass MultiLabelLinearClsHead(MultiLabelClsHead):\n \"\"\"Linear classification head for multilabel task.\n\n Args:\n num_classes (int): Number of categories.\n in_channels (int): Number of channels in the input feature map.\n loss (dict): Config of classification loss.\n init_cfg (dict | optional): The extra init config of layers.\n Defaults to use dict(type='Normal', layer='Linear', std=0.01).\n \"\"\"\n\n def __init__(self,\n num_classes,\n in_channels,\n loss=dict(\n type='CrossEntropyLoss',\n use_sigmoid=True,\n reduction='mean',\n loss_weight=1.0),\n init_cfg=dict(type='Normal', layer='Linear', std=0.01)):\n super(MultiLabelLinearClsHead, self).__init__(\n loss=loss, init_cfg=init_cfg)\n\n if num_classes <= 0:\n raise ValueError(\n f'num_classes={num_classes} must be a positive integer')\n\n self.in_channels = in_channels\n self.num_classes = num_classes\n\n self.fc = nn.Linear(self.in_channels, self.num_classes)\n\n def pre_logits(self, x):\n if isinstance(x, tuple):\n x = x[-1]\n return x\n\n def forward_train(self, x, gt_label, **kwargs):\n x = self.pre_logits(x)\n gt_label = gt_label.type_as(x)\n cls_score = self.fc(x)\n losses = self.loss(cls_score, gt_label, **kwargs)\n return losses\n\n def simple_test(self, x, sigmoid=True, post_process=True):\n \"\"\"Inference without augmentation.\n\n Args:\n x (tuple[Tensor]): The input features.\n Multi-stage inputs are acceptable but only the last stage will\n be used to classify. The shape of every item should be\n ``(num_samples, in_channels)``.\n sigmoid (bool): Whether to sigmoid the classification score.\n post_process (bool): Whether to do post processing the\n inference results. It will convert the output to a list.\n\n Returns:\n Tensor | list: The inference results.\n\n - If no post processing, the output is a tensor with shape\n ``(num_samples, num_classes)``.\n - If post processing, the output is a multi-dimentional list of\n float and the dimensions are ``(num_samples, num_classes)``.\n \"\"\"\n x = self.pre_logits(x)\n cls_score = self.fc(x)\n\n if sigmoid:\n pred = torch.sigmoid(cls_score) if cls_score is not None else None\n else:\n pred = cls_score\n\n if post_process:\n return self.post_process(pred)\n else:\n return pred\n", "path": "mmcls/models/heads/multi_label_linear_head.py"}]}
num_tokens_prompt: 2,353
num_tokens_diff: 271
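A side note on the mmpretrain record above: the patch casts logits to float32 before the sigmoid because, under half-precision evaluation, probabilities near 0 or 1 collapse to exactly 0.0 or 1.0, which is enough to move multi-label metrics between the val-during-train and test paths. A minimal, framework-free illustration in plain PyTorch:

```
import torch

logit = torch.tensor([11.0])     # a confidently positive logit
p32 = torch.sigmoid(logit)       # ~0.9999833 in float32
p16 = p32.half()                 # float16 cannot represent that value; it rounds to 1.0
print(p32.item(), p16.item())
```

Casting to full precision before the sigmoid, as the diff does, keeps those tail probabilities distinguishable.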
problem_id: gh_patches_debug_42382
source: rasdani/github-patches
task_type: git_diff
in_source_id: lutris__lutris-2973
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Add option to turn columns on/off in List View When right-clicking to table headbar in List View, you expect to get a menu for turning columns on/off, but you just select first game in the list. </issue> <code> [start of lutris/gui/views/list.py] 1 """TreeView based game list""" 2 from gettext import gettext as _ 3 4 # Third Party Libraries 5 # pylint: disable=no-member 6 from gi.repository import Gtk, Pango 7 8 # Lutris Modules 9 from lutris import settings 10 from lutris.gui.views import ( 11 COL_ICON, COL_INSTALLED_AT, COL_INSTALLED_AT_TEXT, COL_LASTPLAYED, COL_LASTPLAYED_TEXT, COL_NAME, COL_PLATFORM, 12 COL_PLAYTIME, COL_PLAYTIME_TEXT, COL_RUNNER_HUMAN_NAME, COL_YEAR, COLUMN_NAMES 13 ) 14 from lutris.gui.views.base import GameView 15 from lutris.gui.views.store import sort_func 16 17 18 class GameListView(Gtk.TreeView, GameView): 19 20 """Show the main list of games.""" 21 22 __gsignals__ = GameView.__gsignals__ 23 24 def __init__(self, store): 25 self.game_store = store 26 self.model = self.game_store.modelsort 27 super().__init__(self.model) 28 self.set_rules_hint(True) 29 30 # Icon column 31 image_cell = Gtk.CellRendererPixbuf() 32 column = Gtk.TreeViewColumn("", image_cell, pixbuf=COL_ICON) 33 column.set_reorderable(True) 34 column.set_sort_indicator(False) 35 self.append_column(column) 36 37 # Text columns 38 default_text_cell = self.set_text_cell() 39 name_cell = self.set_text_cell() 40 name_cell.set_padding(5, 0) 41 42 self.set_column(name_cell, _("Name"), COL_NAME, 200) 43 self.set_column(default_text_cell, _("Year"), COL_YEAR, 60) 44 self.set_column(default_text_cell, _("Runner"), COL_RUNNER_HUMAN_NAME, 120) 45 self.set_column(default_text_cell, _("Platform"), COL_PLATFORM, 120) 46 self.set_column(default_text_cell, _("Last Played"), COL_LASTPLAYED_TEXT, 120) 47 self.set_sort_with_column(COL_LASTPLAYED_TEXT, COL_LASTPLAYED) 48 self.set_column(default_text_cell, _("Installed At"), COL_INSTALLED_AT_TEXT, 120) 49 self.set_sort_with_column(COL_INSTALLED_AT_TEXT, COL_INSTALLED_AT) 50 self.set_column(default_text_cell, _("Play Time"), COL_PLAYTIME_TEXT, 100) 51 self.set_sort_with_column(COL_PLAYTIME_TEXT, COL_PLAYTIME) 52 53 self.get_selection().set_mode(Gtk.SelectionMode.SINGLE) 54 55 self.connect_signals() 56 self.connect("row-activated", self.on_row_activated) 57 self.get_selection().connect("changed", self.on_cursor_changed) 58 59 @staticmethod 60 def set_text_cell(): 61 text_cell = Gtk.CellRendererText() 62 text_cell.set_padding(10, 0) 63 text_cell.set_property("ellipsize", Pango.EllipsizeMode.END) 64 return text_cell 65 66 def set_column(self, cell, header, column_id, default_width, sort_id=None): 67 column = Gtk.TreeViewColumn(header, cell, markup=column_id) 68 column.set_sort_indicator(True) 69 column.set_sort_column_id(column_id if sort_id is None else sort_id) 70 self.set_column_sort(column_id if sort_id is None else sort_id) 71 column.set_resizable(True) 72 column.set_reorderable(True) 73 width = settings.read_setting("%s_column_width" % COLUMN_NAMES[column_id], "list view") 74 column.set_fixed_width(int(width) if width else default_width) 75 self.append_column(column) 76 column.connect("notify::width", self.on_column_width_changed) 77 return column 78 79 def set_column_sort(self, col): 80 """Sort a column and fallback to sorting by name and runner.""" 81 self.model.set_sort_func(col, sort_func, col) 82 83 def set_sort_with_column(self, col, sort_col): 84 """Sort 
a column by using another column's data""" 85 self.model.set_sort_func(col, sort_func, sort_col) 86 87 def get_selected_item(self): 88 """Return the currently selected game's id.""" 89 selection = self.get_selection() 90 if not selection: 91 return None 92 _model, select_iter = selection.get_selected() 93 if select_iter: 94 return select_iter 95 96 def select(self): 97 self.set_cursor(self.current_path[0]) 98 99 def set_selected_game(self, game_id): 100 row = self.game_store.get_row_by_id(game_id, filtered=True) 101 if row: 102 self.set_cursor(row.path) 103 104 def on_row_activated(self, widget, line=None, column=None): 105 """Handles double clicks""" 106 selected_item = self.get_selected_item() 107 if selected_item: 108 selected_game = self.get_selected_game(selected_item) 109 else: 110 selected_game = None 111 self.emit("game-activated", selected_game) 112 113 def on_cursor_changed(self, widget, _line=None, _column=None): 114 selected_item = self.get_selected_item() 115 if selected_item: 116 self.selected_game = self.get_selected_game(selected_item) 117 else: 118 self.selected_game = None 119 self.emit("game-selected", self.selected_game) 120 121 @staticmethod 122 def on_column_width_changed(col, *args): 123 col_name = col.get_title() 124 if col_name: 125 settings.write_setting( 126 col_name.replace(" ", "") + "_column_width", 127 col.get_fixed_width(), 128 "list view", 129 ) 130 [end of lutris/gui/views/list.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/lutris/gui/views/list.py b/lutris/gui/views/list.py --- a/lutris/gui/views/list.py +++ b/lutris/gui/views/list.py @@ -39,7 +39,7 @@ name_cell = self.set_text_cell() name_cell.set_padding(5, 0) - self.set_column(name_cell, _("Name"), COL_NAME, 200) + self.set_column(name_cell, _("Name"), COL_NAME, 200, always_visible=True) self.set_column(default_text_cell, _("Year"), COL_YEAR, 60) self.set_column(default_text_cell, _("Runner"), COL_RUNNER_HUMAN_NAME, 120) self.set_column(default_text_cell, _("Platform"), COL_PLATFORM, 120) @@ -63,7 +63,7 @@ text_cell.set_property("ellipsize", Pango.EllipsizeMode.END) return text_cell - def set_column(self, cell, header, column_id, default_width, sort_id=None): + def set_column(self, cell, header, column_id, default_width, always_visible=False, sort_id=None): column = Gtk.TreeViewColumn(header, cell, markup=column_id) column.set_sort_indicator(True) column.set_sort_column_id(column_id if sort_id is None else sort_id) @@ -71,9 +71,12 @@ column.set_resizable(True) column.set_reorderable(True) width = settings.read_setting("%s_column_width" % COLUMN_NAMES[column_id], "list view") + is_visible = settings.read_setting("%s_visible" % COLUMN_NAMES[column_id], "list view") column.set_fixed_width(int(width) if width else default_width) + column.set_visible(is_visible == "True" or always_visible if is_visible else True) self.append_column(column) column.connect("notify::width", self.on_column_width_changed) + column.get_button().connect('button-press-event', self.on_column_header_button_pressed) return column def set_column_sort(self, col): @@ -101,6 +104,13 @@ if row: self.set_cursor(row.path) + def on_column_header_button_pressed(self, button, event): + """Handles column header button press events""" + if event.button == 3: + menu = GameListColumnToggleMenu(self.get_columns()) + menu.popup_at_pointer(None) + return True + def on_row_activated(self, widget, line=None, column=None): """Handles double clicks""" selected_item = self.get_selected_item() @@ -127,3 +137,37 @@ col.get_fixed_width(), "list view", ) + + +class GameListColumnToggleMenu(Gtk.Menu): + + def __init__(self, columns): + super().__init__() + self.columns = columns + self.column_map = {} + self.create_menuitems() + self.show_all() + + def create_menuitems(self): + for column in self.columns: + title = column.get_title() + if title == "": + continue + checkbox = Gtk.CheckMenuItem(title) + checkbox.set_active(column.get_visible()) + if title == _("Name"): + checkbox.set_sensitive(False) + else: + checkbox.connect("toggled", self.on_toggle_column) + self.column_map[checkbox] = column + self.append(checkbox) + + def on_toggle_column(self, check_menu_item): + column = self.column_map[check_menu_item] + is_visible = check_menu_item.get_active() + column.set_visible(is_visible) + settings.write_setting( + column.get_title().replace(" ", "") + "_visible", + str(is_visible), + "list view", + )
{"golden_diff": "diff --git a/lutris/gui/views/list.py b/lutris/gui/views/list.py\n--- a/lutris/gui/views/list.py\n+++ b/lutris/gui/views/list.py\n@@ -39,7 +39,7 @@\n name_cell = self.set_text_cell()\n name_cell.set_padding(5, 0)\n \n- self.set_column(name_cell, _(\"Name\"), COL_NAME, 200)\n+ self.set_column(name_cell, _(\"Name\"), COL_NAME, 200, always_visible=True)\n self.set_column(default_text_cell, _(\"Year\"), COL_YEAR, 60)\n self.set_column(default_text_cell, _(\"Runner\"), COL_RUNNER_HUMAN_NAME, 120)\n self.set_column(default_text_cell, _(\"Platform\"), COL_PLATFORM, 120)\n@@ -63,7 +63,7 @@\n text_cell.set_property(\"ellipsize\", Pango.EllipsizeMode.END)\n return text_cell\n \n- def set_column(self, cell, header, column_id, default_width, sort_id=None):\n+ def set_column(self, cell, header, column_id, default_width, always_visible=False, sort_id=None):\n column = Gtk.TreeViewColumn(header, cell, markup=column_id)\n column.set_sort_indicator(True)\n column.set_sort_column_id(column_id if sort_id is None else sort_id)\n@@ -71,9 +71,12 @@\n column.set_resizable(True)\n column.set_reorderable(True)\n width = settings.read_setting(\"%s_column_width\" % COLUMN_NAMES[column_id], \"list view\")\n+ is_visible = settings.read_setting(\"%s_visible\" % COLUMN_NAMES[column_id], \"list view\")\n column.set_fixed_width(int(width) if width else default_width)\n+ column.set_visible(is_visible == \"True\" or always_visible if is_visible else True)\n self.append_column(column)\n column.connect(\"notify::width\", self.on_column_width_changed)\n+ column.get_button().connect('button-press-event', self.on_column_header_button_pressed)\n return column\n \n def set_column_sort(self, col):\n@@ -101,6 +104,13 @@\n if row:\n self.set_cursor(row.path)\n \n+ def on_column_header_button_pressed(self, button, event):\n+ \"\"\"Handles column header button press events\"\"\"\n+ if event.button == 3:\n+ menu = GameListColumnToggleMenu(self.get_columns())\n+ menu.popup_at_pointer(None)\n+ return True\n+\n def on_row_activated(self, widget, line=None, column=None):\n \"\"\"Handles double clicks\"\"\"\n selected_item = self.get_selected_item()\n@@ -127,3 +137,37 @@\n col.get_fixed_width(),\n \"list view\",\n )\n+\n+\n+class GameListColumnToggleMenu(Gtk.Menu):\n+\n+ def __init__(self, columns):\n+ super().__init__()\n+ self.columns = columns\n+ self.column_map = {}\n+ self.create_menuitems()\n+ self.show_all()\n+\n+ def create_menuitems(self):\n+ for column in self.columns:\n+ title = column.get_title()\n+ if title == \"\":\n+ continue\n+ checkbox = Gtk.CheckMenuItem(title)\n+ checkbox.set_active(column.get_visible())\n+ if title == _(\"Name\"):\n+ checkbox.set_sensitive(False)\n+ else:\n+ checkbox.connect(\"toggled\", self.on_toggle_column)\n+ self.column_map[checkbox] = column\n+ self.append(checkbox)\n+\n+ def on_toggle_column(self, check_menu_item):\n+ column = self.column_map[check_menu_item]\n+ is_visible = check_menu_item.get_active()\n+ column.set_visible(is_visible)\n+ settings.write_setting(\n+ column.get_title().replace(\" \", \"\") + \"_visible\",\n+ str(is_visible),\n+ \"list view\",\n+ )\n", "issue": "Add option to turn columns on/off in List View\nWhen right-clicking to table headbar in List View, you expect to get a menu for turning columns on/off, but you just select first game in the list.\n", "before_files": [{"content": "\"\"\"TreeView based game list\"\"\"\nfrom gettext import gettext as _\n\n# Third Party Libraries\n# pylint: disable=no-member\nfrom gi.repository import Gtk, Pango\n\n# Lutris 
Modules\nfrom lutris import settings\nfrom lutris.gui.views import (\n COL_ICON, COL_INSTALLED_AT, COL_INSTALLED_AT_TEXT, COL_LASTPLAYED, COL_LASTPLAYED_TEXT, COL_NAME, COL_PLATFORM,\n COL_PLAYTIME, COL_PLAYTIME_TEXT, COL_RUNNER_HUMAN_NAME, COL_YEAR, COLUMN_NAMES\n)\nfrom lutris.gui.views.base import GameView\nfrom lutris.gui.views.store import sort_func\n\n\nclass GameListView(Gtk.TreeView, GameView):\n\n \"\"\"Show the main list of games.\"\"\"\n\n __gsignals__ = GameView.__gsignals__\n\n def __init__(self, store):\n self.game_store = store\n self.model = self.game_store.modelsort\n super().__init__(self.model)\n self.set_rules_hint(True)\n\n # Icon column\n image_cell = Gtk.CellRendererPixbuf()\n column = Gtk.TreeViewColumn(\"\", image_cell, pixbuf=COL_ICON)\n column.set_reorderable(True)\n column.set_sort_indicator(False)\n self.append_column(column)\n\n # Text columns\n default_text_cell = self.set_text_cell()\n name_cell = self.set_text_cell()\n name_cell.set_padding(5, 0)\n\n self.set_column(name_cell, _(\"Name\"), COL_NAME, 200)\n self.set_column(default_text_cell, _(\"Year\"), COL_YEAR, 60)\n self.set_column(default_text_cell, _(\"Runner\"), COL_RUNNER_HUMAN_NAME, 120)\n self.set_column(default_text_cell, _(\"Platform\"), COL_PLATFORM, 120)\n self.set_column(default_text_cell, _(\"Last Played\"), COL_LASTPLAYED_TEXT, 120)\n self.set_sort_with_column(COL_LASTPLAYED_TEXT, COL_LASTPLAYED)\n self.set_column(default_text_cell, _(\"Installed At\"), COL_INSTALLED_AT_TEXT, 120)\n self.set_sort_with_column(COL_INSTALLED_AT_TEXT, COL_INSTALLED_AT)\n self.set_column(default_text_cell, _(\"Play Time\"), COL_PLAYTIME_TEXT, 100)\n self.set_sort_with_column(COL_PLAYTIME_TEXT, COL_PLAYTIME)\n\n self.get_selection().set_mode(Gtk.SelectionMode.SINGLE)\n\n self.connect_signals()\n self.connect(\"row-activated\", self.on_row_activated)\n self.get_selection().connect(\"changed\", self.on_cursor_changed)\n\n @staticmethod\n def set_text_cell():\n text_cell = Gtk.CellRendererText()\n text_cell.set_padding(10, 0)\n text_cell.set_property(\"ellipsize\", Pango.EllipsizeMode.END)\n return text_cell\n\n def set_column(self, cell, header, column_id, default_width, sort_id=None):\n column = Gtk.TreeViewColumn(header, cell, markup=column_id)\n column.set_sort_indicator(True)\n column.set_sort_column_id(column_id if sort_id is None else sort_id)\n self.set_column_sort(column_id if sort_id is None else sort_id)\n column.set_resizable(True)\n column.set_reorderable(True)\n width = settings.read_setting(\"%s_column_width\" % COLUMN_NAMES[column_id], \"list view\")\n column.set_fixed_width(int(width) if width else default_width)\n self.append_column(column)\n column.connect(\"notify::width\", self.on_column_width_changed)\n return column\n\n def set_column_sort(self, col):\n \"\"\"Sort a column and fallback to sorting by name and runner.\"\"\"\n self.model.set_sort_func(col, sort_func, col)\n\n def set_sort_with_column(self, col, sort_col):\n \"\"\"Sort a column by using another column's data\"\"\"\n self.model.set_sort_func(col, sort_func, sort_col)\n\n def get_selected_item(self):\n \"\"\"Return the currently selected game's id.\"\"\"\n selection = self.get_selection()\n if not selection:\n return None\n _model, select_iter = selection.get_selected()\n if select_iter:\n return select_iter\n\n def select(self):\n self.set_cursor(self.current_path[0])\n\n def set_selected_game(self, game_id):\n row = self.game_store.get_row_by_id(game_id, filtered=True)\n if row:\n self.set_cursor(row.path)\n\n def 
on_row_activated(self, widget, line=None, column=None):\n \"\"\"Handles double clicks\"\"\"\n selected_item = self.get_selected_item()\n if selected_item:\n selected_game = self.get_selected_game(selected_item)\n else:\n selected_game = None\n self.emit(\"game-activated\", selected_game)\n\n def on_cursor_changed(self, widget, _line=None, _column=None):\n selected_item = self.get_selected_item()\n if selected_item:\n self.selected_game = self.get_selected_game(selected_item)\n else:\n self.selected_game = None\n self.emit(\"game-selected\", self.selected_game)\n\n @staticmethod\n def on_column_width_changed(col, *args):\n col_name = col.get_title()\n if col_name:\n settings.write_setting(\n col_name.replace(\" \", \"\") + \"_column_width\",\n col.get_fixed_width(),\n \"list view\",\n )\n", "path": "lutris/gui/views/list.py"}]}
num_tokens_prompt: 1,995
num_tokens_diff: 839
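A side note on the lutris record above: the densest line in the diff is `column.set_visible(is_visible == "True" or always_visible if is_visible else True)`. Because `or` binds tighter than a conditional expression, it parses as `((is_visible == "True") or always_visible) if is_visible else True`. A more explicit paraphrase, keeping the names from the patch (behaviour inferred from the diff, not taken from upstream):

```
# is_visible is the raw "<Column>_visible" setting string ("" or None when unset).
if is_visible:
    # A saved setting exists: honour it, but never hide an always-visible column.
    visible = (is_visible == "True") or always_visible
else:
    # No setting saved yet: default to showing the column.
    visible = True
column.set_visible(visible)
```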
problem_id: gh_patches_debug_35481
source: rasdani/github-patches
task_type: git_diff
in_source_id: mdn__kuma-6144
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Magic is incorrectly using image/svg as MIME type for SVG files, preventing uploads of SVG files Magic or libmagic is incorrectly using `image/svg` as the MIME type for SVG uploads, rather than `image/svg+xml`, which is the only correct MIME type [1] (and the one we've always used in the past). Normally, we keep SVG uploads disabled for security reasons since SVG can contain JavaScript code. However, we like to use it for diagrams and the like because they can be translated, and they are more efficient for that purpose in many cases. When we want to upload SVG, we go to the admin panel in Django and in the Constance panel, add "image/svg+xml" to the `WIKI_ATTACHMENT_ALLOWED_TYPES` string. Then we upload the file and restore the previous string to that preference. This is no longer working; even with `image/svg+xml` listed in the preference, uploads fail with an error announcing that "Files of this type are not permitted." This should work, however. This issue is blocking work on WebXR and potentially other documentation that requires the use of diagrams to explain things properly. Hopefully the fix is not complicated. [1] https://www.iana.org/assignments/media-types/media-types.xhtml#image </issue> <code> [start of kuma/attachments/forms.py] 1 import magic 2 from constance import config 3 from django import forms 4 from django.core.validators import EMPTY_VALUES 5 from django.utils.translation import ugettext_lazy as _ 6 7 from .models import AttachmentRevision 8 9 10 MIME_TYPE_INVALID = _('Files of this type are not permitted.') 11 12 13 class AttachmentRevisionForm(forms.ModelForm): 14 """ 15 Unlike the DocumentForm/RevisionForm split, we have only one 16 form for file attachments. The handling view will determine if 17 this is a new revision of an existing file, or the first version 18 of a new file. 19 20 As a result of this, calling save(commit=True) is off-limits. 21 """ 22 class Meta: 23 model = AttachmentRevision 24 fields = ('file', 'title', 'description', 'comment') 25 26 def __init__(self, *args, **kwargs): 27 super(AttachmentRevisionForm, self).__init__(*args, **kwargs) 28 self.mime_type = None 29 30 def clean(self): 31 """ 32 Check the submitted file for its MIME type in case the provided 33 MIME type is missing or is the default MIME type as given in the 34 model field definition. 35 36 That allows overriding the MIME type via the admin UI. 
37 """ 38 cleaned_data = super(AttachmentRevisionForm, self).clean() 39 nulls = EMPTY_VALUES + (AttachmentRevision.DEFAULT_MIME_TYPE,) 40 submitted_mime_type = cleaned_data.get('mime_type') 41 42 if (submitted_mime_type in nulls) and ('file' in cleaned_data): 43 self.mime_type = self.mime_type_from_file(cleaned_data['file']) 44 allowed_mime_types = config.WIKI_ATTACHMENT_ALLOWED_TYPES.split() 45 if self.mime_type not in allowed_mime_types: 46 raise forms.ValidationError(MIME_TYPE_INVALID, code='invalid') 47 48 return cleaned_data 49 50 def save(self, *args, **kwargs): 51 revision = super(AttachmentRevisionForm, self).save(*args, **kwargs) 52 if self.mime_type is not None: 53 revision.mime_type = self.mime_type 54 return revision 55 56 def mime_type_from_file(self, file): 57 m_mime = magic.Magic(mime=True) 58 mime_type = m_mime.from_buffer(file.read(1024)).split(';')[0] 59 file.seek(0) 60 return mime_type 61 62 63 class AdminAttachmentRevisionForm(AttachmentRevisionForm): 64 class Meta(AttachmentRevisionForm.Meta): 65 fields = ['attachment', 'file', 'title', 'mime_type', 'description', 66 'is_approved'] 67 [end of kuma/attachments/forms.py] [start of kuma/attachments/views.py] 1 import mimetypes 2 3 from django.conf import settings 4 from django.core.exceptions import PermissionDenied 5 from django.http import Http404, StreamingHttpResponse 6 from django.shortcuts import get_object_or_404, redirect, render 7 from django.views.decorators.cache import cache_control, never_cache 8 from django.views.decorators.clickjacking import xframe_options_sameorigin 9 10 from kuma.core.decorators import (ensure_wiki_domain, login_required, 11 shared_cache_control) 12 from kuma.core.utils import is_untrusted 13 from kuma.wiki.decorators import process_document_path 14 from kuma.wiki.models import Document 15 16 from .forms import AttachmentRevisionForm 17 from .models import Attachment 18 from .utils import allow_add_attachment_by, convert_to_http_date 19 20 21 # Mime types used on MDN 22 OVERRIDE_MIMETYPES = { 23 'image/jpeg': '.jpeg, .jpg, .jpe', 24 'image/vnd.adobe.photoshop': '.psd', 25 } 26 27 IMAGE_MIMETYPES = ['image/png', 'image/jpeg', 'image/jpg', 'image/gif'] 28 29 30 def guess_extension(_type): 31 return OVERRIDE_MIMETYPES.get(_type, mimetypes.guess_extension(_type)) 32 33 34 @cache_control(public=True, max_age=60 * 15) 35 def raw_file(request, attachment_id, filename): 36 """ 37 Serve up an attachment's file. 
38 """ 39 qs = Attachment.objects.select_related('current_revision') 40 attachment = get_object_or_404(qs, pk=attachment_id) 41 if attachment.current_revision is None: 42 raise Http404 43 44 if is_untrusted(request): 45 rev = attachment.current_revision 46 response = StreamingHttpResponse(rev.file, content_type=rev.mime_type) 47 response['Content-Length'] = rev.file.size 48 response['Last-Modified'] = convert_to_http_date(rev.created) 49 response['X-Frame-Options'] = 'ALLOW-FROM %s' % settings.DOMAIN 50 return response 51 52 return redirect(attachment.get_file_url(), permanent=True) 53 54 55 @shared_cache_control(s_maxage=60 * 60 * 24 * 30) 56 def mindtouch_file_redirect(request, file_id, filename): 57 """Redirect an old MindTouch file URL to a new kuma file URL.""" 58 attachment = get_object_or_404(Attachment, mindtouch_attachment_id=file_id) 59 return redirect(attachment.get_file_url(), permanent=True) 60 61 62 @ensure_wiki_domain 63 @never_cache 64 @xframe_options_sameorigin 65 @login_required 66 @process_document_path 67 def edit_attachment(request, document_slug, document_locale): 68 """ 69 Create a new Attachment object and populate its initial 70 revision or show a separate form view that allows to fix form submission 71 errors. 72 73 Redirects back to the document's editing URL on success. 74 """ 75 document = get_object_or_404( 76 Document, 77 locale=document_locale, 78 slug=document_slug, 79 ) 80 if request.method != 'POST': 81 return redirect(document.get_edit_url()) 82 83 # No access if no permissions to upload 84 if not allow_add_attachment_by(request.user): 85 raise PermissionDenied 86 87 form = AttachmentRevisionForm(data=request.POST, files=request.FILES) 88 if form.is_valid(): 89 revision = form.save(commit=False) 90 revision.creator = request.user 91 attachment = Attachment.objects.create(title=revision.title) 92 revision.attachment = attachment 93 revision.save() 94 # adding the attachment to the document's files (M2M) 95 attachment.attach(document, request.user, revision) 96 return redirect(document.get_edit_url()) 97 else: 98 context = { 99 'form': form, 100 'document': document, 101 } 102 return render(request, 'attachments/edit_attachment.html', context) 103 [end of kuma/attachments/views.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/kuma/attachments/forms.py b/kuma/attachments/forms.py --- a/kuma/attachments/forms.py +++ b/kuma/attachments/forms.py @@ -24,6 +24,7 @@ fields = ('file', 'title', 'description', 'comment') def __init__(self, *args, **kwargs): + self.allow_svg_uploads = kwargs.pop('allow_svg_uploads', False) super(AttachmentRevisionForm, self).__init__(*args, **kwargs) self.mime_type = None @@ -39,11 +40,21 @@ nulls = EMPTY_VALUES + (AttachmentRevision.DEFAULT_MIME_TYPE,) submitted_mime_type = cleaned_data.get('mime_type') - if (submitted_mime_type in nulls) and ('file' in cleaned_data): + if submitted_mime_type in nulls and 'file' in cleaned_data: self.mime_type = self.mime_type_from_file(cleaned_data['file']) - allowed_mime_types = config.WIKI_ATTACHMENT_ALLOWED_TYPES.split() - if self.mime_type not in allowed_mime_types: - raise forms.ValidationError(MIME_TYPE_INVALID, code='invalid') + if self.mime_type.startswith('image/svg') and self.allow_svg_uploads: + # The `magic.Magic()` will, for unknown reasons, sometimes + # think an SVG image's mime type is `image/svg` which not + # a valid mime type actually. + # See https://www.iana.org/assignments/media-types/media-types.xhtml#image + # So correct that. + if self.mime_type == 'image/svg': + self.mime_type = 'image/svg+xml' + else: + allowed_mime_types = config.WIKI_ATTACHMENT_ALLOWED_TYPES.split() + if self.mime_type not in allowed_mime_types: + raise forms.ValidationError( + MIME_TYPE_INVALID, code='invalid') return cleaned_data diff --git a/kuma/attachments/views.py b/kuma/attachments/views.py --- a/kuma/attachments/views.py +++ b/kuma/attachments/views.py @@ -84,7 +84,13 @@ if not allow_add_attachment_by(request.user): raise PermissionDenied - form = AttachmentRevisionForm(data=request.POST, files=request.FILES) + form = AttachmentRevisionForm( + data=request.POST, + files=request.FILES, + # Only staff users are allowed to upload SVG files because SVG files + # can contain embedded inline scripts. + allow_svg_uploads=request.user.is_staff + ) if form.is_valid(): revision = form.save(commit=False) revision.creator = request.user
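The core of the fix above is the mime-type gate added to `clean()`. Stripped of Django and python-magic, the gating behaviour reduces to the sketch below (the function name and the `allowed` tuple are illustrative stand-ins for the form method and `config.WIKI_ATTACHMENT_ALLOWED_TYPES`, not code from the repository):

```python
def resolve_mime_type(detected, allow_svg_uploads,
                      allowed=("image/png", "image/jpeg", "image/gif")):
    if detected.startswith("image/svg") and allow_svg_uploads:
        # libmagic sometimes reports the non-standard "image/svg";
        # correct it to the only registered SVG type, "image/svg+xml".
        return "image/svg+xml" if detected == "image/svg" else detected
    if detected not in allowed:
        raise ValueError("Files of this type are not permitted.")
    return detected

assert resolve_mime_type("image/svg", allow_svg_uploads=True) == "image/svg+xml"
assert resolve_mime_type("image/png", allow_svg_uploads=False) == "image/png"
# A non-staff upload of an SVG is still rejected, exactly as before the patch.
try:
    resolve_mime_type("image/svg+xml", allow_svg_uploads=False)
except ValueError:
    pass
```

In the view, passing `allow_svg_uploads=request.user.is_staff` keeps the relaxation limited to staff, since SVG files can carry embedded inline scripts.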
{"golden_diff": "diff --git a/kuma/attachments/forms.py b/kuma/attachments/forms.py\n--- a/kuma/attachments/forms.py\n+++ b/kuma/attachments/forms.py\n@@ -24,6 +24,7 @@\n fields = ('file', 'title', 'description', 'comment')\n \n def __init__(self, *args, **kwargs):\n+ self.allow_svg_uploads = kwargs.pop('allow_svg_uploads', False)\n super(AttachmentRevisionForm, self).__init__(*args, **kwargs)\n self.mime_type = None\n \n@@ -39,11 +40,21 @@\n nulls = EMPTY_VALUES + (AttachmentRevision.DEFAULT_MIME_TYPE,)\n submitted_mime_type = cleaned_data.get('mime_type')\n \n- if (submitted_mime_type in nulls) and ('file' in cleaned_data):\n+ if submitted_mime_type in nulls and 'file' in cleaned_data:\n self.mime_type = self.mime_type_from_file(cleaned_data['file'])\n- allowed_mime_types = config.WIKI_ATTACHMENT_ALLOWED_TYPES.split()\n- if self.mime_type not in allowed_mime_types:\n- raise forms.ValidationError(MIME_TYPE_INVALID, code='invalid')\n+ if self.mime_type.startswith('image/svg') and self.allow_svg_uploads:\n+ # The `magic.Magic()` will, for unknown reasons, sometimes\n+ # think an SVG image's mime type is `image/svg` which not\n+ # a valid mime type actually.\n+ # See https://www.iana.org/assignments/media-types/media-types.xhtml#image\n+ # So correct that.\n+ if self.mime_type == 'image/svg':\n+ self.mime_type = 'image/svg+xml'\n+ else:\n+ allowed_mime_types = config.WIKI_ATTACHMENT_ALLOWED_TYPES.split()\n+ if self.mime_type not in allowed_mime_types:\n+ raise forms.ValidationError(\n+ MIME_TYPE_INVALID, code='invalid')\n \n return cleaned_data\n \ndiff --git a/kuma/attachments/views.py b/kuma/attachments/views.py\n--- a/kuma/attachments/views.py\n+++ b/kuma/attachments/views.py\n@@ -84,7 +84,13 @@\n if not allow_add_attachment_by(request.user):\n raise PermissionDenied\n \n- form = AttachmentRevisionForm(data=request.POST, files=request.FILES)\n+ form = AttachmentRevisionForm(\n+ data=request.POST,\n+ files=request.FILES,\n+ # Only staff users are allowed to upload SVG files because SVG files\n+ # can contain embedded inline scripts.\n+ allow_svg_uploads=request.user.is_staff\n+ )\n if form.is_valid():\n revision = form.save(commit=False)\n revision.creator = request.user\n", "issue": "Magic is incorrectly using image/svg as MIME type for SVG files, preventing uploads of SVG files\nMagic or libmagic is incorrectly using `image/svg` as the MIME type for SVG uploads, rather than `image/svg+xml`, which is the only correct MIME type [1] (and the one we've always used in the past).\r\n\r\nNormally, we keep SVG uploads disabled for security reasons since SVG can contain JavaScript code. However, we like to use it for diagrams and the like because they can be translated, and they are more efficient for that purpose in many cases. When we want to upload SVG, we go to the admin panel in Django and in the Constance panel, add \"image/svg+xml\" to the `WIKI_ATTACHMENT_ALLOWED_TYPES` string. Then we upload the file and restore the previous string to that preference.\r\n\r\nThis is no longer working; even with `image/svg+xml` listed in the preference, uploads fail with an error announcing that \"Files of this type are not permitted.\" This should work, however.\r\n\r\nThis issue is blocking work on WebXR and potentially other documentation that requires the use of diagrams to explain things properly. 
Hopefully the fix is not complicated.\r\n\r\n[1] https://www.iana.org/assignments/media-types/media-types.xhtml#image\n", "before_files": [{"content": "import magic\nfrom constance import config\nfrom django import forms\nfrom django.core.validators import EMPTY_VALUES\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom .models import AttachmentRevision\n\n\nMIME_TYPE_INVALID = _('Files of this type are not permitted.')\n\n\nclass AttachmentRevisionForm(forms.ModelForm):\n \"\"\"\n Unlike the DocumentForm/RevisionForm split, we have only one\n form for file attachments. The handling view will determine if\n this is a new revision of an existing file, or the first version\n of a new file.\n\n As a result of this, calling save(commit=True) is off-limits.\n \"\"\"\n class Meta:\n model = AttachmentRevision\n fields = ('file', 'title', 'description', 'comment')\n\n def __init__(self, *args, **kwargs):\n super(AttachmentRevisionForm, self).__init__(*args, **kwargs)\n self.mime_type = None\n\n def clean(self):\n \"\"\"\n Check the submitted file for its MIME type in case the provided\n MIME type is missing or is the default MIME type as given in the\n model field definition.\n\n That allows overriding the MIME type via the admin UI.\n \"\"\"\n cleaned_data = super(AttachmentRevisionForm, self).clean()\n nulls = EMPTY_VALUES + (AttachmentRevision.DEFAULT_MIME_TYPE,)\n submitted_mime_type = cleaned_data.get('mime_type')\n\n if (submitted_mime_type in nulls) and ('file' in cleaned_data):\n self.mime_type = self.mime_type_from_file(cleaned_data['file'])\n allowed_mime_types = config.WIKI_ATTACHMENT_ALLOWED_TYPES.split()\n if self.mime_type not in allowed_mime_types:\n raise forms.ValidationError(MIME_TYPE_INVALID, code='invalid')\n\n return cleaned_data\n\n def save(self, *args, **kwargs):\n revision = super(AttachmentRevisionForm, self).save(*args, **kwargs)\n if self.mime_type is not None:\n revision.mime_type = self.mime_type\n return revision\n\n def mime_type_from_file(self, file):\n m_mime = magic.Magic(mime=True)\n mime_type = m_mime.from_buffer(file.read(1024)).split(';')[0]\n file.seek(0)\n return mime_type\n\n\nclass AdminAttachmentRevisionForm(AttachmentRevisionForm):\n class Meta(AttachmentRevisionForm.Meta):\n fields = ['attachment', 'file', 'title', 'mime_type', 'description',\n 'is_approved']\n", "path": "kuma/attachments/forms.py"}, {"content": "import mimetypes\n\nfrom django.conf import settings\nfrom django.core.exceptions import PermissionDenied\nfrom django.http import Http404, StreamingHttpResponse\nfrom django.shortcuts import get_object_or_404, redirect, render\nfrom django.views.decorators.cache import cache_control, never_cache\nfrom django.views.decorators.clickjacking import xframe_options_sameorigin\n\nfrom kuma.core.decorators import (ensure_wiki_domain, login_required,\n shared_cache_control)\nfrom kuma.core.utils import is_untrusted\nfrom kuma.wiki.decorators import process_document_path\nfrom kuma.wiki.models import Document\n\nfrom .forms import AttachmentRevisionForm\nfrom .models import Attachment\nfrom .utils import allow_add_attachment_by, convert_to_http_date\n\n\n# Mime types used on MDN\nOVERRIDE_MIMETYPES = {\n 'image/jpeg': '.jpeg, .jpg, .jpe',\n 'image/vnd.adobe.photoshop': '.psd',\n}\n\nIMAGE_MIMETYPES = ['image/png', 'image/jpeg', 'image/jpg', 'image/gif']\n\n\ndef guess_extension(_type):\n return OVERRIDE_MIMETYPES.get(_type, mimetypes.guess_extension(_type))\n\n\n@cache_control(public=True, max_age=60 * 15)\ndef raw_file(request, 
attachment_id, filename):\n \"\"\"\n Serve up an attachment's file.\n \"\"\"\n qs = Attachment.objects.select_related('current_revision')\n attachment = get_object_or_404(qs, pk=attachment_id)\n if attachment.current_revision is None:\n raise Http404\n\n if is_untrusted(request):\n rev = attachment.current_revision\n response = StreamingHttpResponse(rev.file, content_type=rev.mime_type)\n response['Content-Length'] = rev.file.size\n response['Last-Modified'] = convert_to_http_date(rev.created)\n response['X-Frame-Options'] = 'ALLOW-FROM %s' % settings.DOMAIN\n return response\n\n return redirect(attachment.get_file_url(), permanent=True)\n\n\n@shared_cache_control(s_maxage=60 * 60 * 24 * 30)\ndef mindtouch_file_redirect(request, file_id, filename):\n \"\"\"Redirect an old MindTouch file URL to a new kuma file URL.\"\"\"\n attachment = get_object_or_404(Attachment, mindtouch_attachment_id=file_id)\n return redirect(attachment.get_file_url(), permanent=True)\n\n\n@ensure_wiki_domain\n@never_cache\n@xframe_options_sameorigin\n@login_required\n@process_document_path\ndef edit_attachment(request, document_slug, document_locale):\n \"\"\"\n Create a new Attachment object and populate its initial\n revision or show a separate form view that allows to fix form submission\n errors.\n\n Redirects back to the document's editing URL on success.\n \"\"\"\n document = get_object_or_404(\n Document,\n locale=document_locale,\n slug=document_slug,\n )\n if request.method != 'POST':\n return redirect(document.get_edit_url())\n\n # No access if no permissions to upload\n if not allow_add_attachment_by(request.user):\n raise PermissionDenied\n\n form = AttachmentRevisionForm(data=request.POST, files=request.FILES)\n if form.is_valid():\n revision = form.save(commit=False)\n revision.creator = request.user\n attachment = Attachment.objects.create(title=revision.title)\n revision.attachment = attachment\n revision.save()\n # adding the attachment to the document's files (M2M)\n attachment.attach(document, request.user, revision)\n return redirect(document.get_edit_url())\n else:\n context = {\n 'form': form,\n 'document': document,\n }\n return render(request, 'attachments/edit_attachment.html', context)\n", "path": "kuma/attachments/views.py"}]}
2,461
575
gh_patches_debug_4556
rasdani/github-patches
git_diff
scrapy__scrapy-4599
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> KeyError in is_generator_with_return_value ```Python traceback Traceback (most recent call last): File "/usr/local/lib/python3.8/site-packages/twisted/internet/defer.py", line 1418, in _inlineCallbacks result = g.send(result) File "/app/python/lib/python3.8/site-packages/scrapy/core/downloader/middleware.py", line 42, in process_request defer.returnValue((yield download_func(request=request, spider=spider))) File "/usr/local/lib/python3.8/site-packages/twisted/internet/defer.py", line 1362, in returnValue raise _DefGen_Return(val) twisted.internet.defer._DefGen_Return: <200 https://www.example.com> During handling of the above exception, another exception occurred: Traceback (most recent call last): File "/app/python/lib/python3.8/site-packages/scrapy/utils/defer.py", line 55, in mustbe_deferred result = f(*args, **kw) File "/app/python/lib/python3.8/site-packages/scrapy/core/spidermw.py", line 60, in process_spider_input return scrape_func(response, request, spider) File "/app/python/lib/python3.8/site-packages/scrapy/core/scraper.py", line 148, in call_spider warn_on_generator_with_return_value(spider, callback) File "/app/python/lib/python3.8/site-packages/scrapy/utils/misc.py", line 202, in warn_on_generator_with_return_value if is_generator_with_return_value(callable): File "/app/python/lib/python3.8/site-packages/scrapy/utils/misc.py", line 180, in is_generator_with_return_value return _generator_callbacks_cache[callable] File "/app/python/lib/python3.8/site-packages/scrapy/utils/datatypes.py", line 281, in __getitem__ return super(LocalWeakReferencedCache, self).__getitem__(key) File "/usr/local/lib/python3.8/weakref.py", line 383, in __getitem__ return self.data[ref(key)] KeyError: <weakref at 0x7f06ff011720; to 'method' at 0x7f07042b5e00 (parse_foo)> ``` This is Scrapy 2.0.1. The problem happens only sometimes, but in different spiders in the same project. </issue> <code> [start of scrapy/utils/datatypes.py] 1 """ 2 This module contains data types used by Scrapy which are not included in the 3 Python Standard Library. 4 5 This module must not depend on any module outside the Standard Library. 
6 """ 7 8 import collections 9 import weakref 10 from collections.abc import Mapping 11 12 13 class CaselessDict(dict): 14 15 __slots__ = () 16 17 def __init__(self, seq=None): 18 super(CaselessDict, self).__init__() 19 if seq: 20 self.update(seq) 21 22 def __getitem__(self, key): 23 return dict.__getitem__(self, self.normkey(key)) 24 25 def __setitem__(self, key, value): 26 dict.__setitem__(self, self.normkey(key), self.normvalue(value)) 27 28 def __delitem__(self, key): 29 dict.__delitem__(self, self.normkey(key)) 30 31 def __contains__(self, key): 32 return dict.__contains__(self, self.normkey(key)) 33 has_key = __contains__ 34 35 def __copy__(self): 36 return self.__class__(self) 37 copy = __copy__ 38 39 def normkey(self, key): 40 """Method to normalize dictionary key access""" 41 return key.lower() 42 43 def normvalue(self, value): 44 """Method to normalize values prior to be setted""" 45 return value 46 47 def get(self, key, def_val=None): 48 return dict.get(self, self.normkey(key), self.normvalue(def_val)) 49 50 def setdefault(self, key, def_val=None): 51 return dict.setdefault(self, self.normkey(key), self.normvalue(def_val)) 52 53 def update(self, seq): 54 seq = seq.items() if isinstance(seq, Mapping) else seq 55 iseq = ((self.normkey(k), self.normvalue(v)) for k, v in seq) 56 super(CaselessDict, self).update(iseq) 57 58 @classmethod 59 def fromkeys(cls, keys, value=None): 60 return cls((k, value) for k in keys) 61 62 def pop(self, key, *args): 63 return dict.pop(self, self.normkey(key), *args) 64 65 66 class LocalCache(collections.OrderedDict): 67 """Dictionary with a finite number of keys. 68 69 Older items expires first. 70 """ 71 72 def __init__(self, limit=None): 73 super(LocalCache, self).__init__() 74 self.limit = limit 75 76 def __setitem__(self, key, value): 77 if self.limit: 78 while len(self) >= self.limit: 79 self.popitem(last=False) 80 super(LocalCache, self).__setitem__(key, value) 81 82 83 class LocalWeakReferencedCache(weakref.WeakKeyDictionary): 84 """ 85 A weakref.WeakKeyDictionary implementation that uses LocalCache as its 86 underlying data structure, making it ordered and capable of being size-limited. 87 88 Useful for memoization, while avoiding keeping received 89 arguments in memory only because of the cached references. 90 91 Note: like LocalCache and unlike weakref.WeakKeyDictionary, 92 it cannot be instantiated with an initial dictionary. 93 """ 94 95 def __init__(self, limit=None): 96 super(LocalWeakReferencedCache, self).__init__() 97 self.data = LocalCache(limit=limit) 98 99 def __setitem__(self, key, value): 100 try: 101 super(LocalWeakReferencedCache, self).__setitem__(key, value) 102 except TypeError: 103 pass # key is not weak-referenceable, skip caching 104 105 def __getitem__(self, key): 106 try: 107 return super(LocalWeakReferencedCache, self).__getitem__(key) 108 except TypeError: 109 return None # key is not weak-referenceable, it's not cached 110 111 112 class SequenceExclude: 113 """Object to test if an item is NOT within some sequence.""" 114 115 def __init__(self, seq): 116 self.seq = seq 117 118 def __contains__(self, item): 119 return item not in self.seq 120 [end of scrapy/utils/datatypes.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/scrapy/utils/datatypes.py b/scrapy/utils/datatypes.py --- a/scrapy/utils/datatypes.py +++ b/scrapy/utils/datatypes.py @@ -105,8 +105,8 @@ def __getitem__(self, key): try: return super(LocalWeakReferencedCache, self).__getitem__(key) - except TypeError: - return None # key is not weak-referenceable, it's not cached + except (TypeError, KeyError): + return None # key is either not weak-referenceable or not cached class SequenceExclude:
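The traceback bottoms out in `WeakKeyDictionary.__getitem__`, and the reason catching `TypeError` alone is not enough can be reproduced with only the standard library (a CPython-specific illustration, not code taken from Scrapy):

```python
import weakref

class Spider:
    def parse(self, response):
        return response

cache = weakref.WeakKeyDictionary()
spider = Spider()

# Bound methods are weak-referenceable, so storing one does not raise TypeError...
cache[spider.parse] = "cached"
# ...but `spider.parse` produced a temporary method object that CPython collects
# straight away; the next attribute access builds a *new* method object, the
# stored (now dead) weakref no longer matches, and the lookup raises KeyError.
try:
    cache[spider.parse]
except KeyError:
    pass

# The patched __getitem__ simply treats that case as "not cached":
def safe_get(cache, key):
    try:
        return cache[key]
    except (TypeError, KeyError):  # not weak-referenceable, or no longer cached
        return None
```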
{"golden_diff": "diff --git a/scrapy/utils/datatypes.py b/scrapy/utils/datatypes.py\n--- a/scrapy/utils/datatypes.py\n+++ b/scrapy/utils/datatypes.py\n@@ -105,8 +105,8 @@\n def __getitem__(self, key):\n try:\n return super(LocalWeakReferencedCache, self).__getitem__(key)\n- except TypeError:\n- return None # key is not weak-referenceable, it's not cached\n+ except (TypeError, KeyError):\n+ return None # key is either not weak-referenceable or not cached\n \n \n class SequenceExclude:\n", "issue": "KeyError in is_generator_with_return_value\n\r\n```Python traceback\r\nTraceback (most recent call last):\r\n File \"/usr/local/lib/python3.8/site-packages/twisted/internet/defer.py\", line 1418, in _inlineCallbacks\r\n result = g.send(result)\r\n File \"/app/python/lib/python3.8/site-packages/scrapy/core/downloader/middleware.py\", line 42, in process_request\r\n defer.returnValue((yield download_func(request=request, spider=spider)))\r\n File \"/usr/local/lib/python3.8/site-packages/twisted/internet/defer.py\", line 1362, in returnValue\r\n raise _DefGen_Return(val)\r\ntwisted.internet.defer._DefGen_Return: <200 https://www.example.com>\r\n\r\nDuring handling of the above exception, another exception occurred:\r\n\r\nTraceback (most recent call last):\r\n File \"/app/python/lib/python3.8/site-packages/scrapy/utils/defer.py\", line 55, in mustbe_deferred\r\n result = f(*args, **kw)\r\n File \"/app/python/lib/python3.8/site-packages/scrapy/core/spidermw.py\", line 60, in process_spider_input\r\n return scrape_func(response, request, spider)\r\n File \"/app/python/lib/python3.8/site-packages/scrapy/core/scraper.py\", line 148, in call_spider\r\n warn_on_generator_with_return_value(spider, callback)\r\n File \"/app/python/lib/python3.8/site-packages/scrapy/utils/misc.py\", line 202, in warn_on_generator_with_return_value\r\n if is_generator_with_return_value(callable):\r\n File \"/app/python/lib/python3.8/site-packages/scrapy/utils/misc.py\", line 180, in is_generator_with_return_value\r\n return _generator_callbacks_cache[callable]\r\n File \"/app/python/lib/python3.8/site-packages/scrapy/utils/datatypes.py\", line 281, in __getitem__\r\n return super(LocalWeakReferencedCache, self).__getitem__(key)\r\n File \"/usr/local/lib/python3.8/weakref.py\", line 383, in __getitem__\r\n return self.data[ref(key)]\r\nKeyError: <weakref at 0x7f06ff011720; to 'method' at 0x7f07042b5e00 (parse_foo)>\r\n```\r\n\r\nThis is Scrapy 2.0.1. 
The problem happens only sometimes, but in different spiders in the same project.\n", "before_files": [{"content": "\"\"\"\nThis module contains data types used by Scrapy which are not included in the\nPython Standard Library.\n\nThis module must not depend on any module outside the Standard Library.\n\"\"\"\n\nimport collections\nimport weakref\nfrom collections.abc import Mapping\n\n\nclass CaselessDict(dict):\n\n __slots__ = ()\n\n def __init__(self, seq=None):\n super(CaselessDict, self).__init__()\n if seq:\n self.update(seq)\n\n def __getitem__(self, key):\n return dict.__getitem__(self, self.normkey(key))\n\n def __setitem__(self, key, value):\n dict.__setitem__(self, self.normkey(key), self.normvalue(value))\n\n def __delitem__(self, key):\n dict.__delitem__(self, self.normkey(key))\n\n def __contains__(self, key):\n return dict.__contains__(self, self.normkey(key))\n has_key = __contains__\n\n def __copy__(self):\n return self.__class__(self)\n copy = __copy__\n\n def normkey(self, key):\n \"\"\"Method to normalize dictionary key access\"\"\"\n return key.lower()\n\n def normvalue(self, value):\n \"\"\"Method to normalize values prior to be setted\"\"\"\n return value\n\n def get(self, key, def_val=None):\n return dict.get(self, self.normkey(key), self.normvalue(def_val))\n\n def setdefault(self, key, def_val=None):\n return dict.setdefault(self, self.normkey(key), self.normvalue(def_val))\n\n def update(self, seq):\n seq = seq.items() if isinstance(seq, Mapping) else seq\n iseq = ((self.normkey(k), self.normvalue(v)) for k, v in seq)\n super(CaselessDict, self).update(iseq)\n\n @classmethod\n def fromkeys(cls, keys, value=None):\n return cls((k, value) for k in keys)\n\n def pop(self, key, *args):\n return dict.pop(self, self.normkey(key), *args)\n\n\nclass LocalCache(collections.OrderedDict):\n \"\"\"Dictionary with a finite number of keys.\n\n Older items expires first.\n \"\"\"\n\n def __init__(self, limit=None):\n super(LocalCache, self).__init__()\n self.limit = limit\n\n def __setitem__(self, key, value):\n if self.limit:\n while len(self) >= self.limit:\n self.popitem(last=False)\n super(LocalCache, self).__setitem__(key, value)\n\n\nclass LocalWeakReferencedCache(weakref.WeakKeyDictionary):\n \"\"\"\n A weakref.WeakKeyDictionary implementation that uses LocalCache as its\n underlying data structure, making it ordered and capable of being size-limited.\n\n Useful for memoization, while avoiding keeping received\n arguments in memory only because of the cached references.\n\n Note: like LocalCache and unlike weakref.WeakKeyDictionary,\n it cannot be instantiated with an initial dictionary.\n \"\"\"\n\n def __init__(self, limit=None):\n super(LocalWeakReferencedCache, self).__init__()\n self.data = LocalCache(limit=limit)\n\n def __setitem__(self, key, value):\n try:\n super(LocalWeakReferencedCache, self).__setitem__(key, value)\n except TypeError:\n pass # key is not weak-referenceable, skip caching\n\n def __getitem__(self, key):\n try:\n return super(LocalWeakReferencedCache, self).__getitem__(key)\n except TypeError:\n return None # key is not weak-referenceable, it's not cached\n\n\nclass SequenceExclude:\n \"\"\"Object to test if an item is NOT within some sequence.\"\"\"\n\n def __init__(self, seq):\n self.seq = seq\n\n def __contains__(self, item):\n return item not in self.seq\n", "path": "scrapy/utils/datatypes.py"}]}
2,170
130
gh_patches_debug_23019
rasdani/github-patches
git_diff
open-telemetry__opentelemetry-python-contrib-523
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Compatible with web.py? Is it possible to use opentelemetry-insrumentation-wsgi with web.py framework? If yes, some examples would be appreciated. </issue> <code> [start of instrumentation/opentelemetry-instrumentation-wsgi/src/opentelemetry/instrumentation/wsgi/__init__.py] 1 # Copyright The OpenTelemetry Authors 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 """ 15 This library provides a WSGI middleware that can be used on any WSGI framework 16 (such as Django / Flask) to track requests timing through OpenTelemetry. 17 18 Usage (Flask) 19 ------------- 20 21 .. code-block:: python 22 23 from flask import Flask 24 from opentelemetry.instrumentation.wsgi import OpenTelemetryMiddleware 25 26 app = Flask(__name__) 27 app.wsgi_app = OpenTelemetryMiddleware(app.wsgi_app) 28 29 @app.route("/") 30 def hello(): 31 return "Hello!" 32 33 if __name__ == "__main__": 34 app.run(debug=True) 35 36 37 Usage (Django) 38 -------------- 39 40 Modify the application's ``wsgi.py`` file as shown below. 41 42 .. code-block:: python 43 44 import os 45 from opentelemetry.instrumentation.wsgi import OpenTelemetryMiddleware 46 from django.core.wsgi import get_wsgi_application 47 48 os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'application.settings') 49 50 application = get_wsgi_application() 51 application = OpenTelemetryMiddleware(application) 52 53 API 54 --- 55 """ 56 57 import functools 58 import typing 59 import wsgiref.util as wsgiref_util 60 61 from opentelemetry import context, trace 62 from opentelemetry.instrumentation.utils import http_status_to_status_code 63 from opentelemetry.instrumentation.wsgi.version import __version__ 64 from opentelemetry.propagate import extract 65 from opentelemetry.propagators.textmap import Getter 66 from opentelemetry.semconv.trace import SpanAttributes 67 from opentelemetry.trace.status import Status, StatusCode 68 from opentelemetry.util.http import remove_url_credentials 69 70 _HTTP_VERSION_PREFIX = "HTTP/" 71 _CARRIER_KEY_PREFIX = "HTTP_" 72 _CARRIER_KEY_PREFIX_LEN = len(_CARRIER_KEY_PREFIX) 73 74 75 class WSGIGetter(Getter): 76 def get( 77 self, carrier: dict, key: str 78 ) -> typing.Optional[typing.List[str]]: 79 """Getter implementation to retrieve a HTTP header value from the 80 PEP3333-conforming WSGI environ 81 82 Args: 83 carrier: WSGI environ object 84 key: header name in environ object 85 Returns: 86 A list with a single string with the header value if it exists, 87 else None. 
88 """ 89 environ_key = "HTTP_" + key.upper().replace("-", "_") 90 value = carrier.get(environ_key) 91 if value is not None: 92 return [value] 93 return None 94 95 def keys(self, carrier): 96 return [ 97 key[_CARRIER_KEY_PREFIX_LEN:].lower().replace("_", "-") 98 for key in carrier 99 if key.startswith(_CARRIER_KEY_PREFIX) 100 ] 101 102 103 wsgi_getter = WSGIGetter() 104 105 106 def setifnotnone(dic, key, value): 107 if value is not None: 108 dic[key] = value 109 110 111 def collect_request_attributes(environ): 112 """Collects HTTP request attributes from the PEP3333-conforming 113 WSGI environ and returns a dictionary to be used as span creation attributes.""" 114 115 result = { 116 SpanAttributes.HTTP_METHOD: environ.get("REQUEST_METHOD"), 117 SpanAttributes.HTTP_SERVER_NAME: environ.get("SERVER_NAME"), 118 SpanAttributes.HTTP_SCHEME: environ.get("wsgi.url_scheme"), 119 } 120 121 host_port = environ.get("SERVER_PORT") 122 if host_port is not None and not host_port == "": 123 result.update({SpanAttributes.NET_HOST_PORT: int(host_port)}) 124 125 setifnotnone(result, SpanAttributes.HTTP_HOST, environ.get("HTTP_HOST")) 126 target = environ.get("RAW_URI") 127 if target is None: # Note: `"" or None is None` 128 target = environ.get("REQUEST_URI") 129 if target is not None: 130 result[SpanAttributes.HTTP_TARGET] = target 131 else: 132 result[SpanAttributes.HTTP_URL] = remove_url_credentials( 133 wsgiref_util.request_uri(environ) 134 ) 135 136 remote_addr = environ.get("REMOTE_ADDR") 137 if remote_addr: 138 result[SpanAttributes.NET_PEER_IP] = remote_addr 139 remote_host = environ.get("REMOTE_HOST") 140 if remote_host and remote_host != remote_addr: 141 result[SpanAttributes.NET_PEER_NAME] = remote_host 142 143 user_agent = environ.get("HTTP_USER_AGENT") 144 if user_agent is not None and len(user_agent) > 0: 145 result[SpanAttributes.HTTP_USER_AGENT] = user_agent 146 147 setifnotnone( 148 result, SpanAttributes.NET_PEER_PORT, environ.get("REMOTE_PORT") 149 ) 150 flavor = environ.get("SERVER_PROTOCOL", "") 151 if flavor.upper().startswith(_HTTP_VERSION_PREFIX): 152 flavor = flavor[len(_HTTP_VERSION_PREFIX) :] 153 if flavor: 154 result[SpanAttributes.HTTP_FLAVOR] = flavor 155 156 return result 157 158 159 def add_response_attributes( 160 span, start_response_status, response_headers 161 ): # pylint: disable=unused-argument 162 """Adds HTTP response attributes to span using the arguments 163 passed to a PEP3333-conforming start_response callable.""" 164 if not span.is_recording(): 165 return 166 status_code, _ = start_response_status.split(" ", 1) 167 168 try: 169 status_code = int(status_code) 170 except ValueError: 171 span.set_status( 172 Status( 173 StatusCode.ERROR, 174 "Non-integer HTTP status: " + repr(status_code), 175 ) 176 ) 177 else: 178 span.set_attribute(SpanAttributes.HTTP_STATUS_CODE, status_code) 179 span.set_status(Status(http_status_to_status_code(status_code))) 180 181 182 def get_default_span_name(environ): 183 """Default implementation for name_callback, returns HTTP {METHOD_NAME}.""" 184 return "HTTP {}".format(environ.get("REQUEST_METHOD", "")).strip() 185 186 187 class OpenTelemetryMiddleware: 188 """The WSGI application middleware. 189 190 This class is a PEP 3333 conforming WSGI middleware that starts and 191 annotates spans for any requests it is invoked with. 192 193 Args: 194 wsgi: The WSGI application callable to forward requests to. 195 request_hook: Optional callback which is called with the server span and WSGI 196 environ object for every incoming request. 
197 response_hook: Optional callback which is called with the server span, 198 WSGI environ, status_code and response_headers for every 199 incoming request. 200 tracer_provider: Optional tracer provider to use. If omitted the current 201 globally configured one is used. 202 """ 203 204 def __init__( 205 self, wsgi, request_hook=None, response_hook=None, tracer_provider=None 206 ): 207 self.wsgi = wsgi 208 self.tracer = trace.get_tracer(__name__, __version__, tracer_provider) 209 self.request_hook = request_hook 210 self.response_hook = response_hook 211 212 @staticmethod 213 def _create_start_response(span, start_response, response_hook): 214 @functools.wraps(start_response) 215 def _start_response(status, response_headers, *args, **kwargs): 216 add_response_attributes(span, status, response_headers) 217 if response_hook: 218 response_hook(status, response_headers) 219 return start_response(status, response_headers, *args, **kwargs) 220 221 return _start_response 222 223 def __call__(self, environ, start_response): 224 """The WSGI application 225 226 Args: 227 environ: A WSGI environment. 228 start_response: The WSGI start_response callable. 229 """ 230 231 token = context.attach(extract(environ, getter=wsgi_getter)) 232 233 span = self.tracer.start_span( 234 get_default_span_name(environ), 235 kind=trace.SpanKind.SERVER, 236 attributes=collect_request_attributes(environ), 237 ) 238 239 if self.request_hook: 240 self.request_hook(span, environ) 241 242 response_hook = self.response_hook 243 if response_hook: 244 response_hook = functools.partial(response_hook, span, environ) 245 246 try: 247 with trace.use_span(span): 248 start_response = self._create_start_response( 249 span, start_response, response_hook 250 ) 251 iterable = self.wsgi(environ, start_response) 252 return _end_span_after_iterating( 253 iterable, span, self.tracer, token 254 ) 255 except Exception as ex: 256 if span.is_recording(): 257 span.set_status(Status(StatusCode.ERROR, str(ex))) 258 span.end() 259 context.detach(token) 260 raise 261 262 263 # Put this in a subfunction to not delay the call to the wrapped 264 # WSGI application (instrumentation should change the application 265 # behavior as little as possible). 266 def _end_span_after_iterating(iterable, span, tracer, token): 267 try: 268 with trace.use_span(span): 269 for yielded in iterable: 270 yield yielded 271 finally: 272 close = getattr(iterable, "close", None) 273 if close: 274 close() 275 span.end() 276 context.detach(token) 277 278 279 # TODO: inherit from opentelemetry.instrumentation.propagators.Setter 280 281 282 class ResponsePropagationSetter: 283 def set(self, carrier, key, value): # pylint: disable=no-self-use 284 carrier.append((key, value)) 285 286 287 default_response_propagation_setter = ResponsePropagationSetter() 288 [end of instrumentation/opentelemetry-instrumentation-wsgi/src/opentelemetry/instrumentation/wsgi/__init__.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/instrumentation/opentelemetry-instrumentation-wsgi/src/opentelemetry/instrumentation/wsgi/__init__.py b/instrumentation/opentelemetry-instrumentation-wsgi/src/opentelemetry/instrumentation/wsgi/__init__.py --- a/instrumentation/opentelemetry-instrumentation-wsgi/src/opentelemetry/instrumentation/wsgi/__init__.py +++ b/instrumentation/opentelemetry-instrumentation-wsgi/src/opentelemetry/instrumentation/wsgi/__init__.py @@ -13,7 +13,7 @@ # limitations under the License. """ This library provides a WSGI middleware that can be used on any WSGI framework -(such as Django / Flask) to track requests timing through OpenTelemetry. +(such as Django / Flask / Web.py) to track requests timing through OpenTelemetry. Usage (Flask) ------------- @@ -50,6 +50,35 @@ application = get_wsgi_application() application = OpenTelemetryMiddleware(application) +Usage (Web.py) +-------------- + +.. code-block:: python + + import web + from opentelemetry.instrumentation.wsgi import OpenTelemetryMiddleware + from cheroot import wsgi + + urls = ('/', 'index') + + + class index: + + def GET(self): + return "Hello, world!" + + + if __name__ == "__main__": + app = web.application(urls, globals()) + func = app.wsgifunc() + + func = OpenTelemetryMiddleware(func) + + server = wsgi.WSGIServer( + ("localhost", 5100), func, server_name="localhost" + ) + server.start() + API --- """
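Beyond the web.py documentation example added above, the instrumented `__call__` in this record also shows the hook signatures the middleware accepts; wiring them into any WSGI callable — web.py's `app.wsgifunc()` included — looks roughly like this (the attribute names here are invented for illustration):

```python
from opentelemetry.instrumentation.wsgi import OpenTelemetryMiddleware

def request_hook(span, environ):
    # called as request_hook(span, environ) for every incoming request
    if span and span.is_recording():
        span.set_attribute("custom.request_uri", environ.get("REQUEST_URI", ""))

def response_hook(span, environ, status, response_headers):
    # called once the response status and headers are known
    if span and span.is_recording():
        span.set_attribute("custom.response_status", status)

# wsgi_func would be whatever the framework exposes, e.g. web.py's app.wsgifunc():
# wsgi_func = OpenTelemetryMiddleware(wsgi_func,
#                                     request_hook=request_hook,
#                                     response_hook=response_hook)
```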
{"golden_diff": "diff --git a/instrumentation/opentelemetry-instrumentation-wsgi/src/opentelemetry/instrumentation/wsgi/__init__.py b/instrumentation/opentelemetry-instrumentation-wsgi/src/opentelemetry/instrumentation/wsgi/__init__.py\n--- a/instrumentation/opentelemetry-instrumentation-wsgi/src/opentelemetry/instrumentation/wsgi/__init__.py\n+++ b/instrumentation/opentelemetry-instrumentation-wsgi/src/opentelemetry/instrumentation/wsgi/__init__.py\n@@ -13,7 +13,7 @@\n # limitations under the License.\n \"\"\"\n This library provides a WSGI middleware that can be used on any WSGI framework\n-(such as Django / Flask) to track requests timing through OpenTelemetry.\n+(such as Django / Flask / Web.py) to track requests timing through OpenTelemetry.\n \n Usage (Flask)\n -------------\n@@ -50,6 +50,35 @@\n application = get_wsgi_application()\n application = OpenTelemetryMiddleware(application)\n \n+Usage (Web.py)\n+--------------\n+\n+.. code-block:: python\n+\n+ import web\n+ from opentelemetry.instrumentation.wsgi import OpenTelemetryMiddleware\n+ from cheroot import wsgi\n+\n+ urls = ('/', 'index')\n+\n+\n+ class index:\n+\n+ def GET(self):\n+ return \"Hello, world!\"\n+\n+\n+ if __name__ == \"__main__\":\n+ app = web.application(urls, globals())\n+ func = app.wsgifunc()\n+\n+ func = OpenTelemetryMiddleware(func)\n+\n+ server = wsgi.WSGIServer(\n+ (\"localhost\", 5100), func, server_name=\"localhost\"\n+ )\n+ server.start()\n+\n API\n ---\n \"\"\"\n", "issue": "Compatible with web.py?\nIs it possible to use opentelemetry-insrumentation-wsgi with web.py framework? If yes, some examples would be appreciated.\n", "before_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nThis library provides a WSGI middleware that can be used on any WSGI framework\n(such as Django / Flask) to track requests timing through OpenTelemetry.\n\nUsage (Flask)\n-------------\n\n.. code-block:: python\n\n from flask import Flask\n from opentelemetry.instrumentation.wsgi import OpenTelemetryMiddleware\n\n app = Flask(__name__)\n app.wsgi_app = OpenTelemetryMiddleware(app.wsgi_app)\n\n @app.route(\"/\")\n def hello():\n return \"Hello!\"\n\n if __name__ == \"__main__\":\n app.run(debug=True)\n\n\nUsage (Django)\n--------------\n\nModify the application's ``wsgi.py`` file as shown below.\n\n.. 
code-block:: python\n\n import os\n from opentelemetry.instrumentation.wsgi import OpenTelemetryMiddleware\n from django.core.wsgi import get_wsgi_application\n\n os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'application.settings')\n\n application = get_wsgi_application()\n application = OpenTelemetryMiddleware(application)\n\nAPI\n---\n\"\"\"\n\nimport functools\nimport typing\nimport wsgiref.util as wsgiref_util\n\nfrom opentelemetry import context, trace\nfrom opentelemetry.instrumentation.utils import http_status_to_status_code\nfrom opentelemetry.instrumentation.wsgi.version import __version__\nfrom opentelemetry.propagate import extract\nfrom opentelemetry.propagators.textmap import Getter\nfrom opentelemetry.semconv.trace import SpanAttributes\nfrom opentelemetry.trace.status import Status, StatusCode\nfrom opentelemetry.util.http import remove_url_credentials\n\n_HTTP_VERSION_PREFIX = \"HTTP/\"\n_CARRIER_KEY_PREFIX = \"HTTP_\"\n_CARRIER_KEY_PREFIX_LEN = len(_CARRIER_KEY_PREFIX)\n\n\nclass WSGIGetter(Getter):\n def get(\n self, carrier: dict, key: str\n ) -> typing.Optional[typing.List[str]]:\n \"\"\"Getter implementation to retrieve a HTTP header value from the\n PEP3333-conforming WSGI environ\n\n Args:\n carrier: WSGI environ object\n key: header name in environ object\n Returns:\n A list with a single string with the header value if it exists,\n else None.\n \"\"\"\n environ_key = \"HTTP_\" + key.upper().replace(\"-\", \"_\")\n value = carrier.get(environ_key)\n if value is not None:\n return [value]\n return None\n\n def keys(self, carrier):\n return [\n key[_CARRIER_KEY_PREFIX_LEN:].lower().replace(\"_\", \"-\")\n for key in carrier\n if key.startswith(_CARRIER_KEY_PREFIX)\n ]\n\n\nwsgi_getter = WSGIGetter()\n\n\ndef setifnotnone(dic, key, value):\n if value is not None:\n dic[key] = value\n\n\ndef collect_request_attributes(environ):\n \"\"\"Collects HTTP request attributes from the PEP3333-conforming\n WSGI environ and returns a dictionary to be used as span creation attributes.\"\"\"\n\n result = {\n SpanAttributes.HTTP_METHOD: environ.get(\"REQUEST_METHOD\"),\n SpanAttributes.HTTP_SERVER_NAME: environ.get(\"SERVER_NAME\"),\n SpanAttributes.HTTP_SCHEME: environ.get(\"wsgi.url_scheme\"),\n }\n\n host_port = environ.get(\"SERVER_PORT\")\n if host_port is not None and not host_port == \"\":\n result.update({SpanAttributes.NET_HOST_PORT: int(host_port)})\n\n setifnotnone(result, SpanAttributes.HTTP_HOST, environ.get(\"HTTP_HOST\"))\n target = environ.get(\"RAW_URI\")\n if target is None: # Note: `\"\" or None is None`\n target = environ.get(\"REQUEST_URI\")\n if target is not None:\n result[SpanAttributes.HTTP_TARGET] = target\n else:\n result[SpanAttributes.HTTP_URL] = remove_url_credentials(\n wsgiref_util.request_uri(environ)\n )\n\n remote_addr = environ.get(\"REMOTE_ADDR\")\n if remote_addr:\n result[SpanAttributes.NET_PEER_IP] = remote_addr\n remote_host = environ.get(\"REMOTE_HOST\")\n if remote_host and remote_host != remote_addr:\n result[SpanAttributes.NET_PEER_NAME] = remote_host\n\n user_agent = environ.get(\"HTTP_USER_AGENT\")\n if user_agent is not None and len(user_agent) > 0:\n result[SpanAttributes.HTTP_USER_AGENT] = user_agent\n\n setifnotnone(\n result, SpanAttributes.NET_PEER_PORT, environ.get(\"REMOTE_PORT\")\n )\n flavor = environ.get(\"SERVER_PROTOCOL\", \"\")\n if flavor.upper().startswith(_HTTP_VERSION_PREFIX):\n flavor = flavor[len(_HTTP_VERSION_PREFIX) :]\n if flavor:\n result[SpanAttributes.HTTP_FLAVOR] = flavor\n\n return result\n\n\ndef 
add_response_attributes(\n span, start_response_status, response_headers\n): # pylint: disable=unused-argument\n \"\"\"Adds HTTP response attributes to span using the arguments\n passed to a PEP3333-conforming start_response callable.\"\"\"\n if not span.is_recording():\n return\n status_code, _ = start_response_status.split(\" \", 1)\n\n try:\n status_code = int(status_code)\n except ValueError:\n span.set_status(\n Status(\n StatusCode.ERROR,\n \"Non-integer HTTP status: \" + repr(status_code),\n )\n )\n else:\n span.set_attribute(SpanAttributes.HTTP_STATUS_CODE, status_code)\n span.set_status(Status(http_status_to_status_code(status_code)))\n\n\ndef get_default_span_name(environ):\n \"\"\"Default implementation for name_callback, returns HTTP {METHOD_NAME}.\"\"\"\n return \"HTTP {}\".format(environ.get(\"REQUEST_METHOD\", \"\")).strip()\n\n\nclass OpenTelemetryMiddleware:\n \"\"\"The WSGI application middleware.\n\n This class is a PEP 3333 conforming WSGI middleware that starts and\n annotates spans for any requests it is invoked with.\n\n Args:\n wsgi: The WSGI application callable to forward requests to.\n request_hook: Optional callback which is called with the server span and WSGI\n environ object for every incoming request.\n response_hook: Optional callback which is called with the server span,\n WSGI environ, status_code and response_headers for every\n incoming request.\n tracer_provider: Optional tracer provider to use. If omitted the current\n globally configured one is used.\n \"\"\"\n\n def __init__(\n self, wsgi, request_hook=None, response_hook=None, tracer_provider=None\n ):\n self.wsgi = wsgi\n self.tracer = trace.get_tracer(__name__, __version__, tracer_provider)\n self.request_hook = request_hook\n self.response_hook = response_hook\n\n @staticmethod\n def _create_start_response(span, start_response, response_hook):\n @functools.wraps(start_response)\n def _start_response(status, response_headers, *args, **kwargs):\n add_response_attributes(span, status, response_headers)\n if response_hook:\n response_hook(status, response_headers)\n return start_response(status, response_headers, *args, **kwargs)\n\n return _start_response\n\n def __call__(self, environ, start_response):\n \"\"\"The WSGI application\n\n Args:\n environ: A WSGI environment.\n start_response: The WSGI start_response callable.\n \"\"\"\n\n token = context.attach(extract(environ, getter=wsgi_getter))\n\n span = self.tracer.start_span(\n get_default_span_name(environ),\n kind=trace.SpanKind.SERVER,\n attributes=collect_request_attributes(environ),\n )\n\n if self.request_hook:\n self.request_hook(span, environ)\n\n response_hook = self.response_hook\n if response_hook:\n response_hook = functools.partial(response_hook, span, environ)\n\n try:\n with trace.use_span(span):\n start_response = self._create_start_response(\n span, start_response, response_hook\n )\n iterable = self.wsgi(environ, start_response)\n return _end_span_after_iterating(\n iterable, span, self.tracer, token\n )\n except Exception as ex:\n if span.is_recording():\n span.set_status(Status(StatusCode.ERROR, str(ex)))\n span.end()\n context.detach(token)\n raise\n\n\n# Put this in a subfunction to not delay the call to the wrapped\n# WSGI application (instrumentation should change the application\n# behavior as little as possible).\ndef _end_span_after_iterating(iterable, span, tracer, token):\n try:\n with trace.use_span(span):\n for yielded in iterable:\n yield yielded\n finally:\n close = getattr(iterable, \"close\", None)\n if 
close:\n close()\n span.end()\n context.detach(token)\n\n\n# TODO: inherit from opentelemetry.instrumentation.propagators.Setter\n\n\nclass ResponsePropagationSetter:\n def set(self, carrier, key, value): # pylint: disable=no-self-use\n carrier.append((key, value))\n\n\ndefault_response_propagation_setter = ResponsePropagationSetter()\n", "path": "instrumentation/opentelemetry-instrumentation-wsgi/src/opentelemetry/instrumentation/wsgi/__init__.py"}]}
3,431
385
gh_patches_debug_11812
rasdani/github-patches
git_diff
craiga__will-of-the-prophets-196
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Visiting /roll raises an error if no rolls exist ``` Environment: Request Method: GET Request URL: http://localhost:8000/roll/ Django Version: 2.2 Python Version: 3.7.3 Installed Applications: ['raven.contrib.django.raven_compat', 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', 'sass_processor', 'widget_tweaks', 'tz_detect', 'debug_toolbar', 's3direct', 'bootstrap', 'will_of_the_prophets'] Installed Middleware: ('raven.contrib.django.middleware.DjangoRestFrameworkCompatMiddleware', 'raven.contrib.django.middleware.SentryMiddleware', 'whitenoise.middleware.WhiteNoiseMiddleware', 'raven.contrib.django.raven_compat.middleware.Sentry404CatchMiddleware', 'debug_toolbar.middleware.DebugToolbarMiddleware', 'django.middleware.security.SecurityMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', 'tz_detect.middleware.TimezoneMiddleware') Traceback: File "/Users/craiga/.ve/will_of_the_prophets-4-oJArpS/lib/python3.7/site-packages/django/core/handlers/exception.py" in inner 34. response = get_response(request) File "/Users/craiga/.ve/will_of_the_prophets-4-oJArpS/lib/python3.7/site-packages/django/core/handlers/base.py" in _get_response 115. response = self.process_exception_by_middleware(e, request) File "/Users/craiga/.ve/will_of_the_prophets-4-oJArpS/lib/python3.7/site-packages/django/core/handlers/base.py" in _get_response 113. response = wrapped_callback(request, *callback_args, **callback_kwargs) File "/Users/craiga/.ve/will_of_the_prophets-4-oJArpS/lib/python3.7/site-packages/django/views/generic/base.py" in view 71. return self.dispatch(request, *args, **kwargs) File "/Users/craiga/.ve/will_of_the_prophets-4-oJArpS/lib/python3.7/site-packages/django/contrib/auth/mixins.py" in dispatch 52. return super().dispatch(request, *args, **kwargs) File "/Users/craiga/.ve/will_of_the_prophets-4-oJArpS/lib/python3.7/site-packages/django/views/generic/base.py" in dispatch 97. return handler(request, *args, **kwargs) File "/Users/craiga/.ve/will_of_the_prophets-4-oJArpS/lib/python3.7/site-packages/django/views/generic/edit.py" in get 168. return super().get(request, *args, **kwargs) File "/Users/craiga/.ve/will_of_the_prophets-4-oJArpS/lib/python3.7/site-packages/django/views/generic/edit.py" in get 133. return self.render_to_response(self.get_context_data()) File "/Users/craiga/will_of_the_prophets/will_of_the_prophets/views.py" in get_context_data 82. 
board=board.Board(now=last_roll.embargo), Exception Type: AttributeError at /roll/ Exception Value: 'NoneType' object has no attribute 'embargo' ``` </issue> <code> [start of will_of_the_prophets/views.py] 1 """Views.""" 2 3 from django.conf import settings 4 from django.contrib.auth.mixins import LoginRequiredMixin 5 from django.shortcuts import render 6 from django.urls import reverse 7 from django.utils import timezone 8 from django.views.decorators.cache import cache_control 9 from django.views.decorators.clickjacking import xframe_options_exempt 10 from django.views.decorators.http import condition 11 from django.views.generic.edit import CreateView 12 13 from will_of_the_prophets import board, forms, models 14 15 16 def get_last_modified(request): 17 """Get board's last modified datetime.""" 18 try: 19 return ( 20 models.Roll.objects.filter(embargo__lte=timezone.now()) 21 .latest("embargo") 22 .embargo 23 ) 24 except models.Roll.DoesNotExist: 25 return None 26 27 28 @xframe_options_exempt 29 @condition(last_modified_func=get_last_modified) 30 @cache_control(max_age=3600) 31 def public_board(request): 32 """ 33 Board for the public. 34 35 Does not take embargoed rolls into account. 36 """ 37 response = render( 38 request, 39 "will_of_the_prophets/public_board.html", 40 { 41 "board": board.Board(), 42 "special_square_types": models.SpecialSquareType.objects.all(), 43 }, 44 ) 45 46 canonical_url = settings.PUBLIC_BOARD_CANONICAL_URL 47 if canonical_url: 48 response["Link"] = f'<{canonical_url}>; rel="canonical"' 49 50 return response 51 52 53 @xframe_options_exempt 54 @condition(last_modified_func=get_last_modified) 55 @cache_control(max_age=3600) 56 def roll_frequency(request): 57 """ 58 Show roll frequency. 59 """ 60 roll_count = {1: 0, 2: 0, 3: 0, 4: 0, 5: 0, 6: 0} 61 for roll in board.Board().rolls: 62 roll_count[roll] += 1 63 64 return render( 65 request, 66 "will_of_the_prophets/roll_frequency.html", 67 {"roll_frequency": roll_count}, 68 ) 69 70 71 class RollView(LoginRequiredMixin, CreateView): 72 """View for rolling the die.""" 73 74 form_class = forms.RollForm 75 template_name = "will_of_the_prophets/roll.html" 76 77 def get_context_data(self, **kwargs): 78 last_roll = models.Roll.objects.order_by("-embargo").first() 79 return super().get_context_data( 80 **kwargs, 81 last_roll=last_roll, 82 board=board.Board(now=last_roll.embargo), 83 special_square_types=models.SpecialSquareType.objects.all(), 84 ) 85 86 def get_success_url(self): 87 return reverse("roll") + "#chula" 88 [end of will_of_the_prophets/views.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/will_of_the_prophets/views.py b/will_of_the_prophets/views.py --- a/will_of_the_prophets/views.py +++ b/will_of_the_prophets/views.py @@ -76,10 +76,14 @@ def get_context_data(self, **kwargs): last_roll = models.Roll.objects.order_by("-embargo").first() + last_roll_embargo = None + if last_roll: + last_roll_embargo = last_roll.embargo + return super().get_context_data( **kwargs, last_roll=last_roll, - board=board.Board(now=last_roll.embargo), + board=board.Board(now=last_roll_embargo), special_square_types=models.SpecialSquareType.objects.all(), )
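The change itself only needs to tolerate an empty `Roll` table; a condensed equivalent of the guarded call (illustrative only — the merged patch keeps the explicit `if last_roll:` form shown above):

```python
last_roll = models.Roll.objects.order_by("-embargo").first()  # None until the first roll exists
current_board = board.Board(now=last_roll.embargo if last_roll else None)
```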
{"golden_diff": "diff --git a/will_of_the_prophets/views.py b/will_of_the_prophets/views.py\n--- a/will_of_the_prophets/views.py\n+++ b/will_of_the_prophets/views.py\n@@ -76,10 +76,14 @@\n \n def get_context_data(self, **kwargs):\n last_roll = models.Roll.objects.order_by(\"-embargo\").first()\n+ last_roll_embargo = None\n+ if last_roll:\n+ last_roll_embargo = last_roll.embargo\n+\n return super().get_context_data(\n **kwargs,\n last_roll=last_roll,\n- board=board.Board(now=last_roll.embargo),\n+ board=board.Board(now=last_roll_embargo),\n special_square_types=models.SpecialSquareType.objects.all(),\n )\n", "issue": "Visiting /roll raises an error if no rolls exist\n```\r\nEnvironment:\r\n\r\n\r\nRequest Method: GET\r\nRequest URL: http://localhost:8000/roll/\r\n\r\nDjango Version: 2.2\r\nPython Version: 3.7.3\r\nInstalled Applications:\r\n['raven.contrib.django.raven_compat',\r\n 'django.contrib.admin',\r\n 'django.contrib.auth',\r\n 'django.contrib.contenttypes',\r\n 'django.contrib.sessions',\r\n 'django.contrib.messages',\r\n 'django.contrib.staticfiles',\r\n 'sass_processor',\r\n 'widget_tweaks',\r\n 'tz_detect',\r\n 'debug_toolbar',\r\n 's3direct',\r\n 'bootstrap',\r\n 'will_of_the_prophets']\r\nInstalled Middleware:\r\n('raven.contrib.django.middleware.DjangoRestFrameworkCompatMiddleware',\r\n 'raven.contrib.django.middleware.SentryMiddleware',\r\n 'whitenoise.middleware.WhiteNoiseMiddleware',\r\n 'raven.contrib.django.raven_compat.middleware.Sentry404CatchMiddleware',\r\n 'debug_toolbar.middleware.DebugToolbarMiddleware',\r\n 'django.middleware.security.SecurityMiddleware',\r\n 'django.contrib.sessions.middleware.SessionMiddleware',\r\n 'django.middleware.common.CommonMiddleware',\r\n 'django.middleware.csrf.CsrfViewMiddleware',\r\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\r\n 'django.contrib.messages.middleware.MessageMiddleware',\r\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\r\n 'tz_detect.middleware.TimezoneMiddleware')\r\n\r\n\r\n\r\nTraceback:\r\n\r\nFile \"/Users/craiga/.ve/will_of_the_prophets-4-oJArpS/lib/python3.7/site-packages/django/core/handlers/exception.py\" in inner\r\n 34. response = get_response(request)\r\n\r\nFile \"/Users/craiga/.ve/will_of_the_prophets-4-oJArpS/lib/python3.7/site-packages/django/core/handlers/base.py\" in _get_response\r\n 115. response = self.process_exception_by_middleware(e, request)\r\n\r\nFile \"/Users/craiga/.ve/will_of_the_prophets-4-oJArpS/lib/python3.7/site-packages/django/core/handlers/base.py\" in _get_response\r\n 113. response = wrapped_callback(request, *callback_args, **callback_kwargs)\r\n\r\nFile \"/Users/craiga/.ve/will_of_the_prophets-4-oJArpS/lib/python3.7/site-packages/django/views/generic/base.py\" in view\r\n 71. return self.dispatch(request, *args, **kwargs)\r\n\r\nFile \"/Users/craiga/.ve/will_of_the_prophets-4-oJArpS/lib/python3.7/site-packages/django/contrib/auth/mixins.py\" in dispatch\r\n 52. return super().dispatch(request, *args, **kwargs)\r\n\r\nFile \"/Users/craiga/.ve/will_of_the_prophets-4-oJArpS/lib/python3.7/site-packages/django/views/generic/base.py\" in dispatch\r\n 97. return handler(request, *args, **kwargs)\r\n\r\nFile \"/Users/craiga/.ve/will_of_the_prophets-4-oJArpS/lib/python3.7/site-packages/django/views/generic/edit.py\" in get\r\n 168. return super().get(request, *args, **kwargs)\r\n\r\nFile \"/Users/craiga/.ve/will_of_the_prophets-4-oJArpS/lib/python3.7/site-packages/django/views/generic/edit.py\" in get\r\n 133. 
return self.render_to_response(self.get_context_data())\r\n\r\nFile \"/Users/craiga/will_of_the_prophets/will_of_the_prophets/views.py\" in get_context_data\r\n 82. board=board.Board(now=last_roll.embargo),\r\n\r\nException Type: AttributeError at /roll/\r\nException Value: 'NoneType' object has no attribute 'embargo'\r\n```\n", "before_files": [{"content": "\"\"\"Views.\"\"\"\n\nfrom django.conf import settings\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.shortcuts import render\nfrom django.urls import reverse\nfrom django.utils import timezone\nfrom django.views.decorators.cache import cache_control\nfrom django.views.decorators.clickjacking import xframe_options_exempt\nfrom django.views.decorators.http import condition\nfrom django.views.generic.edit import CreateView\n\nfrom will_of_the_prophets import board, forms, models\n\n\ndef get_last_modified(request):\n \"\"\"Get board's last modified datetime.\"\"\"\n try:\n return (\n models.Roll.objects.filter(embargo__lte=timezone.now())\n .latest(\"embargo\")\n .embargo\n )\n except models.Roll.DoesNotExist:\n return None\n\n\n@xframe_options_exempt\n@condition(last_modified_func=get_last_modified)\n@cache_control(max_age=3600)\ndef public_board(request):\n \"\"\"\n Board for the public.\n\n Does not take embargoed rolls into account.\n \"\"\"\n response = render(\n request,\n \"will_of_the_prophets/public_board.html\",\n {\n \"board\": board.Board(),\n \"special_square_types\": models.SpecialSquareType.objects.all(),\n },\n )\n\n canonical_url = settings.PUBLIC_BOARD_CANONICAL_URL\n if canonical_url:\n response[\"Link\"] = f'<{canonical_url}>; rel=\"canonical\"'\n\n return response\n\n\n@xframe_options_exempt\n@condition(last_modified_func=get_last_modified)\n@cache_control(max_age=3600)\ndef roll_frequency(request):\n \"\"\"\n Show roll frequency.\n \"\"\"\n roll_count = {1: 0, 2: 0, 3: 0, 4: 0, 5: 0, 6: 0}\n for roll in board.Board().rolls:\n roll_count[roll] += 1\n\n return render(\n request,\n \"will_of_the_prophets/roll_frequency.html\",\n {\"roll_frequency\": roll_count},\n )\n\n\nclass RollView(LoginRequiredMixin, CreateView):\n \"\"\"View for rolling the die.\"\"\"\n\n form_class = forms.RollForm\n template_name = \"will_of_the_prophets/roll.html\"\n\n def get_context_data(self, **kwargs):\n last_roll = models.Roll.objects.order_by(\"-embargo\").first()\n return super().get_context_data(\n **kwargs,\n last_roll=last_roll,\n board=board.Board(now=last_roll.embargo),\n special_square_types=models.SpecialSquareType.objects.all(),\n )\n\n def get_success_url(self):\n return reverse(\"roll\") + \"#chula\"\n", "path": "will_of_the_prophets/views.py"}]}
2,077
176
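
The golden diff in the record above guards against `Roll.objects.order_by("-embargo").first()` returning `None` when no rolls exist yet. A minimal, self-contained sketch of that same guard, using a plain list as a stand-in for the Django queryset — the `Roll` class and `first()` helper below are illustrative only, not the project's code:

```python
class Roll:
    def __init__(self, embargo):
        self.embargo = embargo

def first(rolls):
    """Stand-in for Roll.objects.order_by('-embargo').first(): returns None when empty."""
    return rolls[0] if rolls else None

rolls = []                 # no rolls have been made yet
last_roll = first(rolls)   # -> None

# Buggy behaviour: last_roll.embargo raises AttributeError when last_roll is None.
# Patched behaviour: dereference only after checking for None.
last_roll_embargo = None
if last_roll:
    last_roll_embargo = last_roll.embargo

print(last_roll_embargo)   # None instead of a 500 error page
```

The patched view then passes `board.Board(now=last_roll_embargo)`, which presumably treats `None` as "use the current time"; that assumption is not shown in the record itself.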
gh_patches_debug_3598
rasdani/github-patches
git_diff
Zeroto521__my-data-toolkit-580
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> MAINT: Simplify `register_method_factory` <!-- Thanks for contributing a pull request! Please follow these standard acronyms to start the commit message: - ENH: enhancement - BUG: bug fix - DOC: documentation - TYP: type annotations - TST: addition or modification of tests - MAINT: maintenance commit (refactoring, typos, etc.) - BLD: change related to building - REL: related to releasing - API: an (incompatible) API change - DEP: deprecate something, or remove a deprecated object - DEV: development tool or utility - REV: revert an earlier commit - PERF: performance improvement - BOT: always commit via a bot - CI: related to CI or CD - CLN: Code cleanup --> - [ ] closes #xxxx - [x] whatsnew entry Delete duplicate `wraps` lines </issue> <code> [start of dtoolkit/accessor/register.py] 1 from __future__ import annotations 2 3 from functools import wraps 4 from typing import Callable 5 6 from pandas.api.extensions import register_dataframe_accessor 7 from pandas.api.extensions import register_index_accessor 8 from pandas.api.extensions import register_series_accessor 9 from pandas.util._decorators import doc 10 11 from dtoolkit._typing import SeriesOrFrame 12 13 14 def register_method_factory(register_accessor): 15 """ 16 Let pandas-object like accessor which only hooks class also hooks function easily. 17 18 Read more in the `User Guide`_. 19 20 .. _User Guide: ../../guide/tips_about_accessor.ipynb#Extend-to-Pandas-like-Object 21 22 Parameters 23 ---------- 24 register_accessor : Pandas-object like accessor 25 26 See Also 27 -------- 28 register_dataframe_method 29 register_series_method 30 register_index_method 31 dtoolkit.geoaccessor.register_geoseries_method 32 dtoolkit.geoaccessor.register_geodataframe_method 33 """ 34 35 # based on pandas_flavor/register.py 36 def register_accessor_method(method: Callable, name: str): 37 def method_accessor(pd_obj: SeriesOrFrame): 38 @wraps(method) 39 def wrapper(*args, **kwargs): 40 return method(pd_obj, *args, **kwargs) 41 42 return wrapper 43 44 # Register method as pandas object inner method. 45 register_accessor(name)(method_accessor) 46 47 # Must return method itself, otherwise would get None. 48 return method 49 50 def register_accessor_alias(name: str = None): 51 def wrapper(method: Callable): 52 return register_accessor_method(method, name or method.__name__) 53 54 return wrapper 55 56 @wraps(register_accessor) 57 def decorator(name: Callable | str = None): 58 if callable(name): # Supports `@register_*_method` using. 59 method = name # This 'name' variable actually is a function. 60 return register_accessor_method(method, method.__name__) 61 62 # Supports `@register_*_method()` and `@register_*_method(name="")` using. 63 return register_accessor_alias(name) 64 65 return decorator 66 67 68 @register_method_factory 69 @doc(klass=":class:`~pandas.Series`") 70 def register_series_method(name: str = None): 71 """ 72 {klass} register accessor for human. 73 74 Write method normally, use method naturally. 75 76 Read more in the `User Guide`_. 77 78 .. _User Guide: ../../guide/tips_about_accessor.ipynb 79 80 Parameters 81 ---------- 82 name : str, optional 83 Use the ``method`` name as the default accessor entrance if ``name`` is None. 
84 85 See Also 86 -------- 87 register_dataframe_method 88 register_series_method 89 register_index_method 90 pandas.api.extensions.register_dataframe_accessor 91 pandas.api.extensions.register_series_accessor 92 pandas.api.extensions.register_index_accessor 93 94 Examples 95 -------- 96 In your library code:: 97 98 from __future__ import annotations 99 100 from dtoolkit.accessor import register_dataframe_method 101 from dtoolkit.accessor import register_series_method 102 from dtoolkit.accessor import register_index_method 103 import pandas as pd 104 105 @register_index_method("col") # Support alias name also. 106 @register_series_method("col") 107 @register_dataframe_method(name="col") 108 @register_index_method # Use accessor method's `__name__` as the entrance. 109 @register_series_method 110 @register_dataframe_method 111 def cols(pd_obj) -> int | str | list[int | str] | None: 112 ''' 113 An API to gather :attr:`~pandas.Series.name` and 114 :attr:`~pandas.DataFrame.columns` to one. 115 ''' 116 117 if isinstance(pd_obj, (pd.Series, pd.Index)): 118 return pd_obj.name 119 120 return pd_obj.columns.tolist() 121 122 Back in an interactive IPython session: 123 124 .. code-block:: ipython 125 126 In [1]: import pandas as pd 127 128 In [2]: df = pd.DataFrame( 129 ...: {{ 130 ...: "a": [1, 2], 131 ...: "b": [3, 4], 132 ...: }}, 133 ...: index=pd.Index( 134 ...: ["x", "y"], 135 ...: name="c", 136 ...: ), 137 ...: ) 138 139 In [3]: df 140 Out[3]: 141 a b 142 c 143 x 1 3 144 y 2 4 145 146 Get the columns of DataFrame via `cols` or `col` method 147 148 In [4]: df.col() 149 Out[4]: ['a', 'b'] 150 151 Get name of Series via `cols` or `col` method 152 153 In [5]: df.a.col() 154 Out[5]: 'a' 155 156 Get name of Index via `cols` or `col` method 157 158 In [6]: df.index.col() 159 Out[6]: 'c' 160 """ 161 162 return register_series_accessor(name) 163 164 165 @register_method_factory 166 @doc(register_series_method, klass=":class:`~pandas.DataFrame`") 167 def register_dataframe_method(name: str = None): 168 return register_dataframe_accessor(name) 169 170 171 @register_method_factory 172 @doc(register_series_method, klass=":class:`~pandas.Index`") 173 def register_index_method(name: str = None): 174 return register_index_accessor(name) 175 [end of dtoolkit/accessor/register.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/dtoolkit/accessor/register.py b/dtoolkit/accessor/register.py --- a/dtoolkit/accessor/register.py +++ b/dtoolkit/accessor/register.py @@ -34,6 +34,7 @@ # based on pandas_flavor/register.py def register_accessor_method(method: Callable, name: str): + @wraps(method) def method_accessor(pd_obj: SeriesOrFrame): @wraps(method) def wrapper(*args, **kwargs):
{"golden_diff": "diff --git a/dtoolkit/accessor/register.py b/dtoolkit/accessor/register.py\n--- a/dtoolkit/accessor/register.py\n+++ b/dtoolkit/accessor/register.py\n@@ -34,6 +34,7 @@\n \n # based on pandas_flavor/register.py\n def register_accessor_method(method: Callable, name: str):\n+ @wraps(method)\n def method_accessor(pd_obj: SeriesOrFrame):\n @wraps(method)\n def wrapper(*args, **kwargs):\n", "issue": "MAINT: Simplify `register_method_factory`\n<!--\r\nThanks for contributing a pull request!\r\n\r\nPlease follow these standard acronyms to start the commit message:\r\n\r\n- ENH: enhancement\r\n- BUG: bug fix\r\n- DOC: documentation\r\n- TYP: type annotations\r\n- TST: addition or modification of tests\r\n- MAINT: maintenance commit (refactoring, typos, etc.)\r\n- BLD: change related to building\r\n- REL: related to releasing\r\n- API: an (incompatible) API change\r\n- DEP: deprecate something, or remove a deprecated object\r\n- DEV: development tool or utility\r\n- REV: revert an earlier commit\r\n- PERF: performance improvement\r\n- BOT: always commit via a bot\r\n- CI: related to CI or CD\r\n- CLN: Code cleanup\r\n-->\r\n\r\n- [ ] closes #xxxx\r\n- [x] whatsnew entry\r\n\r\nDelete duplicate `wraps` lines\n", "before_files": [{"content": "from __future__ import annotations\n\nfrom functools import wraps\nfrom typing import Callable\n\nfrom pandas.api.extensions import register_dataframe_accessor\nfrom pandas.api.extensions import register_index_accessor\nfrom pandas.api.extensions import register_series_accessor\nfrom pandas.util._decorators import doc\n\nfrom dtoolkit._typing import SeriesOrFrame\n\n\ndef register_method_factory(register_accessor):\n \"\"\"\n Let pandas-object like accessor which only hooks class also hooks function easily.\n\n Read more in the `User Guide`_.\n\n .. _User Guide: ../../guide/tips_about_accessor.ipynb#Extend-to-Pandas-like-Object\n\n Parameters\n ----------\n register_accessor : Pandas-object like accessor\n\n See Also\n --------\n register_dataframe_method\n register_series_method\n register_index_method\n dtoolkit.geoaccessor.register_geoseries_method\n dtoolkit.geoaccessor.register_geodataframe_method\n \"\"\"\n\n # based on pandas_flavor/register.py\n def register_accessor_method(method: Callable, name: str):\n def method_accessor(pd_obj: SeriesOrFrame):\n @wraps(method)\n def wrapper(*args, **kwargs):\n return method(pd_obj, *args, **kwargs)\n\n return wrapper\n\n # Register method as pandas object inner method.\n register_accessor(name)(method_accessor)\n\n # Must return method itself, otherwise would get None.\n return method\n\n def register_accessor_alias(name: str = None):\n def wrapper(method: Callable):\n return register_accessor_method(method, name or method.__name__)\n\n return wrapper\n\n @wraps(register_accessor)\n def decorator(name: Callable | str = None):\n if callable(name): # Supports `@register_*_method` using.\n method = name # This 'name' variable actually is a function.\n return register_accessor_method(method, method.__name__)\n\n # Supports `@register_*_method()` and `@register_*_method(name=\"\")` using.\n return register_accessor_alias(name)\n\n return decorator\n\n\n@register_method_factory\n@doc(klass=\":class:`~pandas.Series`\")\ndef register_series_method(name: str = None):\n \"\"\"\n {klass} register accessor for human.\n\n Write method normally, use method naturally.\n\n Read more in the `User Guide`_.\n\n .. 
_User Guide: ../../guide/tips_about_accessor.ipynb\n\n Parameters\n ----------\n name : str, optional\n Use the ``method`` name as the default accessor entrance if ``name`` is None.\n\n See Also\n --------\n register_dataframe_method\n register_series_method\n register_index_method\n pandas.api.extensions.register_dataframe_accessor\n pandas.api.extensions.register_series_accessor\n pandas.api.extensions.register_index_accessor\n\n Examples\n --------\n In your library code::\n\n from __future__ import annotations\n\n from dtoolkit.accessor import register_dataframe_method\n from dtoolkit.accessor import register_series_method\n from dtoolkit.accessor import register_index_method\n import pandas as pd\n\n @register_index_method(\"col\") # Support alias name also.\n @register_series_method(\"col\")\n @register_dataframe_method(name=\"col\")\n @register_index_method # Use accessor method's `__name__` as the entrance.\n @register_series_method\n @register_dataframe_method\n def cols(pd_obj) -> int | str | list[int | str] | None:\n '''\n An API to gather :attr:`~pandas.Series.name` and\n :attr:`~pandas.DataFrame.columns` to one.\n '''\n\n if isinstance(pd_obj, (pd.Series, pd.Index)):\n return pd_obj.name\n\n return pd_obj.columns.tolist()\n\n Back in an interactive IPython session:\n\n .. code-block:: ipython\n\n In [1]: import pandas as pd\n\n In [2]: df = pd.DataFrame(\n ...: {{\n ...: \"a\": [1, 2],\n ...: \"b\": [3, 4],\n ...: }},\n ...: index=pd.Index(\n ...: [\"x\", \"y\"],\n ...: name=\"c\",\n ...: ),\n ...: )\n\n In [3]: df\n Out[3]:\n a b\n c\n x 1 3\n y 2 4\n\n Get the columns of DataFrame via `cols` or `col` method\n\n In [4]: df.col()\n Out[4]: ['a', 'b']\n\n Get name of Series via `cols` or `col` method\n\n In [5]: df.a.col()\n Out[5]: 'a'\n\n Get name of Index via `cols` or `col` method\n\n In [6]: df.index.col()\n Out[6]: 'c'\n \"\"\"\n\n return register_series_accessor(name)\n\n\n@register_method_factory\n@doc(register_series_method, klass=\":class:`~pandas.DataFrame`\")\ndef register_dataframe_method(name: str = None):\n return register_dataframe_accessor(name)\n\n\n@register_method_factory\n@doc(register_series_method, klass=\":class:`~pandas.Index`\")\ndef register_index_method(name: str = None):\n return register_index_accessor(name)\n", "path": "dtoolkit/accessor/register.py"}]}
2,297
109
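
The diff in the record above adds a second `functools.wraps(method)` around the inner `method_accessor` of `register_method_factory`. A self-contained sketch of what that extra decorator buys — copying `__name__`/`__doc__` from the registered method onto the accessor object that gets handed to pandas; the `make_accessor` name below is illustrative, not the library's actual API:

```python
from functools import wraps

def make_accessor(method):
    @wraps(method)                 # the line the patch adds: metadata on the accessor itself
    def method_accessor(pd_obj):
        @wraps(method)             # already present: metadata on the bound wrapper
        def wrapper(*args, **kwargs):
            return method(pd_obj, *args, **kwargs)
        return wrapper
    return method_accessor

def cols(pd_obj):
    """Gather Series.name / DataFrame.columns into one API."""
    return getattr(pd_obj, "name", None)

accessor = make_accessor(cols)
print(accessor.__name__)   # 'cols'  (would be 'method_accessor' without the added @wraps)
print(accessor.__doc__)    # docstring of cols, now visible to doc tooling
```

Whether pandas' `register_*_accessor` relies on that metadata is an assumption here; the demonstrable effect is simply that the outer function no longer loses the wrapped method's name and docstring.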
gh_patches_debug_5416
rasdani/github-patches
git_diff
rasterio__rasterio-1192
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> query params in https url is not working Hi I wanted to use rasterio with a https url that has query-params, for example: https://.....?a=a&b=b First I wanted to see if gdal supports that kind of url and gdal does, so I looked for the code in rasterio that is responsible for cutting the url, It can be found here: https://github.com/mapbox/rasterio/blob/master/rasterio/vfs.py#L32 I added in my env the following code: ``` if parts.query: path += "?" + parts.query ``` and it solved the issue, Is there a reason for cutting the querystring? Thanks, Guy query params in https url is not working Hi I wanted to use rasterio with a https url that has query-params, for example: https://.....?a=a&b=b First I wanted to see if gdal supports that kind of url and gdal does, so I looked for the code in rasterio that is responsible for cutting the url, It can be found here: https://github.com/mapbox/rasterio/blob/master/rasterio/vfs.py#L32 I added in my env the following code: ``` if parts.query: path += "?" + parts.query ``` and it solved the issue, Is there a reason for cutting the querystring? Thanks, Guy </issue> <code> [start of rasterio/vfs.py] 1 """Implementation of Apache VFS schemes and URLs.""" 2 3 import os 4 5 from rasterio.compat import urlparse 6 7 8 # NB: As not to propagate fallacies of distributed computing, Rasterio 9 # does not support HTTP or FTP URLs via GDAL's vsicurl handler. Only 10 # the following local filesystem schemes are supported. 11 SCHEMES = { 12 'gzip': 'gzip', 13 'gzip+file': 'gzip', 14 'zip': 'zip', 15 'zip+file': 'zip', 16 'tar': 'tar', 17 'tar+file': 'tar', 18 'https': 'curl', 19 'http': 'curl', 20 's3': 's3'} 21 22 FILE_SCHEMES = [ 23 '', 'file', 'gzip', 'gzip+file', 'zip', 'zip+file', 'tar', 'tar+file'] 24 25 26 def parse_path(uri, vfs=None): 27 """Parse a URI or Apache VFS URL into its parts 28 29 Returns: tuple 30 (path, archive, scheme) 31 """ 32 archive = scheme = None 33 path = uri 34 if vfs: 35 parts = urlparse(vfs) 36 scheme = parts.scheme 37 archive = parts.path 38 if parts.netloc and parts.netloc != 'localhost': # pragma: no cover 39 archive = parts.netloc + archive 40 else: 41 parts = urlparse(path) 42 scheme = parts.scheme 43 path = parts.path 44 if parts.netloc and parts.netloc != 'localhost': 45 path = parts.netloc + path 46 # There are certain URI schemes we favor over GDAL's names. 47 if scheme in SCHEMES: 48 parts = path.split('!') 49 path = parts.pop() if parts else None 50 archive = parts.pop() if parts else None 51 # For filesystem paths. 52 elif scheme.lower() in FILE_SCHEMES: 53 pass 54 # We permit GDAL's idiosyncratic URI-like dataset paths such as 55 # 'netcdf':... to fall right through with no parsed archive 56 # or scheme. 57 else: 58 archive = scheme = None 59 path = uri 60 61 return path, archive, scheme 62 63 64 def vsi_path(path, archive=None, scheme=None): 65 """Convert a parsed path to a GDAL VSI path.""" 66 # If a VSF and archive file are specified, we convert the path to 67 # a GDAL VSI path (see cpl_vsi.h). 
68 if scheme and scheme.startswith('http'): 69 result = "/vsicurl/{0}://{1}".format(scheme, path) 70 elif scheme and scheme == 's3': 71 result = "/vsis3/{0}".format(path) 72 elif scheme and scheme != 'file': 73 if archive: 74 result = '/vsi{0}/{1}/{2}'.format( 75 scheme, archive, path.lstrip('/')) 76 else: 77 result = '/vsi{0}/{1}'.format(scheme, path.lstrip('/')) 78 else: 79 result = path 80 return result 81 [end of rasterio/vfs.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/rasterio/vfs.py b/rasterio/vfs.py --- a/rasterio/vfs.py +++ b/rasterio/vfs.py @@ -41,6 +41,8 @@ parts = urlparse(path) scheme = parts.scheme path = parts.path + if parts.query: + path += "?" + parts.query if parts.netloc and parts.netloc != 'localhost': path = parts.netloc + path # There are certain URI schemes we favor over GDAL's names.
{"golden_diff": "diff --git a/rasterio/vfs.py b/rasterio/vfs.py\n--- a/rasterio/vfs.py\n+++ b/rasterio/vfs.py\n@@ -41,6 +41,8 @@\n parts = urlparse(path)\n scheme = parts.scheme\n path = parts.path\n+ if parts.query:\n+ path += \"?\" + parts.query\n if parts.netloc and parts.netloc != 'localhost':\n path = parts.netloc + path\n # There are certain URI schemes we favor over GDAL's names.\n", "issue": "query params in https url is not working\nHi \r\n\r\nI wanted to use rasterio with a https url that has query-params, for example: https://.....?a=a&b=b\r\nFirst I wanted to see if gdal supports that kind of url and gdal does, so I looked for the code in rasterio that is responsible for cutting the url, \r\n\r\nIt can be found here:\r\nhttps://github.com/mapbox/rasterio/blob/master/rasterio/vfs.py#L32\r\n\r\nI added in my env the following code:\r\n```\r\n if parts.query: \r\n path += \"?\" + parts.query \r\n```\r\n\r\nand it solved the issue, \r\n\r\nIs there a reason for cutting the querystring?\r\n\r\n\r\n\r\nThanks, Guy\r\n\nquery params in https url is not working\nHi \r\n\r\nI wanted to use rasterio with a https url that has query-params, for example: https://.....?a=a&b=b\r\nFirst I wanted to see if gdal supports that kind of url and gdal does, so I looked for the code in rasterio that is responsible for cutting the url, \r\n\r\nIt can be found here:\r\nhttps://github.com/mapbox/rasterio/blob/master/rasterio/vfs.py#L32\r\n\r\nI added in my env the following code:\r\n```\r\n if parts.query: \r\n path += \"?\" + parts.query \r\n```\r\n\r\nand it solved the issue, \r\n\r\nIs there a reason for cutting the querystring?\r\n\r\n\r\n\r\nThanks, Guy\r\n\n", "before_files": [{"content": "\"\"\"Implementation of Apache VFS schemes and URLs.\"\"\"\n\nimport os\n\nfrom rasterio.compat import urlparse\n\n\n# NB: As not to propagate fallacies of distributed computing, Rasterio\n# does not support HTTP or FTP URLs via GDAL's vsicurl handler. Only\n# the following local filesystem schemes are supported.\nSCHEMES = {\n 'gzip': 'gzip',\n 'gzip+file': 'gzip',\n 'zip': 'zip',\n 'zip+file': 'zip',\n 'tar': 'tar',\n 'tar+file': 'tar',\n 'https': 'curl',\n 'http': 'curl',\n 's3': 's3'}\n\nFILE_SCHEMES = [\n '', 'file', 'gzip', 'gzip+file', 'zip', 'zip+file', 'tar', 'tar+file']\n\n\ndef parse_path(uri, vfs=None):\n \"\"\"Parse a URI or Apache VFS URL into its parts\n\n Returns: tuple\n (path, archive, scheme)\n \"\"\"\n archive = scheme = None\n path = uri\n if vfs:\n parts = urlparse(vfs)\n scheme = parts.scheme\n archive = parts.path\n if parts.netloc and parts.netloc != 'localhost': # pragma: no cover\n archive = parts.netloc + archive\n else:\n parts = urlparse(path)\n scheme = parts.scheme\n path = parts.path\n if parts.netloc and parts.netloc != 'localhost':\n path = parts.netloc + path\n # There are certain URI schemes we favor over GDAL's names.\n if scheme in SCHEMES:\n parts = path.split('!')\n path = parts.pop() if parts else None\n archive = parts.pop() if parts else None\n # For filesystem paths.\n elif scheme.lower() in FILE_SCHEMES:\n pass\n # We permit GDAL's idiosyncratic URI-like dataset paths such as\n # 'netcdf':... 
to fall right through with no parsed archive\n # or scheme.\n else:\n archive = scheme = None\n path = uri\n\n return path, archive, scheme\n\n\ndef vsi_path(path, archive=None, scheme=None):\n \"\"\"Convert a parsed path to a GDAL VSI path.\"\"\"\n # If a VSF and archive file are specified, we convert the path to\n # a GDAL VSI path (see cpl_vsi.h).\n if scheme and scheme.startswith('http'):\n result = \"/vsicurl/{0}://{1}\".format(scheme, path)\n elif scheme and scheme == 's3':\n result = \"/vsis3/{0}\".format(path)\n elif scheme and scheme != 'file':\n if archive:\n result = '/vsi{0}/{1}/{2}'.format(\n scheme, archive, path.lstrip('/'))\n else:\n result = '/vsi{0}/{1}'.format(scheme, path.lstrip('/'))\n else:\n result = path\n return result\n", "path": "rasterio/vfs.py"}]}
1,645
118
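
The report and patch in the record above both hinge on `urlparse` keeping the query string out of `.path`, so rebuilding a dataset path from `netloc + path` silently drops `?a=a&b=b`. A runnable illustration — the URL below is made up for the example:

```python
from urllib.parse import urlparse

url = "https://example.com/cog.tif?a=a&b=b"   # hypothetical signed URL

parts = urlparse(url)
path = parts.path                             # '/cog.tif' -- no query string here
if parts.netloc and parts.netloc != 'localhost':
    path = parts.netloc + path                # 'example.com/cog.tif'

# The two lines the patch adds: re-attach the query string GDAL needs.
if parts.query:
    path += "?" + parts.query

print(path)                                   # 'example.com/cog.tif?a=a&b=b'
```

The rest of `vsi_path` then prefixes `/vsicurl/{scheme}://`, so with the query re-attached GDAL receives the full signed URL.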
gh_patches_debug_35266
rasdani/github-patches
git_diff
feast-dev__feast-2430
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Missing documentation for abstract `pull_all_from_table_or_query` offline store method ## Expected Behavior Since https://github.com/feast-dev/feast/pull/2197, offline store method `pull_all_from_table_or_query` must be overridden by custom offline stores. This is currently not documented. Expectations: - [Docstring](https://github.com/feast-dev/feast/blob/b35e1e84720523cef70cba6d6306af8f193b469f/sdk/python/feast/infra/offline_stores/offline_store.py#L203) of `pull_all_from_table_or_query` contains a meaningful description of the method - [Web doc](https://docs.feast.dev/how-to-guides/adding-a-new-offline-store) mentions that `pull_all_from_table_or_query` must be overriden. ## Current Behavior No documentation for `pull_all_from_table_or_query`. </issue> <code> [start of sdk/python/feast/infra/offline_stores/offline_store.py] 1 # Copyright 2019 The Feast Authors 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # https://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 import warnings 15 from abc import ABC, abstractmethod 16 from datetime import datetime 17 from typing import TYPE_CHECKING, List, Optional, Union 18 19 import pandas as pd 20 import pyarrow 21 22 from feast.data_source import DataSource 23 from feast.dqm.errors import ValidationFailed 24 from feast.feature_view import FeatureView 25 from feast.on_demand_feature_view import OnDemandFeatureView 26 from feast.registry import Registry 27 from feast.repo_config import RepoConfig 28 from feast.saved_dataset import SavedDatasetStorage 29 30 if TYPE_CHECKING: 31 from feast.saved_dataset import ValidationReference 32 33 34 class RetrievalMetadata: 35 min_event_timestamp: Optional[datetime] 36 max_event_timestamp: Optional[datetime] 37 38 # List of feature references 39 features: List[str] 40 # List of entity keys + ODFV inputs 41 keys: List[str] 42 43 def __init__( 44 self, 45 features: List[str], 46 keys: List[str], 47 min_event_timestamp: Optional[datetime] = None, 48 max_event_timestamp: Optional[datetime] = None, 49 ): 50 self.features = features 51 self.keys = keys 52 self.min_event_timestamp = min_event_timestamp 53 self.max_event_timestamp = max_event_timestamp 54 55 56 class RetrievalJob(ABC): 57 """RetrievalJob is used to manage the execution of a historical feature retrieval""" 58 59 @property 60 @abstractmethod 61 def full_feature_names(self) -> bool: 62 pass 63 64 @property 65 @abstractmethod 66 def on_demand_feature_views(self) -> Optional[List[OnDemandFeatureView]]: 67 pass 68 69 def to_df( 70 self, validation_reference: Optional["ValidationReference"] = None 71 ) -> pd.DataFrame: 72 """ 73 Return dataset as Pandas DataFrame synchronously including on demand transforms 74 Args: 75 validation_reference: If provided resulting dataset will be validated against this reference profile. 
76 """ 77 features_df = self._to_df_internal() 78 79 if self.on_demand_feature_views: 80 # TODO(adchia): Fix requirement to specify dependent feature views in feature_refs 81 for odfv in self.on_demand_feature_views: 82 features_df = features_df.join( 83 odfv.get_transformed_features_df( 84 features_df, self.full_feature_names, 85 ) 86 ) 87 88 if validation_reference: 89 warnings.warn( 90 "Dataset validation is an experimental feature. " 91 "This API is unstable and it could and most probably will be changed in the future. " 92 "We do not guarantee that future changes will maintain backward compatibility.", 93 RuntimeWarning, 94 ) 95 96 validation_result = validation_reference.profile.validate(features_df) 97 if not validation_result.is_success: 98 raise ValidationFailed(validation_result) 99 100 return features_df 101 102 @abstractmethod 103 def _to_df_internal(self) -> pd.DataFrame: 104 """Return dataset as Pandas DataFrame synchronously""" 105 pass 106 107 @abstractmethod 108 def _to_arrow_internal(self) -> pyarrow.Table: 109 """Return dataset as pyarrow Table synchronously""" 110 pass 111 112 def to_arrow( 113 self, validation_reference: Optional["ValidationReference"] = None 114 ) -> pyarrow.Table: 115 """ 116 Return dataset as pyarrow Table synchronously 117 Args: 118 validation_reference: If provided resulting dataset will be validated against this reference profile. 119 """ 120 if not self.on_demand_feature_views and not validation_reference: 121 return self._to_arrow_internal() 122 123 features_df = self._to_df_internal() 124 if self.on_demand_feature_views: 125 for odfv in self.on_demand_feature_views: 126 features_df = features_df.join( 127 odfv.get_transformed_features_df( 128 features_df, self.full_feature_names, 129 ) 130 ) 131 132 if validation_reference: 133 warnings.warn( 134 "Dataset validation is an experimental feature. " 135 "This API is unstable and it could and most probably will be changed in the future. " 136 "We do not guarantee that future changes will maintain backward compatibility.", 137 RuntimeWarning, 138 ) 139 140 validation_result = validation_reference.profile.validate(features_df) 141 if not validation_result.is_success: 142 raise ValidationFailed(validation_result) 143 144 return pyarrow.Table.from_pandas(features_df) 145 146 @abstractmethod 147 def persist(self, storage: SavedDatasetStorage): 148 """ 149 Run the retrieval and persist the results in the same offline store used for read. 150 """ 151 pass 152 153 @property 154 @abstractmethod 155 def metadata(self) -> Optional[RetrievalMetadata]: 156 """ 157 Return metadata information about retrieval. 158 Should be available even before materializing the dataset itself. 159 """ 160 pass 161 162 163 class OfflineStore(ABC): 164 """ 165 OfflineStore is an object used for all interaction between Feast and the service used for offline storage of 166 features. 167 """ 168 169 @staticmethod 170 @abstractmethod 171 def pull_latest_from_table_or_query( 172 config: RepoConfig, 173 data_source: DataSource, 174 join_key_columns: List[str], 175 feature_name_columns: List[str], 176 event_timestamp_column: str, 177 created_timestamp_column: Optional[str], 178 start_date: datetime, 179 end_date: datetime, 180 ) -> RetrievalJob: 181 """ 182 Note that join_key_columns, feature_name_columns, event_timestamp_column, and created_timestamp_column 183 have all already been mapped to column names of the source table and those column names are the values passed 184 into this function. 
185 """ 186 pass 187 188 @staticmethod 189 @abstractmethod 190 def get_historical_features( 191 config: RepoConfig, 192 feature_views: List[FeatureView], 193 feature_refs: List[str], 194 entity_df: Union[pd.DataFrame, str], 195 registry: Registry, 196 project: str, 197 full_feature_names: bool = False, 198 ) -> RetrievalJob: 199 pass 200 201 @staticmethod 202 @abstractmethod 203 def pull_all_from_table_or_query( 204 config: RepoConfig, 205 data_source: DataSource, 206 join_key_columns: List[str], 207 feature_name_columns: List[str], 208 event_timestamp_column: str, 209 start_date: datetime, 210 end_date: datetime, 211 ) -> RetrievalJob: 212 """ 213 Note that join_key_columns, feature_name_columns, event_timestamp_column, and created_timestamp_column 214 have all already been mapped to column names of the source table and those column names are the values passed 215 into this function. 216 """ 217 pass 218 [end of sdk/python/feast/infra/offline_stores/offline_store.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/sdk/python/feast/infra/offline_stores/offline_store.py b/sdk/python/feast/infra/offline_stores/offline_store.py --- a/sdk/python/feast/infra/offline_stores/offline_store.py +++ b/sdk/python/feast/infra/offline_stores/offline_store.py @@ -179,9 +179,24 @@ end_date: datetime, ) -> RetrievalJob: """ + This method pulls data from the offline store, and the FeatureStore class is used to write + this data into the online store. This method is invoked when running materialization (using + the `feast materialize` or `feast materialize-incremental` commands, or the corresponding + FeatureStore.materialize() method. This method pulls data from the offline store, and the FeatureStore + class is used to write this data into the online store. + Note that join_key_columns, feature_name_columns, event_timestamp_column, and created_timestamp_column have all already been mapped to column names of the source table and those column names are the values passed into this function. + + Args: + config: Repo configuration object + data_source: Data source to pull all of the columns from + join_key_columns: Columns of the join keys + feature_name_columns: Columns of the feature names needed + event_timestamp_column: Timestamp column + start_date: Starting date of query + end_date: Ending date of query """ pass @@ -210,8 +225,19 @@ end_date: datetime, ) -> RetrievalJob: """ + Returns a Retrieval Job for all join key columns, feature name columns, and the event timestamp columns that occur between the start_date and end_date. + Note that join_key_columns, feature_name_columns, event_timestamp_column, and created_timestamp_column have all already been mapped to column names of the source table and those column names are the values passed into this function. + + Args: + config: Repo configuration object + data_source: Data source to pull all of the columns from + join_key_columns: Columns of the join keys + feature_name_columns: Columns of the feature names needed + event_timestamp_column: Timestamp column + start_date: Starting date of query + end_date: Ending date of query """ pass
{"golden_diff": "diff --git a/sdk/python/feast/infra/offline_stores/offline_store.py b/sdk/python/feast/infra/offline_stores/offline_store.py\n--- a/sdk/python/feast/infra/offline_stores/offline_store.py\n+++ b/sdk/python/feast/infra/offline_stores/offline_store.py\n@@ -179,9 +179,24 @@\n end_date: datetime,\n ) -> RetrievalJob:\n \"\"\"\n+ This method pulls data from the offline store, and the FeatureStore class is used to write\n+ this data into the online store. This method is invoked when running materialization (using\n+ the `feast materialize` or `feast materialize-incremental` commands, or the corresponding\n+ FeatureStore.materialize() method. This method pulls data from the offline store, and the FeatureStore\n+ class is used to write this data into the online store.\n+\n Note that join_key_columns, feature_name_columns, event_timestamp_column, and created_timestamp_column\n have all already been mapped to column names of the source table and those column names are the values passed\n into this function.\n+\n+ Args:\n+ config: Repo configuration object\n+ data_source: Data source to pull all of the columns from\n+ join_key_columns: Columns of the join keys\n+ feature_name_columns: Columns of the feature names needed\n+ event_timestamp_column: Timestamp column\n+ start_date: Starting date of query\n+ end_date: Ending date of query\n \"\"\"\n pass\n \n@@ -210,8 +225,19 @@\n end_date: datetime,\n ) -> RetrievalJob:\n \"\"\"\n+ Returns a Retrieval Job for all join key columns, feature name columns, and the event timestamp columns that occur between the start_date and end_date.\n+\n Note that join_key_columns, feature_name_columns, event_timestamp_column, and created_timestamp_column\n have all already been mapped to column names of the source table and those column names are the values passed\n into this function.\n+\n+ Args:\n+ config: Repo configuration object\n+ data_source: Data source to pull all of the columns from\n+ join_key_columns: Columns of the join keys\n+ feature_name_columns: Columns of the feature names needed\n+ event_timestamp_column: Timestamp column\n+ start_date: Starting date of query\n+ end_date: Ending date of query\n \"\"\"\n pass\n", "issue": "Missing documentation for abstract `pull_all_from_table_or_query` offline store method\n## Expected Behavior \r\n\r\nSince https://github.com/feast-dev/feast/pull/2197, offline store method `pull_all_from_table_or_query` must be overridden by custom offline stores. 
This is currently not documented.\r\n\r\nExpectations: \r\n- [Docstring](https://github.com/feast-dev/feast/blob/b35e1e84720523cef70cba6d6306af8f193b469f/sdk/python/feast/infra/offline_stores/offline_store.py#L203) of `pull_all_from_table_or_query` contains a meaningful description of the method\r\n\r\n- [Web doc](https://docs.feast.dev/how-to-guides/adding-a-new-offline-store) mentions that `pull_all_from_table_or_query` must be overriden.\r\n\r\n## Current Behavior\r\nNo documentation for `pull_all_from_table_or_query`.\r\n\r\n\n", "before_files": [{"content": "# Copyright 2019 The Feast Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport warnings\nfrom abc import ABC, abstractmethod\nfrom datetime import datetime\nfrom typing import TYPE_CHECKING, List, Optional, Union\n\nimport pandas as pd\nimport pyarrow\n\nfrom feast.data_source import DataSource\nfrom feast.dqm.errors import ValidationFailed\nfrom feast.feature_view import FeatureView\nfrom feast.on_demand_feature_view import OnDemandFeatureView\nfrom feast.registry import Registry\nfrom feast.repo_config import RepoConfig\nfrom feast.saved_dataset import SavedDatasetStorage\n\nif TYPE_CHECKING:\n from feast.saved_dataset import ValidationReference\n\n\nclass RetrievalMetadata:\n min_event_timestamp: Optional[datetime]\n max_event_timestamp: Optional[datetime]\n\n # List of feature references\n features: List[str]\n # List of entity keys + ODFV inputs\n keys: List[str]\n\n def __init__(\n self,\n features: List[str],\n keys: List[str],\n min_event_timestamp: Optional[datetime] = None,\n max_event_timestamp: Optional[datetime] = None,\n ):\n self.features = features\n self.keys = keys\n self.min_event_timestamp = min_event_timestamp\n self.max_event_timestamp = max_event_timestamp\n\n\nclass RetrievalJob(ABC):\n \"\"\"RetrievalJob is used to manage the execution of a historical feature retrieval\"\"\"\n\n @property\n @abstractmethod\n def full_feature_names(self) -> bool:\n pass\n\n @property\n @abstractmethod\n def on_demand_feature_views(self) -> Optional[List[OnDemandFeatureView]]:\n pass\n\n def to_df(\n self, validation_reference: Optional[\"ValidationReference\"] = None\n ) -> pd.DataFrame:\n \"\"\"\n Return dataset as Pandas DataFrame synchronously including on demand transforms\n Args:\n validation_reference: If provided resulting dataset will be validated against this reference profile.\n \"\"\"\n features_df = self._to_df_internal()\n\n if self.on_demand_feature_views:\n # TODO(adchia): Fix requirement to specify dependent feature views in feature_refs\n for odfv in self.on_demand_feature_views:\n features_df = features_df.join(\n odfv.get_transformed_features_df(\n features_df, self.full_feature_names,\n )\n )\n\n if validation_reference:\n warnings.warn(\n \"Dataset validation is an experimental feature. \"\n \"This API is unstable and it could and most probably will be changed in the future. 
\"\n \"We do not guarantee that future changes will maintain backward compatibility.\",\n RuntimeWarning,\n )\n\n validation_result = validation_reference.profile.validate(features_df)\n if not validation_result.is_success:\n raise ValidationFailed(validation_result)\n\n return features_df\n\n @abstractmethod\n def _to_df_internal(self) -> pd.DataFrame:\n \"\"\"Return dataset as Pandas DataFrame synchronously\"\"\"\n pass\n\n @abstractmethod\n def _to_arrow_internal(self) -> pyarrow.Table:\n \"\"\"Return dataset as pyarrow Table synchronously\"\"\"\n pass\n\n def to_arrow(\n self, validation_reference: Optional[\"ValidationReference\"] = None\n ) -> pyarrow.Table:\n \"\"\"\n Return dataset as pyarrow Table synchronously\n Args:\n validation_reference: If provided resulting dataset will be validated against this reference profile.\n \"\"\"\n if not self.on_demand_feature_views and not validation_reference:\n return self._to_arrow_internal()\n\n features_df = self._to_df_internal()\n if self.on_demand_feature_views:\n for odfv in self.on_demand_feature_views:\n features_df = features_df.join(\n odfv.get_transformed_features_df(\n features_df, self.full_feature_names,\n )\n )\n\n if validation_reference:\n warnings.warn(\n \"Dataset validation is an experimental feature. \"\n \"This API is unstable and it could and most probably will be changed in the future. \"\n \"We do not guarantee that future changes will maintain backward compatibility.\",\n RuntimeWarning,\n )\n\n validation_result = validation_reference.profile.validate(features_df)\n if not validation_result.is_success:\n raise ValidationFailed(validation_result)\n\n return pyarrow.Table.from_pandas(features_df)\n\n @abstractmethod\n def persist(self, storage: SavedDatasetStorage):\n \"\"\"\n Run the retrieval and persist the results in the same offline store used for read.\n \"\"\"\n pass\n\n @property\n @abstractmethod\n def metadata(self) -> Optional[RetrievalMetadata]:\n \"\"\"\n Return metadata information about retrieval.\n Should be available even before materializing the dataset itself.\n \"\"\"\n pass\n\n\nclass OfflineStore(ABC):\n \"\"\"\n OfflineStore is an object used for all interaction between Feast and the service used for offline storage of\n features.\n \"\"\"\n\n @staticmethod\n @abstractmethod\n def pull_latest_from_table_or_query(\n config: RepoConfig,\n data_source: DataSource,\n join_key_columns: List[str],\n feature_name_columns: List[str],\n event_timestamp_column: str,\n created_timestamp_column: Optional[str],\n start_date: datetime,\n end_date: datetime,\n ) -> RetrievalJob:\n \"\"\"\n Note that join_key_columns, feature_name_columns, event_timestamp_column, and created_timestamp_column\n have all already been mapped to column names of the source table and those column names are the values passed\n into this function.\n \"\"\"\n pass\n\n @staticmethod\n @abstractmethod\n def get_historical_features(\n config: RepoConfig,\n feature_views: List[FeatureView],\n feature_refs: List[str],\n entity_df: Union[pd.DataFrame, str],\n registry: Registry,\n project: str,\n full_feature_names: bool = False,\n ) -> RetrievalJob:\n pass\n\n @staticmethod\n @abstractmethod\n def pull_all_from_table_or_query(\n config: RepoConfig,\n data_source: DataSource,\n join_key_columns: List[str],\n feature_name_columns: List[str],\n event_timestamp_column: str,\n start_date: datetime,\n end_date: datetime,\n ) -> RetrievalJob:\n \"\"\"\n Note that join_key_columns, feature_name_columns, event_timestamp_column, and 
created_timestamp_column\n have all already been mapped to column names of the source table and those column names are the values passed\n into this function.\n \"\"\"\n pass\n", "path": "sdk/python/feast/infra/offline_stores/offline_store.py"}]}
2,778
537
gh_patches_debug_2624
rasdani/github-patches
git_diff
zestedesavoir__zds-site-6488
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Possible erreur 500 à la résolution d'une alerte sur un contenu qui n'est plus public Rapporté par Sentry. J'ai eu du mal à comprendre comment le bug a pu se produire, mais j'ai réussi à le reproduire (d'une façon peut-être un peu tirée par les cheveux...). **Comment reproduire ?** 1. Se connecter en tant que `user1` 2. Signaler un billet 3. Se connecter en tant que `staff` 4. Ouvrir la page du billet signalé dans deux onglets différents 5. Sur un des onglets, dépublier le billet 6. Sur l'autre onglet, résoudre l'alerte (ne pas recharger la page juste avant, le billet n'est en fait plus publié, c'est là qu'est l'astuce) Une erreur 500 va alors apparaître. Elle provient d'ici : https://github.com/zestedesavoir/zds-site/blob/c06671c4901a95c30f31067c09d5e4526fd86575/zds/tutorialv2/views/alerts.py#L88 Le contenu n'a plus de version publique, donc plus d'URL publique, et `content.get_absolute_url_online()` renvoie alors `''`. La correction de ce bug passe sans doute par la vérification si l'alerte est déjà résolue ou si le contenu signalé a bien une version publique : si l'une de ces conditions n'est pas remplie, une erreur 404 devrait être levée. </issue> <code> [start of zds/tutorialv2/views/alerts.py] 1 from datetime import datetime 2 3 from django.contrib import messages 4 from django.core.exceptions import PermissionDenied 5 from django.contrib.auth.mixins import LoginRequiredMixin 6 from django.db import transaction 7 from django.http import Http404 8 from django.shortcuts import get_object_or_404, redirect 9 from django.template.loader import render_to_string 10 from django.utils.decorators import method_decorator 11 from django.utils.translation import gettext_lazy as _ 12 from django.views.generic import FormView 13 14 from zds.tutorialv2.models import TYPE_CHOICES_DICT 15 from zds.tutorialv2.models.database import PublishableContent 16 from zds.utils.models import Alert 17 18 19 class SendContentAlert(LoginRequiredMixin, FormView): 20 http_method_names = ["post"] 21 22 @method_decorator(transaction.atomic) 23 def dispatch(self, *args, **kwargs): 24 return super().dispatch(*args, **kwargs) 25 26 def post(self, request, *args, **kwargs): 27 try: 28 content_pk = int(self.kwargs["pk"]) 29 except (KeyError, ValueError): 30 raise Http404("Identifiant manquant ou conversion en entier impossible.") 31 content = get_object_or_404(PublishableContent, pk=content_pk) 32 33 if len(request.POST["signal_text"].strip()) == 0: 34 messages.error(request, _("La raison du signalement ne peut pas être vide.")) 35 else: 36 alert = Alert( 37 author=request.user, 38 content=content, 39 scope="CONTENT", 40 text=request.POST["signal_text"], 41 pubdate=datetime.now(), 42 ) 43 alert.save() 44 45 human_content_type = TYPE_CHOICES_DICT[content.type].lower() 46 messages.success(self.request, _("Ce {} a bien été signalé aux modérateurs.").format(human_content_type)) 47 48 return redirect(content.get_absolute_url_online()) 49 50 51 class SolveContentAlert(LoginRequiredMixin, FormView): 52 @method_decorator(transaction.atomic) 53 def dispatch(self, *args, **kwargs): 54 return super().dispatch(*args, **kwargs) 55 56 def post(self, request, *args, **kwargs): 57 if not request.user.has_perm("tutorialv2.change_contentreaction"): 58 raise PermissionDenied 59 try: 60 alert = get_object_or_404(Alert, pk=int(request.POST["alert_pk"])) 61 content = PublishableContent.objects.get(pk=alert.content.id) 62 except 
(KeyError, ValueError): 63 raise Http404("L'alerte n'existe pas.") 64 65 resolve_reason = "" 66 msg_title = "" 67 msg_content = "" 68 if "text" in request.POST and request.POST["text"]: 69 resolve_reason = request.POST["text"] 70 authors = alert.content.authors.values_list("username", flat=True) 71 authors = ", ".join(authors) 72 msg_title = _("Résolution d'alerte : {0}").format(content.title) 73 msg_content = render_to_string( 74 "tutorialv2/messages/resolve_alert.md", 75 { 76 "content": content, 77 "url": content.get_absolute_url_online(), 78 "name": alert.author.username, 79 "target_name": authors, 80 "modo_name": request.user.username, 81 "message": "\n".join(["> " + line for line in resolve_reason.split("\n")]), 82 "alert_text": "\n".join(["> " + line for line in alert.text.split("\n")]), 83 }, 84 ) 85 alert.solve(request.user, resolve_reason, msg_title, msg_content) 86 87 messages.success(self.request, _("L'alerte a bien été résolue.")) 88 return redirect(content.get_absolute_url_online()) 89 [end of zds/tutorialv2/views/alerts.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/zds/tutorialv2/views/alerts.py b/zds/tutorialv2/views/alerts.py --- a/zds/tutorialv2/views/alerts.py +++ b/zds/tutorialv2/views/alerts.py @@ -62,6 +62,9 @@ except (KeyError, ValueError): raise Http404("L'alerte n'existe pas.") + if alert.solved: + raise Http404("L'alerte a déjà été résolue.") + resolve_reason = "" msg_title = "" msg_content = ""
{"golden_diff": "diff --git a/zds/tutorialv2/views/alerts.py b/zds/tutorialv2/views/alerts.py\n--- a/zds/tutorialv2/views/alerts.py\n+++ b/zds/tutorialv2/views/alerts.py\n@@ -62,6 +62,9 @@\n except (KeyError, ValueError):\n raise Http404(\"L'alerte n'existe pas.\")\n \n+ if alert.solved:\n+ raise Http404(\"L'alerte a d\u00e9j\u00e0 \u00e9t\u00e9 r\u00e9solue.\")\n+\n resolve_reason = \"\"\n msg_title = \"\"\n msg_content = \"\"\n", "issue": "Possible erreur 500 \u00e0 la r\u00e9solution d'une alerte sur un contenu qui n'est plus public\nRapport\u00e9 par Sentry. J'ai eu du mal \u00e0 comprendre comment le bug a pu se produire, mais j'ai r\u00e9ussi \u00e0 le reproduire (d'une fa\u00e7on peut-\u00eatre un peu tir\u00e9e par les cheveux...).\r\n\r\n**Comment reproduire ?**\r\n\r\n1. Se connecter en tant que `user1`\r\n2. Signaler un billet\r\n3. Se connecter en tant que `staff`\r\n4. Ouvrir la page du billet signal\u00e9 dans deux onglets diff\u00e9rents\r\n5. Sur un des onglets, d\u00e9publier le billet\r\n6. Sur l'autre onglet, r\u00e9soudre l'alerte (ne pas recharger la page juste avant, le billet n'est en fait plus publi\u00e9, c'est l\u00e0 qu'est l'astuce)\r\n\r\nUne erreur 500 va alors appara\u00eetre. Elle provient d'ici : https://github.com/zestedesavoir/zds-site/blob/c06671c4901a95c30f31067c09d5e4526fd86575/zds/tutorialv2/views/alerts.py#L88\r\nLe contenu n'a plus de version publique, donc plus d'URL publique, et `content.get_absolute_url_online()` renvoie alors `''`.\r\n\r\nLa correction de ce bug passe sans doute par la v\u00e9rification si l'alerte est d\u00e9j\u00e0 r\u00e9solue ou si le contenu signal\u00e9 a bien une version publique : si l'une de ces conditions n'est pas remplie, une erreur 404 devrait \u00eatre lev\u00e9e.\r\n\n", "before_files": [{"content": "from datetime import datetime\n\nfrom django.contrib import messages\nfrom django.core.exceptions import PermissionDenied\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.db import transaction\nfrom django.http import Http404\nfrom django.shortcuts import get_object_or_404, redirect\nfrom django.template.loader import render_to_string\nfrom django.utils.decorators import method_decorator\nfrom django.utils.translation import gettext_lazy as _\nfrom django.views.generic import FormView\n\nfrom zds.tutorialv2.models import TYPE_CHOICES_DICT\nfrom zds.tutorialv2.models.database import PublishableContent\nfrom zds.utils.models import Alert\n\n\nclass SendContentAlert(LoginRequiredMixin, FormView):\n http_method_names = [\"post\"]\n\n @method_decorator(transaction.atomic)\n def dispatch(self, *args, **kwargs):\n return super().dispatch(*args, **kwargs)\n\n def post(self, request, *args, **kwargs):\n try:\n content_pk = int(self.kwargs[\"pk\"])\n except (KeyError, ValueError):\n raise Http404(\"Identifiant manquant ou conversion en entier impossible.\")\n content = get_object_or_404(PublishableContent, pk=content_pk)\n\n if len(request.POST[\"signal_text\"].strip()) == 0:\n messages.error(request, _(\"La raison du signalement ne peut pas \u00eatre vide.\"))\n else:\n alert = Alert(\n author=request.user,\n content=content,\n scope=\"CONTENT\",\n text=request.POST[\"signal_text\"],\n pubdate=datetime.now(),\n )\n alert.save()\n\n human_content_type = TYPE_CHOICES_DICT[content.type].lower()\n messages.success(self.request, _(\"Ce {} a bien \u00e9t\u00e9 signal\u00e9 aux mod\u00e9rateurs.\").format(human_content_type))\n\n return redirect(content.get_absolute_url_online())\n\n\nclass 
SolveContentAlert(LoginRequiredMixin, FormView):\n @method_decorator(transaction.atomic)\n def dispatch(self, *args, **kwargs):\n return super().dispatch(*args, **kwargs)\n\n def post(self, request, *args, **kwargs):\n if not request.user.has_perm(\"tutorialv2.change_contentreaction\"):\n raise PermissionDenied\n try:\n alert = get_object_or_404(Alert, pk=int(request.POST[\"alert_pk\"]))\n content = PublishableContent.objects.get(pk=alert.content.id)\n except (KeyError, ValueError):\n raise Http404(\"L'alerte n'existe pas.\")\n\n resolve_reason = \"\"\n msg_title = \"\"\n msg_content = \"\"\n if \"text\" in request.POST and request.POST[\"text\"]:\n resolve_reason = request.POST[\"text\"]\n authors = alert.content.authors.values_list(\"username\", flat=True)\n authors = \", \".join(authors)\n msg_title = _(\"R\u00e9solution d'alerte : {0}\").format(content.title)\n msg_content = render_to_string(\n \"tutorialv2/messages/resolve_alert.md\",\n {\n \"content\": content,\n \"url\": content.get_absolute_url_online(),\n \"name\": alert.author.username,\n \"target_name\": authors,\n \"modo_name\": request.user.username,\n \"message\": \"\\n\".join([\"> \" + line for line in resolve_reason.split(\"\\n\")]),\n \"alert_text\": \"\\n\".join([\"> \" + line for line in alert.text.split(\"\\n\")]),\n },\n )\n alert.solve(request.user, resolve_reason, msg_title, msg_content)\n\n messages.success(self.request, _(\"L'alerte a bien \u00e9t\u00e9 r\u00e9solue.\"))\n return redirect(content.get_absolute_url_online())\n", "path": "zds/tutorialv2/views/alerts.py"}]}
1,842
127
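
The patch in the record above turns a second resolution of the same alert (e.g. from a second browser tab, after the content was unpublished) into a 404 instead of a 500. A stripped-down, framework-free sketch of that idempotency guard — the classes below mimic the view's flow but are not the project's code:

```python
class Http404(Exception):
    pass

class Alert:
    def __init__(self):
        self.solved = False
    def solve(self):
        self.solved = True

def solve_alert(alert):
    if alert.solved:                         # guard added by the fix
        raise Http404("L'alerte a déjà été résolue.")
    alert.solve()
    return "redirect to content.get_absolute_url_online()"

alert = Alert()
print(solve_alert(alert))                    # first tab: resolution succeeds
try:
    solve_alert(alert)                       # second tab: clean 404 instead of a 500
except Http404 as exc:
    print(exc)
```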
gh_patches_debug_30624
rasdani/github-patches
git_diff
pyro-ppl__pyro-1702
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Get error when running lda.py with “pyro.enable_validation(True)” As title described, I add "pyro.enable_validation(True)" at the head of script (examples/lda.py), and run with "python lda.py". I get an error, saying "ValueError: Error while computing log_prob at site 'topic_words':". However, the original script without "pyro.enable_validation(True)" can run smoothly. Thanks for suggestion. ### Environment - Centos 7,python 3.6. - PyTorch: 1.0.0 - Pyro version: 0.3.0 </issue> <code> [start of examples/lda.py] 1 """ 2 This example demonstrates how to marginalize out discrete assignment variables 3 in a Pyro model. 4 5 Our example model is Latent Dirichlet Allocation. While the model in this 6 example does work, it is not the recommended way of coding up LDA in Pyro. 7 Whereas the model in this example treats documents as vectors of categorical 8 variables (vectors of word ids), it is usually more efficient to treat 9 documents as bags of words (histograms of word counts). 10 """ 11 from __future__ import absolute_import, division, print_function 12 13 import argparse 14 import functools 15 import logging 16 17 import torch 18 from torch import nn 19 from torch.distributions import constraints 20 21 import pyro 22 import pyro.distributions as dist 23 from pyro.infer import SVI, JitTraceEnum_ELBO, TraceEnum_ELBO 24 from pyro.optim import Adam 25 26 logging.basicConfig(format='%(relativeCreated) 9d %(message)s', level=logging.INFO) 27 28 29 # This is a fully generative model of a batch of documents. 30 # data is a [num_words_per_doc, num_documents] shaped array of word ids 31 # (specifically it is not a histogram). We assume in this simple example 32 # that all documents have the same number of words. 33 def model(data=None, args=None, batch_size=None): 34 # Globals. 35 with pyro.plate("topics", args.num_topics): 36 topic_weights = pyro.sample("topic_weights", dist.Gamma(1. / args.num_topics, 1.)) 37 topic_words = pyro.sample("topic_words", 38 dist.Dirichlet(torch.ones(args.num_words) / args.num_words)) 39 40 # Locals. 41 with pyro.plate("documents", args.num_docs) as ind: 42 if data is not None: 43 with pyro.util.ignore_jit_warnings(): 44 assert data.shape == (args.num_words_per_doc, args.num_docs) 45 data = data[:, ind] 46 doc_topics = pyro.sample("doc_topics", dist.Dirichlet(topic_weights)) 47 with pyro.plate("words", args.num_words_per_doc): 48 # The word_topics variable is marginalized out during inference, 49 # achieved by specifying infer={"enumerate": "parallel"} and using 50 # TraceEnum_ELBO for inference. Thus we can ignore this variable in 51 # the guide. 52 word_topics = pyro.sample("word_topics", dist.Categorical(doc_topics), 53 infer={"enumerate": "parallel"}) 54 data = pyro.sample("doc_words", dist.Categorical(topic_words[word_topics]), 55 obs=data) 56 57 return topic_weights, topic_words, data 58 59 60 # We will use amortized inference of the local topic variables, achieved by a 61 # multi-layer perceptron. We'll wrap the guide in an nn.Module. 
62 def make_predictor(args): 63 layer_sizes = ([args.num_words] + 64 [int(s) for s in args.layer_sizes.split('-')] + 65 [args.num_topics]) 66 logging.info('Creating MLP with sizes {}'.format(layer_sizes)) 67 layers = [] 68 for in_size, out_size in zip(layer_sizes, layer_sizes[1:]): 69 layer = nn.Linear(in_size, out_size) 70 layer.weight.data.normal_(0, 0.001) 71 layer.bias.data.normal_(0, 0.001) 72 layers.append(layer) 73 layers.append(nn.Sigmoid()) 74 return nn.Sequential(*layers) 75 76 77 def parametrized_guide(predictor, data, args, batch_size=None): 78 # Use a conjugate guide for global variables. 79 topic_weights_posterior = pyro.param( 80 "topic_weights_posterior", 81 lambda: torch.ones(args.num_topics) / args.num_topics, 82 constraint=constraints.positive) 83 topic_words_posterior = pyro.param( 84 "topic_words_posterior", 85 lambda: torch.ones(args.num_topics, args.num_words) / args.num_words, 86 constraint=constraints.positive) 87 with pyro.plate("topics", args.num_topics): 88 pyro.sample("topic_weights", dist.Gamma(topic_weights_posterior, 1.)) 89 pyro.sample("topic_words", dist.Dirichlet(topic_words_posterior)) 90 91 # Use an amortized guide for local variables. 92 pyro.module("predictor", predictor) 93 with pyro.plate("documents", args.num_docs, batch_size) as ind: 94 # The neural network will operate on histograms rather than word 95 # index vectors, so we'll convert the raw data to a histogram. 96 if torch._C._get_tracing_state(): 97 counts = torch.eye(1024)[data[:, ind]].sum(0).t() 98 else: 99 counts = torch.zeros(args.num_words, ind.size(0)) 100 counts.scatter_add_(0, data[:, ind], torch.tensor(1.).expand(counts.shape)) 101 doc_topics = predictor(counts.transpose(0, 1)) 102 pyro.sample("doc_topics", dist.Delta(doc_topics, event_dim=1)) 103 104 105 def main(args): 106 logging.info('Generating data') 107 pyro.set_rng_seed(0) 108 # We can generate synthetic data directly by calling the model. 109 true_topic_weights, true_topic_words, data = model(args=args) 110 111 # We'll train using SVI. 
112 logging.info('-' * 40) 113 logging.info('Training on {} documents'.format(args.num_docs)) 114 predictor = make_predictor(args) 115 guide = functools.partial(parametrized_guide, predictor) 116 Elbo = JitTraceEnum_ELBO if args.jit else TraceEnum_ELBO 117 elbo = Elbo(max_plate_nesting=2) 118 optim = Adam({'lr': args.learning_rate}) 119 svi = SVI(model, guide, optim, elbo) 120 logging.info('Step\tLoss') 121 for step in range(args.num_steps): 122 loss = svi.step(data, args=args, batch_size=args.batch_size) 123 if step % 10 == 0: 124 logging.info('{: >5d}\t{}'.format(step, loss)) 125 loss = elbo.loss(model, guide, data, args=args) 126 logging.info('final loss = {}'.format(loss)) 127 128 129 if __name__ == '__main__': 130 assert pyro.__version__.startswith('0.3.0') 131 parser = argparse.ArgumentParser(description="Amortized Latent Dirichlet Allocation") 132 parser.add_argument("-t", "--num-topics", default=8, type=int) 133 parser.add_argument("-w", "--num-words", default=1024, type=int) 134 parser.add_argument("-d", "--num-docs", default=1000, type=int) 135 parser.add_argument("-wd", "--num-words-per-doc", default=64, type=int) 136 parser.add_argument("-n", "--num-steps", default=1000, type=int) 137 parser.add_argument("-l", "--layer-sizes", default="100-100") 138 parser.add_argument("-lr", "--learning-rate", default=0.001, type=float) 139 parser.add_argument("-b", "--batch-size", default=32, type=int) 140 parser.add_argument('--jit', action='store_true') 141 args = parser.parse_args() 142 main(args) 143 [end of examples/lda.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/examples/lda.py b/examples/lda.py --- a/examples/lda.py +++ b/examples/lda.py @@ -71,6 +71,7 @@ layer.bias.data.normal_(0, 0.001) layers.append(layer) layers.append(nn.Sigmoid()) + layers.append(nn.Softmax(dim=-1)) return nn.Sequential(*layers) @@ -78,12 +79,12 @@ # Use a conjugate guide for global variables. topic_weights_posterior = pyro.param( "topic_weights_posterior", - lambda: torch.ones(args.num_topics) / args.num_topics, + lambda: torch.ones(args.num_topics), constraint=constraints.positive) topic_words_posterior = pyro.param( "topic_words_posterior", - lambda: torch.ones(args.num_topics, args.num_words) / args.num_words, - constraint=constraints.positive) + lambda: torch.ones(args.num_topics, args.num_words), + constraint=constraints.greater_than(0.5)) with pyro.plate("topics", args.num_topics): pyro.sample("topic_weights", dist.Gamma(topic_weights_posterior, 1.)) pyro.sample("topic_words", dist.Dirichlet(topic_words_posterior)) @@ -105,6 +106,9 @@ def main(args): logging.info('Generating data') pyro.set_rng_seed(0) + pyro.clear_param_store() + pyro.enable_validation(True) + # We can generate synthetic data directly by calling the model. true_topic_weights, true_topic_words, data = model(args=args)
{"golden_diff": "diff --git a/examples/lda.py b/examples/lda.py\n--- a/examples/lda.py\n+++ b/examples/lda.py\n@@ -71,6 +71,7 @@\n layer.bias.data.normal_(0, 0.001)\n layers.append(layer)\n layers.append(nn.Sigmoid())\n+ layers.append(nn.Softmax(dim=-1))\n return nn.Sequential(*layers)\n \n \n@@ -78,12 +79,12 @@\n # Use a conjugate guide for global variables.\n topic_weights_posterior = pyro.param(\n \"topic_weights_posterior\",\n- lambda: torch.ones(args.num_topics) / args.num_topics,\n+ lambda: torch.ones(args.num_topics),\n constraint=constraints.positive)\n topic_words_posterior = pyro.param(\n \"topic_words_posterior\",\n- lambda: torch.ones(args.num_topics, args.num_words) / args.num_words,\n- constraint=constraints.positive)\n+ lambda: torch.ones(args.num_topics, args.num_words),\n+ constraint=constraints.greater_than(0.5))\n with pyro.plate(\"topics\", args.num_topics):\n pyro.sample(\"topic_weights\", dist.Gamma(topic_weights_posterior, 1.))\n pyro.sample(\"topic_words\", dist.Dirichlet(topic_words_posterior))\n@@ -105,6 +106,9 @@\n def main(args):\n logging.info('Generating data')\n pyro.set_rng_seed(0)\n+ pyro.clear_param_store()\n+ pyro.enable_validation(True)\n+\n # We can generate synthetic data directly by calling the model.\n true_topic_weights, true_topic_words, data = model(args=args)\n", "issue": "Get error when running lda.py with \u201cpyro.enable_validation(True)\u201d \n\r\nAs title described, I add \"pyro.enable_validation(True)\" at the head of script (examples/lda.py), and run with \"python lda.py\".\r\n\r\nI get an error, saying \"ValueError: Error while computing log_prob at site 'topic_words':\".\r\n\r\nHowever, the original script without \"pyro.enable_validation(True)\" can run smoothly.\r\n\r\nThanks for suggestion. \r\n\r\n### Environment\r\n - Centos 7,python 3.6.\r\n - PyTorch: 1.0.0\r\n - Pyro version: 0.3.0\r\n\r\n\n", "before_files": [{"content": "\"\"\"\nThis example demonstrates how to marginalize out discrete assignment variables\nin a Pyro model.\n\nOur example model is Latent Dirichlet Allocation. While the model in this\nexample does work, it is not the recommended way of coding up LDA in Pyro.\nWhereas the model in this example treats documents as vectors of categorical\nvariables (vectors of word ids), it is usually more efficient to treat\ndocuments as bags of words (histograms of word counts).\n\"\"\"\nfrom __future__ import absolute_import, division, print_function\n\nimport argparse\nimport functools\nimport logging\n\nimport torch\nfrom torch import nn\nfrom torch.distributions import constraints\n\nimport pyro\nimport pyro.distributions as dist\nfrom pyro.infer import SVI, JitTraceEnum_ELBO, TraceEnum_ELBO\nfrom pyro.optim import Adam\n\nlogging.basicConfig(format='%(relativeCreated) 9d %(message)s', level=logging.INFO)\n\n\n# This is a fully generative model of a batch of documents.\n# data is a [num_words_per_doc, num_documents] shaped array of word ids\n# (specifically it is not a histogram). We assume in this simple example\n# that all documents have the same number of words.\ndef model(data=None, args=None, batch_size=None):\n # Globals.\n with pyro.plate(\"topics\", args.num_topics):\n topic_weights = pyro.sample(\"topic_weights\", dist.Gamma(1. 
/ args.num_topics, 1.))\n topic_words = pyro.sample(\"topic_words\",\n dist.Dirichlet(torch.ones(args.num_words) / args.num_words))\n\n # Locals.\n with pyro.plate(\"documents\", args.num_docs) as ind:\n if data is not None:\n with pyro.util.ignore_jit_warnings():\n assert data.shape == (args.num_words_per_doc, args.num_docs)\n data = data[:, ind]\n doc_topics = pyro.sample(\"doc_topics\", dist.Dirichlet(topic_weights))\n with pyro.plate(\"words\", args.num_words_per_doc):\n # The word_topics variable is marginalized out during inference,\n # achieved by specifying infer={\"enumerate\": \"parallel\"} and using\n # TraceEnum_ELBO for inference. Thus we can ignore this variable in\n # the guide.\n word_topics = pyro.sample(\"word_topics\", dist.Categorical(doc_topics),\n infer={\"enumerate\": \"parallel\"})\n data = pyro.sample(\"doc_words\", dist.Categorical(topic_words[word_topics]),\n obs=data)\n\n return topic_weights, topic_words, data\n\n\n# We will use amortized inference of the local topic variables, achieved by a\n# multi-layer perceptron. We'll wrap the guide in an nn.Module.\ndef make_predictor(args):\n layer_sizes = ([args.num_words] +\n [int(s) for s in args.layer_sizes.split('-')] +\n [args.num_topics])\n logging.info('Creating MLP with sizes {}'.format(layer_sizes))\n layers = []\n for in_size, out_size in zip(layer_sizes, layer_sizes[1:]):\n layer = nn.Linear(in_size, out_size)\n layer.weight.data.normal_(0, 0.001)\n layer.bias.data.normal_(0, 0.001)\n layers.append(layer)\n layers.append(nn.Sigmoid())\n return nn.Sequential(*layers)\n\n\ndef parametrized_guide(predictor, data, args, batch_size=None):\n # Use a conjugate guide for global variables.\n topic_weights_posterior = pyro.param(\n \"topic_weights_posterior\",\n lambda: torch.ones(args.num_topics) / args.num_topics,\n constraint=constraints.positive)\n topic_words_posterior = pyro.param(\n \"topic_words_posterior\",\n lambda: torch.ones(args.num_topics, args.num_words) / args.num_words,\n constraint=constraints.positive)\n with pyro.plate(\"topics\", args.num_topics):\n pyro.sample(\"topic_weights\", dist.Gamma(topic_weights_posterior, 1.))\n pyro.sample(\"topic_words\", dist.Dirichlet(topic_words_posterior))\n\n # Use an amortized guide for local variables.\n pyro.module(\"predictor\", predictor)\n with pyro.plate(\"documents\", args.num_docs, batch_size) as ind:\n # The neural network will operate on histograms rather than word\n # index vectors, so we'll convert the raw data to a histogram.\n if torch._C._get_tracing_state():\n counts = torch.eye(1024)[data[:, ind]].sum(0).t()\n else:\n counts = torch.zeros(args.num_words, ind.size(0))\n counts.scatter_add_(0, data[:, ind], torch.tensor(1.).expand(counts.shape))\n doc_topics = predictor(counts.transpose(0, 1))\n pyro.sample(\"doc_topics\", dist.Delta(doc_topics, event_dim=1))\n\n\ndef main(args):\n logging.info('Generating data')\n pyro.set_rng_seed(0)\n # We can generate synthetic data directly by calling the model.\n true_topic_weights, true_topic_words, data = model(args=args)\n\n # We'll train using SVI.\n logging.info('-' * 40)\n logging.info('Training on {} documents'.format(args.num_docs))\n predictor = make_predictor(args)\n guide = functools.partial(parametrized_guide, predictor)\n Elbo = JitTraceEnum_ELBO if args.jit else TraceEnum_ELBO\n elbo = Elbo(max_plate_nesting=2)\n optim = Adam({'lr': args.learning_rate})\n svi = SVI(model, guide, optim, elbo)\n logging.info('Step\\tLoss')\n for step in range(args.num_steps):\n loss = svi.step(data, args=args, 
batch_size=args.batch_size)\n if step % 10 == 0:\n logging.info('{: >5d}\\t{}'.format(step, loss))\n loss = elbo.loss(model, guide, data, args=args)\n logging.info('final loss = {}'.format(loss))\n\n\nif __name__ == '__main__':\n assert pyro.__version__.startswith('0.3.0')\n parser = argparse.ArgumentParser(description=\"Amortized Latent Dirichlet Allocation\")\n parser.add_argument(\"-t\", \"--num-topics\", default=8, type=int)\n parser.add_argument(\"-w\", \"--num-words\", default=1024, type=int)\n parser.add_argument(\"-d\", \"--num-docs\", default=1000, type=int)\n parser.add_argument(\"-wd\", \"--num-words-per-doc\", default=64, type=int)\n parser.add_argument(\"-n\", \"--num-steps\", default=1000, type=int)\n parser.add_argument(\"-l\", \"--layer-sizes\", default=\"100-100\")\n parser.add_argument(\"-lr\", \"--learning-rate\", default=0.001, type=float)\n parser.add_argument(\"-b\", \"--batch-size\", default=32, type=int)\n parser.add_argument('--jit', action='store_true')\n args = parser.parse_args()\n main(args)\n", "path": "examples/lda.py"}]}
2,518
361
gh_patches_debug_27718
rasdani/github-patches
git_diff
DDMAL__CantusDB-839
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Admin Area: Cannot delete Source that contains chants When trying to delete a source that contains chants on the source admin page, we get a source DoesNotExist error. The source delete function works fine for sources that do not contain any chants, so I believe this has to do with deleting the associated chants after the source has already been deleted. </issue> <code> [start of django/cantusdb_project/main_app/signals.py] 1 import operator 2 from functools import reduce 3 4 from django.contrib.postgres.search import SearchVector 5 from django.db import models 6 from django.db.models import Value 7 from django.db.models.signals import post_save, post_delete 8 from django.dispatch import receiver 9 10 import re 11 12 from main_app.models import Chant 13 from main_app.models import Sequence 14 from main_app.models import Feast 15 16 17 @receiver(post_save, sender=Chant) 18 def on_chant_save(instance, **kwargs): 19 update_source_chant_count(instance) 20 update_source_melody_count(instance) 21 22 update_chant_search_vector(instance) 23 update_volpiano_fields(instance) 24 25 26 @receiver(post_delete, sender=Chant) 27 def on_chant_delete(instance, **kwargs): 28 update_source_chant_count(instance) 29 update_source_melody_count(instance) 30 31 32 @receiver(post_save, sender=Sequence) 33 def on_sequence_save(instance, **kwargs): 34 update_source_chant_count(instance) 35 36 37 @receiver(post_delete, sender=Sequence) 38 def on_sequence_delete(instance, **kwargs): 39 update_source_chant_count(instance) 40 41 42 @receiver(post_save, sender=Feast) 43 def on_feast_save(instance, **kwargs): 44 update_prefix_field(instance) 45 46 47 def update_chant_search_vector(instance): 48 """When saving an instance of Chant, update its search vector field. 
49 50 Called in on_chant_save() 51 """ 52 index_components = instance.index_components() 53 pk = instance.pk 54 search_vectors = [] 55 56 for weight, data in index_components.items(): 57 search_vectors.append( 58 SearchVector(Value(data, output_field=models.TextField()), weight=weight) 59 ) 60 instance.__class__.objects.filter(pk=pk).update( 61 search_vector=reduce(operator.add, search_vectors) 62 ) 63 64 65 def update_source_chant_count(instance): 66 """When saving or deleting a Chant or Sequence, update its Source's number_of_chants field 67 68 Called in on_chant_save(), on_chant_delete(), on_sequence_save() and on_sequence_delete() 69 """ 70 71 source = instance.source 72 if source is not None: 73 source.number_of_chants = source.chant_set.count() + source.sequence_set.count() 74 source.save() 75 76 77 def update_source_melody_count(instance): 78 """When saving or deleting a Chant, update its Source's number_of_melodies field 79 80 Called in on_chant_save() and on_chant_delete() 81 """ 82 source = instance.source 83 if source is not None: 84 source.number_of_melodies = source.chant_set.filter( 85 volpiano__isnull=False 86 ).count() 87 source.save() 88 89 90 def update_volpiano_fields(instance): 91 """When saving a Chant, make sure the chant's volpiano_notes and volpiano_intervals are up-to-date 92 93 Called in on_chant_save() 94 """ 95 96 def generate_volpiano_notes(volpiano): 97 """ 98 Populate the ``volpiano_notes`` field of the ``Chant`` model 99 100 This field is used for melody search 101 102 Args: 103 volpiano (str): The content of ``chant.volpiano`` 104 105 Returns: 106 str: Volpiano str with non-note chars and duplicate consecutive notes removed 107 """ 108 # unwanted_chars are non-note chars, including the clefs, barlines, and accidentals etc. 
109 # the `searchMelody.js` on old cantus makes no reference to the b-flat accidentals ("y", "i", "z") 110 # so put them in unwanted chars for now 111 unwanted_chars = [ 112 "-", 113 "1", 114 "2", 115 "3", 116 "4", 117 "5", 118 "6", 119 "7", 120 "?", 121 ".", 122 " ", 123 "y", 124 "i", 125 "z", 126 ] 127 # convert all charactors to lower-case, upper-case letters stand for liquescent of the same pitch 128 volpiano_lower = volpiano.lower() 129 # `)` stands for the lowest `g` note liquescent in volpiano, its 'lower case' is `9` 130 volpiano_notes = volpiano_lower.replace(")", "9") 131 # remove none-note charactors 132 for unwanted_char in unwanted_chars: 133 volpiano_notes = volpiano_notes.replace(unwanted_char, "") 134 # remove duplicate consecutive chars 135 volpiano_notes = re.sub(r"(.)\1+", r"\1", volpiano_notes) 136 return volpiano_notes 137 138 def generate_volpiano_intervals(volpiano_notes): 139 """ 140 Populate the ``volpiano_intervals`` field of the ``Chant`` model 141 142 This field is used for melody search when searching for transpositions 143 144 Args: 145 volpiano_notes (str): The content of ``chant.volpiano_notes``, 146 populated by the ``generate_volpiano_notes`` function 147 148 Returns: 149 str: A str of digits, recording the intervals between adjacent notes 150 """ 151 # replace '9' (the note G) with the char corresponding to (ASCII(a) - 1), because 'a' denotes the note A 152 volpiano_notes = volpiano_notes.replace("9", chr(ord("a") - 1)) 153 # we model the interval between notes using the difference between the ASCII codes of corresponding letters 154 # the letter for the note B is "j" (106), note A is "h" (104), the letter "i" (105) is skipped 155 # move all notes above A down by one letter 156 volpiano_notes = list(volpiano_notes) 157 for j, note in enumerate(volpiano_notes): 158 if ord(note) >= 106: 159 volpiano_notes[j] = chr(ord(note) - 1) 160 161 # `intervals` records the difference between two adjacent notes. 162 # Note that intervals are encoded by counting the number of scale 163 # steps between adjacent notes: an ascending second is thus encoded 164 # as "1"; a descending third is encoded "-2", and so on. 165 intervals = [] 166 for j in range(1, len(volpiano_notes)): 167 intervals.append(ord(volpiano_notes[j]) - ord(volpiano_notes[j - 1])) 168 # convert `intervals` to str 169 volpiano_intervals = "".join([str(interval) for interval in intervals]) 170 return volpiano_intervals 171 172 if instance.volpiano is None: 173 return 174 175 volpiano_notes = generate_volpiano_notes(instance.volpiano) 176 volpiano_intervals = generate_volpiano_intervals(volpiano_notes) 177 178 Chant.objects.filter(id=instance.id).update( 179 volpiano_notes=volpiano_notes, 180 volpiano_intervals=volpiano_intervals, 181 ) 182 183 184 def update_prefix_field(instance): 185 pk = instance.pk 186 187 if instance.feast_code: 188 prefix = str(instance.feast_code)[0:2] 189 instance.__class__.objects.filter(pk=pk).update(prefix=prefix) 190 else: # feast_code is None, "" 191 instance.__class__.objects.filter(pk=pk).update(prefix="") 192 [end of django/cantusdb_project/main_app/signals.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/django/cantusdb_project/main_app/signals.py b/django/cantusdb_project/main_app/signals.py --- a/django/cantusdb_project/main_app/signals.py +++ b/django/cantusdb_project/main_app/signals.py @@ -12,6 +12,7 @@ from main_app.models import Chant from main_app.models import Sequence from main_app.models import Feast +from main_app.models import Source @receiver(post_save, sender=Chant) @@ -68,7 +69,11 @@ Called in on_chant_save(), on_chant_delete(), on_sequence_save() and on_sequence_delete() """ - source = instance.source + # When a source is deleted (which in turn calls on_chant_delete() on all of its chants) instance.source does not exist + try: + source = instance.source + except Source.DoesNotExist: + source = None if source is not None: source.number_of_chants = source.chant_set.count() + source.sequence_set.count() source.save() @@ -79,7 +84,12 @@ Called in on_chant_save() and on_chant_delete() """ - source = instance.source + + # When a source is deleted (which in turn calls on_chant_delete() on all of its chants) instance.source does not exist + try: + source = instance.source + except Source.DoesNotExist: + source = None if source is not None: source.number_of_melodies = source.chant_set.filter( volpiano__isnull=False
{"golden_diff": "diff --git a/django/cantusdb_project/main_app/signals.py b/django/cantusdb_project/main_app/signals.py\n--- a/django/cantusdb_project/main_app/signals.py\n+++ b/django/cantusdb_project/main_app/signals.py\n@@ -12,6 +12,7 @@\n from main_app.models import Chant\n from main_app.models import Sequence\n from main_app.models import Feast\n+from main_app.models import Source\n \n \n @receiver(post_save, sender=Chant)\n@@ -68,7 +69,11 @@\n Called in on_chant_save(), on_chant_delete(), on_sequence_save() and on_sequence_delete()\n \"\"\"\n \n- source = instance.source\n+ # When a source is deleted (which in turn calls on_chant_delete() on all of its chants) instance.source does not exist\n+ try:\n+ source = instance.source\n+ except Source.DoesNotExist:\n+ source = None\n if source is not None:\n source.number_of_chants = source.chant_set.count() + source.sequence_set.count()\n source.save()\n@@ -79,7 +84,12 @@\n \n Called in on_chant_save() and on_chant_delete()\n \"\"\"\n- source = instance.source\n+\n+ # When a source is deleted (which in turn calls on_chant_delete() on all of its chants) instance.source does not exist\n+ try:\n+ source = instance.source\n+ except Source.DoesNotExist:\n+ source = None\n if source is not None:\n source.number_of_melodies = source.chant_set.filter(\n volpiano__isnull=False\n", "issue": "Admin Area: Cannot delete Source that contains chants\nWhen trying to delete a source that contains chants on the source admin page, we get a source DoesNotExist error. The source delete function works fine for sources that do not contain any chants, so I believe this has to do with deleting the associated chants after the source has already been deleted.\n", "before_files": [{"content": "import operator\nfrom functools import reduce\n\nfrom django.contrib.postgres.search import SearchVector\nfrom django.db import models\nfrom django.db.models import Value\nfrom django.db.models.signals import post_save, post_delete\nfrom django.dispatch import receiver\n\nimport re\n\nfrom main_app.models import Chant\nfrom main_app.models import Sequence\nfrom main_app.models import Feast\n\n\n@receiver(post_save, sender=Chant)\ndef on_chant_save(instance, **kwargs):\n update_source_chant_count(instance)\n update_source_melody_count(instance)\n\n update_chant_search_vector(instance)\n update_volpiano_fields(instance)\n\n\n@receiver(post_delete, sender=Chant)\ndef on_chant_delete(instance, **kwargs):\n update_source_chant_count(instance)\n update_source_melody_count(instance)\n\n\n@receiver(post_save, sender=Sequence)\ndef on_sequence_save(instance, **kwargs):\n update_source_chant_count(instance)\n\n\n@receiver(post_delete, sender=Sequence)\ndef on_sequence_delete(instance, **kwargs):\n update_source_chant_count(instance)\n\n\n@receiver(post_save, sender=Feast)\ndef on_feast_save(instance, **kwargs):\n update_prefix_field(instance)\n\n\ndef update_chant_search_vector(instance):\n \"\"\"When saving an instance of Chant, update its search vector field.\n\n Called in on_chant_save()\n \"\"\"\n index_components = instance.index_components()\n pk = instance.pk\n search_vectors = []\n\n for weight, data in index_components.items():\n search_vectors.append(\n SearchVector(Value(data, output_field=models.TextField()), weight=weight)\n )\n instance.__class__.objects.filter(pk=pk).update(\n search_vector=reduce(operator.add, search_vectors)\n )\n\n\ndef update_source_chant_count(instance):\n \"\"\"When saving or deleting a Chant or Sequence, update its Source's number_of_chants field\n\n 
Called in on_chant_save(), on_chant_delete(), on_sequence_save() and on_sequence_delete()\n \"\"\"\n\n source = instance.source\n if source is not None:\n source.number_of_chants = source.chant_set.count() + source.sequence_set.count()\n source.save()\n\n\ndef update_source_melody_count(instance):\n \"\"\"When saving or deleting a Chant, update its Source's number_of_melodies field\n\n Called in on_chant_save() and on_chant_delete()\n \"\"\"\n source = instance.source\n if source is not None:\n source.number_of_melodies = source.chant_set.filter(\n volpiano__isnull=False\n ).count()\n source.save()\n\n\ndef update_volpiano_fields(instance):\n \"\"\"When saving a Chant, make sure the chant's volpiano_notes and volpiano_intervals are up-to-date\n\n Called in on_chant_save()\n \"\"\"\n\n def generate_volpiano_notes(volpiano):\n \"\"\"\n Populate the ``volpiano_notes`` field of the ``Chant`` model\n\n This field is used for melody search\n\n Args:\n volpiano (str): The content of ``chant.volpiano``\n\n Returns:\n str: Volpiano str with non-note chars and duplicate consecutive notes removed\n \"\"\"\n # unwanted_chars are non-note chars, including the clefs, barlines, and accidentals etc.\n # the `searchMelody.js` on old cantus makes no reference to the b-flat accidentals (\"y\", \"i\", \"z\")\n # so put them in unwanted chars for now\n unwanted_chars = [\n \"-\",\n \"1\",\n \"2\",\n \"3\",\n \"4\",\n \"5\",\n \"6\",\n \"7\",\n \"?\",\n \".\",\n \" \",\n \"y\",\n \"i\",\n \"z\",\n ]\n # convert all charactors to lower-case, upper-case letters stand for liquescent of the same pitch\n volpiano_lower = volpiano.lower()\n # `)` stands for the lowest `g` note liquescent in volpiano, its 'lower case' is `9`\n volpiano_notes = volpiano_lower.replace(\")\", \"9\")\n # remove none-note charactors\n for unwanted_char in unwanted_chars:\n volpiano_notes = volpiano_notes.replace(unwanted_char, \"\")\n # remove duplicate consecutive chars\n volpiano_notes = re.sub(r\"(.)\\1+\", r\"\\1\", volpiano_notes)\n return volpiano_notes\n\n def generate_volpiano_intervals(volpiano_notes):\n \"\"\"\n Populate the ``volpiano_intervals`` field of the ``Chant`` model\n\n This field is used for melody search when searching for transpositions\n\n Args:\n volpiano_notes (str): The content of ``chant.volpiano_notes``,\n populated by the ``generate_volpiano_notes`` function\n\n Returns:\n str: A str of digits, recording the intervals between adjacent notes\n \"\"\"\n # replace '9' (the note G) with the char corresponding to (ASCII(a) - 1), because 'a' denotes the note A\n volpiano_notes = volpiano_notes.replace(\"9\", chr(ord(\"a\") - 1))\n # we model the interval between notes using the difference between the ASCII codes of corresponding letters\n # the letter for the note B is \"j\" (106), note A is \"h\" (104), the letter \"i\" (105) is skipped\n # move all notes above A down by one letter\n volpiano_notes = list(volpiano_notes)\n for j, note in enumerate(volpiano_notes):\n if ord(note) >= 106:\n volpiano_notes[j] = chr(ord(note) - 1)\n\n # `intervals` records the difference between two adjacent notes.\n # Note that intervals are encoded by counting the number of scale\n # steps between adjacent notes: an ascending second is thus encoded\n # as \"1\"; a descending third is encoded \"-2\", and so on.\n intervals = []\n for j in range(1, len(volpiano_notes)):\n intervals.append(ord(volpiano_notes[j]) - ord(volpiano_notes[j - 1]))\n # convert `intervals` to str\n volpiano_intervals = \"\".join([str(interval) for interval in 
intervals])\n return volpiano_intervals\n\n if instance.volpiano is None:\n return\n\n volpiano_notes = generate_volpiano_notes(instance.volpiano)\n volpiano_intervals = generate_volpiano_intervals(volpiano_notes)\n\n Chant.objects.filter(id=instance.id).update(\n volpiano_notes=volpiano_notes,\n volpiano_intervals=volpiano_intervals,\n )\n\n\ndef update_prefix_field(instance):\n pk = instance.pk\n\n if instance.feast_code:\n prefix = str(instance.feast_code)[0:2]\n instance.__class__.objects.filter(pk=pk).update(prefix=prefix)\n else: # feast_code is None, \"\"\n instance.__class__.objects.filter(pk=pk).update(prefix=\"\")\n", "path": "django/cantusdb_project/main_app/signals.py"}]}
2,619
362
gh_patches_debug_16991
rasdani/github-patches
git_diff
pypi__warehouse-3989
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Search reindex task leaves empty index. The 'Search projects' function does not work for me on [https://pypi.org](https://pypi.org). Irrespective of the query, the search does not return any results. (Example: [https://pypi.org/search/?q=numpy](https://pypi.org/search/?q=numpy)) </issue> <code> [start of warehouse/search/tasks.py] 1 # Licensed under the Apache License, Version 2.0 (the "License"); 2 # you may not use this file except in compliance with the License. 3 # You may obtain a copy of the License at 4 # 5 # http://www.apache.org/licenses/LICENSE-2.0 6 # 7 # Unless required by applicable law or agreed to in writing, software 8 # distributed under the License is distributed on an "AS IS" BASIS, 9 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 10 # See the License for the specific language governing permissions and 11 # limitations under the License. 12 13 import binascii 14 import urllib 15 import os 16 17 from elasticsearch.helpers import parallel_bulk 18 from elasticsearch_dsl import serializer 19 from sqlalchemy import and_, func 20 from sqlalchemy.orm import aliased 21 import certifi 22 import elasticsearch 23 24 from warehouse.packaging.models import ( 25 Classifier, Project, Release, release_classifiers) 26 from warehouse.packaging.search import Project as ProjectDocType 27 from warehouse.search.utils import get_index 28 from warehouse import tasks 29 from warehouse.utils.db import windowed_query 30 31 32 def _project_docs(db): 33 34 releases_list = ( 35 db.query(Release.name, Release.version) 36 .order_by( 37 Release.name, 38 Release.is_prerelease.nullslast(), 39 Release._pypi_ordering.desc(), 40 ) 41 .distinct(Release.name) 42 .subquery("release_list") 43 ) 44 45 r = aliased(Release, name="r") 46 47 all_versions = ( 48 db.query(func.array_agg(r.version)) 49 .filter(r.name == Release.name) 50 .correlate(Release) 51 .as_scalar() 52 .label("all_versions") 53 ) 54 55 classifiers = ( 56 db.query(func.array_agg(Classifier.classifier)) 57 .select_from(release_classifiers) 58 .join(Classifier, Classifier.id == release_classifiers.c.trove_id) 59 .filter(Release.name == release_classifiers.c.name) 60 .filter(Release.version == release_classifiers.c.version) 61 .correlate(Release) 62 .as_scalar() 63 .label("classifiers") 64 ) 65 66 release_data = ( 67 db.query( 68 Release.description, 69 Release.name, 70 Release.version.label("latest_version"), 71 all_versions, 72 Release.author, 73 Release.author_email, 74 Release.maintainer, 75 Release.maintainer_email, 76 Release.home_page, 77 Release.summary, 78 Release.keywords, 79 Release.platform, 80 Release.download_url, 81 Release.created, 82 classifiers, 83 Project.normalized_name, 84 Project.name, 85 ) 86 .select_from(releases_list) 87 .join(Release, and_( 88 Release.name == releases_list.c.name, 89 Release.version == releases_list.c.version)) 90 .outerjoin(Release.project) 91 .order_by(Release.name) 92 ) 93 94 for release in windowed_query(release_data, Release.name, 50000): 95 p = ProjectDocType.from_db(release) 96 p.full_clean() 97 yield p.to_dict(include_meta=True) 98 99 100 @tasks.task(ignore_result=True, acks_late=True) 101 def reindex(request): 102 """ 103 Recreate the Search Index. 
104 """ 105 p = urllib.parse.urlparse(request.registry.settings["elasticsearch.url"]) 106 client = elasticsearch.Elasticsearch( 107 [urllib.parse.urlunparse(p[:2] + ("",) * 4)], 108 verify_certs=True, 109 ca_certs=certifi.where(), 110 timeout=30, 111 retry_on_timeout=True, 112 serializer=serializer.serializer, 113 ) 114 number_of_replicas = request.registry.get("elasticsearch.replicas", 0) 115 refresh_interval = request.registry.get("elasticsearch.interval", "1s") 116 117 # We use a randomly named index so that we can do a zero downtime reindex. 118 # Essentially we'll use a randomly named index which we will use until all 119 # of the data has been reindexed, at which point we'll point an alias at 120 # our randomly named index, and then delete the old randomly named index. 121 122 # Create the new index and associate all of our doc types with it. 123 index_base = request.registry["elasticsearch.index"] 124 random_token = binascii.hexlify(os.urandom(5)).decode("ascii") 125 new_index_name = "{}-{}".format(index_base, random_token) 126 doc_types = request.registry.get("search.doc_types", set()) 127 shards = request.registry.get("elasticsearch.shards", 1) 128 129 # Create the new index with zero replicas and index refreshes disabled 130 # while we are bulk indexing. 131 new_index = get_index( 132 new_index_name, 133 doc_types, 134 using=client, 135 shards=shards, 136 replicas=0, 137 interval="-1", 138 ) 139 new_index.create(wait_for_active_shards=shards) 140 141 # From this point on, if any error occurs, we want to be able to delete our 142 # in progress index. 143 try: 144 request.db.execute("SET statement_timeout = '600s'") 145 146 for _ in parallel_bulk(client, _project_docs(request.db)): 147 pass 148 except: # noqa 149 new_index.delete() 150 raise 151 finally: 152 request.db.rollback() 153 request.db.close() 154 155 # Now that we've finished indexing all of our data we can update the 156 # replicas and refresh intervals. 157 client.indices.put_settings( 158 index=new_index_name, 159 body={ 160 "index": { 161 "number_of_replicas": number_of_replicas, 162 "refresh_interval": refresh_interval, 163 } 164 } 165 ) 166 167 # Point the alias at our new randomly named index and delete the old index. 168 if client.indices.exists_alias(name=index_base): 169 to_delete = set() 170 actions = [] 171 for name in client.indices.get_alias(name=index_base): 172 to_delete.add(name) 173 actions.append({"remove": {"index": name, "alias": index_base}}) 174 actions.append({"add": {"index": new_index_name, "alias": index_base}}) 175 client.indices.update_aliases({"actions": actions}) 176 client.indices.delete(",".join(to_delete)) 177 else: 178 client.indices.put_alias(name=index_base, index=new_index_name) 179 [end of warehouse/search/tasks.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/warehouse/search/tasks.py b/warehouse/search/tasks.py --- a/warehouse/search/tasks.py +++ b/warehouse/search/tasks.py @@ -94,7 +94,9 @@ for release in windowed_query(release_data, Release.name, 50000): p = ProjectDocType.from_db(release) p.full_clean() - yield p.to_dict(include_meta=True) + doc = p.to_dict(include_meta=True) + doc.pop('_index', None) + yield doc @tasks.task(ignore_result=True, acks_late=True) @@ -143,7 +145,8 @@ try: request.db.execute("SET statement_timeout = '600s'") - for _ in parallel_bulk(client, _project_docs(request.db)): + for _ in parallel_bulk(client, _project_docs(request.db), + index=new_index_name): pass except: # noqa new_index.delete()
{"golden_diff": "diff --git a/warehouse/search/tasks.py b/warehouse/search/tasks.py\n--- a/warehouse/search/tasks.py\n+++ b/warehouse/search/tasks.py\n@@ -94,7 +94,9 @@\n for release in windowed_query(release_data, Release.name, 50000):\n p = ProjectDocType.from_db(release)\n p.full_clean()\n- yield p.to_dict(include_meta=True)\n+ doc = p.to_dict(include_meta=True)\n+ doc.pop('_index', None)\n+ yield doc\n \n \n @tasks.task(ignore_result=True, acks_late=True)\n@@ -143,7 +145,8 @@\n try:\n request.db.execute(\"SET statement_timeout = '600s'\")\n \n- for _ in parallel_bulk(client, _project_docs(request.db)):\n+ for _ in parallel_bulk(client, _project_docs(request.db),\n+ index=new_index_name):\n pass\n except: # noqa\n new_index.delete()\n", "issue": "Search reindex task leaves empty index.\nThe 'Search projects' function does not work for me on [https://pypi.org](https://pypi.org). Irrespective of the query, the search does not return any results. (Example: [https://pypi.org/search/?q=numpy](https://pypi.org/search/?q=numpy))\n", "before_files": [{"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport binascii\nimport urllib\nimport os\n\nfrom elasticsearch.helpers import parallel_bulk\nfrom elasticsearch_dsl import serializer\nfrom sqlalchemy import and_, func\nfrom sqlalchemy.orm import aliased\nimport certifi\nimport elasticsearch\n\nfrom warehouse.packaging.models import (\n Classifier, Project, Release, release_classifiers)\nfrom warehouse.packaging.search import Project as ProjectDocType\nfrom warehouse.search.utils import get_index\nfrom warehouse import tasks\nfrom warehouse.utils.db import windowed_query\n\n\ndef _project_docs(db):\n\n releases_list = (\n db.query(Release.name, Release.version)\n .order_by(\n Release.name,\n Release.is_prerelease.nullslast(),\n Release._pypi_ordering.desc(),\n )\n .distinct(Release.name)\n .subquery(\"release_list\")\n )\n\n r = aliased(Release, name=\"r\")\n\n all_versions = (\n db.query(func.array_agg(r.version))\n .filter(r.name == Release.name)\n .correlate(Release)\n .as_scalar()\n .label(\"all_versions\")\n )\n\n classifiers = (\n db.query(func.array_agg(Classifier.classifier))\n .select_from(release_classifiers)\n .join(Classifier, Classifier.id == release_classifiers.c.trove_id)\n .filter(Release.name == release_classifiers.c.name)\n .filter(Release.version == release_classifiers.c.version)\n .correlate(Release)\n .as_scalar()\n .label(\"classifiers\")\n )\n\n release_data = (\n db.query(\n Release.description,\n Release.name,\n Release.version.label(\"latest_version\"),\n all_versions,\n Release.author,\n Release.author_email,\n Release.maintainer,\n Release.maintainer_email,\n Release.home_page,\n Release.summary,\n Release.keywords,\n Release.platform,\n Release.download_url,\n Release.created,\n classifiers,\n Project.normalized_name,\n Project.name,\n )\n .select_from(releases_list)\n .join(Release, and_(\n Release.name == releases_list.c.name,\n Release.version == releases_list.c.version))\n .outerjoin(Release.project)\n 
.order_by(Release.name)\n )\n\n for release in windowed_query(release_data, Release.name, 50000):\n p = ProjectDocType.from_db(release)\n p.full_clean()\n yield p.to_dict(include_meta=True)\n\n\[email protected](ignore_result=True, acks_late=True)\ndef reindex(request):\n \"\"\"\n Recreate the Search Index.\n \"\"\"\n p = urllib.parse.urlparse(request.registry.settings[\"elasticsearch.url\"])\n client = elasticsearch.Elasticsearch(\n [urllib.parse.urlunparse(p[:2] + (\"\",) * 4)],\n verify_certs=True,\n ca_certs=certifi.where(),\n timeout=30,\n retry_on_timeout=True,\n serializer=serializer.serializer,\n )\n number_of_replicas = request.registry.get(\"elasticsearch.replicas\", 0)\n refresh_interval = request.registry.get(\"elasticsearch.interval\", \"1s\")\n\n # We use a randomly named index so that we can do a zero downtime reindex.\n # Essentially we'll use a randomly named index which we will use until all\n # of the data has been reindexed, at which point we'll point an alias at\n # our randomly named index, and then delete the old randomly named index.\n\n # Create the new index and associate all of our doc types with it.\n index_base = request.registry[\"elasticsearch.index\"]\n random_token = binascii.hexlify(os.urandom(5)).decode(\"ascii\")\n new_index_name = \"{}-{}\".format(index_base, random_token)\n doc_types = request.registry.get(\"search.doc_types\", set())\n shards = request.registry.get(\"elasticsearch.shards\", 1)\n\n # Create the new index with zero replicas and index refreshes disabled\n # while we are bulk indexing.\n new_index = get_index(\n new_index_name,\n doc_types,\n using=client,\n shards=shards,\n replicas=0,\n interval=\"-1\",\n )\n new_index.create(wait_for_active_shards=shards)\n\n # From this point on, if any error occurs, we want to be able to delete our\n # in progress index.\n try:\n request.db.execute(\"SET statement_timeout = '600s'\")\n\n for _ in parallel_bulk(client, _project_docs(request.db)):\n pass\n except: # noqa\n new_index.delete()\n raise\n finally:\n request.db.rollback()\n request.db.close()\n\n # Now that we've finished indexing all of our data we can update the\n # replicas and refresh intervals.\n client.indices.put_settings(\n index=new_index_name,\n body={\n \"index\": {\n \"number_of_replicas\": number_of_replicas,\n \"refresh_interval\": refresh_interval,\n }\n }\n )\n\n # Point the alias at our new randomly named index and delete the old index.\n if client.indices.exists_alias(name=index_base):\n to_delete = set()\n actions = []\n for name in client.indices.get_alias(name=index_base):\n to_delete.add(name)\n actions.append({\"remove\": {\"index\": name, \"alias\": index_base}})\n actions.append({\"add\": {\"index\": new_index_name, \"alias\": index_base}})\n client.indices.update_aliases({\"actions\": actions})\n client.indices.delete(\",\".join(to_delete))\n else:\n client.indices.put_alias(name=index_base, index=new_index_name)\n", "path": "warehouse/search/tasks.py"}]}
2,344
215
gh_patches_debug_1631
rasdani/github-patches
git_diff
vyperlang__vyper-3340
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Bug: compiler dislikes `x not in [a, b]` in 0.3.8, whereas it was fine in 0.3.7 ### Version Information * vyper Version (output of `vyper --version`): 0.3.8 * OS: osx * Python Version (output of `python --version`): 3.10.4 ### What's your issue about? <img width="705" alt="image" src="https://user-images.githubusercontent.com/11488427/230437774-c3b68030-9319-4169-b344-dbb470002102.png"> </issue> <code> [start of vyper/semantics/environment.py] 1 from typing import Dict 2 3 from vyper.semantics.analysis.base import VarInfo 4 from vyper.semantics.types import AddressT, BytesT, VyperType 5 from vyper.semantics.types.shortcuts import BYTES32_T, UINT256_T 6 7 8 # common properties for environment variables 9 class _EnvType(VyperType): 10 def __eq__(self, other): 11 return self is other 12 13 def __hash__(self): 14 return hash(id(self)) 15 16 17 class _Block(_EnvType): 18 _id = "block" 19 _type_members = { 20 "coinbase": AddressT(), 21 "difficulty": UINT256_T, 22 "prevrandao": UINT256_T, 23 "number": UINT256_T, 24 "gaslimit": UINT256_T, 25 "basefee": UINT256_T, 26 "prevhash": BYTES32_T, 27 "timestamp": UINT256_T, 28 } 29 30 31 class _Chain(_EnvType): 32 _id = "chain" 33 _type_members = {"id": UINT256_T} 34 35 36 class _Msg(_EnvType): 37 _id = "msg" 38 _type_members = {"data": BytesT(), "gas": UINT256_T, "sender": AddressT(), "value": UINT256_T} 39 40 41 class _Tx(_EnvType): 42 _id = "tx" 43 _type_members = {"origin": AddressT(), "gasprice": UINT256_T} 44 45 46 CONSTANT_ENVIRONMENT_VARS = {t._id: t for t in (_Block(), _Chain(), _Tx(), _Msg())} 47 48 49 def get_constant_vars() -> Dict: 50 """ 51 Get a dictionary of constant environment variables. 52 """ 53 result = {} 54 for k, v in CONSTANT_ENVIRONMENT_VARS.items(): 55 result[k] = VarInfo(v, is_constant=True) 56 57 return result 58 59 60 # Not sure this is necessary, but add an ad-hoc type for `self` for clarity 61 class _SelfT(AddressT): 62 pass 63 64 65 MUTABLE_ENVIRONMENT_VARS: Dict[str, type] = {"self": _SelfT} 66 67 68 def get_mutable_vars() -> Dict: 69 """ 70 Get a dictionary of mutable environment variables (those that are 71 modified during the course of contract execution, such as `self`). 72 """ 73 return {name: VarInfo(type_()) for name, type_ in MUTABLE_ENVIRONMENT_VARS.items()} 74 [end of vyper/semantics/environment.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/vyper/semantics/environment.py b/vyper/semantics/environment.py --- a/vyper/semantics/environment.py +++ b/vyper/semantics/environment.py @@ -57,12 +57,7 @@ return result -# Not sure this is necessary, but add an ad-hoc type for `self` for clarity -class _SelfT(AddressT): - pass - - -MUTABLE_ENVIRONMENT_VARS: Dict[str, type] = {"self": _SelfT} +MUTABLE_ENVIRONMENT_VARS: Dict[str, type] = {"self": AddressT} def get_mutable_vars() -> Dict:
{"golden_diff": "diff --git a/vyper/semantics/environment.py b/vyper/semantics/environment.py\n--- a/vyper/semantics/environment.py\n+++ b/vyper/semantics/environment.py\n@@ -57,12 +57,7 @@\n return result\n \n \n-# Not sure this is necessary, but add an ad-hoc type for `self` for clarity\n-class _SelfT(AddressT):\n- pass\n-\n-\n-MUTABLE_ENVIRONMENT_VARS: Dict[str, type] = {\"self\": _SelfT}\n+MUTABLE_ENVIRONMENT_VARS: Dict[str, type] = {\"self\": AddressT}\n \n \n def get_mutable_vars() -> Dict:\n", "issue": "Bug: compiler dislikes `x not in [a, b]` in 0.3.8, whereas it was fine in 0.3.7\n### Version Information\r\n\r\n* vyper Version (output of `vyper --version`): 0.3.8\r\n* OS: osx\r\n* Python Version (output of `python --version`): 3.10.4\r\n\r\n### What's your issue about?\r\n\r\n<img width=\"705\" alt=\"image\" src=\"https://user-images.githubusercontent.com/11488427/230437774-c3b68030-9319-4169-b344-dbb470002102.png\">\r\n\r\n\n", "before_files": [{"content": "from typing import Dict\n\nfrom vyper.semantics.analysis.base import VarInfo\nfrom vyper.semantics.types import AddressT, BytesT, VyperType\nfrom vyper.semantics.types.shortcuts import BYTES32_T, UINT256_T\n\n\n# common properties for environment variables\nclass _EnvType(VyperType):\n def __eq__(self, other):\n return self is other\n\n def __hash__(self):\n return hash(id(self))\n\n\nclass _Block(_EnvType):\n _id = \"block\"\n _type_members = {\n \"coinbase\": AddressT(),\n \"difficulty\": UINT256_T,\n \"prevrandao\": UINT256_T,\n \"number\": UINT256_T,\n \"gaslimit\": UINT256_T,\n \"basefee\": UINT256_T,\n \"prevhash\": BYTES32_T,\n \"timestamp\": UINT256_T,\n }\n\n\nclass _Chain(_EnvType):\n _id = \"chain\"\n _type_members = {\"id\": UINT256_T}\n\n\nclass _Msg(_EnvType):\n _id = \"msg\"\n _type_members = {\"data\": BytesT(), \"gas\": UINT256_T, \"sender\": AddressT(), \"value\": UINT256_T}\n\n\nclass _Tx(_EnvType):\n _id = \"tx\"\n _type_members = {\"origin\": AddressT(), \"gasprice\": UINT256_T}\n\n\nCONSTANT_ENVIRONMENT_VARS = {t._id: t for t in (_Block(), _Chain(), _Tx(), _Msg())}\n\n\ndef get_constant_vars() -> Dict:\n \"\"\"\n Get a dictionary of constant environment variables.\n \"\"\"\n result = {}\n for k, v in CONSTANT_ENVIRONMENT_VARS.items():\n result[k] = VarInfo(v, is_constant=True)\n\n return result\n\n\n# Not sure this is necessary, but add an ad-hoc type for `self` for clarity\nclass _SelfT(AddressT):\n pass\n\n\nMUTABLE_ENVIRONMENT_VARS: Dict[str, type] = {\"self\": _SelfT}\n\n\ndef get_mutable_vars() -> Dict:\n \"\"\"\n Get a dictionary of mutable environment variables (those that are\n modified during the course of contract execution, such as `self`).\n \"\"\"\n return {name: VarInfo(type_()) for name, type_ in MUTABLE_ENVIRONMENT_VARS.items()}\n", "path": "vyper/semantics/environment.py"}]}
1,377
143
gh_patches_debug_41243
rasdani/github-patches
git_diff
biopython__biopython-3285
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Version switching on the Sphinx API docs We now have: - https://biopython.org/docs/1.74/api/ - https://biopython.org/docs/1.75/api/ - https://biopython.org/docs/1.76/api/ - https://biopython.org/docs/1.77/api/ - ... - https://biopython.org/docs/dev/api/ The webpage source for these exists here: https://github.com/biopython/docs The Sphinx configuration to build them is here: https://github.com/biopython/biopython/tree/master/Doc/api Can we add some JavaScript or otherwise like on ReadTheDocs to make it easy to toggle between version from within the browse? This likely means some changes to our Sphinx configuration (e.g. the common templates), and therefore would require backporting to regenerate the old pages - which would be OK. This could also resolve #2904. </issue> <code> [start of Doc/api/conf.py] 1 #!/usr/bin/env python3 2 """Biopython Sphinx documentation build configuration file. 3 4 After generating ``*.rst`` files from the source code, this 5 file controls running ``sphinx-build`` to turn these into 6 human readable documentation. 7 """ 8 9 import os 10 import shutil 11 import sys 12 import tempfile 13 14 from sphinx.ext import autodoc 15 16 from Bio import __version__, Application 17 18 # -- General configuration ------------------------------------------------ 19 20 # If your documentation needs a minimal Sphinx version, state it here. 21 # 22 # needs_sphinx = '1.0' 23 needs_sphinx = "1.8" 24 25 # Add any Sphinx extension module names here, as strings. They can be 26 # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom 27 # ones. 28 extensions = [ 29 "sphinx.ext.autodoc", 30 "sphinx.ext.todo", 31 # Don't want to include source code in the API docs 32 # 'sphinx.ext.viewcode', 33 "sphinx.ext.autosummary", 34 "numpydoc", 35 ] 36 37 # Add any paths that contain templates here, relative to this directory. 38 templates_path = ["_templates"] 39 40 # The suffix(es) of source filenames. 41 # You can specify multiple suffix as a list of string: 42 # 43 # source_suffix = ['.rst', '.md'] 44 source_suffix = ".rst" 45 46 # The master toctree document. 47 master_doc = "index" 48 49 # General information about the project. 50 project = "Biopython" 51 copyright = "1999-2020, The Biopython Contributors" 52 author = "The Biopython Contributors" 53 document = "Biopython API Documentation" 54 55 # The version info for the project you're documenting, acts as replacement for 56 # |version| and |release|, also used in various other places throughout the 57 # built documents. 58 # 59 # The short X.Y version. 60 version = __version__ # TODO: Shorten this 61 # The full version, including alpha/beta/rc tags. 62 release = __version__ 63 64 # The language for content autogenerated by Sphinx. Refer to documentation 65 # for a list of supported languages. 66 # 67 # This is also used if you do content translation via gettext catalogs. 68 # Usually you set "language" from the command line for these cases. 69 language = "en" 70 71 # List of patterns, relative to source directory, that match files and 72 # directories to ignore when looking for source files. 73 # This patterns also effect to html_static_path and html_extra_path 74 exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"] 75 76 # The name of the Pygments (syntax highlighting) style to use. 77 pygments_style = "sphinx" 78 79 # If true, `todo` and `todoList` produce output, else they produce nothing. 
80 todo_include_todos = True 81 82 # -- Options for autodoc -------------------------------------------------- 83 84 # This requires Sphinx 1.8 or later: 85 autodoc_default_options = { 86 "members": None, 87 "undoc-members": None, 88 "special-members": None, 89 "show-inheritance": None, 90 "member-order": "bysource", 91 "exclude-members": "__dict__,__weakref__,__module__", 92 } 93 94 # To avoid import errors. 95 autodoc_mock_imports = ["MySQLdb", "Bio.Restriction.Restriction", "Bio.Alphabet"] 96 97 # -- Options for HTML output ---------------------------------------------- 98 99 # Sphinx default was html_theme = "alabaster" 100 html_theme = "sphinx_rtd_theme" 101 102 # Sphinx Read The Docs theme settings, see 103 # https://sphinx-rtd-theme.readthedocs.io/en/latest/configuring.html 104 html_theme_options = { 105 "prev_next_buttons_location": "both", 106 # Same a Hyde theme sidebar on biopython.org: 107 "style_nav_header_background": "#10100F", 108 # Since we have the Biopython logo via html_logo, 109 "logo_only": True, 110 } 111 112 # Based on: 113 # https://github.com/readthedocs/sphinx_rtd_theme/issues/231#issuecomment-126447493 114 html_context = { 115 "display_github": True, # Add 'Edit on Github' link instead of 'View page source' 116 "github_user": "biopython", 117 "github_repo": "biopython", 118 "github_version": "master", 119 "conf_py_path": "/Doc/api/", 120 # "source_suffix": source_suffix, 121 } 122 123 html_logo = "../images/biopython_logo.svg" 124 125 # The RST source is transient, don't need/want to include it 126 html_show_sourcelink = False 127 html_copy_source = False 128 129 # Add any paths that contain custom static files (such as style sheets) here, 130 # relative to this directory. They are copied after the builtin static files, 131 # so a file named "default.css" will overwrite the builtin "default.css". 132 html_static_path = ["_static"] 133 134 # Custom sidebar templates, must be a dictionary that maps document names 135 # to template names. 136 # 137 # This is required for the alabaster theme 138 # refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars 139 html_sidebars = { 140 "**": [ 141 "about.html", 142 "navigation.html", 143 "relations.html", # needs 'show_related': True theme option to display 144 "searchbox.html", 145 "donate.html", 146 ] 147 } 148 149 150 # -- Options for HTMLHelp output ------------------------------------------ 151 152 # Output file base name for HTML help builder. 153 htmlhelp_basename = "Biopython_doc" 154 155 156 # -- Options for LaTeX output --------------------------------------------- 157 158 latex_elements = { 159 # The paper size ('letterpaper' or 'a4paper'). 160 # 161 # 'papersize': 'letterpaper', 162 # The font size ('10pt', '11pt' or '12pt'). 163 # 164 # 'pointsize': '10pt', 165 # Additional stuff for the LaTeX preamble. 166 # 167 # 'preamble': '', 168 # Latex figure (float) alignment 169 # 170 # 'figure_align': 'htbp', 171 } 172 173 # Grouping the document tree into LaTeX files. List of tuples 174 # (source start file, target name, title, 175 # author, documentclass [howto, manual, or own class]). 176 latex_documents = [(master_doc, "Biopython_API.tex", document, author, "manual")] 177 178 179 # -- Options for manual page output --------------------------------------- 180 181 # One entry per manual page. List of tuples 182 # (source start file, name, description, authors, manual section). 
183 man_pages = [(master_doc, "biopython", document, [author], 1)] 184 185 186 # -- Options for Texinfo output ------------------------------------------- 187 188 # Grouping the document tree into Texinfo files. List of tuples 189 # (source start file, target name, title, author, 190 # dir menu entry, description, category) 191 texinfo_documents = [ 192 ( 193 master_doc, 194 "Biopython", 195 document, 196 author, 197 "Biopython", 198 "Collection of modules for dealing with biological data in Python.", 199 "Miscellaneous", 200 ) 201 ] 202 203 204 # -- Options for Epub output ---------------------------------------------- 205 206 # Bibliographic Dublin Core info. 207 epub_title = document # project 208 epub_author = author 209 epub_publisher = author 210 epub_copyright = copyright 211 212 # The unique identifier of the text. This can be a ISBN number 213 # or the project homepage. 214 # 215 # epub_identifier = '' 216 217 # A unique identification for the text. 218 # 219 # epub_uid = '' 220 221 # A list of files that should not be packed into the epub file. 222 epub_exclude_files = ["search.html"] 223 224 # -- Options for numpydoc ------------------------------------------------- 225 226 numpydoc_class_members_toctree = False 227 # Prevents the attributes and methods from being shown twice 228 numpydoc_show_class_members = False 229 230 # -- Magic to run sphinx-apidoc automatically ----------------------------- 231 232 # See https://github.com/rtfd/readthedocs.org/issues/1139 233 # on which this is based. 234 235 236 def insert_github_link(filename): 237 """Insert file specific :github_url: metadata for theme breadcrumbs.""" 238 assert "/" not in filename and filename.endswith(".rst") 239 with open(filename) as handle: 240 text = handle.read() 241 if ":github_url:" in text: 242 return 243 244 python = filename[:-4].replace(".", "/") + "/__init__.py" 245 if not os.path.isfile(os.path.join("../../", python)): 246 python = filename[:-4].replace(".", "/") + ".py" 247 if not os.path.isfile(os.path.join("../../", python)): 248 sys.stderr.write( 249 "WARNING: Could not map %s to a Python file, e.g. 
%s\n" % (filename, python) 250 ) 251 return 252 253 text = ":github_url: https://github.com/%s/%s/blob/%s/%s\n\n%s" % ( 254 html_context["github_user"], 255 html_context["github_repo"], 256 html_context["github_version"], 257 python, 258 text, 259 ) 260 with open(filename, "w") as handle: 261 handle.write(text) 262 263 264 def run_apidoc(_): 265 """Call sphinx-apidoc on Bio and BioSQL modules.""" 266 from sphinx.ext.apidoc import main as apidoc_main 267 268 cur_dir = os.path.abspath(os.path.dirname(__file__)) 269 # Can't see a better way than running apidoc twice, for Bio & BioSQL 270 # We don't care about the index.rst / conf.py (we have our own) 271 # or the Makefile / make.bat (effectively same) clashing, 272 # $ sphinx-apidoc -e -F -o /tmp/api/BioSQL BioSQL 273 # $ sphinx-apidoc -e -F -o /tmp/api/Bio Bio 274 tmp_path = tempfile.mkdtemp() 275 apidoc_main(["-e", "-F", "-o", tmp_path, "../../BioSQL"]) 276 apidoc_main(["-e", "-F", "-o", tmp_path, "../../Bio"]) 277 os.remove(os.path.join(tmp_path, "index.rst")) # Using our own 278 for filename in os.listdir(tmp_path): 279 if filename.endswith(".rst"): 280 shutil.move( 281 os.path.join(tmp_path, filename), os.path.join(cur_dir, filename) 282 ) 283 shutil.rmtree(tmp_path) 284 285 for f in os.listdir(cur_dir): 286 if f.startswith("Bio") and f.endswith(".rst"): 287 insert_github_link(f) 288 289 290 class BioPythonAPI(autodoc.ClassDocumenter): 291 """Custom Class Documenter for AbstractCommandline classes.""" 292 293 def import_object(self): 294 """Import the class.""" 295 ret = super().import_object() 296 297 if not issubclass(self.object, Application.AbstractCommandline): 298 return ret 299 300 try: 301 # If the object is an AbstractCommandline we instantiate it. 302 self.object() 303 except TypeError: 304 # Throws if the object is the base AbstractCommandline class 305 pass 306 return ret 307 308 309 def setup(app): 310 """Over-ride Sphinx setup to trigger sphinx-apidoc.""" 311 app.connect("builder-inited", run_apidoc) 312 313 def add_documenter(app, env, docnames): 314 app.add_autodocumenter(BioPythonAPI, True) 315 316 # Over-ride autodoc documenter 317 app.connect("env-before-read-docs", add_documenter) 318 [end of Doc/api/conf.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/Doc/api/conf.py b/Doc/api/conf.py --- a/Doc/api/conf.py +++ b/Doc/api/conf.py @@ -61,6 +61,25 @@ # The full version, including alpha/beta/rc tags. release = __version__ +# Versions for versions.html: +# (this will break if we have version gaps) +try: + main_version, minor_version, _ = version.split(".") # e.g. 1.79.dev0 + dev_version = True +except ValueError: + main_version, minor_version = version.split(".") # e.g. 1.78 + dev_version = False +prev_minor_version = int(minor_version) - (2 if dev_version else 1) +previous_version = f"{main_version}.{prev_minor_version}" +versions = [ + ("Previous", f"../../{previous_version}/api/"), + ("Latest", "../../latest/api/"), + ("Develop", "../../dev/api/"), +] + +if version < "1.75": # 1.74 is the earliest Sphinx-generated api documentation + del versions[0] + # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. # @@ -92,7 +111,9 @@ } # To avoid import errors. -autodoc_mock_imports = ["MySQLdb", "Bio.Restriction.Restriction", "Bio.Alphabet"] +autodoc_mock_imports = ["MySQLdb", "Bio.Restriction.Restriction"] +if version > "1.77": + autodoc_mock_imports.append("Bio.Alphabet") # -- Options for HTML output ---------------------------------------------- @@ -118,9 +139,16 @@ "github_version": "master", "conf_py_path": "/Doc/api/", # "source_suffix": source_suffix, + "theme_display_version": False, + # Biopython-specific values for version-footer (versions.html): + "display_version_footer": True, + "current_version": version, + "versions": versions, + "project_home_url": "https://biopython.org", + "project_github_url": "https://github.com/biopython/biopython", } -html_logo = "../images/biopython_logo.svg" +html_logo = "../images/biopython_logo_white.png" # The RST source is transient, don't need/want to include it html_show_sourcelink = False @@ -131,20 +159,21 @@ # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ["_static"] +# The following is not applicable to the Read-the-docs theme: # Custom sidebar templates, must be a dictionary that maps document names # to template names. # # This is required for the alabaster theme # refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars -html_sidebars = { - "**": [ - "about.html", - "navigation.html", - "relations.html", # needs 'show_related': True theme option to display - "searchbox.html", - "donate.html", - ] -} +# html_sidebars = { +# "**": [ +# "about.html", +# "navigation.html", +# "relations.html", # needs 'show_related': True theme option to display +# "searchbox.html", +# "donate.html", +# ] +# } # -- Options for HTMLHelp output ------------------------------------------ @@ -310,6 +339,8 @@ """Over-ride Sphinx setup to trigger sphinx-apidoc.""" app.connect("builder-inited", run_apidoc) + app.add_css_file("biopython.css") + def add_documenter(app, env, docnames): app.add_autodocumenter(BioPythonAPI, True)
{"golden_diff": "diff --git a/Doc/api/conf.py b/Doc/api/conf.py\n--- a/Doc/api/conf.py\n+++ b/Doc/api/conf.py\n@@ -61,6 +61,25 @@\n # The full version, including alpha/beta/rc tags.\n release = __version__\n \n+# Versions for versions.html:\n+# (this will break if we have version gaps)\n+try:\n+ main_version, minor_version, _ = version.split(\".\") # e.g. 1.79.dev0\n+ dev_version = True\n+except ValueError:\n+ main_version, minor_version = version.split(\".\") # e.g. 1.78\n+ dev_version = False\n+prev_minor_version = int(minor_version) - (2 if dev_version else 1)\n+previous_version = f\"{main_version}.{prev_minor_version}\"\n+versions = [\n+ (\"Previous\", f\"../../{previous_version}/api/\"),\n+ (\"Latest\", \"../../latest/api/\"),\n+ (\"Develop\", \"../../dev/api/\"),\n+]\n+\n+if version < \"1.75\": # 1.74 is the earliest Sphinx-generated api documentation\n+ del versions[0]\n+\n # The language for content autogenerated by Sphinx. Refer to documentation\n # for a list of supported languages.\n #\n@@ -92,7 +111,9 @@\n }\n \n # To avoid import errors.\n-autodoc_mock_imports = [\"MySQLdb\", \"Bio.Restriction.Restriction\", \"Bio.Alphabet\"]\n+autodoc_mock_imports = [\"MySQLdb\", \"Bio.Restriction.Restriction\"]\n+if version > \"1.77\":\n+ autodoc_mock_imports.append(\"Bio.Alphabet\")\n \n # -- Options for HTML output ----------------------------------------------\n \n@@ -118,9 +139,16 @@\n \"github_version\": \"master\",\n \"conf_py_path\": \"/Doc/api/\",\n # \"source_suffix\": source_suffix,\n+ \"theme_display_version\": False,\n+ # Biopython-specific values for version-footer (versions.html):\n+ \"display_version_footer\": True,\n+ \"current_version\": version,\n+ \"versions\": versions,\n+ \"project_home_url\": \"https://biopython.org\",\n+ \"project_github_url\": \"https://github.com/biopython/biopython\",\n }\n \n-html_logo = \"../images/biopython_logo.svg\"\n+html_logo = \"../images/biopython_logo_white.png\"\n \n # The RST source is transient, don't need/want to include it\n html_show_sourcelink = False\n@@ -131,20 +159,21 @@\n # so a file named \"default.css\" will overwrite the builtin \"default.css\".\n html_static_path = [\"_static\"]\n \n+# The following is not applicable to the Read-the-docs theme:\n # Custom sidebar templates, must be a dictionary that maps document names\n # to template names.\n #\n # This is required for the alabaster theme\n # refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars\n-html_sidebars = {\n- \"**\": [\n- \"about.html\",\n- \"navigation.html\",\n- \"relations.html\", # needs 'show_related': True theme option to display\n- \"searchbox.html\",\n- \"donate.html\",\n- ]\n-}\n+# html_sidebars = {\n+# \"**\": [\n+# \"about.html\",\n+# \"navigation.html\",\n+# \"relations.html\", # needs 'show_related': True theme option to display\n+# \"searchbox.html\",\n+# \"donate.html\",\n+# ]\n+# }\n \n \n # -- Options for HTMLHelp output ------------------------------------------\n@@ -310,6 +339,8 @@\n \"\"\"Over-ride Sphinx setup to trigger sphinx-apidoc.\"\"\"\n app.connect(\"builder-inited\", run_apidoc)\n \n+ app.add_css_file(\"biopython.css\")\n+\n def add_documenter(app, env, docnames):\n app.add_autodocumenter(BioPythonAPI, True)\n", "issue": "Version switching on the Sphinx API docs\nWe now have:\r\n\r\n- https://biopython.org/docs/1.74/api/\r\n- https://biopython.org/docs/1.75/api/\r\n- https://biopython.org/docs/1.76/api/\r\n- https://biopython.org/docs/1.77/api/\r\n- ...\r\n- https://biopython.org/docs/dev/api/\r\n\r\nThe webpage 
source for these exists here:\r\n\r\nhttps://github.com/biopython/docs\r\n\r\nThe Sphinx configuration to build them is here:\r\n\r\nhttps://github.com/biopython/biopython/tree/master/Doc/api\r\n\r\nCan we add some JavaScript or otherwise like on ReadTheDocs to make it easy to toggle between version from within the browse?\r\n\r\nThis likely means some changes to our Sphinx configuration (e.g. the common templates), and therefore would require backporting to regenerate the old pages - which would be OK.\r\n\r\nThis could also resolve #2904.\n", "before_files": [{"content": "#!/usr/bin/env python3\n\"\"\"Biopython Sphinx documentation build configuration file.\n\nAfter generating ``*.rst`` files from the source code, this\nfile controls running ``sphinx-build`` to turn these into\nhuman readable documentation.\n\"\"\"\n\nimport os\nimport shutil\nimport sys\nimport tempfile\n\nfrom sphinx.ext import autodoc\n\nfrom Bio import __version__, Application\n\n# -- General configuration ------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n#\n# needs_sphinx = '1.0'\nneeds_sphinx = \"1.8\"\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n \"sphinx.ext.autodoc\",\n \"sphinx.ext.todo\",\n # Don't want to include source code in the API docs\n # 'sphinx.ext.viewcode',\n \"sphinx.ext.autosummary\",\n \"numpydoc\",\n]\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = [\"_templates\"]\n\n# The suffix(es) of source filenames.\n# You can specify multiple suffix as a list of string:\n#\n# source_suffix = ['.rst', '.md']\nsource_suffix = \".rst\"\n\n# The master toctree document.\nmaster_doc = \"index\"\n\n# General information about the project.\nproject = \"Biopython\"\ncopyright = \"1999-2020, The Biopython Contributors\"\nauthor = \"The Biopython Contributors\"\ndocument = \"Biopython API Documentation\"\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n#\n# The short X.Y version.\nversion = __version__ # TODO: Shorten this\n# The full version, including alpha/beta/rc tags.\nrelease = __version__\n\n# The language for content autogenerated by Sphinx. 
Refer to documentation\n# for a list of supported languages.\n#\n# This is also used if you do content translation via gettext catalogs.\n# Usually you set \"language\" from the command line for these cases.\nlanguage = \"en\"\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This patterns also effect to html_static_path and html_extra_path\nexclude_patterns = [\"_build\", \"Thumbs.db\", \".DS_Store\"]\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = \"sphinx\"\n\n# If true, `todo` and `todoList` produce output, else they produce nothing.\ntodo_include_todos = True\n\n# -- Options for autodoc --------------------------------------------------\n\n# This requires Sphinx 1.8 or later:\nautodoc_default_options = {\n \"members\": None,\n \"undoc-members\": None,\n \"special-members\": None,\n \"show-inheritance\": None,\n \"member-order\": \"bysource\",\n \"exclude-members\": \"__dict__,__weakref__,__module__\",\n}\n\n# To avoid import errors.\nautodoc_mock_imports = [\"MySQLdb\", \"Bio.Restriction.Restriction\", \"Bio.Alphabet\"]\n\n# -- Options for HTML output ----------------------------------------------\n\n# Sphinx default was html_theme = \"alabaster\"\nhtml_theme = \"sphinx_rtd_theme\"\n\n# Sphinx Read The Docs theme settings, see\n# https://sphinx-rtd-theme.readthedocs.io/en/latest/configuring.html\nhtml_theme_options = {\n \"prev_next_buttons_location\": \"both\",\n # Same a Hyde theme sidebar on biopython.org:\n \"style_nav_header_background\": \"#10100F\",\n # Since we have the Biopython logo via html_logo,\n \"logo_only\": True,\n}\n\n# Based on:\n# https://github.com/readthedocs/sphinx_rtd_theme/issues/231#issuecomment-126447493\nhtml_context = {\n \"display_github\": True, # Add 'Edit on Github' link instead of 'View page source'\n \"github_user\": \"biopython\",\n \"github_repo\": \"biopython\",\n \"github_version\": \"master\",\n \"conf_py_path\": \"/Doc/api/\",\n # \"source_suffix\": source_suffix,\n}\n\nhtml_logo = \"../images/biopython_logo.svg\"\n\n# The RST source is transient, don't need/want to include it\nhtml_show_sourcelink = False\nhtml_copy_source = False\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = [\"_static\"]\n\n# Custom sidebar templates, must be a dictionary that maps document names\n# to template names.\n#\n# This is required for the alabaster theme\n# refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars\nhtml_sidebars = {\n \"**\": [\n \"about.html\",\n \"navigation.html\",\n \"relations.html\", # needs 'show_related': True theme option to display\n \"searchbox.html\",\n \"donate.html\",\n ]\n}\n\n\n# -- Options for HTMLHelp output ------------------------------------------\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = \"Biopython_doc\"\n\n\n# -- Options for LaTeX output ---------------------------------------------\n\nlatex_elements = {\n # The paper size ('letterpaper' or 'a4paper').\n #\n # 'papersize': 'letterpaper',\n # The font size ('10pt', '11pt' or '12pt').\n #\n # 'pointsize': '10pt',\n # Additional stuff for the LaTeX preamble.\n #\n # 'preamble': '',\n # Latex figure (float) alignment\n #\n # 'figure_align': 'htbp',\n}\n\n# Grouping the document tree into LaTeX files. 
List of tuples\n# (source start file, target name, title,\n# author, documentclass [howto, manual, or own class]).\nlatex_documents = [(master_doc, \"Biopython_API.tex\", document, author, \"manual\")]\n\n\n# -- Options for manual page output ---------------------------------------\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [(master_doc, \"biopython\", document, [author], 1)]\n\n\n# -- Options for Texinfo output -------------------------------------------\n\n# Grouping the document tree into Texinfo files. List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [\n (\n master_doc,\n \"Biopython\",\n document,\n author,\n \"Biopython\",\n \"Collection of modules for dealing with biological data in Python.\",\n \"Miscellaneous\",\n )\n]\n\n\n# -- Options for Epub output ----------------------------------------------\n\n# Bibliographic Dublin Core info.\nepub_title = document # project\nepub_author = author\nepub_publisher = author\nepub_copyright = copyright\n\n# The unique identifier of the text. This can be a ISBN number\n# or the project homepage.\n#\n# epub_identifier = ''\n\n# A unique identification for the text.\n#\n# epub_uid = ''\n\n# A list of files that should not be packed into the epub file.\nepub_exclude_files = [\"search.html\"]\n\n# -- Options for numpydoc -------------------------------------------------\n\nnumpydoc_class_members_toctree = False\n# Prevents the attributes and methods from being shown twice\nnumpydoc_show_class_members = False\n\n# -- Magic to run sphinx-apidoc automatically -----------------------------\n\n# See https://github.com/rtfd/readthedocs.org/issues/1139\n# on which this is based.\n\n\ndef insert_github_link(filename):\n \"\"\"Insert file specific :github_url: metadata for theme breadcrumbs.\"\"\"\n assert \"/\" not in filename and filename.endswith(\".rst\")\n with open(filename) as handle:\n text = handle.read()\n if \":github_url:\" in text:\n return\n\n python = filename[:-4].replace(\".\", \"/\") + \"/__init__.py\"\n if not os.path.isfile(os.path.join(\"../../\", python)):\n python = filename[:-4].replace(\".\", \"/\") + \".py\"\n if not os.path.isfile(os.path.join(\"../../\", python)):\n sys.stderr.write(\n \"WARNING: Could not map %s to a Python file, e.g. 
%s\\n\" % (filename, python)\n )\n return\n\n text = \":github_url: https://github.com/%s/%s/blob/%s/%s\\n\\n%s\" % (\n html_context[\"github_user\"],\n html_context[\"github_repo\"],\n html_context[\"github_version\"],\n python,\n text,\n )\n with open(filename, \"w\") as handle:\n handle.write(text)\n\n\ndef run_apidoc(_):\n \"\"\"Call sphinx-apidoc on Bio and BioSQL modules.\"\"\"\n from sphinx.ext.apidoc import main as apidoc_main\n\n cur_dir = os.path.abspath(os.path.dirname(__file__))\n # Can't see a better way than running apidoc twice, for Bio & BioSQL\n # We don't care about the index.rst / conf.py (we have our own)\n # or the Makefile / make.bat (effectively same) clashing,\n # $ sphinx-apidoc -e -F -o /tmp/api/BioSQL BioSQL\n # $ sphinx-apidoc -e -F -o /tmp/api/Bio Bio\n tmp_path = tempfile.mkdtemp()\n apidoc_main([\"-e\", \"-F\", \"-o\", tmp_path, \"../../BioSQL\"])\n apidoc_main([\"-e\", \"-F\", \"-o\", tmp_path, \"../../Bio\"])\n os.remove(os.path.join(tmp_path, \"index.rst\")) # Using our own\n for filename in os.listdir(tmp_path):\n if filename.endswith(\".rst\"):\n shutil.move(\n os.path.join(tmp_path, filename), os.path.join(cur_dir, filename)\n )\n shutil.rmtree(tmp_path)\n\n for f in os.listdir(cur_dir):\n if f.startswith(\"Bio\") and f.endswith(\".rst\"):\n insert_github_link(f)\n\n\nclass BioPythonAPI(autodoc.ClassDocumenter):\n \"\"\"Custom Class Documenter for AbstractCommandline classes.\"\"\"\n\n def import_object(self):\n \"\"\"Import the class.\"\"\"\n ret = super().import_object()\n\n if not issubclass(self.object, Application.AbstractCommandline):\n return ret\n\n try:\n # If the object is an AbstractCommandline we instantiate it.\n self.object()\n except TypeError:\n # Throws if the object is the base AbstractCommandline class\n pass\n return ret\n\n\ndef setup(app):\n \"\"\"Over-ride Sphinx setup to trigger sphinx-apidoc.\"\"\"\n app.connect(\"builder-inited\", run_apidoc)\n\n def add_documenter(app, env, docnames):\n app.add_autodocumenter(BioPythonAPI, True)\n\n # Over-ride autodoc documenter\n app.connect(\"env-before-read-docs\", add_documenter)\n", "path": "Doc/api/conf.py"}]}
4,025
879
gh_patches_debug_12382
rasdani/github-patches
git_diff
Lightning-AI__pytorch-lightning-1748
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Checkpoint adding "version_" at the start of the logger name **To reproduce :** ```python logger = pl.loggers.TensorBoardLogger( save_dir='.', version='my_name' name='lightning_logs' ) trainer = pl.Trainer(logger=logger, log_gpu_memory='all', max_epochs=10) ``` **Giving as a result:** - /lightning_logs/my_name: Where is saved the logs - /lightning_logs/version_my_name : Where is saved the checkpoints **Possible Explanation:** It seems like the checkpoint saving add "version_" to the start of the name even if the name have been given as a parameter : https://github.com/PyTorchLightning/pytorch-lightning/blob/3e8f2d99a9951bfb5fc67a98614128317913be1d/pytorch_lightning/trainer/callback_config.py#L52-L57 Even if in the Tensorboard Logger if the name is provided there is no "version_" prefix : https://github.com/PyTorchLightning/pytorch-lightning/blob/8b82ce09039e75f3fcb77a987c964249e38def3b/pytorch_lightning/loggers/tensorboard.py#L81 </issue> <code> [start of pytorch_lightning/trainer/callback_config.py] 1 import os 2 from abc import ABC, abstractmethod 3 from typing import Union, List 4 5 6 from pytorch_lightning.callbacks import Callback, ModelCheckpoint, EarlyStopping, ProgressBarBase, ProgressBar 7 from pytorch_lightning.loggers import LightningLoggerBase 8 from pytorch_lightning.utilities.exceptions import MisconfigurationException 9 10 11 class TrainerCallbackConfigMixin(ABC): 12 13 # this is just a summary on variables used in this abstract class, 14 # the proper values/initialisation should be done in child class 15 callbacks: List[Callback] 16 default_root_dir: str 17 logger: Union[LightningLoggerBase, bool] 18 weights_save_path: str 19 ckpt_path: str 20 checkpoint_callback: ModelCheckpoint 21 progress_bar_refresh_rate: int 22 process_position: int 23 24 @property 25 @abstractmethod 26 def slurm_job_id(self) -> int: 27 """Warning: this is just empty shell for code implemented in other class.""" 28 29 @abstractmethod 30 def save_checkpoint(self, *args): 31 """Warning: this is just empty shell for code implemented in other class.""" 32 33 def configure_checkpoint_callback(self): 34 """ 35 Weight path set in this priority: 36 Checkpoint_callback's path (if passed in). 
37 User provided weights_saved_path 38 Otherwise use os.getcwd() 39 """ 40 ckpt_path = self.default_root_dir 41 if self.checkpoint_callback: 42 # init a default one 43 if self.logger is not None: 44 save_dir = (getattr(self.logger, 'save_dir', None) or 45 getattr(self.logger, '_save_dir', None) or 46 self.default_root_dir) 47 48 # weights_save_path overrides anything 49 if self.weights_save_path is not None: 50 save_dir = self.weights_save_path 51 52 ckpt_path = os.path.join( 53 save_dir, 54 self.logger.name, 55 f'version_{self.logger.version}', 56 "checkpoints" 57 ) 58 else: 59 ckpt_path = os.path.join(self.default_root_dir, "checkpoints") 60 61 # when no val step is defined, use 'loss' otherwise 'val_loss' 62 train_step_only = not self.is_overriden('validation_step') 63 monitor_key = 'loss' if train_step_only else 'val_loss' 64 65 if self.checkpoint_callback is True: 66 os.makedirs(ckpt_path, exist_ok=True) 67 self.checkpoint_callback = ModelCheckpoint( 68 filepath=ckpt_path, 69 monitor=monitor_key 70 ) 71 # If user specified None in filepath, override with runtime default 72 elif isinstance(self.checkpoint_callback, ModelCheckpoint) \ 73 and self.checkpoint_callback.dirpath is None: 74 self.checkpoint_callback.dirpath = ckpt_path 75 self.checkpoint_callback.filename = '{epoch}' 76 os.makedirs(self.checkpoint_callback.dirpath, exist_ok=True) 77 elif self.checkpoint_callback is False: 78 self.checkpoint_callback = None 79 80 self.ckpt_path = ckpt_path 81 82 if self.checkpoint_callback: 83 # set the path for the callbacks 84 self.checkpoint_callback.save_function = self.save_checkpoint 85 86 # if checkpoint callback used, then override the weights path 87 self.weights_save_path = self.checkpoint_callback.dirpath 88 89 # if weights_save_path is still none here, set to current working dir 90 if self.weights_save_path is None: 91 self.weights_save_path = self.default_root_dir 92 93 def configure_early_stopping(self, early_stop_callback): 94 if early_stop_callback is True or None: 95 self.early_stop_callback = EarlyStopping( 96 monitor='val_loss', 97 patience=3, 98 strict=True, 99 verbose=True, 100 mode='min' 101 ) 102 self.enable_early_stop = True 103 elif not early_stop_callback: 104 self.early_stop_callback = None 105 self.enable_early_stop = False 106 else: 107 self.early_stop_callback = early_stop_callback 108 self.enable_early_stop = True 109 110 def configure_progress_bar(self): 111 progress_bars = [c for c in self.callbacks if isinstance(c, ProgressBarBase)] 112 if len(progress_bars) > 1: 113 raise MisconfigurationException( 114 'You added multiple progress bar callbacks to the Trainer, but currently only one' 115 ' progress bar is supported.' 116 ) 117 elif len(progress_bars) == 1: 118 self.progress_bar_callback = progress_bars[0] 119 elif self.progress_bar_refresh_rate > 0: 120 self.progress_bar_callback = ProgressBar( 121 refresh_rate=self.progress_bar_refresh_rate, 122 process_position=self.process_position, 123 ) 124 self.callbacks.append(self.progress_bar_callback) 125 else: 126 self.progress_bar_callback = None 127 [end of pytorch_lightning/trainer/callback_config.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/pytorch_lightning/trainer/callback_config.py b/pytorch_lightning/trainer/callback_config.py --- a/pytorch_lightning/trainer/callback_config.py +++ b/pytorch_lightning/trainer/callback_config.py @@ -49,10 +49,12 @@ if self.weights_save_path is not None: save_dir = self.weights_save_path + version = self.logger.version if isinstance( + self.logger.version, str) else f'version_{self.logger.version}' ckpt_path = os.path.join( save_dir, self.logger.name, - f'version_{self.logger.version}', + version, "checkpoints" ) else:
{"golden_diff": "diff --git a/pytorch_lightning/trainer/callback_config.py b/pytorch_lightning/trainer/callback_config.py\n--- a/pytorch_lightning/trainer/callback_config.py\n+++ b/pytorch_lightning/trainer/callback_config.py\n@@ -49,10 +49,12 @@\n if self.weights_save_path is not None:\n save_dir = self.weights_save_path\n \n+ version = self.logger.version if isinstance(\n+ self.logger.version, str) else f'version_{self.logger.version}'\n ckpt_path = os.path.join(\n save_dir,\n self.logger.name,\n- f'version_{self.logger.version}',\n+ version,\n \"checkpoints\"\n )\n else:\n", "issue": "Checkpoint adding \"version_\" at the start of the logger name\n**To reproduce :** \r\n```python\r\nlogger = pl.loggers.TensorBoardLogger(\r\n save_dir='.',\r\n version='my_name'\r\n name='lightning_logs'\r\n )\r\n\r\ntrainer = pl.Trainer(logger=logger, log_gpu_memory='all', max_epochs=10)\r\n```\r\n\r\n**Giving as a result:**\r\n\r\n- /lightning_logs/my_name: Where is saved the logs\r\n- /lightning_logs/version_my_name : Where is saved the checkpoints\r\n\r\n\r\n\r\n\r\n**Possible Explanation:** \r\nIt seems like the checkpoint saving add \"version_\" to the start of the name even if the name have been given as a parameter : \r\n\r\nhttps://github.com/PyTorchLightning/pytorch-lightning/blob/3e8f2d99a9951bfb5fc67a98614128317913be1d/pytorch_lightning/trainer/callback_config.py#L52-L57\r\n\r\nEven if in the Tensorboard Logger if the name is provided there is no \"version_\" prefix :\r\n\r\nhttps://github.com/PyTorchLightning/pytorch-lightning/blob/8b82ce09039e75f3fcb77a987c964249e38def3b/pytorch_lightning/loggers/tensorboard.py#L81\r\n\r\n\r\n\n", "before_files": [{"content": "import os\nfrom abc import ABC, abstractmethod\nfrom typing import Union, List\n\n\nfrom pytorch_lightning.callbacks import Callback, ModelCheckpoint, EarlyStopping, ProgressBarBase, ProgressBar\nfrom pytorch_lightning.loggers import LightningLoggerBase\nfrom pytorch_lightning.utilities.exceptions import MisconfigurationException\n\n\nclass TrainerCallbackConfigMixin(ABC):\n\n # this is just a summary on variables used in this abstract class,\n # the proper values/initialisation should be done in child class\n callbacks: List[Callback]\n default_root_dir: str\n logger: Union[LightningLoggerBase, bool]\n weights_save_path: str\n ckpt_path: str\n checkpoint_callback: ModelCheckpoint\n progress_bar_refresh_rate: int\n process_position: int\n\n @property\n @abstractmethod\n def slurm_job_id(self) -> int:\n \"\"\"Warning: this is just empty shell for code implemented in other class.\"\"\"\n\n @abstractmethod\n def save_checkpoint(self, *args):\n \"\"\"Warning: this is just empty shell for code implemented in other class.\"\"\"\n\n def configure_checkpoint_callback(self):\n \"\"\"\n Weight path set in this priority:\n Checkpoint_callback's path (if passed in).\n User provided weights_saved_path\n Otherwise use os.getcwd()\n \"\"\"\n ckpt_path = self.default_root_dir\n if self.checkpoint_callback:\n # init a default one\n if self.logger is not None:\n save_dir = (getattr(self.logger, 'save_dir', None) or\n getattr(self.logger, '_save_dir', None) or\n self.default_root_dir)\n\n # weights_save_path overrides anything\n if self.weights_save_path is not None:\n save_dir = self.weights_save_path\n\n ckpt_path = os.path.join(\n save_dir,\n self.logger.name,\n f'version_{self.logger.version}',\n \"checkpoints\"\n )\n else:\n ckpt_path = os.path.join(self.default_root_dir, \"checkpoints\")\n\n # when no val step is defined, use 'loss' otherwise 
'val_loss'\n train_step_only = not self.is_overriden('validation_step')\n monitor_key = 'loss' if train_step_only else 'val_loss'\n\n if self.checkpoint_callback is True:\n os.makedirs(ckpt_path, exist_ok=True)\n self.checkpoint_callback = ModelCheckpoint(\n filepath=ckpt_path,\n monitor=monitor_key\n )\n # If user specified None in filepath, override with runtime default\n elif isinstance(self.checkpoint_callback, ModelCheckpoint) \\\n and self.checkpoint_callback.dirpath is None:\n self.checkpoint_callback.dirpath = ckpt_path\n self.checkpoint_callback.filename = '{epoch}'\n os.makedirs(self.checkpoint_callback.dirpath, exist_ok=True)\n elif self.checkpoint_callback is False:\n self.checkpoint_callback = None\n\n self.ckpt_path = ckpt_path\n\n if self.checkpoint_callback:\n # set the path for the callbacks\n self.checkpoint_callback.save_function = self.save_checkpoint\n\n # if checkpoint callback used, then override the weights path\n self.weights_save_path = self.checkpoint_callback.dirpath\n\n # if weights_save_path is still none here, set to current working dir\n if self.weights_save_path is None:\n self.weights_save_path = self.default_root_dir\n\n def configure_early_stopping(self, early_stop_callback):\n if early_stop_callback is True or None:\n self.early_stop_callback = EarlyStopping(\n monitor='val_loss',\n patience=3,\n strict=True,\n verbose=True,\n mode='min'\n )\n self.enable_early_stop = True\n elif not early_stop_callback:\n self.early_stop_callback = None\n self.enable_early_stop = False\n else:\n self.early_stop_callback = early_stop_callback\n self.enable_early_stop = True\n\n def configure_progress_bar(self):\n progress_bars = [c for c in self.callbacks if isinstance(c, ProgressBarBase)]\n if len(progress_bars) > 1:\n raise MisconfigurationException(\n 'You added multiple progress bar callbacks to the Trainer, but currently only one'\n ' progress bar is supported.'\n )\n elif len(progress_bars) == 1:\n self.progress_bar_callback = progress_bars[0]\n elif self.progress_bar_refresh_rate > 0:\n self.progress_bar_callback = ProgressBar(\n refresh_rate=self.progress_bar_refresh_rate,\n process_position=self.process_position,\n )\n self.callbacks.append(self.progress_bar_callback)\n else:\n self.progress_bar_callback = None\n", "path": "pytorch_lightning/trainer/callback_config.py"}]}
2,107
152
gh_patches_debug_20094
rasdani/github-patches
git_diff
Flexget__Flexget-2495
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Unhandled error in plugin exists: 'PosixPath' object has no attribute 'walk' <!--- Before opening an issue, verify: - Is this a feature request? Post it on https://feathub.com/Flexget/Flexget - Did you recently upgrade? Look at the Change Log and Upgrade Actions to make sure that you don't need to make any changes to your config https://flexget.com/ChangeLog https://flexget.com/UpgradeActions - Are you running FlexGet as a daemon? Stop it completely and then start it again https://flexget.com/CLI/daemon - Did you search to see if the issue already exists? https://github.com/Flexget/Flexget/issues - Did you fill out the issue template as completely as possible? The issue template is here because it helps to ensure you submitted all the necessary information the first time, and allows us to more quickly review issues. Please fill it out correctly and do not ignore it, no matter how irrelevant you think it may be. Thanks in advance for your help with this! ---> ### Expected behaviour: After daemon startup, tasks are run without errors <!--- Please don't just say "it doesn't crash" or "it works". Explain what the expected result is. ---> ### Actual behaviour: Tasks are aborted ### Steps to reproduce: - Step 1: Configure exists plugin - Step 2: Run flexget #### Config: AFAIR, any config using exists plugin #### Log: <details> <summary>(click to expand)</summary> ``` 2019-11-13 20:32 CRITICAL task tv_rarbg BUG: Unhandled error in plugin exists: 'PosixPath' object has no attribute 'walk' Traceback (most recent call last): File "/home/carno/envs/flexget3/lib/python3.7/site-packages/flexget/task.py", line 520, in __run_plugin result = method(*args, **kwargs) File "/home/carno/envs/flexget3/lib/python3.7/site-packages/flexget/event.py", line 20, in __call__ return self.func(*args, **kwargs) File "/home/carno/envs/flexget3/lib/python3.7/site-packages/flexget/plugins/filter/exists.py", line 41, in on_task_filter for p in folder.walk(errors='ignore'): AttributeError: 'PosixPath' object has no attribute 'walk' ``` </details> ### Additional information: - FlexGet version: 3.0.1 - Python version: 3.7.2 - Installation method: pip - Using daemon (yes/no): yes - OS and version: debian unstable - Link to crash log: N/A <!--- In config and debug/crash logs, remember to redact any personal or sensitive information such as passwords, API keys, private URLs and so on. Please verify that the following data is present before submitting your issue: - Link to a paste service or paste above the relevant config (preferably full config, including templates if present). Please make sure the paste does not expire, if possible. - Link to a paste service or paste above debug-level logs of the relevant task/s (use `flexget -L debug execute --tasks <Task_name>`). - FlexGet version (use `flexget -V` to get it). - Full Python version, for example `2.7.11` (use `python -V` to get it). - Installation method (pip, git install, etc). - Whether or not you're running FlexGet as a daemon. - OS and version. - Attach crash log if one was generated, in addition to the debug-level log. It can be found in the directory with your config file. 
---> </issue> <code> [start of flexget/plugins/filter/exists.py] 1 import logging 2 import platform 3 from pathlib import Path 4 5 from flexget import plugin 6 from flexget.config_schema import one_or_more 7 from flexget.event import event 8 9 log = logging.getLogger('exists') 10 11 12 class FilterExists: 13 """ 14 Reject entries that already exist in given path. 15 16 Example:: 17 18 exists: /storage/movies/ 19 """ 20 21 schema = one_or_more({'type': 'string', 'format': 'path'}) 22 23 def prepare_config(self, config): 24 # If only a single path is passed turn it into a 1 element list 25 if isinstance(config, str): 26 config = [config] 27 return config 28 29 @plugin.priority(-1) 30 def on_task_filter(self, task, config): 31 if not task.accepted: 32 log.debug('No accepted entries, not scanning for existing.') 33 return 34 log.verbose('Scanning path(s) for existing files.') 35 config = self.prepare_config(config) 36 filenames = {} 37 for folder in config: 38 folder = Path(folder).expanduser() 39 if not folder.exists(): 40 raise plugin.PluginWarning('Path %s does not exist' % folder, log) 41 for p in folder.walk(errors='ignore'): 42 key = p.name 43 # windows file system is not case sensitive 44 if platform.system() == 'Windows': 45 key = key.lower() 46 filenames[key] = p 47 for entry in task.accepted: 48 # priority is: filename, location (filename only), title 49 name = Path(entry.get('filename', entry.get('location', entry['title']))).name 50 if platform.system() == 'Windows': 51 name = name.lower() 52 if name in filenames: 53 log.debug('Found %s in %s' % (name, filenames[name])) 54 entry.reject('exists in %s' % filenames[name]) 55 56 57 @event('plugin.register') 58 def register_plugin(): 59 plugin.register(FilterExists, 'exists', api_ver=2) 60 [end of flexget/plugins/filter/exists.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/flexget/plugins/filter/exists.py b/flexget/plugins/filter/exists.py --- a/flexget/plugins/filter/exists.py +++ b/flexget/plugins/filter/exists.py @@ -38,12 +38,13 @@ folder = Path(folder).expanduser() if not folder.exists(): raise plugin.PluginWarning('Path %s does not exist' % folder, log) - for p in folder.walk(errors='ignore'): - key = p.name - # windows file system is not case sensitive - if platform.system() == 'Windows': - key = key.lower() - filenames[key] = p + for p in folder.rglob('*'): + if p.is_file(): + key = p.name + # windows file system is not case sensitive + if platform.system() == 'Windows': + key = key.lower() + filenames[key] = p for entry in task.accepted: # priority is: filename, location (filename only), title name = Path(entry.get('filename', entry.get('location', entry['title']))).name
{"golden_diff": "diff --git a/flexget/plugins/filter/exists.py b/flexget/plugins/filter/exists.py\n--- a/flexget/plugins/filter/exists.py\n+++ b/flexget/plugins/filter/exists.py\n@@ -38,12 +38,13 @@\n folder = Path(folder).expanduser()\n if not folder.exists():\n raise plugin.PluginWarning('Path %s does not exist' % folder, log)\n- for p in folder.walk(errors='ignore'):\n- key = p.name\n- # windows file system is not case sensitive\n- if platform.system() == 'Windows':\n- key = key.lower()\n- filenames[key] = p\n+ for p in folder.rglob('*'):\n+ if p.is_file():\n+ key = p.name\n+ # windows file system is not case sensitive\n+ if platform.system() == 'Windows':\n+ key = key.lower()\n+ filenames[key] = p\n for entry in task.accepted:\n # priority is: filename, location (filename only), title\n name = Path(entry.get('filename', entry.get('location', entry['title']))).name\n", "issue": "Unhandled error in plugin exists: 'PosixPath' object has no attribute 'walk'\n<!---\r\nBefore opening an issue, verify:\r\n\r\n- Is this a feature request? Post it on https://feathub.com/Flexget/Flexget\r\n- Did you recently upgrade? Look at the Change Log and Upgrade Actions to make sure that you don't need to make any changes to your config https://flexget.com/ChangeLog https://flexget.com/UpgradeActions\r\n- Are you running FlexGet as a daemon? Stop it completely and then start it again https://flexget.com/CLI/daemon\r\n- Did you search to see if the issue already exists? https://github.com/Flexget/Flexget/issues\r\n- Did you fill out the issue template as completely as possible?\r\n\r\nThe issue template is here because it helps to ensure you submitted all the necessary information the first time, and allows us to more quickly review issues. Please fill it out correctly and do not ignore it, no matter how irrelevant you think it may be. Thanks in advance for your help with this!\r\n--->\r\n\r\n### Expected behaviour:\r\nAfter daemon startup, tasks are run without errors\r\n<!---\r\nPlease don't just say \"it doesn't crash\" or \"it works\". 
Explain what the expected result is.\r\n--->\r\n\r\n### Actual behaviour:\r\nTasks are aborted\r\n### Steps to reproduce:\r\n- Step 1: Configure exists plugin\r\n- Step 2: Run flexget\r\n\r\n#### Config:\r\nAFAIR, any config using exists plugin\r\n \r\n#### Log:\r\n<details>\r\n <summary>(click to expand)</summary>\r\n\r\n```\r\n2019-11-13 20:32 CRITICAL task tv_rarbg BUG: Unhandled error in plugin exists: 'PosixPath' object has no attribute 'walk'\r\nTraceback (most recent call last):\r\n File \"/home/carno/envs/flexget3/lib/python3.7/site-packages/flexget/task.py\", line 520, in __run_plugin\r\n result = method(*args, **kwargs)\r\n File \"/home/carno/envs/flexget3/lib/python3.7/site-packages/flexget/event.py\", line 20, in __call__\r\n return self.func(*args, **kwargs)\r\n File \"/home/carno/envs/flexget3/lib/python3.7/site-packages/flexget/plugins/filter/exists.py\", line 41, in on_task_filter\r\n for p in folder.walk(errors='ignore'):\r\nAttributeError: 'PosixPath' object has no attribute 'walk'\r\n\r\n```\r\n</details>\r\n\r\n### Additional information:\r\n\r\n- FlexGet version: 3.0.1\r\n- Python version: 3.7.2\r\n- Installation method: pip\r\n- Using daemon (yes/no): yes\r\n- OS and version: debian unstable\r\n- Link to crash log: N/A\r\n\r\n<!---\r\nIn config and debug/crash logs, remember to redact any personal or sensitive information such as passwords, API keys, private URLs and so on.\r\n\r\nPlease verify that the following data is present before submitting your issue:\r\n\r\n- Link to a paste service or paste above the relevant config (preferably full config, including templates if present). Please make sure the paste does not expire, if possible.\r\n- Link to a paste service or paste above debug-level logs of the relevant task/s (use `flexget -L debug execute --tasks <Task_name>`).\r\n- FlexGet version (use `flexget -V` to get it).\r\n- Full Python version, for example `2.7.11` (use `python -V` to get it). \r\n- Installation method (pip, git install, etc).\r\n- Whether or not you're running FlexGet as a daemon.\r\n- OS and version.\r\n- Attach crash log if one was generated, in addition to the debug-level log. 
It can be found in the directory with your config file.\r\n--->\r\n\n", "before_files": [{"content": "import logging\nimport platform\nfrom pathlib import Path\n\nfrom flexget import plugin\nfrom flexget.config_schema import one_or_more\nfrom flexget.event import event\n\nlog = logging.getLogger('exists')\n\n\nclass FilterExists:\n \"\"\"\n Reject entries that already exist in given path.\n\n Example::\n\n exists: /storage/movies/\n \"\"\"\n\n schema = one_or_more({'type': 'string', 'format': 'path'})\n\n def prepare_config(self, config):\n # If only a single path is passed turn it into a 1 element list\n if isinstance(config, str):\n config = [config]\n return config\n\n @plugin.priority(-1)\n def on_task_filter(self, task, config):\n if not task.accepted:\n log.debug('No accepted entries, not scanning for existing.')\n return\n log.verbose('Scanning path(s) for existing files.')\n config = self.prepare_config(config)\n filenames = {}\n for folder in config:\n folder = Path(folder).expanduser()\n if not folder.exists():\n raise plugin.PluginWarning('Path %s does not exist' % folder, log)\n for p in folder.walk(errors='ignore'):\n key = p.name\n # windows file system is not case sensitive\n if platform.system() == 'Windows':\n key = key.lower()\n filenames[key] = p\n for entry in task.accepted:\n # priority is: filename, location (filename only), title\n name = Path(entry.get('filename', entry.get('location', entry['title']))).name\n if platform.system() == 'Windows':\n name = name.lower()\n if name in filenames:\n log.debug('Found %s in %s' % (name, filenames[name]))\n entry.reject('exists in %s' % filenames[name])\n\n\n@event('plugin.register')\ndef register_plugin():\n plugin.register(FilterExists, 'exists', api_ver=2)\n", "path": "flexget/plugins/filter/exists.py"}]}
1,885
248
gh_patches_debug_2309
rasdani/github-patches
git_diff
chaoss__augur-759
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Need Model and worker info for releases worker The new releases worker needs to have sections added for the model and the worker in the augur.config.json in order to run. </issue> <code> [start of augur/cli/configure.py] 1 #SPDX-License-Identifier: MIT 2 """ 3 Augur library script for generating a config file 4 """ 5 6 import os 7 import click 8 import json 9 10 from augur import logger 11 12 ENVVAR_PREFIX = "AUGUR_" 13 14 default_config = { 15 "Database": { 16 "name": "augur", 17 "host": "localhost", 18 "key": "key", 19 "password": "augur", 20 "port": 5432, 21 "user": "augur", 22 "gitlab_api_key":"gitlab_api_key" 23 }, 24 "Housekeeper": { 25 "jobs": [ 26 { 27 "all_focused": 1, 28 "delay": 150000, 29 "given": [ 30 "github_url" 31 ], 32 "model": "issues", 33 "repo_group_id": 0 34 }, 35 { 36 "delay": 150000, 37 "given": [ 38 "github_url" 39 ], 40 "model": "pull_request_commits", 41 "repo_group_id": 0 42 }, 43 { 44 "delay": 150000, 45 "given": [ 46 "github_url" 47 ], 48 "model": "repo_info", 49 "repo_group_id": 0 50 }, 51 { 52 "delay": 150000, 53 "given": [ 54 "repo_group" 55 ], 56 "model": "commits", 57 "repo_group_id": 0 58 }, 59 { 60 "delay": 1000000, 61 "given": [ 62 "github_url" 63 ], 64 "model": "pull_requests", 65 "repo_group_id": 0 66 }, 67 { 68 "delay": 1000000, 69 "given": [ 70 "git_url" 71 ], 72 "model": "contributors", 73 "repo_group_id": 0 74 }, 75 { 76 "delay": 1000000, 77 "given": [ 78 "git_url" 79 ], 80 "model": "insights", 81 "repo_group_id": 0 82 }, 83 { 84 "delay": 1000000, 85 "given": [ 86 "git_url" 87 ], 88 "model": "badges", 89 "repo_group_id": 0 90 }, 91 { 92 "delay": 1000000, 93 "given": [ 94 "git_url" 95 ], 96 "model": "value", 97 "repo_group_id": 0 98 }, 99 { 100 "delay": 100000, 101 "given": [ 102 "github_url" 103 ], 104 "model": "pull_request_files", 105 "repo_group_id": 0 106 } 107 ] 108 }, 109 "Workers": { 110 "facade_worker": { 111 "port": 50100, 112 "repo_directory": "repos/", 113 "switch": 1, 114 "workers": 1 115 }, 116 "github_worker": { 117 "port": 50200, 118 "switch": 1, 119 "workers": 1 120 }, 121 "insight_worker": { 122 "port": 50300, 123 "metrics": {"issues-new": "issues", "code-changes": "commit_count", "code-changes-lines": "added", 124 "reviews": "pull_requests", "contributors-new": "new_contributors"}, 125 "contamination": 0.041, 126 "switch": 0, 127 "workers": 1, 128 "training_days": 365, 129 "anomaly_days": 2 130 }, 131 "linux_badge_worker": { 132 "port": 50400, 133 "switch": 1, 134 "workers": 1 135 }, 136 "metric_status_worker": { 137 "port": 50500, 138 "switch": 0, 139 "workers": 1 140 }, 141 "pull_request_worker": { 142 "port": 50600, 143 "switch": 1, 144 "workers": 1 145 }, 146 "repo_info_worker": { 147 "port": 50700, 148 "switch": 1, 149 "workers": 1 150 }, 151 "value_worker": { 152 "port": 50800, 153 "scc_bin": "scc", 154 "switch": 0, 155 "workers": 1 156 }, 157 "contributor_worker": { 158 "port": 50900, 159 "switch": 1, 160 "workers": 1 161 }, 162 "gitlab_issues_worker": { 163 "port": 51000, 164 "switch": 1, 165 "workers": 1 166 }, 167 }, 168 "Facade": { 169 "check_updates": 1, 170 "clone_repos": 1, 171 "create_xlsx_summary_files": 1, 172 "delete_marked_repos": 0, 173 "fix_affiliations": 1, 174 "force_analysis": 1, 175 "force_invalidate_caches": 1, 176 "force_updates": 1, 177 "limited_run": 0, 178 "multithreaded": 0, 179 "nuke_stored_affiliations": 0, 180 "pull_repos": 1, 181 "rebuild_caches": 1, 182 "run_analysis": 1 183 }, 184 
"Server": { 185 "cache_expire": "3600", 186 "host": "0.0.0.0", 187 "port": "5000", 188 "workers": 4, 189 "timeout": 60 190 }, 191 "Frontend": { 192 "host": "0.0.0.0", 193 "port": "5000" 194 }, 195 "Development": { 196 "log_level": "INFO" 197 } 198 } 199 200 @click.group('configure', short_help='Generate an augur.config.json') 201 def cli(): 202 pass 203 204 @cli.command('generate') 205 @click.option('--db_name', help="Database name for your data collection database", envvar=ENVVAR_PREFIX + 'DB_NAME') 206 @click.option('--db_host', help="Host for your data collection database", envvar=ENVVAR_PREFIX + 'DB_HOST') 207 @click.option('--db_user', help="User for your data collection database", envvar=ENVVAR_PREFIX + 'DB_USER') 208 @click.option('--db_port', help="Port for your data collection database", envvar=ENVVAR_PREFIX + 'DB_PORT') 209 @click.option('--db_password', help="Password for your data collection database", envvar=ENVVAR_PREFIX + 'DB_PASSWORD') 210 @click.option('--gitlab_api_key', help="GitLab API key for data collection from the GitLab API", envvar=ENVVAR_PREFIX + 'GITLAB_API_KEY') 211 @click.option('--github_api_key', help="GitHub API key for data collection from the GitHub API", envvar=ENVVAR_PREFIX + 'GITHUB_API_KEY') 212 @click.option('--facade_repo_directory', help="Directory on the database server where Facade should clone repos", envvar=ENVVAR_PREFIX + 'FACADE_REPO_DIRECTORY') 213 @click.option('--rc-config-file', help="File containing existing config whose values will be used as the defaults", type=click.Path(exists=True)) 214 def generate(db_name, db_host, db_user, db_port, db_password, github_api_key, facade_repo_directory, rc_config_file, gitlab_api_key): 215 """ 216 Generate an augur.config.json 217 """ 218 219 config = default_config 220 rc_config = None 221 222 if rc_config_file != None: 223 try: 224 with open(os.path.abspath(rc_config_file), 'r') as f: 225 rc_config = json.load(f) 226 for item in rc_config.items(): 227 if item[0] == 'Workers': 228 for index in range(0, len(item[1])): 229 key = list(item[1].keys())[index] 230 secondary_dict = list(item[1].values())[index] 231 232 for secondary_dict_index in range(0, len(secondary_dict)): 233 secondary_key = list(secondary_dict.keys())[secondary_dict_index] 234 value = list(secondary_dict.values())[secondary_dict_index] 235 236 config[item[0]][key][secondary_key] = value 237 else: 238 for index, key in enumerate(list(item[1].keys())): 239 config[item[0]][key] = list(item[1].values())[index] 240 241 logger.info('Predefined config successfully loaded') 242 243 except Exception as e: 244 logger.error(f"Error opening {rc_config_file}: {str(e)}") 245 246 if db_name is not None: 247 config['Database']['database'] = db_name # this is for backwards compatibility 248 if db_name is not None: 249 config['Database']['name'] = db_name 250 if db_host is not None: 251 config['Database']['host'] = db_host 252 if db_port is not None: 253 config['Database']['port'] = int(db_port) 254 if db_user is not None: 255 config['Database']['user'] = db_user 256 if db_password is not None: 257 config['Database']['password'] = db_password 258 if github_api_key is not None: 259 config['Database']['key'] = github_api_key 260 if gitlab_api_key is not None: 261 config['Database']['gitlab_api_key'] = gitlab_api_key 262 if facade_repo_directory is not None: 263 config['Workers']['facade_worker']['repo_directory'] = facade_repo_directory 264 265 try: 266 with open(os.path.abspath('augur.config.json'), 'w') as f: 267 json.dump(config, f, indent=4) 268 
logger.info('augur.config.json successfully created') 269 except Exception as e: 270 logger.error("Error writing augur.config.json " + str(e)) 271 [end of augur/cli/configure.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/augur/cli/configure.py b/augur/cli/configure.py --- a/augur/cli/configure.py +++ b/augur/cli/configure.py @@ -164,6 +164,11 @@ "switch": 1, "workers": 1 }, + "release_worker": { + "port": 51100, + "switch": 1, + "workers": 1 + }, }, "Facade": { "check_updates": 1,
{"golden_diff": "diff --git a/augur/cli/configure.py b/augur/cli/configure.py\n--- a/augur/cli/configure.py\n+++ b/augur/cli/configure.py\n@@ -164,6 +164,11 @@\n \"switch\": 1,\n \"workers\": 1\n },\n+ \"release_worker\": {\n+ \"port\": 51100,\n+ \"switch\": 1,\n+ \"workers\": 1\n+ },\n },\n \"Facade\": {\n \"check_updates\": 1,\n", "issue": "Need Model and worker info for releases worker\nThe new releases worker needs to have sections added for the model and the worker in the augur.config.json in order to run. \r\n\n", "before_files": [{"content": "#SPDX-License-Identifier: MIT\n\"\"\"\nAugur library script for generating a config file\n\"\"\"\n\nimport os\nimport click\nimport json\n\nfrom augur import logger\n\nENVVAR_PREFIX = \"AUGUR_\"\n\ndefault_config = {\n \"Database\": {\n \"name\": \"augur\",\n \"host\": \"localhost\",\n \"key\": \"key\",\n \"password\": \"augur\",\n \"port\": 5432,\n \"user\": \"augur\",\n \"gitlab_api_key\":\"gitlab_api_key\"\n },\n \"Housekeeper\": {\n \"jobs\": [\n {\n \"all_focused\": 1,\n \"delay\": 150000,\n \"given\": [\n \"github_url\"\n ],\n \"model\": \"issues\",\n \"repo_group_id\": 0\n },\n {\n \"delay\": 150000,\n \"given\": [\n \"github_url\"\n ],\n \"model\": \"pull_request_commits\",\n \"repo_group_id\": 0\n },\n {\n \"delay\": 150000,\n \"given\": [\n \"github_url\"\n ],\n \"model\": \"repo_info\",\n \"repo_group_id\": 0\n },\n {\n \"delay\": 150000,\n \"given\": [\n \"repo_group\"\n ],\n \"model\": \"commits\",\n \"repo_group_id\": 0\n },\n {\n \"delay\": 1000000,\n \"given\": [\n \"github_url\"\n ],\n \"model\": \"pull_requests\",\n \"repo_group_id\": 0\n },\n {\n \"delay\": 1000000,\n \"given\": [\n \"git_url\"\n ],\n \"model\": \"contributors\",\n \"repo_group_id\": 0\n }, \n {\n \"delay\": 1000000,\n \"given\": [\n \"git_url\"\n ],\n \"model\": \"insights\",\n \"repo_group_id\": 0\n },\n {\n \"delay\": 1000000,\n \"given\": [\n \"git_url\"\n ],\n \"model\": \"badges\",\n \"repo_group_id\": 0\n },\n {\n \"delay\": 1000000,\n \"given\": [\n \"git_url\"\n ],\n \"model\": \"value\",\n \"repo_group_id\": 0\n },\n {\n \"delay\": 100000,\n \"given\": [\n \"github_url\"\n ],\n \"model\": \"pull_request_files\",\n \"repo_group_id\": 0\n }\n ]\n },\n \"Workers\": {\n \"facade_worker\": {\n \"port\": 50100,\n \"repo_directory\": \"repos/\",\n \"switch\": 1,\n \"workers\": 1\n },\n \"github_worker\": {\n \"port\": 50200,\n \"switch\": 1,\n \"workers\": 1\n },\n \"insight_worker\": {\n \"port\": 50300,\n \"metrics\": {\"issues-new\": \"issues\", \"code-changes\": \"commit_count\", \"code-changes-lines\": \"added\", \n \"reviews\": \"pull_requests\", \"contributors-new\": \"new_contributors\"},\n \"contamination\": 0.041,\n \"switch\": 0,\n \"workers\": 1,\n \"training_days\": 365,\n \"anomaly_days\": 2\n },\n \"linux_badge_worker\": {\n \"port\": 50400,\n \"switch\": 1,\n \"workers\": 1\n },\n \"metric_status_worker\": {\n \"port\": 50500,\n \"switch\": 0,\n \"workers\": 1\n },\n \"pull_request_worker\": {\n \"port\": 50600,\n \"switch\": 1,\n \"workers\": 1\n },\n \"repo_info_worker\": {\n \"port\": 50700,\n \"switch\": 1,\n \"workers\": 1\n },\n \"value_worker\": {\n \"port\": 50800,\n \"scc_bin\": \"scc\",\n \"switch\": 0,\n \"workers\": 1\n },\n \"contributor_worker\": {\n \"port\": 50900,\n \"switch\": 1,\n \"workers\": 1\n },\n \"gitlab_issues_worker\": {\n \"port\": 51000,\n \"switch\": 1,\n \"workers\": 1\n },\n },\n \"Facade\": {\n \"check_updates\": 1,\n \"clone_repos\": 1,\n \"create_xlsx_summary_files\": 1,\n \"delete_marked_repos\": 0,\n 
\"fix_affiliations\": 1,\n \"force_analysis\": 1,\n \"force_invalidate_caches\": 1,\n \"force_updates\": 1,\n \"limited_run\": 0,\n \"multithreaded\": 0,\n \"nuke_stored_affiliations\": 0,\n \"pull_repos\": 1,\n \"rebuild_caches\": 1,\n \"run_analysis\": 1\n },\n \"Server\": {\n \"cache_expire\": \"3600\",\n \"host\": \"0.0.0.0\",\n \"port\": \"5000\",\n \"workers\": 4,\n \"timeout\": 60\n },\n \"Frontend\": {\n \"host\": \"0.0.0.0\",\n \"port\": \"5000\"\n },\n \"Development\": {\n \"log_level\": \"INFO\"\n }\n }\n\[email protected]('configure', short_help='Generate an augur.config.json')\ndef cli():\n pass\n\[email protected]('generate')\[email protected]('--db_name', help=\"Database name for your data collection database\", envvar=ENVVAR_PREFIX + 'DB_NAME')\[email protected]('--db_host', help=\"Host for your data collection database\", envvar=ENVVAR_PREFIX + 'DB_HOST')\[email protected]('--db_user', help=\"User for your data collection database\", envvar=ENVVAR_PREFIX + 'DB_USER')\[email protected]('--db_port', help=\"Port for your data collection database\", envvar=ENVVAR_PREFIX + 'DB_PORT')\[email protected]('--db_password', help=\"Password for your data collection database\", envvar=ENVVAR_PREFIX + 'DB_PASSWORD')\[email protected]('--gitlab_api_key', help=\"GitLab API key for data collection from the GitLab API\", envvar=ENVVAR_PREFIX + 'GITLAB_API_KEY')\[email protected]('--github_api_key', help=\"GitHub API key for data collection from the GitHub API\", envvar=ENVVAR_PREFIX + 'GITHUB_API_KEY')\[email protected]('--facade_repo_directory', help=\"Directory on the database server where Facade should clone repos\", envvar=ENVVAR_PREFIX + 'FACADE_REPO_DIRECTORY')\[email protected]('--rc-config-file', help=\"File containing existing config whose values will be used as the defaults\", type=click.Path(exists=True))\ndef generate(db_name, db_host, db_user, db_port, db_password, github_api_key, facade_repo_directory, rc_config_file, gitlab_api_key):\n \"\"\"\n Generate an augur.config.json\n \"\"\"\n\n config = default_config\n rc_config = None\n\n if rc_config_file != None:\n try:\n with open(os.path.abspath(rc_config_file), 'r') as f:\n rc_config = json.load(f)\n for item in rc_config.items():\n if item[0] == 'Workers':\n for index in range(0, len(item[1])):\n key = list(item[1].keys())[index]\n secondary_dict = list(item[1].values())[index]\n\n for secondary_dict_index in range(0, len(secondary_dict)):\n secondary_key = list(secondary_dict.keys())[secondary_dict_index]\n value = list(secondary_dict.values())[secondary_dict_index]\n\n config[item[0]][key][secondary_key] = value\n else:\n for index, key in enumerate(list(item[1].keys())):\n config[item[0]][key] = list(item[1].values())[index]\n\n logger.info('Predefined config successfully loaded')\n\n except Exception as e:\n logger.error(f\"Error opening {rc_config_file}: {str(e)}\")\n\n if db_name is not None:\n config['Database']['database'] = db_name # this is for backwards compatibility\n if db_name is not None:\n config['Database']['name'] = db_name\n if db_host is not None:\n config['Database']['host'] = db_host\n if db_port is not None:\n config['Database']['port'] = int(db_port)\n if db_user is not None:\n config['Database']['user'] = db_user\n if db_password is not None:\n config['Database']['password'] = db_password\n if github_api_key is not None:\n config['Database']['key'] = github_api_key\n if gitlab_api_key is not None:\n config['Database']['gitlab_api_key'] = gitlab_api_key\n if facade_repo_directory is not None:\n 
config['Workers']['facade_worker']['repo_directory'] = facade_repo_directory\n\n try:\n with open(os.path.abspath('augur.config.json'), 'w') as f:\n json.dump(config, f, indent=4)\n logger.info('augur.config.json successfully created')\n except Exception as e:\n logger.error(\"Error writing augur.config.json \" + str(e))\n", "path": "augur/cli/configure.py"}]}
num_tokens_prompt: 3413
num_tokens_diff: 124

problem_id: gh_patches_debug_31336
source: rasdani/github-patches
task_type: git_diff
in_source_id: zestedesavoir__zds-site-6556
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> La connexion avec Google ne fonctionne pas sur le serveur de bêta Problème rapporté par Sentry. La connexion avec Google ne fonctionne pas sur le serveur de bêta, on a une erreur 500 à cause d'une erreur dans le paquet social-auth-core. Même en testant avec la dernière version de ce paquet le problème n'est pas corrigé. J'ai ouvert un ticket chez social-auth-core : https://github.com/python-social-auth/social-core/issues/847 </issue> <code> [start of zds/settings/prod.py] 1 from sentry_sdk.integrations.django import DjangoIntegration 2 from sentry_sdk.integrations.logging import ignore_logger 3 import sentry_sdk 4 5 from .abstract_base import * 6 7 # For secrets, prefer `config[key]` over `config.get(key)` in this 8 # file because we really want to raise an error if a secret is not 9 # defined. 10 11 12 ############################################################################### 13 # DJANGO SETTINGS 14 15 16 DEBUG = False 17 18 USE_L10N = True 19 20 DATABASES = { 21 "default": { 22 "ENGINE": "django.db.backends.mysql", 23 "NAME": config["databases"]["default"].get("name", "zdsdb"), 24 "USER": config["databases"]["default"].get("user", "zds"), 25 "PASSWORD": config["databases"]["default"]["password"], 26 "HOST": "localhost", 27 "PORT": "", 28 "CONN_MAX_AGE": 600, 29 "OPTIONS": { 30 "charset": "utf8mb4", 31 }, 32 }, 33 } 34 35 ALLOWED_HOSTS = [ 36 "beta.zestedesavoir.com", 37 "scaleway.zestedesavoir.com", 38 "zdsappserver", 39 "gandi.zestedesavoir.com", 40 "gandi.zestedesavoir.com.", 41 ".zestedesavoir.com", 42 ".zestedesavoir.com.", 43 "127.0.0.1", 44 "localhost", 45 "163.172.171.246", 46 ] 47 48 EMAIL_BACKEND = "django.core.mail.backends.smtp.EmailBackend" 49 EMAIL_USE_TLS = False 50 EMAIL_HOST = "localhost" 51 EMAIL_PORT = 25 52 53 CACHES = { 54 "default": { 55 "BACKEND": "django.core.cache.backends.memcached.PyMemcacheCache", 56 "LOCATION": "127.0.0.1:11211", 57 } 58 } 59 60 SESSION_ENGINE = "django.contrib.sessions.backends.cached_db" 61 SESSION_COOKIE_AGE = 60 * 60 * 24 * 7 * 4 62 63 MEDIA_ROOT = Path("/opt/zds/data/media") 64 65 STATIC_ROOT = Path("/opt/zds/data/static") 66 STATICFILES_STORAGE = "django.contrib.staticfiles.storage.ManifestStaticFilesStorage" 67 68 django_template_engine["APP_DIRS"] = False 69 django_template_engine["OPTIONS"]["loaders"] = [ 70 ( 71 "django.template.loaders.cached.Loader", 72 [ 73 "django.template.loaders.filesystem.Loader", 74 "django.template.loaders.app_directories.Loader", 75 ], 76 ), 77 ] 78 79 80 def _get_version(): 81 from zds import __version__, git_version 82 83 if git_version is None: 84 return __version__ 85 else: 86 return f"{__version__}/{git_version[:7]}" 87 88 89 sentry_sdk.init( 90 dsn=config["sentry"]["dsn"], 91 integrations=[DjangoIntegration()], 92 # Set traces_sample_rate to 1.0 to capture 100% 93 # of transactions for performance monitoring. 94 # We recommend adjusting this value in production, 95 traces_sample_rate=1.0, 96 # If you wish to associate users to errors (assuming you are using 97 # django.contrib.auth) you may enable sending PII data. 98 send_default_pii=True, 99 # By default the SDK will try to use the SENTRY_RELEASE 100 # environment variable, or infer a git commit 101 # SHA as release, however you may want to set 102 # something more human-readable. 
103 release=_get_version().replace("/", "#"), 104 # /!\ It cannot contain slashes 105 environment=config["sentry"]["environment"], 106 ) 107 108 # Ignoring emarkdown logging because it is too noisy 109 ignore_logger("zds.utils.templatetags.emarkdown") 110 111 112 ############################################################################### 113 # REQUIREMENTS SETTINGS 114 115 116 # easy-thumbnails 117 # http://easy-thumbnails.readthedocs.io/en/2.1/ref/optimize/ 118 THUMBNAIL_OPTIMIZE_COMMAND = { 119 "png": "/usr/bin/optipng {filename}", 120 "gif": "/usr/bin/optipng {filename}", 121 "jpeg": "/usr/bin/jpegoptim {filename}", 122 } 123 124 125 # python-social-auth 126 # http://psa.matiasaguirre.net/docs/configuration/django.html 127 SOCIAL_AUTH_PIPELINE = ( 128 "social.pipeline.social_auth.social_details", 129 "social.pipeline.social_auth.social_uid", 130 "social.pipeline.social_auth.auth_allowed", 131 "social.pipeline.social_auth.social_user", 132 "social.pipeline.user.get_username", 133 "social.pipeline.social_auth.associate_by_email", 134 "social.pipeline.user.create_user", 135 "zds.member.models.save_profile", 136 "social.pipeline.social_auth.associate_user", 137 "social.pipeline.social_auth.load_extra_data", 138 "social.pipeline.user.user_details", 139 ) 140 141 142 ############################################################################### 143 # ZESTE DE SAVOIR SETTINGS 144 145 146 ES_SEARCH_INDEX["shards"] = config["elasticsearch"].get("shards", 3) 147 148 149 ZDS_APP["site"]["association"]["email"] = "[email protected]" 150 151 # content 152 # ZDS_APP['content']['build_pdf_when_published'] = False 153 ZDS_APP["article"]["repo_path"] = "/opt/zds/data/articles-data" 154 ZDS_APP["content"]["repo_private_path"] = "/opt/zds/data/contents-private" 155 ZDS_APP["content"]["repo_public_path"] = "/opt/zds/data/contents-public" 156 ZDS_APP["content"]["extra_content_generation_policy"] = "WATCHDOG" 157 158 ZDS_APP["visual_changes"] = zds_config.get("visual_changes", []) 159 160 ZDS_APP["very_top_banner"] = config.get("very_top_banner", False) 161 [end of zds/settings/prod.py] [start of zds/settings/abstract_base/requirements.py] 1 from .config import config 2 3 # best quality, 100 is the same but documentation says 4 # ' values up to 100 are allowed, but this is not recommended' 5 # so let's use 95 6 THUMBNAIL_QUALITY = 95 7 # Let's use the default value BUT if we want to let png in lossless format, we have tu use (png,) instead of None 8 THUMBNAIL_PRESERVE_EXTENSIONS = ("svg",) 9 10 11 social_auth_config = config.get("social_auth", {}) 12 13 SOCIAL_AUTH_RAISE_EXCEPTIONS = False 14 15 SOCIAL_AUTH_FACEBOOK_SCOPE = ["email"] 16 17 SOCIAL_AUTH_PIPELINE = ( 18 "social.pipeline.social_auth.social_details", 19 "social.pipeline.social_auth.social_uid", 20 "social.pipeline.social_auth.auth_allowed", 21 "social.pipeline.social_auth.social_user", 22 "social.pipeline.user.get_username", 23 "social.pipeline.social_auth.associate_by_email", 24 "social.pipeline.user.create_user", 25 "zds.member.models.save_profile", 26 "social.pipeline.social_auth.associate_user", 27 "social.pipeline.social_auth.load_extra_data", 28 "social.pipeline.user.user_details", 29 ) 30 31 # Before adding new providers such as Facebook and Google, 32 # you need to make sure they validate the user's email address on sign up! 33 # If they don't, a malicious person could take control of someone else account! 
34 SOCIAL_AUTH_FACEBOOK_KEY = social_auth_config.get("facebook_key", "") 35 SOCIAL_AUTH_FACEBOOK_SECRET = social_auth_config.get("facebook_secret", "") 36 SOCIAL_AUTH_GOOGLE_OAUTH2_KEY = social_auth_config.get( 37 "google_oauth2_key", 38 "696570367703-r6hc7mdd27t1sktdkivpnc5b25i0uip2.apps.googleusercontent.com", 39 ) 40 SOCIAL_AUTH_GOOGLE_OAUTH2_SECRET = social_auth_config.get( 41 "google_oauth2_secret", 42 "mApWNh3stCsYHwsGuWdbZWP8", 43 ) 44 45 SOCIAL_AUTH_SANITIZE_REDIRECTS = social_auth_config.get( 46 "sanitize_redirects", 47 False, 48 ) 49 50 51 recaptcha_config = config.get("recaptcha", {}) 52 53 USE_CAPTCHA = recaptcha_config.get("use_captcha", False) 54 # Seems to be used by `django-recaptcha` (what a poorly-namespaced 55 # setting!). 56 # Set to `True` to use the “No Captcha” engine instead of the old API. 57 NOCAPTCHA = True 58 RECAPTCHA_USE_SSL = True 59 RECAPTCHA_PUBLIC_KEY = recaptcha_config.get("public_key", "dummy") 60 RECAPTCHA_PRIVATE_KEY = recaptcha_config.get("private_key", "dummy") 61 62 63 OAUTH2_PROVIDER = {"OAUTH2_BACKEND_CLASS": "oauth2_provider.oauth2_backends.JSONOAuthLibCore"} 64 [end of zds/settings/abstract_base/requirements.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/zds/settings/abstract_base/requirements.py b/zds/settings/abstract_base/requirements.py --- a/zds/settings/abstract_base/requirements.py +++ b/zds/settings/abstract_base/requirements.py @@ -15,17 +15,17 @@ SOCIAL_AUTH_FACEBOOK_SCOPE = ["email"] SOCIAL_AUTH_PIPELINE = ( - "social.pipeline.social_auth.social_details", - "social.pipeline.social_auth.social_uid", - "social.pipeline.social_auth.auth_allowed", - "social.pipeline.social_auth.social_user", - "social.pipeline.user.get_username", - "social.pipeline.social_auth.associate_by_email", - "social.pipeline.user.create_user", + "social_core.pipeline.social_auth.social_details", + "social_core.pipeline.social_auth.social_uid", + "social_core.pipeline.social_auth.auth_allowed", + "social_core.pipeline.social_auth.social_user", + "social_core.pipeline.user.get_username", + "social_core.pipeline.social_auth.associate_by_email", + "social_core.pipeline.user.create_user", "zds.member.models.save_profile", - "social.pipeline.social_auth.associate_user", - "social.pipeline.social_auth.load_extra_data", - "social.pipeline.user.user_details", + "social_core.pipeline.social_auth.associate_user", + "social_core.pipeline.social_auth.load_extra_data", + "social_core.pipeline.user.user_details", ) # Before adding new providers such as Facebook and Google, diff --git a/zds/settings/prod.py b/zds/settings/prod.py --- a/zds/settings/prod.py +++ b/zds/settings/prod.py @@ -122,23 +122,6 @@ } -# python-social-auth -# http://psa.matiasaguirre.net/docs/configuration/django.html -SOCIAL_AUTH_PIPELINE = ( - "social.pipeline.social_auth.social_details", - "social.pipeline.social_auth.social_uid", - "social.pipeline.social_auth.auth_allowed", - "social.pipeline.social_auth.social_user", - "social.pipeline.user.get_username", - "social.pipeline.social_auth.associate_by_email", - "social.pipeline.user.create_user", - "zds.member.models.save_profile", - "social.pipeline.social_auth.associate_user", - "social.pipeline.social_auth.load_extra_data", - "social.pipeline.user.user_details", -) - - ############################################################################### # ZESTE DE SAVOIR SETTINGS
{"golden_diff": "diff --git a/zds/settings/abstract_base/requirements.py b/zds/settings/abstract_base/requirements.py\n--- a/zds/settings/abstract_base/requirements.py\n+++ b/zds/settings/abstract_base/requirements.py\n@@ -15,17 +15,17 @@\n SOCIAL_AUTH_FACEBOOK_SCOPE = [\"email\"]\n \n SOCIAL_AUTH_PIPELINE = (\n- \"social.pipeline.social_auth.social_details\",\n- \"social.pipeline.social_auth.social_uid\",\n- \"social.pipeline.social_auth.auth_allowed\",\n- \"social.pipeline.social_auth.social_user\",\n- \"social.pipeline.user.get_username\",\n- \"social.pipeline.social_auth.associate_by_email\",\n- \"social.pipeline.user.create_user\",\n+ \"social_core.pipeline.social_auth.social_details\",\n+ \"social_core.pipeline.social_auth.social_uid\",\n+ \"social_core.pipeline.social_auth.auth_allowed\",\n+ \"social_core.pipeline.social_auth.social_user\",\n+ \"social_core.pipeline.user.get_username\",\n+ \"social_core.pipeline.social_auth.associate_by_email\",\n+ \"social_core.pipeline.user.create_user\",\n \"zds.member.models.save_profile\",\n- \"social.pipeline.social_auth.associate_user\",\n- \"social.pipeline.social_auth.load_extra_data\",\n- \"social.pipeline.user.user_details\",\n+ \"social_core.pipeline.social_auth.associate_user\",\n+ \"social_core.pipeline.social_auth.load_extra_data\",\n+ \"social_core.pipeline.user.user_details\",\n )\n \n # Before adding new providers such as Facebook and Google,\ndiff --git a/zds/settings/prod.py b/zds/settings/prod.py\n--- a/zds/settings/prod.py\n+++ b/zds/settings/prod.py\n@@ -122,23 +122,6 @@\n }\n \n \n-# python-social-auth\n-# http://psa.matiasaguirre.net/docs/configuration/django.html\n-SOCIAL_AUTH_PIPELINE = (\n- \"social.pipeline.social_auth.social_details\",\n- \"social.pipeline.social_auth.social_uid\",\n- \"social.pipeline.social_auth.auth_allowed\",\n- \"social.pipeline.social_auth.social_user\",\n- \"social.pipeline.user.get_username\",\n- \"social.pipeline.social_auth.associate_by_email\",\n- \"social.pipeline.user.create_user\",\n- \"zds.member.models.save_profile\",\n- \"social.pipeline.social_auth.associate_user\",\n- \"social.pipeline.social_auth.load_extra_data\",\n- \"social.pipeline.user.user_details\",\n-)\n-\n-\n ###############################################################################\n # ZESTE DE SAVOIR SETTINGS\n", "issue": "La connexion avec Google ne fonctionne pas sur le serveur de b\u00eata\nProbl\u00e8me rapport\u00e9 par Sentry.\r\n\r\nLa connexion avec Google ne fonctionne pas sur le serveur de b\u00eata, on a une erreur 500 \u00e0 cause d'une erreur dans le paquet social-auth-core. 
M\u00eame en testant avec la derni\u00e8re version de ce paquet le probl\u00e8me n'est pas corrig\u00e9.\r\n\r\nJ'ai ouvert un ticket chez social-auth-core : https://github.com/python-social-auth/social-core/issues/847\r\n\n", "before_files": [{"content": "from sentry_sdk.integrations.django import DjangoIntegration\nfrom sentry_sdk.integrations.logging import ignore_logger\nimport sentry_sdk\n\nfrom .abstract_base import *\n\n# For secrets, prefer `config[key]` over `config.get(key)` in this\n# file because we really want to raise an error if a secret is not\n# defined.\n\n\n###############################################################################\n# DJANGO SETTINGS\n\n\nDEBUG = False\n\nUSE_L10N = True\n\nDATABASES = {\n \"default\": {\n \"ENGINE\": \"django.db.backends.mysql\",\n \"NAME\": config[\"databases\"][\"default\"].get(\"name\", \"zdsdb\"),\n \"USER\": config[\"databases\"][\"default\"].get(\"user\", \"zds\"),\n \"PASSWORD\": config[\"databases\"][\"default\"][\"password\"],\n \"HOST\": \"localhost\",\n \"PORT\": \"\",\n \"CONN_MAX_AGE\": 600,\n \"OPTIONS\": {\n \"charset\": \"utf8mb4\",\n },\n },\n}\n\nALLOWED_HOSTS = [\n \"beta.zestedesavoir.com\",\n \"scaleway.zestedesavoir.com\",\n \"zdsappserver\",\n \"gandi.zestedesavoir.com\",\n \"gandi.zestedesavoir.com.\",\n \".zestedesavoir.com\",\n \".zestedesavoir.com.\",\n \"127.0.0.1\",\n \"localhost\",\n \"163.172.171.246\",\n]\n\nEMAIL_BACKEND = \"django.core.mail.backends.smtp.EmailBackend\"\nEMAIL_USE_TLS = False\nEMAIL_HOST = \"localhost\"\nEMAIL_PORT = 25\n\nCACHES = {\n \"default\": {\n \"BACKEND\": \"django.core.cache.backends.memcached.PyMemcacheCache\",\n \"LOCATION\": \"127.0.0.1:11211\",\n }\n}\n\nSESSION_ENGINE = \"django.contrib.sessions.backends.cached_db\"\nSESSION_COOKIE_AGE = 60 * 60 * 24 * 7 * 4\n\nMEDIA_ROOT = Path(\"/opt/zds/data/media\")\n\nSTATIC_ROOT = Path(\"/opt/zds/data/static\")\nSTATICFILES_STORAGE = \"django.contrib.staticfiles.storage.ManifestStaticFilesStorage\"\n\ndjango_template_engine[\"APP_DIRS\"] = False\ndjango_template_engine[\"OPTIONS\"][\"loaders\"] = [\n (\n \"django.template.loaders.cached.Loader\",\n [\n \"django.template.loaders.filesystem.Loader\",\n \"django.template.loaders.app_directories.Loader\",\n ],\n ),\n]\n\n\ndef _get_version():\n from zds import __version__, git_version\n\n if git_version is None:\n return __version__\n else:\n return f\"{__version__}/{git_version[:7]}\"\n\n\nsentry_sdk.init(\n dsn=config[\"sentry\"][\"dsn\"],\n integrations=[DjangoIntegration()],\n # Set traces_sample_rate to 1.0 to capture 100%\n # of transactions for performance monitoring.\n # We recommend adjusting this value in production,\n traces_sample_rate=1.0,\n # If you wish to associate users to errors (assuming you are using\n # django.contrib.auth) you may enable sending PII data.\n send_default_pii=True,\n # By default the SDK will try to use the SENTRY_RELEASE\n # environment variable, or infer a git commit\n # SHA as release, however you may want to set\n # something more human-readable.\n release=_get_version().replace(\"/\", \"#\"),\n # /!\\ It cannot contain slashes\n environment=config[\"sentry\"][\"environment\"],\n)\n\n# Ignoring emarkdown logging because it is too noisy\nignore_logger(\"zds.utils.templatetags.emarkdown\")\n\n\n###############################################################################\n# REQUIREMENTS SETTINGS\n\n\n# easy-thumbnails\n# http://easy-thumbnails.readthedocs.io/en/2.1/ref/optimize/\nTHUMBNAIL_OPTIMIZE_COMMAND = {\n \"png\": \"/usr/bin/optipng 
{filename}\",\n \"gif\": \"/usr/bin/optipng {filename}\",\n \"jpeg\": \"/usr/bin/jpegoptim {filename}\",\n}\n\n\n# python-social-auth\n# http://psa.matiasaguirre.net/docs/configuration/django.html\nSOCIAL_AUTH_PIPELINE = (\n \"social.pipeline.social_auth.social_details\",\n \"social.pipeline.social_auth.social_uid\",\n \"social.pipeline.social_auth.auth_allowed\",\n \"social.pipeline.social_auth.social_user\",\n \"social.pipeline.user.get_username\",\n \"social.pipeline.social_auth.associate_by_email\",\n \"social.pipeline.user.create_user\",\n \"zds.member.models.save_profile\",\n \"social.pipeline.social_auth.associate_user\",\n \"social.pipeline.social_auth.load_extra_data\",\n \"social.pipeline.user.user_details\",\n)\n\n\n###############################################################################\n# ZESTE DE SAVOIR SETTINGS\n\n\nES_SEARCH_INDEX[\"shards\"] = config[\"elasticsearch\"].get(\"shards\", 3)\n\n\nZDS_APP[\"site\"][\"association\"][\"email\"] = \"[email protected]\"\n\n# content\n# ZDS_APP['content']['build_pdf_when_published'] = False\nZDS_APP[\"article\"][\"repo_path\"] = \"/opt/zds/data/articles-data\"\nZDS_APP[\"content\"][\"repo_private_path\"] = \"/opt/zds/data/contents-private\"\nZDS_APP[\"content\"][\"repo_public_path\"] = \"/opt/zds/data/contents-public\"\nZDS_APP[\"content\"][\"extra_content_generation_policy\"] = \"WATCHDOG\"\n\nZDS_APP[\"visual_changes\"] = zds_config.get(\"visual_changes\", [])\n\nZDS_APP[\"very_top_banner\"] = config.get(\"very_top_banner\", False)\n", "path": "zds/settings/prod.py"}, {"content": "from .config import config\n\n# best quality, 100 is the same but documentation says\n# ' values up to 100 are allowed, but this is not recommended'\n# so let's use 95\nTHUMBNAIL_QUALITY = 95\n# Let's use the default value BUT if we want to let png in lossless format, we have tu use (png,) instead of None\nTHUMBNAIL_PRESERVE_EXTENSIONS = (\"svg\",)\n\n\nsocial_auth_config = config.get(\"social_auth\", {})\n\nSOCIAL_AUTH_RAISE_EXCEPTIONS = False\n\nSOCIAL_AUTH_FACEBOOK_SCOPE = [\"email\"]\n\nSOCIAL_AUTH_PIPELINE = (\n \"social.pipeline.social_auth.social_details\",\n \"social.pipeline.social_auth.social_uid\",\n \"social.pipeline.social_auth.auth_allowed\",\n \"social.pipeline.social_auth.social_user\",\n \"social.pipeline.user.get_username\",\n \"social.pipeline.social_auth.associate_by_email\",\n \"social.pipeline.user.create_user\",\n \"zds.member.models.save_profile\",\n \"social.pipeline.social_auth.associate_user\",\n \"social.pipeline.social_auth.load_extra_data\",\n \"social.pipeline.user.user_details\",\n)\n\n# Before adding new providers such as Facebook and Google,\n# you need to make sure they validate the user's email address on sign up!\n# If they don't, a malicious person could take control of someone else account!\nSOCIAL_AUTH_FACEBOOK_KEY = social_auth_config.get(\"facebook_key\", \"\")\nSOCIAL_AUTH_FACEBOOK_SECRET = social_auth_config.get(\"facebook_secret\", \"\")\nSOCIAL_AUTH_GOOGLE_OAUTH2_KEY = social_auth_config.get(\n \"google_oauth2_key\",\n \"696570367703-r6hc7mdd27t1sktdkivpnc5b25i0uip2.apps.googleusercontent.com\",\n)\nSOCIAL_AUTH_GOOGLE_OAUTH2_SECRET = social_auth_config.get(\n \"google_oauth2_secret\",\n \"mApWNh3stCsYHwsGuWdbZWP8\",\n)\n\nSOCIAL_AUTH_SANITIZE_REDIRECTS = social_auth_config.get(\n \"sanitize_redirects\",\n False,\n)\n\n\nrecaptcha_config = config.get(\"recaptcha\", {})\n\nUSE_CAPTCHA = recaptcha_config.get(\"use_captcha\", False)\n# Seems to be used by `django-recaptcha` (what a poorly-namespaced\n# 
setting!).\n# Set to `True` to use the \u201cNo Captcha\u201d engine instead of the old API.\nNOCAPTCHA = True\nRECAPTCHA_USE_SSL = True\nRECAPTCHA_PUBLIC_KEY = recaptcha_config.get(\"public_key\", \"dummy\")\nRECAPTCHA_PRIVATE_KEY = recaptcha_config.get(\"private_key\", \"dummy\")\n\n\nOAUTH2_PROVIDER = {\"OAUTH2_BACKEND_CLASS\": \"oauth2_provider.oauth2_backends.JSONOAuthLibCore\"}\n", "path": "zds/settings/abstract_base/requirements.py"}]}
num_tokens_prompt: 2971
num_tokens_diff: 523

problem_id: gh_patches_debug_33506
source: rasdani/github-patches
task_type: git_diff
in_source_id: optuna__optuna-1285
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> `experimental` decorator breaks class documentation. The `experimental` decorator used on classes break documentation. This could be one manifestation but there is an issue with how the documentation including type hints are propagated to the decorated class. This does not apply for free functions. See https://github.com/optuna/optuna/pull/1265#issuecomment-633195955 for how it may break. ## Expected behavior Class documentation should not be altered by applying the experimental decorator. ## Steps to reproduce 1. Apply the experimental decorator to a class. 1. Build the document (`cd docs && make html`) 1. Open the rendered documentation and note that the class signatures is broken. ## Additional context (optional) - An issue regarding the indentation https://github.com/optuna/optuna/issues/1213. </issue> <code> [start of optuna/_experimental.py] 1 import functools 2 import inspect 3 from typing import Any 4 from typing import Callable 5 import warnings 6 7 from optuna.exceptions import ExperimentalWarning 8 9 10 # White spaces of each line are necessary to beautifully rendered documentation. 11 # NOTE(crcrpar): When `experimental` decorator is applied to member methods, these lines require 12 # another four spaces. 13 _EXPERIMENTAL_DOCSTRING_TEMPLATE = """ 14 15 .. note:: 16 Added in v{ver} as an experimental feature. The interface may change in newer versions 17 without prior notice. See https://github.com/optuna/optuna/releases/tag/v{ver}. 18 """ 19 20 21 def _make_func_spec_str(func: Callable[..., Any]) -> str: 22 23 name = func.__name__ 24 argspec = inspect.getfullargspec(func) 25 26 n_defaults = len(argspec.defaults) if argspec.defaults is not None else 0 27 offset = int(len(argspec.args) > 0 and argspec.args[0] == "self") 28 29 if n_defaults > 0: 30 args = ", ".join(argspec.args[offset:-n_defaults]) 31 with_default_values = ", ".join( 32 [ 33 "{}={}".format(a, d) 34 for a, d in zip(argspec.args[-n_defaults:], argspec.defaults) # type: ignore 35 ] 36 ) 37 else: 38 args = ", ".join(argspec.args[offset:]) 39 with_default_values = "" 40 41 if len(args) > 0 and len(with_default_values) > 0: 42 args += ", " 43 44 # NOTE(crcrpar): The four spaces are necessary to correctly render documentation. 45 # Different classes or methods require more spaces. 46 str_args_description = "(" + args + with_default_values + ")\n\n " 47 return name + str_args_description 48 49 50 def _validate_version(version: str) -> None: 51 52 if not isinstance(version, str) or len(version.split(".")) != 3: 53 raise ValueError( 54 "Invalid version specification. Must follow `x.y.z` format but `{}` is given".format( 55 version 56 ) 57 ) 58 59 60 def experimental(version: str, name: str = None) -> Any: 61 """Decorate class or function as experimental. 62 63 Args: 64 version: The first version that supports the target feature. 65 name: The name of the feature. Defaults to the function or class name. Optional. 66 """ 67 68 _validate_version(version) 69 70 def _experimental_wrapper(f: Any) -> Any: 71 # f is either func or class. 72 73 def _experimental_func(func: Callable[[Any], Any]) -> Callable[[Any], Any]: 74 75 docstring = _EXPERIMENTAL_DOCSTRING_TEMPLATE.format(ver=version) 76 if func.__doc__ is None: 77 func.__doc__ = "" 78 func.__doc__ += docstring 79 80 # TODO(crcrpar): Annotate this correctly. 
81 @functools.wraps(func) 82 def new_func(*args: Any, **kwargs: Any) -> Any: 83 """Wrapped function.""" 84 85 warnings.warn( 86 "{} is experimental (supported from v{}). " 87 "The interface can change in the future.".format( 88 name if name is not None else func.__name__, version 89 ), 90 ExperimentalWarning, 91 ) 92 93 return func(*args, **kwargs) # type: ignore 94 95 return new_func 96 97 def _experimental_class(cls: Any) -> Any: 98 """Decorates a class as experimental. 99 100 This decorator is supposed to be applied to the experimental class. 101 """ 102 103 _original_init = cls.__init__ 104 105 def wrapped_init(self, *args, **kwargs) -> None: # type: ignore 106 warnings.warn( 107 "{} is experimental (supported from v{}). " 108 "The interface can change in the future.".format( 109 name if name is not None else cls.__name__, version 110 ), 111 ExperimentalWarning, 112 ) 113 114 _original_init(self, *args, **kwargs) 115 116 cls.__init__ = wrapped_init 117 118 if cls.__doc__ is None: 119 cls.__doc__ = "" 120 cls.__doc__ = ( 121 _make_func_spec_str(_original_init) 122 + cls.__doc__ 123 + _EXPERIMENTAL_DOCSTRING_TEMPLATE.format(ver=version) 124 ) 125 return cls 126 127 return _experimental_class(f) if inspect.isclass(f) else _experimental_func(f) 128 129 return _experimental_wrapper 130 [end of optuna/_experimental.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/optuna/_experimental.py b/optuna/_experimental.py --- a/optuna/_experimental.py +++ b/optuna/_experimental.py @@ -18,35 +18,6 @@ """ -def _make_func_spec_str(func: Callable[..., Any]) -> str: - - name = func.__name__ - argspec = inspect.getfullargspec(func) - - n_defaults = len(argspec.defaults) if argspec.defaults is not None else 0 - offset = int(len(argspec.args) > 0 and argspec.args[0] == "self") - - if n_defaults > 0: - args = ", ".join(argspec.args[offset:-n_defaults]) - with_default_values = ", ".join( - [ - "{}={}".format(a, d) - for a, d in zip(argspec.args[-n_defaults:], argspec.defaults) # type: ignore - ] - ) - else: - args = ", ".join(argspec.args[offset:]) - with_default_values = "" - - if len(args) > 0 and len(with_default_values) > 0: - args += ", " - - # NOTE(crcrpar): The four spaces are necessary to correctly render documentation. - # Different classes or methods require more spaces. - str_args_description = "(" + args + with_default_values + ")\n\n " - return name + str_args_description - - def _validate_version(version: str) -> None: if not isinstance(version, str) or len(version.split(".")) != 3: @@ -102,6 +73,7 @@ _original_init = cls.__init__ + @functools.wraps(_original_init) def wrapped_init(self, *args, **kwargs) -> None: # type: ignore warnings.warn( "{} is experimental (supported from v{}). " @@ -117,11 +89,7 @@ if cls.__doc__ is None: cls.__doc__ = "" - cls.__doc__ = ( - _make_func_spec_str(_original_init) - + cls.__doc__ - + _EXPERIMENTAL_DOCSTRING_TEMPLATE.format(ver=version) - ) + cls.__doc__ += _EXPERIMENTAL_DOCSTRING_TEMPLATE.format(ver=version) return cls return _experimental_class(f) if inspect.isclass(f) else _experimental_func(f)
{"golden_diff": "diff --git a/optuna/_experimental.py b/optuna/_experimental.py\n--- a/optuna/_experimental.py\n+++ b/optuna/_experimental.py\n@@ -18,35 +18,6 @@\n \"\"\"\n \n \n-def _make_func_spec_str(func: Callable[..., Any]) -> str:\n-\n- name = func.__name__\n- argspec = inspect.getfullargspec(func)\n-\n- n_defaults = len(argspec.defaults) if argspec.defaults is not None else 0\n- offset = int(len(argspec.args) > 0 and argspec.args[0] == \"self\")\n-\n- if n_defaults > 0:\n- args = \", \".join(argspec.args[offset:-n_defaults])\n- with_default_values = \", \".join(\n- [\n- \"{}={}\".format(a, d)\n- for a, d in zip(argspec.args[-n_defaults:], argspec.defaults) # type: ignore\n- ]\n- )\n- else:\n- args = \", \".join(argspec.args[offset:])\n- with_default_values = \"\"\n-\n- if len(args) > 0 and len(with_default_values) > 0:\n- args += \", \"\n-\n- # NOTE(crcrpar): The four spaces are necessary to correctly render documentation.\n- # Different classes or methods require more spaces.\n- str_args_description = \"(\" + args + with_default_values + \")\\n\\n \"\n- return name + str_args_description\n-\n-\n def _validate_version(version: str) -> None:\n \n if not isinstance(version, str) or len(version.split(\".\")) != 3:\n@@ -102,6 +73,7 @@\n \n _original_init = cls.__init__\n \n+ @functools.wraps(_original_init)\n def wrapped_init(self, *args, **kwargs) -> None: # type: ignore\n warnings.warn(\n \"{} is experimental (supported from v{}). \"\n@@ -117,11 +89,7 @@\n \n if cls.__doc__ is None:\n cls.__doc__ = \"\"\n- cls.__doc__ = (\n- _make_func_spec_str(_original_init)\n- + cls.__doc__\n- + _EXPERIMENTAL_DOCSTRING_TEMPLATE.format(ver=version)\n- )\n+ cls.__doc__ += _EXPERIMENTAL_DOCSTRING_TEMPLATE.format(ver=version)\n return cls\n \n return _experimental_class(f) if inspect.isclass(f) else _experimental_func(f)\n", "issue": "`experimental` decorator breaks class documentation.\nThe `experimental` decorator used on classes break documentation. This could be one manifestation but there is an issue with how the documentation including type hints are propagated to the decorated class. This does not apply for free functions.\r\n\r\nSee https://github.com/optuna/optuna/pull/1265#issuecomment-633195955 for how it may break.\r\n\r\n## Expected behavior\r\n\r\nClass documentation should not be altered by applying the experimental decorator.\r\n\r\n## Steps to reproduce\r\n\r\n1. Apply the experimental decorator to a class.\r\n1. Build the document (`cd docs && make html`) \r\n1. Open the rendered documentation and note that the class signatures is broken.\r\n\r\n## Additional context (optional)\r\n\r\n- An issue regarding the indentation https://github.com/optuna/optuna/issues/1213.\r\n\n", "before_files": [{"content": "import functools\nimport inspect\nfrom typing import Any\nfrom typing import Callable\nimport warnings\n\nfrom optuna.exceptions import ExperimentalWarning\n\n\n# White spaces of each line are necessary to beautifully rendered documentation.\n# NOTE(crcrpar): When `experimental` decorator is applied to member methods, these lines require\n# another four spaces.\n_EXPERIMENTAL_DOCSTRING_TEMPLATE = \"\"\"\n\n .. note::\n Added in v{ver} as an experimental feature. The interface may change in newer versions\n without prior notice. 
See https://github.com/optuna/optuna/releases/tag/v{ver}.\n\"\"\"\n\n\ndef _make_func_spec_str(func: Callable[..., Any]) -> str:\n\n name = func.__name__\n argspec = inspect.getfullargspec(func)\n\n n_defaults = len(argspec.defaults) if argspec.defaults is not None else 0\n offset = int(len(argspec.args) > 0 and argspec.args[0] == \"self\")\n\n if n_defaults > 0:\n args = \", \".join(argspec.args[offset:-n_defaults])\n with_default_values = \", \".join(\n [\n \"{}={}\".format(a, d)\n for a, d in zip(argspec.args[-n_defaults:], argspec.defaults) # type: ignore\n ]\n )\n else:\n args = \", \".join(argspec.args[offset:])\n with_default_values = \"\"\n\n if len(args) > 0 and len(with_default_values) > 0:\n args += \", \"\n\n # NOTE(crcrpar): The four spaces are necessary to correctly render documentation.\n # Different classes or methods require more spaces.\n str_args_description = \"(\" + args + with_default_values + \")\\n\\n \"\n return name + str_args_description\n\n\ndef _validate_version(version: str) -> None:\n\n if not isinstance(version, str) or len(version.split(\".\")) != 3:\n raise ValueError(\n \"Invalid version specification. Must follow `x.y.z` format but `{}` is given\".format(\n version\n )\n )\n\n\ndef experimental(version: str, name: str = None) -> Any:\n \"\"\"Decorate class or function as experimental.\n\n Args:\n version: The first version that supports the target feature.\n name: The name of the feature. Defaults to the function or class name. Optional.\n \"\"\"\n\n _validate_version(version)\n\n def _experimental_wrapper(f: Any) -> Any:\n # f is either func or class.\n\n def _experimental_func(func: Callable[[Any], Any]) -> Callable[[Any], Any]:\n\n docstring = _EXPERIMENTAL_DOCSTRING_TEMPLATE.format(ver=version)\n if func.__doc__ is None:\n func.__doc__ = \"\"\n func.__doc__ += docstring\n\n # TODO(crcrpar): Annotate this correctly.\n @functools.wraps(func)\n def new_func(*args: Any, **kwargs: Any) -> Any:\n \"\"\"Wrapped function.\"\"\"\n\n warnings.warn(\n \"{} is experimental (supported from v{}). \"\n \"The interface can change in the future.\".format(\n name if name is not None else func.__name__, version\n ),\n ExperimentalWarning,\n )\n\n return func(*args, **kwargs) # type: ignore\n\n return new_func\n\n def _experimental_class(cls: Any) -> Any:\n \"\"\"Decorates a class as experimental.\n\n This decorator is supposed to be applied to the experimental class.\n \"\"\"\n\n _original_init = cls.__init__\n\n def wrapped_init(self, *args, **kwargs) -> None: # type: ignore\n warnings.warn(\n \"{} is experimental (supported from v{}). \"\n \"The interface can change in the future.\".format(\n name if name is not None else cls.__name__, version\n ),\n ExperimentalWarning,\n )\n\n _original_init(self, *args, **kwargs)\n\n cls.__init__ = wrapped_init\n\n if cls.__doc__ is None:\n cls.__doc__ = \"\"\n cls.__doc__ = (\n _make_func_spec_str(_original_init)\n + cls.__doc__\n + _EXPERIMENTAL_DOCSTRING_TEMPLATE.format(ver=version)\n )\n return cls\n\n return _experimental_class(f) if inspect.isclass(f) else _experimental_func(f)\n\n return _experimental_wrapper\n", "path": "optuna/_experimental.py"}]}
num_tokens_prompt: 1938
num_tokens_diff: 540

problem_id: gh_patches_debug_25202
source: rasdani/github-patches
task_type: git_diff
in_source_id: opsdroid__opsdroid-12
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Connectors should fork When a connector is started it should fork into its own process. This is because connectors block to accept messages from their source. This requires #5 to enable persistent memory between connector processes. </issue> <code> [start of opsdroid/core.py] 1 """Core components of OpsDroid.""" 2 3 import logging 4 import sys 5 import weakref 6 from opsdroid.helper import match 7 from opsdroid.memory import Memory 8 9 10 class OpsDroid(): 11 """Root object for opsdroid.""" 12 13 instances = [] 14 15 def __init__(self): 16 """Start opsdroid.""" 17 self.bot_name = 'opsdroid' 18 self.sys_status = 0 19 self.connectors = [] 20 self.skills = [] 21 self.memory = Memory() 22 logging.info("Created main opsdroid object") 23 24 def __enter__(self): 25 """Add self to existing instances.""" 26 if len(self.__class__.instances) == 0: 27 self.__class__.instances.append(weakref.proxy(self)) 28 else: 29 self.critical("opsdroid has already been started", 1) 30 return self 31 32 def __exit__(self, exc_type, exc_value, traceback): 33 """Remove self from existing instances.""" 34 self.__class__.instances = [] 35 36 def exit(self): 37 """Exit application.""" 38 logging.info("Exiting application with return code " + 39 str(self.sys_status)) 40 sys.exit(self.sys_status) 41 42 def critical(self, error, code): 43 """Exit due to unrecoverable error.""" 44 self.sys_status = code 45 logging.critical(error) 46 print("Error: " + error) 47 self.exit() 48 49 def start_connectors(self, connectors): 50 """Start the connectors.""" 51 if len(connectors) == 0: 52 self.critical("All connectors failed to load", 1) 53 for connector_module in connectors: 54 for name, cls in connector_module["module"].__dict__.items(): 55 if isinstance(cls, type) and "Connector" in name: 56 connector_module["config"]["bot-name"] = self.bot_name 57 connector = cls(connector_module["config"]) 58 self.connectors.append(connector) 59 connector.connect(self) 60 61 def start_databases(self, databases): 62 """Start the databases.""" 63 if len(databases) == 0: 64 logging.warning("All databases failed to load") 65 for database_module in databases: 66 for name, cls in database_module["module"].__dict__.items(): 67 if isinstance(cls, type) and "Database" in name: 68 logging.debug("Adding database: " + name) 69 database = cls(database_module["config"]) 70 self.memory.databases.append(database) 71 database.connect() 72 73 def load_regex_skill(self, regex, skill): 74 """Load skills.""" 75 self.skills.append({"regex": regex, "skill": skill}) 76 77 def parse(self, message): 78 """Parse a string against all skills.""" 79 if message.text.strip() != "": 80 logging.debug("Parsing input: " + message.text) 81 for skill in self.skills: 82 if "regex" in skill: 83 regex = match(skill["regex"], message.text) 84 if regex: 85 message.regex = regex 86 skill["skill"](self, message) 87 [end of opsdroid/core.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/opsdroid/core.py b/opsdroid/core.py --- a/opsdroid/core.py +++ b/opsdroid/core.py @@ -3,6 +3,7 @@ import logging import sys import weakref +from multiprocessing import Process from opsdroid.helper import match from opsdroid.memory import Memory @@ -17,6 +18,7 @@ self.bot_name = 'opsdroid' self.sys_status = 0 self.connectors = [] + self.connector_jobs = [] self.skills = [] self.memory = Memory() logging.info("Created main opsdroid object") @@ -56,7 +58,11 @@ connector_module["config"]["bot-name"] = self.bot_name connector = cls(connector_module["config"]) self.connectors.append(connector) - connector.connect(self) + job = Process(target=connector.connect, args=(self,)) + job.start() + self.connector_jobs.append(job) + for job in self.connector_jobs: + job.join() def start_databases(self, databases): """Start the databases."""
{"golden_diff": "diff --git a/opsdroid/core.py b/opsdroid/core.py\n--- a/opsdroid/core.py\n+++ b/opsdroid/core.py\n@@ -3,6 +3,7 @@\n import logging\n import sys\n import weakref\n+from multiprocessing import Process\n from opsdroid.helper import match\n from opsdroid.memory import Memory\n \n@@ -17,6 +18,7 @@\n self.bot_name = 'opsdroid'\n self.sys_status = 0\n self.connectors = []\n+ self.connector_jobs = []\n self.skills = []\n self.memory = Memory()\n logging.info(\"Created main opsdroid object\")\n@@ -56,7 +58,11 @@\n connector_module[\"config\"][\"bot-name\"] = self.bot_name\n connector = cls(connector_module[\"config\"])\n self.connectors.append(connector)\n- connector.connect(self)\n+ job = Process(target=connector.connect, args=(self,))\n+ job.start()\n+ self.connector_jobs.append(job)\n+ for job in self.connector_jobs:\n+ job.join()\n \n def start_databases(self, databases):\n \"\"\"Start the databases.\"\"\"\n", "issue": "Connectors should fork\nWhen a connector is started it should fork into its own process. This is because connectors block to accept messages from their source.\n\nThis requires #5 to enable persistent memory between connector processes.\n\n", "before_files": [{"content": "\"\"\"Core components of OpsDroid.\"\"\"\n\nimport logging\nimport sys\nimport weakref\nfrom opsdroid.helper import match\nfrom opsdroid.memory import Memory\n\n\nclass OpsDroid():\n \"\"\"Root object for opsdroid.\"\"\"\n\n instances = []\n\n def __init__(self):\n \"\"\"Start opsdroid.\"\"\"\n self.bot_name = 'opsdroid'\n self.sys_status = 0\n self.connectors = []\n self.skills = []\n self.memory = Memory()\n logging.info(\"Created main opsdroid object\")\n\n def __enter__(self):\n \"\"\"Add self to existing instances.\"\"\"\n if len(self.__class__.instances) == 0:\n self.__class__.instances.append(weakref.proxy(self))\n else:\n self.critical(\"opsdroid has already been started\", 1)\n return self\n\n def __exit__(self, exc_type, exc_value, traceback):\n \"\"\"Remove self from existing instances.\"\"\"\n self.__class__.instances = []\n\n def exit(self):\n \"\"\"Exit application.\"\"\"\n logging.info(\"Exiting application with return code \" +\n str(self.sys_status))\n sys.exit(self.sys_status)\n\n def critical(self, error, code):\n \"\"\"Exit due to unrecoverable error.\"\"\"\n self.sys_status = code\n logging.critical(error)\n print(\"Error: \" + error)\n self.exit()\n\n def start_connectors(self, connectors):\n \"\"\"Start the connectors.\"\"\"\n if len(connectors) == 0:\n self.critical(\"All connectors failed to load\", 1)\n for connector_module in connectors:\n for name, cls in connector_module[\"module\"].__dict__.items():\n if isinstance(cls, type) and \"Connector\" in name:\n connector_module[\"config\"][\"bot-name\"] = self.bot_name\n connector = cls(connector_module[\"config\"])\n self.connectors.append(connector)\n connector.connect(self)\n\n def start_databases(self, databases):\n \"\"\"Start the databases.\"\"\"\n if len(databases) == 0:\n logging.warning(\"All databases failed to load\")\n for database_module in databases:\n for name, cls in database_module[\"module\"].__dict__.items():\n if isinstance(cls, type) and \"Database\" in name:\n logging.debug(\"Adding database: \" + name)\n database = cls(database_module[\"config\"])\n self.memory.databases.append(database)\n database.connect()\n\n def load_regex_skill(self, regex, skill):\n \"\"\"Load skills.\"\"\"\n self.skills.append({\"regex\": regex, \"skill\": skill})\n\n def parse(self, message):\n \"\"\"Parse a string against 
all skills.\"\"\"\n if message.text.strip() != \"\":\n logging.debug(\"Parsing input: \" + message.text)\n for skill in self.skills:\n if \"regex\" in skill:\n regex = match(skill[\"regex\"], message.text)\n if regex:\n message.regex = regex\n skill[\"skill\"](self, message)\n", "path": "opsdroid/core.py"}]}
num_tokens_prompt: 1369
num_tokens_diff: 251

problem_id: gh_patches_debug_65506
source: rasdani/github-patches
task_type: git_diff
in_source_id: localstack__localstack-1842
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Elasticsearch domain managed by Terraform cannot be updated; request for /tags/? returns 404 <!-- Love localstack? Please consider supporting our collective: 👉 https://opencollective.com/localstack/donate --> We use Terraform to create and update resources in Localstack, which has worked for services like S3 and Dynamo so far. We hit an issue with Elasticsearch domains, where the domain is created successfully but Terraform fails to apply in subsequent runs, when it makes a request to: ``` logs: ---[ REQUEST POST-SIGN ]----------------------------- logs: GET /2015-01-01/tags/?arn=arn%3Aaws%3Aes%3Aus-east-1%3A000000000000%3Adomain%2Fepdam-local-amd HTTP/1.1 logs: Host: localhost:4578 logs: User-Agent: aws-sdk-go/1.14.31 (go1.9.2; darwin; amd64) APN/1.0 HashiCorp/1.0 Terraform/0.11.8-dev logs: Authorization: AWS4-HMAC-SHA256 Credential=mock_access_key/20190221/us-west-2/es/aws4_request, SignedHeaders=host;x-amz-date, Signature=26f42429e2af2240466635ab9202c8888617afe9be7b8ef91a8831d6b4160bd1 logs: X-Amz-Date: 20190221T191447Z logs: Accept-Encoding: gzip ``` and the response is: ``` logs: ---[ RESPONSE ]-------------------------------------- logs: HTTP/1.0 404 NOT FOUND logs: Connection: close logs: Content-Length: 233 logs: Access-Control-Allow-Origin: * logs: Content-Type: text/html logs: Date: Thu, 21 Feb 2019 19:14:47 GMT logs: Server: Werkzeug/0.14.1 Python/2.7.15 ``` While a request to `localhost:4578/2015-01-01/tags/?arn=...` gets 404, a request to `localhost:4578/2015-01-01/tags?arn=...`, (without the `/` before the query params), is successful. The reason we are reporting this against Localstack and not [terraform](https://github.com/hashicorp/terraform) or [terraform-provider-aws](https://github.com/terraform-providers/terraform-provider-aws) is that the AWS REST API apparently supports requests with slashes before query parameters, or else Terraform could not be used to manage Elasticsearch domains in AWS. </issue> <code> [start of localstack/services/es/es_api.py] 1 import json 2 import time 3 from random import randint 4 from flask import Flask, jsonify, request, make_response 5 from localstack.services import generic_proxy 6 from localstack.utils.aws import aws_stack 7 from localstack.constants import TEST_AWS_ACCOUNT_ID 8 from localstack.utils.common import to_str 9 from localstack.utils.analytics import event_publisher 10 11 APP_NAME = 'es_api' 12 API_PREFIX = '/2015-01-01' 13 14 ES_DOMAINS = {} 15 16 app = Flask(APP_NAME) 17 18 19 def error_response(error_type, code=400, message='Unknown error.'): 20 if not message: 21 if error_type == 'ResourceNotFoundException': 22 message = 'Resource not found.' 23 elif error_type == 'ResourceAlreadyExistsException': 24 message = 'Resource already exists.' 
25 response = make_response(jsonify({'error': message})) 26 response.headers['x-amzn-errortype'] = error_type 27 return response, code 28 29 30 def get_domain_config_status(): 31 return { 32 'CreationDate': '%.2f' % time.time(), 33 'PendingDeletion': False, 34 'State': 'Active', 35 'UpdateDate': '%.2f' % time.time(), 36 'UpdateVersion': randint(1, 100) 37 } 38 39 40 def get_domain_config(domain_name): 41 config_status = get_domain_config_status() 42 return { 43 'DomainConfig': { 44 'AccessPolicies': { 45 'Options': '{"Version":"2012-10-17","Statement":[{"Effect":"Allow","Principal":{"AWS":"arn:aws:iam::%s:root"},"Action":"es:*","Resource":"arn:aws:es:%s:%s:domain/%s/*"}]}' % (TEST_AWS_ACCOUNT_ID, aws_stack.get_region(), TEST_AWS_ACCOUNT_ID, domain_name), # noqa: E501 46 'Status': config_status 47 }, 48 'AdvancedOptions': { 49 'Options': { 50 'indices.fielddata.cache.size': '', 51 'rest.action.multi.allow_explicit_index': 'true' 52 }, 53 'Status': config_status 54 }, 55 'EBSOptions': { 56 'Options': { 57 'EBSEnabled': True, 58 'EncryptionEnabled': False, 59 'Iops': 0, 60 'VolumeSize': 10, 61 'VolumeType': 'gp2' 62 }, 63 'Status': config_status 64 }, 65 'ElasticsearchClusterConfig': { 66 'Options': { 67 'DedicatedMasterCount': 1, 68 'DedicatedMasterEnabled': True, 69 'DedicatedMasterType': 'm3.medium.elasticsearch', 70 'InstanceCount': 1, 71 'InstanceType': 'm3.medium.elasticsearch', 72 'ZoneAwarenessEnabled': False 73 }, 74 'Status': config_status 75 }, 76 'ElasticsearchVersion': { 77 'Options': '5.3', 78 'Status': config_status 79 }, 80 'EncryptionAtRestOptions': { 81 'Options': { 82 'Enabled': False, 83 'KmsKeyId': '' 84 }, 85 'Status': config_status 86 }, 87 'LogPublishingOptions': { 88 'Options': { 89 'INDEX_SLOW_LOGS': { 90 'CloudWatchLogsLogGroupArn': 'arn:aws:logs:%s:%s:log-group:sample-domain' % (aws_stack.get_region(), TEST_AWS_ACCOUNT_ID), # noqa: E501 91 'Enabled': False 92 }, 93 'SEARCH_SLOW_LOGS': { 94 'CloudWatchLogsLogGroupArn': 'arn:aws:logs:%s:%s:log-group:sample-domain' % (aws_stack.get_region(), TEST_AWS_ACCOUNT_ID), # noqa: E501 95 'Enabled': False, 96 } 97 }, 98 'Status': config_status 99 }, 100 'SnapshotOptions': { 101 'Options': { 102 'AutomatedSnapshotStartHour': randint(0, 23) 103 }, 104 'Status': config_status 105 }, 106 'VPCOptions': { 107 'Options': { 108 'AvailabilityZones': [ 109 'us-east-1b' 110 ], 111 'SecurityGroupIds': [ 112 'sg-12345678' 113 ], 114 'SubnetIds': [ 115 'subnet-12345678' 116 ], 117 'VPCId': 'vpc-12345678' 118 }, 119 'Status': config_status 120 } 121 } 122 } 123 124 125 def get_domain_status(domain_name, deleted=False): 126 return { 127 'DomainStatus': { 128 'ARN': 'arn:aws:es:%s:%s:domain/%s' % (aws_stack.get_region(), TEST_AWS_ACCOUNT_ID, domain_name), 129 'Created': True, 130 'Deleted': deleted, 131 'DomainId': '%s/%s' % (TEST_AWS_ACCOUNT_ID, domain_name), 132 'DomainName': domain_name, 133 'ElasticsearchClusterConfig': { 134 'DedicatedMasterCount': 1, 135 'DedicatedMasterEnabled': True, 136 'DedicatedMasterType': 'm3.medium.elasticsearch', 137 'InstanceCount': 1, 138 'InstanceType': 'm3.medium.elasticsearch', 139 'ZoneAwarenessEnabled': False 140 }, 141 'ElasticsearchVersion': '6.7', 142 'Endpoint': aws_stack.get_elasticsearch_endpoint(domain_name), 143 'Processing': False, 144 'EBSOptions': { 145 'EBSEnabled': True, 146 'VolumeType': 'gp2', 147 'VolumeSize': 10, 148 'Iops': 0 149 }, 150 } 151 } 152 153 154 @app.route('%s/domain' % API_PREFIX, methods=['GET']) 155 def list_domain_names(): 156 result = { 157 'DomainNames': [{'DomainName': 
name} for name in ES_DOMAINS.keys()] 158 } 159 return jsonify(result) 160 161 162 @app.route('%s/es/domain' % API_PREFIX, methods=['POST']) 163 def create_domain(): 164 data = json.loads(to_str(request.data)) 165 domain_name = data['DomainName'] 166 if domain_name in ES_DOMAINS: 167 return error_response(error_type='ResourceAlreadyExistsException') 168 ES_DOMAINS[domain_name] = data 169 result = get_domain_status(domain_name) 170 # record event 171 event_publisher.fire_event(event_publisher.EVENT_ES_CREATE_DOMAIN, 172 payload={'n': event_publisher.get_hash(domain_name)}) 173 return jsonify(result) 174 175 176 @app.route('%s/es/domain/<domain_name>' % API_PREFIX, methods=['GET']) 177 def describe_domain(domain_name): 178 if domain_name not in ES_DOMAINS: 179 return error_response(error_type='ResourceNotFoundException') 180 result = get_domain_status(domain_name) 181 return jsonify(result) 182 183 184 @app.route('%s/es/domain/<domain_name>/config' % API_PREFIX, methods=['GET', 'POST']) 185 def domain_config(domain_name): 186 config = get_domain_config(domain_name) 187 return jsonify(config) 188 189 190 @app.route('%s/es/domain/<domain_name>' % API_PREFIX, methods=['DELETE']) 191 def delete_domain(domain_name): 192 if domain_name not in ES_DOMAINS: 193 return error_response(error_type='ResourceNotFoundException') 194 result = get_domain_status(domain_name, deleted=True) 195 ES_DOMAINS.pop(domain_name) 196 # record event 197 event_publisher.fire_event(event_publisher.EVENT_ES_DELETE_DOMAIN, 198 payload={'n': event_publisher.get_hash(domain_name)}) 199 return jsonify(result) 200 201 202 @app.route('%s/tags' % API_PREFIX, methods=['GET', 'POST']) 203 def add_list_tags(): 204 if request.method == 'GET' and request.args.get('arn'): 205 response = { 206 'TagList': [ 207 { 208 'Key': 'Example1', 209 'Value': 'Value' 210 }, 211 { 212 'Key': 'Example2', 213 'Value': 'Value' 214 } 215 ] 216 } 217 return jsonify(response) 218 219 return jsonify({}) 220 221 222 def serve(port, quiet=True): 223 generic_proxy.serve_flask_app(app=app, port=port, quiet=quiet) 224 [end of localstack/services/es/es_api.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/localstack/services/es/es_api.py b/localstack/services/es/es_api.py --- a/localstack/services/es/es_api.py +++ b/localstack/services/es/es_api.py @@ -14,6 +14,7 @@ ES_DOMAINS = {} app = Flask(APP_NAME) +app.url_map.strict_slashes = False def error_response(error_type, code=400, message='Unknown error.'):
{"golden_diff": "diff --git a/localstack/services/es/es_api.py b/localstack/services/es/es_api.py\n--- a/localstack/services/es/es_api.py\n+++ b/localstack/services/es/es_api.py\n@@ -14,6 +14,7 @@\n ES_DOMAINS = {}\n \n app = Flask(APP_NAME)\n+app.url_map.strict_slashes = False\n \n \n def error_response(error_type, code=400, message='Unknown error.'):\n", "issue": "Elasticsearch domain managed by Terraform cannot be updated; request for /tags/? returns 404\n<!-- Love localstack? Please consider supporting our collective:\r\n\ud83d\udc49 https://opencollective.com/localstack/donate -->\r\n\r\nWe use Terraform to create and update resources in Localstack, which has worked for services like S3 and Dynamo so far.\r\n\r\nWe hit an issue with Elasticsearch domains, where the domain is created successfully but Terraform fails to apply in subsequent runs, when it makes a request to:\r\n\r\n```\r\nlogs: ---[ REQUEST POST-SIGN ]-----------------------------\r\nlogs: GET /2015-01-01/tags/?arn=arn%3Aaws%3Aes%3Aus-east-1%3A000000000000%3Adomain%2Fepdam-local-amd HTTP/1.1\r\nlogs: Host: localhost:4578\r\nlogs: User-Agent: aws-sdk-go/1.14.31 (go1.9.2; darwin; amd64) APN/1.0 HashiCorp/1.0 Terraform/0.11.8-dev\r\nlogs: Authorization: AWS4-HMAC-SHA256 Credential=mock_access_key/20190221/us-west-2/es/aws4_request, SignedHeaders=host;x-amz-date, Signature=26f42429e2af2240466635ab9202c8888617afe9be7b8ef91a8831d6b4160bd1\r\nlogs: X-Amz-Date: 20190221T191447Z\r\nlogs: Accept-Encoding: gzip\r\n```\r\n\r\nand the response is:\r\n\r\n```\r\nlogs: ---[ RESPONSE ]--------------------------------------\r\nlogs: HTTP/1.0 404 NOT FOUND\r\nlogs: Connection: close\r\nlogs: Content-Length: 233\r\nlogs: Access-Control-Allow-Origin: *\r\nlogs: Content-Type: text/html\r\nlogs: Date: Thu, 21 Feb 2019 19:14:47 GMT\r\nlogs: Server: Werkzeug/0.14.1 Python/2.7.15\r\n```\r\n\r\nWhile a request to `localhost:4578/2015-01-01/tags/?arn=...` gets 404, a request to `localhost:4578/2015-01-01/tags?arn=...`, (without the `/` before the query params), is successful.\r\n\r\nThe reason we are reporting this against Localstack and not [terraform](https://github.com/hashicorp/terraform) or [terraform-provider-aws](https://github.com/terraform-providers/terraform-provider-aws) is that the AWS REST API apparently supports requests with slashes before query parameters, or else Terraform could not be used to manage Elasticsearch domains in AWS.\n", "before_files": [{"content": "import json\nimport time\nfrom random import randint\nfrom flask import Flask, jsonify, request, make_response\nfrom localstack.services import generic_proxy\nfrom localstack.utils.aws import aws_stack\nfrom localstack.constants import TEST_AWS_ACCOUNT_ID\nfrom localstack.utils.common import to_str\nfrom localstack.utils.analytics import event_publisher\n\nAPP_NAME = 'es_api'\nAPI_PREFIX = '/2015-01-01'\n\nES_DOMAINS = {}\n\napp = Flask(APP_NAME)\n\n\ndef error_response(error_type, code=400, message='Unknown error.'):\n if not message:\n if error_type == 'ResourceNotFoundException':\n message = 'Resource not found.'\n elif error_type == 'ResourceAlreadyExistsException':\n message = 'Resource already exists.'\n response = make_response(jsonify({'error': message}))\n response.headers['x-amzn-errortype'] = error_type\n return response, code\n\n\ndef get_domain_config_status():\n return {\n 'CreationDate': '%.2f' % time.time(),\n 'PendingDeletion': False,\n 'State': 'Active',\n 'UpdateDate': '%.2f' % time.time(),\n 'UpdateVersion': randint(1, 100)\n }\n\n\ndef 
get_domain_config(domain_name):\n config_status = get_domain_config_status()\n return {\n 'DomainConfig': {\n 'AccessPolicies': {\n 'Options': '{\"Version\":\"2012-10-17\",\"Statement\":[{\"Effect\":\"Allow\",\"Principal\":{\"AWS\":\"arn:aws:iam::%s:root\"},\"Action\":\"es:*\",\"Resource\":\"arn:aws:es:%s:%s:domain/%s/*\"}]}' % (TEST_AWS_ACCOUNT_ID, aws_stack.get_region(), TEST_AWS_ACCOUNT_ID, domain_name), # noqa: E501\n 'Status': config_status\n },\n 'AdvancedOptions': {\n 'Options': {\n 'indices.fielddata.cache.size': '',\n 'rest.action.multi.allow_explicit_index': 'true'\n },\n 'Status': config_status\n },\n 'EBSOptions': {\n 'Options': {\n 'EBSEnabled': True,\n 'EncryptionEnabled': False,\n 'Iops': 0,\n 'VolumeSize': 10,\n 'VolumeType': 'gp2'\n },\n 'Status': config_status\n },\n 'ElasticsearchClusterConfig': {\n 'Options': {\n 'DedicatedMasterCount': 1,\n 'DedicatedMasterEnabled': True,\n 'DedicatedMasterType': 'm3.medium.elasticsearch',\n 'InstanceCount': 1,\n 'InstanceType': 'm3.medium.elasticsearch',\n 'ZoneAwarenessEnabled': False\n },\n 'Status': config_status\n },\n 'ElasticsearchVersion': {\n 'Options': '5.3',\n 'Status': config_status\n },\n 'EncryptionAtRestOptions': {\n 'Options': {\n 'Enabled': False,\n 'KmsKeyId': ''\n },\n 'Status': config_status\n },\n 'LogPublishingOptions': {\n 'Options': {\n 'INDEX_SLOW_LOGS': {\n 'CloudWatchLogsLogGroupArn': 'arn:aws:logs:%s:%s:log-group:sample-domain' % (aws_stack.get_region(), TEST_AWS_ACCOUNT_ID), # noqa: E501\n 'Enabled': False\n },\n 'SEARCH_SLOW_LOGS': {\n 'CloudWatchLogsLogGroupArn': 'arn:aws:logs:%s:%s:log-group:sample-domain' % (aws_stack.get_region(), TEST_AWS_ACCOUNT_ID), # noqa: E501\n 'Enabled': False,\n }\n },\n 'Status': config_status\n },\n 'SnapshotOptions': {\n 'Options': {\n 'AutomatedSnapshotStartHour': randint(0, 23)\n },\n 'Status': config_status\n },\n 'VPCOptions': {\n 'Options': {\n 'AvailabilityZones': [\n 'us-east-1b'\n ],\n 'SecurityGroupIds': [\n 'sg-12345678'\n ],\n 'SubnetIds': [\n 'subnet-12345678'\n ],\n 'VPCId': 'vpc-12345678'\n },\n 'Status': config_status\n }\n }\n }\n\n\ndef get_domain_status(domain_name, deleted=False):\n return {\n 'DomainStatus': {\n 'ARN': 'arn:aws:es:%s:%s:domain/%s' % (aws_stack.get_region(), TEST_AWS_ACCOUNT_ID, domain_name),\n 'Created': True,\n 'Deleted': deleted,\n 'DomainId': '%s/%s' % (TEST_AWS_ACCOUNT_ID, domain_name),\n 'DomainName': domain_name,\n 'ElasticsearchClusterConfig': {\n 'DedicatedMasterCount': 1,\n 'DedicatedMasterEnabled': True,\n 'DedicatedMasterType': 'm3.medium.elasticsearch',\n 'InstanceCount': 1,\n 'InstanceType': 'm3.medium.elasticsearch',\n 'ZoneAwarenessEnabled': False\n },\n 'ElasticsearchVersion': '6.7',\n 'Endpoint': aws_stack.get_elasticsearch_endpoint(domain_name),\n 'Processing': False,\n 'EBSOptions': {\n 'EBSEnabled': True,\n 'VolumeType': 'gp2',\n 'VolumeSize': 10,\n 'Iops': 0\n },\n }\n }\n\n\[email protected]('%s/domain' % API_PREFIX, methods=['GET'])\ndef list_domain_names():\n result = {\n 'DomainNames': [{'DomainName': name} for name in ES_DOMAINS.keys()]\n }\n return jsonify(result)\n\n\[email protected]('%s/es/domain' % API_PREFIX, methods=['POST'])\ndef create_domain():\n data = json.loads(to_str(request.data))\n domain_name = data['DomainName']\n if domain_name in ES_DOMAINS:\n return error_response(error_type='ResourceAlreadyExistsException')\n ES_DOMAINS[domain_name] = data\n result = get_domain_status(domain_name)\n # record event\n event_publisher.fire_event(event_publisher.EVENT_ES_CREATE_DOMAIN,\n payload={'n': 
event_publisher.get_hash(domain_name)})\n return jsonify(result)\n\n\[email protected]('%s/es/domain/<domain_name>' % API_PREFIX, methods=['GET'])\ndef describe_domain(domain_name):\n if domain_name not in ES_DOMAINS:\n return error_response(error_type='ResourceNotFoundException')\n result = get_domain_status(domain_name)\n return jsonify(result)\n\n\[email protected]('%s/es/domain/<domain_name>/config' % API_PREFIX, methods=['GET', 'POST'])\ndef domain_config(domain_name):\n config = get_domain_config(domain_name)\n return jsonify(config)\n\n\[email protected]('%s/es/domain/<domain_name>' % API_PREFIX, methods=['DELETE'])\ndef delete_domain(domain_name):\n if domain_name not in ES_DOMAINS:\n return error_response(error_type='ResourceNotFoundException')\n result = get_domain_status(domain_name, deleted=True)\n ES_DOMAINS.pop(domain_name)\n # record event\n event_publisher.fire_event(event_publisher.EVENT_ES_DELETE_DOMAIN,\n payload={'n': event_publisher.get_hash(domain_name)})\n return jsonify(result)\n\n\[email protected]('%s/tags' % API_PREFIX, methods=['GET', 'POST'])\ndef add_list_tags():\n if request.method == 'GET' and request.args.get('arn'):\n response = {\n 'TagList': [\n {\n 'Key': 'Example1',\n 'Value': 'Value'\n },\n {\n 'Key': 'Example2',\n 'Value': 'Value'\n }\n ]\n }\n return jsonify(response)\n\n return jsonify({})\n\n\ndef serve(port, quiet=True):\n generic_proxy.serve_flask_app(app=app, port=port, quiet=quiet)\n", "path": "localstack/services/es/es_api.py"}]}
3,481
93
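The record above resolves the Terraform 404 with a single routing setting. A minimal standalone sketch of that Flask/Werkzeug behavior follows; the app name, route handler, and response body are invented for illustration and are not part of the localstack record.

```python
# Sketch only (assumed names, not localstack code) of the strict_slashes
# behavior the golden diff relies on.
from flask import Flask, jsonify

app = Flask("es_api_sketch")
app.url_map.strict_slashes = False  # the one-line change from the golden diff

@app.route("/2015-01-01/tags", methods=["GET"])
def list_tags():
    return jsonify({"TagList": []})

# With strict_slashes=False set before the route is registered, both URL
# forms reach list_tags():
#   GET /2015-01-01/tags?arn=...
#   GET /2015-01-01/tags/?arn=...
# With the default strict_slashes=True (as in the Werkzeug 0.14 traceback
# shown in the issue), a rule declared without a trailing slash returns 404
# when requested with one -- exactly the /tags/?arn=... failure Terraform hit.
```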
gh_patches_debug_31638
rasdani/github-patches
git_diff
bridgecrewio__checkov-5766
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Azure Function App Slots - Ensure web app redirects all HTTP traffic to HTTPS in Azure Function App Slots **Describe the issue** It seems that there are no checks that ensure that the following resource only allows HTTPS: - azurerm_function_app_slot - azurerm_linux_function_app_slot - azurerm_windows_function_app_slot **Examples** ````hcl resource "azurerm_function_app_slot" "example" { name = "test-azure-functions_slot" location = azurerm_resource_group.example.location resource_group_name = azurerm_resource_group.example.name app_service_plan_id = azurerm_app_service_plan.example.id function_app_name = azurerm_function_app.example.name storage_account_name = azurerm_storage_account.example.name storage_account_access_key = azurerm_storage_account.example.primary_access_key https_only = true } resource "azurerm_linux_function_app_slot" "example" { name = "example-linux-function-app-slot" function_app_id = azurerm_linux_function_app.example.id storage_account_name = azurerm_storage_account.example.name site_config { require_https = true } } resource "azurerm_windows_function_app" "example" { name = "example-windows-function-app" resource_group_name = azurerm_resource_group.example.name location = azurerm_resource_group.example.location storage_account_name = azurerm_storage_account.example.name service_plan_id = azurerm_service_plan.example.id site_config { require_https = true } } ```` **Version (please complete the following information):** - N/A **Additional context** N/A </issue> <code> [start of checkov/terraform/checks/resource/azure/FunctionAppsAccessibleOverHttps.py] 1 from checkov.common.models.enums import CheckCategories 2 from checkov.terraform.checks.resource.base_resource_value_check import BaseResourceValueCheck 3 4 5 class FunctionAppsAccessibleOverHttps(BaseResourceValueCheck): 6 def __init__(self): 7 name = "Ensure that Function apps is only accessible over HTTPS" 8 id = "CKV_AZURE_70" 9 supported_resources = ['azurerm_function_app'] 10 categories = [CheckCategories.NETWORKING] 11 super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources) 12 13 def get_inspected_key(self): 14 return 'https_only' 15 16 17 check = FunctionAppsAccessibleOverHttps() 18 [end of checkov/terraform/checks/resource/azure/FunctionAppsAccessibleOverHttps.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/checkov/terraform/checks/resource/azure/FunctionAppsAccessibleOverHttps.py b/checkov/terraform/checks/resource/azure/FunctionAppsAccessibleOverHttps.py --- a/checkov/terraform/checks/resource/azure/FunctionAppsAccessibleOverHttps.py +++ b/checkov/terraform/checks/resource/azure/FunctionAppsAccessibleOverHttps.py @@ -1,17 +1,44 @@ -from checkov.common.models.enums import CheckCategories -from checkov.terraform.checks.resource.base_resource_value_check import BaseResourceValueCheck +from __future__ import annotations +from typing import Any -class FunctionAppsAccessibleOverHttps(BaseResourceValueCheck): - def __init__(self): +from checkov.common.models.enums import CheckCategories, CheckResult +from checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck + + +class FunctionAppsAccessibleOverHttps(BaseResourceCheck): + + def __init__(self) -> None: name = "Ensure that Function apps is only accessible over HTTPS" id = "CKV_AZURE_70" - supported_resources = ['azurerm_function_app'] + supported_resources = ['azurerm_function_app', 'azurerm_linux_function_app', 'azurerm_windows_function_app', + 'azurerm_function_app_slot', 'azurerm_linux_function_app_slot', + 'azurerm_windows_function_app_slot'] categories = [CheckCategories.NETWORKING] super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources) - def get_inspected_key(self): - return 'https_only' + def scan_resource_conf(self, conf: dict[str, list[Any]]) -> CheckResult: + # default=false for https_only + if 'https_only' not in conf.keys(): + return CheckResult.FAILED + + https_only = conf.get('https_only')[0] + if not https_only: + return CheckResult.FAILED + + # relevant for linux/windows resources + if 'auth_settings_v2' in conf.keys(): + auth_settings_v2 = conf['auth_settings_v2'][0] + + # default=true for require_https + if 'require_https' not in auth_settings_v2.keys(): + return CheckResult.PASSED + + require_https = auth_settings_v2.get('require_https')[0] + if not require_https: + return CheckResult.FAILED + + return CheckResult.PASSED check = FunctionAppsAccessibleOverHttps()
{"golden_diff": "diff --git a/checkov/terraform/checks/resource/azure/FunctionAppsAccessibleOverHttps.py b/checkov/terraform/checks/resource/azure/FunctionAppsAccessibleOverHttps.py\n--- a/checkov/terraform/checks/resource/azure/FunctionAppsAccessibleOverHttps.py\n+++ b/checkov/terraform/checks/resource/azure/FunctionAppsAccessibleOverHttps.py\n@@ -1,17 +1,44 @@\n-from checkov.common.models.enums import CheckCategories\n-from checkov.terraform.checks.resource.base_resource_value_check import BaseResourceValueCheck\n+from __future__ import annotations\n \n+from typing import Any\n \n-class FunctionAppsAccessibleOverHttps(BaseResourceValueCheck):\n- def __init__(self):\n+from checkov.common.models.enums import CheckCategories, CheckResult\n+from checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck\n+\n+\n+class FunctionAppsAccessibleOverHttps(BaseResourceCheck):\n+\n+ def __init__(self) -> None:\n name = \"Ensure that Function apps is only accessible over HTTPS\"\n id = \"CKV_AZURE_70\"\n- supported_resources = ['azurerm_function_app']\n+ supported_resources = ['azurerm_function_app', 'azurerm_linux_function_app', 'azurerm_windows_function_app',\n+ 'azurerm_function_app_slot', 'azurerm_linux_function_app_slot',\n+ 'azurerm_windows_function_app_slot']\n categories = [CheckCategories.NETWORKING]\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n \n- def get_inspected_key(self):\n- return 'https_only'\n+ def scan_resource_conf(self, conf: dict[str, list[Any]]) -> CheckResult:\n+ # default=false for https_only\n+ if 'https_only' not in conf.keys():\n+ return CheckResult.FAILED\n+\n+ https_only = conf.get('https_only')[0]\n+ if not https_only:\n+ return CheckResult.FAILED\n+\n+ # relevant for linux/windows resources\n+ if 'auth_settings_v2' in conf.keys():\n+ auth_settings_v2 = conf['auth_settings_v2'][0]\n+\n+ # default=true for require_https\n+ if 'require_https' not in auth_settings_v2.keys():\n+ return CheckResult.PASSED\n+\n+ require_https = auth_settings_v2.get('require_https')[0]\n+ if not require_https:\n+ return CheckResult.FAILED\n+\n+ return CheckResult.PASSED\n \n \n check = FunctionAppsAccessibleOverHttps()\n", "issue": "Azure Function App Slots - Ensure web app redirects all HTTP traffic to HTTPS in Azure Function App Slots\n**Describe the issue**\r\nIt seems that there are no checks that ensure that the following resource only allows HTTPS:\r\n\r\n- azurerm_function_app_slot\r\n- azurerm_linux_function_app_slot\r\n- azurerm_windows_function_app_slot\r\n\r\n**Examples**\r\n\r\n````hcl\r\nresource \"azurerm_function_app_slot\" \"example\" {\r\n name = \"test-azure-functions_slot\"\r\n location = azurerm_resource_group.example.location\r\n resource_group_name = azurerm_resource_group.example.name\r\n app_service_plan_id = azurerm_app_service_plan.example.id\r\n function_app_name = azurerm_function_app.example.name\r\n storage_account_name = azurerm_storage_account.example.name\r\n storage_account_access_key = azurerm_storage_account.example.primary_access_key\r\n https_only = true\r\n}\r\n\r\nresource \"azurerm_linux_function_app_slot\" \"example\" {\r\n name = \"example-linux-function-app-slot\"\r\n function_app_id = azurerm_linux_function_app.example.id\r\n storage_account_name = azurerm_storage_account.example.name\r\n\r\n site_config {\r\n require_https = true \r\n }\r\n}\r\n\r\nresource \"azurerm_windows_function_app\" \"example\" {\r\n name = \"example-windows-function-app\"\r\n resource_group_name = 
azurerm_resource_group.example.name\r\n location = azurerm_resource_group.example.location\r\n storage_account_name = azurerm_storage_account.example.name\r\n service_plan_id = azurerm_service_plan.example.id\r\n\r\n site_config {\r\n require_https = true \r\n }\r\n}\r\n\r\n````\r\n\r\n**Version (please complete the following information):**\r\n - N/A\r\n \r\n**Additional context**\r\n\r\nN/A\n", "before_files": [{"content": "from checkov.common.models.enums import CheckCategories\nfrom checkov.terraform.checks.resource.base_resource_value_check import BaseResourceValueCheck\n\n\nclass FunctionAppsAccessibleOverHttps(BaseResourceValueCheck):\n def __init__(self):\n name = \"Ensure that Function apps is only accessible over HTTPS\"\n id = \"CKV_AZURE_70\"\n supported_resources = ['azurerm_function_app']\n categories = [CheckCategories.NETWORKING]\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n\n def get_inspected_key(self):\n return 'https_only'\n\n\ncheck = FunctionAppsAccessibleOverHttps()\n", "path": "checkov/terraform/checks/resource/azure/FunctionAppsAccessibleOverHttps.py"}]}
1,102
555
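The rewritten check in the golden diff above indexes every attribute with `[0]` because Checkov passes `scan_resource_conf` the parsed HCL with each attribute's value wrapped in a list. The stand-in dictionaries below illustrate that shape and the resulting verdicts; they are hand-written from the diff's indexing, not captured from a real Checkov run.

```python
# Illustrative conf shapes only -- assumed from the [0] indexing in the diff.
slot_https_only = {
    "https_only": [True],                                # -> PASSED
}
slot_auth_v2_disabled = {
    "https_only": [True],
    "auth_settings_v2": [{"require_https": [False]}],    # -> FAILED
}
slot_missing_flag = {}                                   # -> FAILED

# Verdicts mirror the diff's logic:
#   * "https_only" absent or [False]           -> FAILED (its default is false)
#   * "auth_settings_v2" present with
#     "require_https" == [False]               -> FAILED
#   * "require_https" absent inside the block  -> PASSED (its default is true)
#   * otherwise                                -> PASSED
```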
gh_patches_debug_2348
rasdani/github-patches
git_diff
cornellius-gp__gpytorch-2285
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> [Docs] Bernoulli likelihoods # 📚 Documentation/Examples In the document for [Bernoulli likelihoods](https://docs.gpytorch.ai/en/stable/likelihoods.html), since the labels take value in {0, 1}, the likelihood should be $$p(Y=y | f) = \Phi ((2y-1)f)$$ instead of the currently displayed $$p(Y=y|f) = \Phi(yf).$$ </issue> <code> [start of gpytorch/likelihoods/bernoulli_likelihood.py] 1 #!/usr/bin/env python3 2 3 import warnings 4 5 import torch 6 7 from ..distributions import base_distributions 8 from ..functions import log_normal_cdf 9 from .likelihood import _OneDimensionalLikelihood 10 11 12 class BernoulliLikelihood(_OneDimensionalLikelihood): 13 r""" 14 Implements the Bernoulli likelihood used for GP classification, using 15 Probit regression (i.e., the latent function is warped to be in [0,1] 16 using the standard Normal CDF :math:`\Phi(x)`). Given the identity 17 :math:`\Phi(-x) = 1-\Phi(x)`, we can write the likelihood compactly as: 18 19 .. math:: 20 \begin{equation*} 21 p(Y=y|f)=\Phi(yf) 22 \end{equation*} 23 """ 24 25 def forward(self, function_samples, **kwargs): 26 output_probs = base_distributions.Normal(0, 1).cdf(function_samples) 27 return base_distributions.Bernoulli(probs=output_probs) 28 29 def log_marginal(self, observations, function_dist, *args, **kwargs): 30 marginal = self.marginal(function_dist, *args, **kwargs) 31 return marginal.log_prob(observations) 32 33 def marginal(self, function_dist, **kwargs): 34 mean = function_dist.mean 35 var = function_dist.variance 36 link = mean.div(torch.sqrt(1 + var)) 37 output_probs = base_distributions.Normal(0, 1).cdf(link) 38 return base_distributions.Bernoulli(probs=output_probs) 39 40 def expected_log_prob(self, observations, function_dist, *params, **kwargs): 41 if torch.any(observations.eq(-1)): 42 # Remove after 1.0 43 warnings.warn( 44 "BernoulliLikelihood.expected_log_prob expects observations with labels in {0, 1}. " 45 "Observations with labels in {-1, 1} are deprecated.", 46 DeprecationWarning, 47 ) 48 else: 49 observations = observations.mul(2).sub(1) 50 # Custom function here so we can use log_normal_cdf rather than Normal.cdf 51 # This is going to be less prone to overflow errors 52 log_prob_lambda = lambda function_samples: log_normal_cdf(function_samples.mul(observations)) 53 log_prob = self.quadrature(log_prob_lambda, function_dist) 54 return log_prob 55 [end of gpytorch/likelihoods/bernoulli_likelihood.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/gpytorch/likelihoods/bernoulli_likelihood.py b/gpytorch/likelihoods/bernoulli_likelihood.py --- a/gpytorch/likelihoods/bernoulli_likelihood.py +++ b/gpytorch/likelihoods/bernoulli_likelihood.py @@ -18,8 +18,11 @@ .. math:: \begin{equation*} - p(Y=y|f)=\Phi(yf) + p(Y=y|f)=\Phi((2y - 1)f) \end{equation*} + + .. note:: + The labels should take values in {0, 1}. """ def forward(self, function_samples, **kwargs):
{"golden_diff": "diff --git a/gpytorch/likelihoods/bernoulli_likelihood.py b/gpytorch/likelihoods/bernoulli_likelihood.py\n--- a/gpytorch/likelihoods/bernoulli_likelihood.py\n+++ b/gpytorch/likelihoods/bernoulli_likelihood.py\n@@ -18,8 +18,11 @@\n \n .. math::\n \\begin{equation*}\n- p(Y=y|f)=\\Phi(yf)\n+ p(Y=y|f)=\\Phi((2y - 1)f)\n \\end{equation*}\n+\n+ .. note::\n+ The labels should take values in {0, 1}.\n \"\"\"\n \n def forward(self, function_samples, **kwargs):\n", "issue": "[Docs] Bernoulli likelihoods \n# \ud83d\udcda Documentation/Examples\r\n\r\nIn the document for [Bernoulli likelihoods](https://docs.gpytorch.ai/en/stable/likelihoods.html), since the labels take value in {0, 1}, the likelihood should be \r\n\r\n$$p(Y=y | f) = \\Phi ((2y-1)f)$$\r\n\r\ninstead of the currently displayed\r\n\r\n$$p(Y=y|f) = \\Phi(yf).$$\n", "before_files": [{"content": "#!/usr/bin/env python3\n\nimport warnings\n\nimport torch\n\nfrom ..distributions import base_distributions\nfrom ..functions import log_normal_cdf\nfrom .likelihood import _OneDimensionalLikelihood\n\n\nclass BernoulliLikelihood(_OneDimensionalLikelihood):\n r\"\"\"\n Implements the Bernoulli likelihood used for GP classification, using\n Probit regression (i.e., the latent function is warped to be in [0,1]\n using the standard Normal CDF :math:`\\Phi(x)`). Given the identity\n :math:`\\Phi(-x) = 1-\\Phi(x)`, we can write the likelihood compactly as:\n\n .. math::\n \\begin{equation*}\n p(Y=y|f)=\\Phi(yf)\n \\end{equation*}\n \"\"\"\n\n def forward(self, function_samples, **kwargs):\n output_probs = base_distributions.Normal(0, 1).cdf(function_samples)\n return base_distributions.Bernoulli(probs=output_probs)\n\n def log_marginal(self, observations, function_dist, *args, **kwargs):\n marginal = self.marginal(function_dist, *args, **kwargs)\n return marginal.log_prob(observations)\n\n def marginal(self, function_dist, **kwargs):\n mean = function_dist.mean\n var = function_dist.variance\n link = mean.div(torch.sqrt(1 + var))\n output_probs = base_distributions.Normal(0, 1).cdf(link)\n return base_distributions.Bernoulli(probs=output_probs)\n\n def expected_log_prob(self, observations, function_dist, *params, **kwargs):\n if torch.any(observations.eq(-1)):\n # Remove after 1.0\n warnings.warn(\n \"BernoulliLikelihood.expected_log_prob expects observations with labels in {0, 1}. \"\n \"Observations with labels in {-1, 1} are deprecated.\",\n DeprecationWarning,\n )\n else:\n observations = observations.mul(2).sub(1)\n # Custom function here so we can use log_normal_cdf rather than Normal.cdf\n # This is going to be less prone to overflow errors\n log_prob_lambda = lambda function_samples: log_normal_cdf(function_samples.mul(observations))\n log_prob = self.quadrature(log_prob_lambda, function_dist)\n return log_prob\n", "path": "gpytorch/likelihoods/bernoulli_likelihood.py"}]}
1,261
159
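The docstring fix above is small, but the reasoning behind `(2y - 1)` is worth writing out once; the short derivation below uses only the identity Φ(-x) = 1 - Φ(x) already quoted in the docstring.

```latex
% With labels $y \in \{0, 1\}$ the probit link gives
%   $p(Y{=}1 \mid f) = \Phi(f)$ and $p(Y{=}0 \mid f) = 1 - \Phi(f) = \Phi(-f)$.
% Substituting $t = 2y - 1 \in \{-1, +1\}$ folds both cases into one line:
\[
  p(Y = y \mid f) = \Phi\bigl((2y - 1)\,f\bigr), \qquad y \in \{0, 1\},
\]
% which is why expected_log_prob() calls observations.mul(2).sub(1) before
% log_normal_cdf, and why the old form $\Phi(yf)$ only matched the
% deprecated $\{-1, +1\}$ labels.
```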
gh_patches_debug_22586
rasdani/github-patches
git_diff
praw-dev__praw-1918
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Praw incorrectly uses the "after" parameter to paginate Mode Note API Queries. ### Describe the Bug The Reddit API does not support using the parameter "after" on queries for /api/mod/notes The default listing generator uses the "after" parameter to paginate results. Generator.py ``` if self._listing.after and self._listing.after != self.params.get("after"): self.params["after"] = self._listing.after else: self._exhausted = True ``` The "after" parameter is set as part of the listing class. ``` class ModNoteListing(Listing): """Special Listing for handling :class:`.ModNote` lists.""" CHILD_ATTRIBUTE = "mod_notes" @property def after(self) -> Optional[Any]: """Return the next attribute or None.""" if not getattr(self, "has_next_page", True): return None return getattr(self, "end_cursor", None) ``` The result is that the reddit API will ignore the 'after' parameter and return an identical result to the first query. When PRAW receives a second response with an identical 'end_cursor' parameter, it will end the query. This means that the maximum number of mod notes pulled by PRAW is 100. ### Desired Result PRAW should record the 'end_cursor' parameter from responses to the Mod Notes API and transmit them as "before" in the next query. This will properly collect the next page of results from the API. I do not have the python knowledge to provide a best practice fix. Below is my hack that correctly provides all user notes. listing.py ``` class ModNoteListing(Listing): """Special Listing for handling :class:`.ModNote` lists.""" CHILD_ATTRIBUTE = "mod_notes" @property def before(self) -> Optional[Any]: """Return the next attribute or None.""" if not getattr(self, "has_next_page", True): return None return getattr(self, "end_cursor", None) ``` generator.py ``` def _next_batch(self): if self._exhausted: raise StopIteration() self._listing = self._reddit.get(self.url, params=self.params) self._listing = self._extract_sublist(self._listing) self._list_index = 0 if not self._listing: raise StopIteration() if hasattr(self._listing, "after"): if self._listing.after and self._listing.after != self.params.get("after"): self.params["after"] = self._listing.after else: self._exhausted = True elif hasattr(self._listing, "before"): if self._listing.before and self._listing.before != self.params.get("before"): self.params["before"] = self._listing.before else: self._exhausted = True else: self._exhausted = True ``` ### Relevant Logs ```shell DEBUG:prawcore:Params: {'subreddit': Subreddit(display_name='test'), 'user': 'TestUser', 'limit': 1024, 'raw_json': 1} DEBUG:prawcore:Response: 200 (5089 bytes) DEBUG:prawcore:Params: {'subreddit': Subreddit(display_name='test), 'user': 'testUser', 'limit': 1024, 'after': 'MTY2MDIzMTM3MDk5Mw==', 'raw_json': 1} DEBUG:prawcore:Response: 200 (5089 bytes) ``` ### Code to reproduce the bug ```python for note in reddit.subreddit("test").mod.notes.redditors(userName, limit = None): ``` ### My code example does not include the `Reddit()` initialization to prevent credential leakage. Yes ### This code has previously worked as intended. No ### Operating System/Environment Windows 10 ### Python Version Python 3.10 ### PRAW Version Version: 7.6.1 ### Prawcore Version Version: 2.3.0 ### Anything else? The user will need more than 100 mod notes in order to need to paginate requests. 
</issue> <code> [start of praw/models/listing/generator.py] 1 """Provide the ListingGenerator class.""" 2 from copy import deepcopy 3 from typing import TYPE_CHECKING, Any, Dict, Iterator, Optional, Union 4 5 from ..base import PRAWBase 6 from .listing import FlairListing, ModNoteListing 7 8 if TYPE_CHECKING: # pragma: no cover 9 import praw 10 11 12 class ListingGenerator(PRAWBase, Iterator): 13 """Instances of this class generate :class:`.RedditBase` instances. 14 15 .. warning:: 16 17 This class should not be directly utilized. Instead, you will find a number of 18 methods that return instances of the class here_. 19 20 .. _here: https://praw.readthedocs.io/en/latest/search.html?q=ListingGenerator 21 22 """ 23 24 def __init__( 25 self, 26 reddit: "praw.Reddit", 27 url: str, 28 limit: int = 100, 29 params: Optional[Dict[str, Union[str, int]]] = None, 30 ): 31 """Initialize a :class:`.ListingGenerator` instance. 32 33 :param reddit: An instance of :class:`.Reddit`. 34 :param url: A URL returning a Reddit listing. 35 :param limit: The number of content entries to fetch. If ``limit`` is ``None``, 36 then fetch as many entries as possible. Most of Reddit's listings contain a 37 maximum of 1000 items, and are returned 100 at a time. This class will 38 automatically issue all necessary requests (default: ``100``). 39 :param params: A dictionary containing additional query string parameters to 40 send with the request. 41 42 """ 43 super().__init__(reddit, _data=None) 44 self._exhausted = False 45 self._listing = None 46 self._list_index = None 47 self.limit = limit 48 self.params = deepcopy(params) if params else {} 49 self.params["limit"] = limit or 1024 50 self.url = url 51 self.yielded = 0 52 53 def __iter__(self) -> Iterator[Any]: 54 """Permit :class:`.ListingGenerator` to operate as an iterator.""" 55 return self 56 57 def __next__(self) -> Any: 58 """Permit :class:`.ListingGenerator` to operate as a generator.""" 59 if self.limit is not None and self.yielded >= self.limit: 60 raise StopIteration() 61 62 if self._listing is None or self._list_index >= len(self._listing): 63 self._next_batch() 64 65 self._list_index += 1 66 self.yielded += 1 67 return self._listing[self._list_index - 1] 68 69 def _extract_sublist(self, listing): 70 if isinstance(listing, list): 71 return listing[1] # for submission duplicates 72 elif isinstance(listing, dict): 73 classes = [FlairListing, ModNoteListing] 74 75 for listing_type in classes: 76 if listing_type.CHILD_ATTRIBUTE in listing: 77 return listing_type(self._reddit, listing) 78 else: 79 raise ValueError( 80 "The generator returned a dictionary PRAW didn't recognize." 81 " File a bug report at PRAW." 
82 ) 83 return listing 84 85 def _next_batch(self): 86 if self._exhausted: 87 raise StopIteration() 88 89 self._listing = self._reddit.get(self.url, params=self.params) 90 self._listing = self._extract_sublist(self._listing) 91 self._list_index = 0 92 93 if not self._listing: 94 raise StopIteration() 95 96 if self._listing.after and self._listing.after != self.params.get("after"): 97 self.params["after"] = self._listing.after 98 else: 99 self._exhausted = True 100 [end of praw/models/listing/generator.py] [start of praw/models/listing/listing.py] 1 """Provide the Listing class.""" 2 from typing import Any, Optional 3 4 from ..base import PRAWBase 5 6 7 class Listing(PRAWBase): 8 """A listing is a collection of :class:`.RedditBase` instances.""" 9 10 CHILD_ATTRIBUTE = "children" 11 12 def __len__(self) -> int: 13 """Return the number of items in the Listing.""" 14 return len(getattr(self, self.CHILD_ATTRIBUTE)) 15 16 def __getitem__(self, index: int) -> Any: 17 """Return the item at position index in the list.""" 18 return getattr(self, self.CHILD_ATTRIBUTE)[index] 19 20 def __setattr__(self, attribute: str, value: Any): 21 """Objectify the ``CHILD_ATTRIBUTE`` attribute.""" 22 if attribute == self.CHILD_ATTRIBUTE: 23 value = self._reddit._objector.objectify(value) 24 super().__setattr__(attribute, value) 25 26 27 class FlairListing(Listing): 28 """Special Listing for handling flair lists.""" 29 30 CHILD_ATTRIBUTE = "users" 31 32 @property 33 def after(self) -> Optional[Any]: 34 """Return the next attribute or ``None``.""" 35 return getattr(self, "next", None) 36 37 38 class ModeratorListing(Listing): 39 """Special Listing for handling moderator lists.""" 40 41 CHILD_ATTRIBUTE = "moderators" 42 43 44 class ModNoteListing(Listing): 45 """Special Listing for handling :class:`.ModNote` lists.""" 46 47 CHILD_ATTRIBUTE = "mod_notes" 48 49 @property 50 def after(self) -> Optional[Any]: 51 """Return the next attribute or None.""" 52 if not getattr(self, "has_next_page", True): 53 return None 54 return getattr(self, "end_cursor", None) 55 56 57 class ModmailConversationsListing(Listing): 58 """Special Listing for handling :class:`.ModmailConversation` lists.""" 59 60 CHILD_ATTRIBUTE = "conversations" 61 62 @property 63 def after(self) -> Optional[str]: 64 """Return the next attribute or ``None``.""" 65 try: 66 return self.conversations[-1].id 67 except IndexError: 68 return None 69 [end of praw/models/listing/listing.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/praw/models/listing/generator.py b/praw/models/listing/generator.py --- a/praw/models/listing/generator.py +++ b/praw/models/listing/generator.py @@ -93,7 +93,9 @@ if not self._listing: raise StopIteration() - if self._listing.after and self._listing.after != self.params.get("after"): - self.params["after"] = self._listing.after + if self._listing.after and self._listing.after != self.params.get( + self._listing.AFTER_PARAM + ): + self.params[self._listing.AFTER_PARAM] = self._listing.after else: self._exhausted = True diff --git a/praw/models/listing/listing.py b/praw/models/listing/listing.py --- a/praw/models/listing/listing.py +++ b/praw/models/listing/listing.py @@ -7,6 +7,7 @@ class Listing(PRAWBase): """A listing is a collection of :class:`.RedditBase` instances.""" + AFTER_PARAM = "after" CHILD_ATTRIBUTE = "children" def __len__(self) -> int: @@ -44,6 +45,7 @@ class ModNoteListing(Listing): """Special Listing for handling :class:`.ModNote` lists.""" + AFTER_PARAM = "before" CHILD_ATTRIBUTE = "mod_notes" @property
{"golden_diff": "diff --git a/praw/models/listing/generator.py b/praw/models/listing/generator.py\n--- a/praw/models/listing/generator.py\n+++ b/praw/models/listing/generator.py\n@@ -93,7 +93,9 @@\n if not self._listing:\n raise StopIteration()\n \n- if self._listing.after and self._listing.after != self.params.get(\"after\"):\n- self.params[\"after\"] = self._listing.after\n+ if self._listing.after and self._listing.after != self.params.get(\n+ self._listing.AFTER_PARAM\n+ ):\n+ self.params[self._listing.AFTER_PARAM] = self._listing.after\n else:\n self._exhausted = True\ndiff --git a/praw/models/listing/listing.py b/praw/models/listing/listing.py\n--- a/praw/models/listing/listing.py\n+++ b/praw/models/listing/listing.py\n@@ -7,6 +7,7 @@\n class Listing(PRAWBase):\n \"\"\"A listing is a collection of :class:`.RedditBase` instances.\"\"\"\n \n+ AFTER_PARAM = \"after\"\n CHILD_ATTRIBUTE = \"children\"\n \n def __len__(self) -> int:\n@@ -44,6 +45,7 @@\n class ModNoteListing(Listing):\n \"\"\"Special Listing for handling :class:`.ModNote` lists.\"\"\"\n \n+ AFTER_PARAM = \"before\"\n CHILD_ATTRIBUTE = \"mod_notes\"\n \n @property\n", "issue": "Praw incorrectly uses the \"after\" parameter to paginate Mode Note API Queries.\n### Describe the Bug\n\nThe Reddit API does not support using the parameter \"after\" on queries for /api/mod/notes\r\n\r\nThe default listing generator uses the \"after\" parameter to paginate results.\r\n\r\nGenerator.py\r\n```\r\n if self._listing.after and self._listing.after != self.params.get(\"after\"):\r\n self.params[\"after\"] = self._listing.after\r\n else:\r\n self._exhausted = True\r\n```\r\n\r\nThe \"after\" parameter is set as part of the listing class.\r\n\r\n```\r\n class ModNoteListing(Listing):\r\n \"\"\"Special Listing for handling :class:`.ModNote` lists.\"\"\"\r\n \r\n CHILD_ATTRIBUTE = \"mod_notes\"\r\n \r\n @property\r\n def after(self) -> Optional[Any]:\r\n \"\"\"Return the next attribute or None.\"\"\"\r\n if not getattr(self, \"has_next_page\", True):\r\n return None\r\n return getattr(self, \"end_cursor\", None)\r\n```\r\n\r\nThe result is that the reddit API will ignore the 'after' parameter and return an identical result to the first query. When PRAW receives a second response with an identical 'end_cursor' parameter, it will end the query. This means that the maximum number of mod notes pulled by PRAW is 100.\n\n### Desired Result\n\nPRAW should record the 'end_cursor' parameter from responses to the Mod Notes API and transmit them as \"before\" in the next query. This will properly collect the next page of results from the API.\r\n\r\nI do not have the python knowledge to provide a best practice fix. 
Below is my hack that correctly provides all user notes.\r\n\r\nlisting.py\r\n```\r\nclass ModNoteListing(Listing):\r\n \"\"\"Special Listing for handling :class:`.ModNote` lists.\"\"\"\r\n\r\n CHILD_ATTRIBUTE = \"mod_notes\"\r\n\r\n @property\r\n def before(self) -> Optional[Any]:\r\n \"\"\"Return the next attribute or None.\"\"\"\r\n if not getattr(self, \"has_next_page\", True):\r\n return None\r\n return getattr(self, \"end_cursor\", None)\r\n```\r\n\r\ngenerator.py\r\n```\r\n def _next_batch(self):\r\n if self._exhausted:\r\n raise StopIteration()\r\n\r\n self._listing = self._reddit.get(self.url, params=self.params)\r\n self._listing = self._extract_sublist(self._listing)\r\n self._list_index = 0\r\n\r\n if not self._listing:\r\n raise StopIteration()\r\n\r\n if hasattr(self._listing, \"after\"):\r\n if self._listing.after and self._listing.after != self.params.get(\"after\"):\r\n self.params[\"after\"] = self._listing.after\r\n else:\r\n self._exhausted = True\r\n elif hasattr(self._listing, \"before\"):\r\n if self._listing.before and self._listing.before != self.params.get(\"before\"):\r\n self.params[\"before\"] = self._listing.before\r\n else:\r\n self._exhausted = True\r\n else:\r\n self._exhausted = True\r\n```\n\n### Relevant Logs\n\n```shell\nDEBUG:prawcore:Params: {'subreddit': Subreddit(display_name='test'), 'user': 'TestUser', 'limit': 1024, 'raw_json': 1}\r\nDEBUG:prawcore:Response: 200 (5089 bytes)\r\nDEBUG:prawcore:Params: {'subreddit': Subreddit(display_name='test), 'user': 'testUser', 'limit': 1024, 'after': 'MTY2MDIzMTM3MDk5Mw==', 'raw_json': 1}\r\nDEBUG:prawcore:Response: 200 (5089 bytes)\n```\n\n\n### Code to reproduce the bug\n\n```python\nfor note in reddit.subreddit(\"test\").mod.notes.redditors(userName, limit = None):\n```\n\n\n### My code example does not include the `Reddit()` initialization to prevent credential leakage.\n\nYes\n\n### This code has previously worked as intended.\n\nNo\n\n### Operating System/Environment\n\nWindows 10\n\n### Python Version\n\nPython 3.10\n\n### PRAW Version\n\nVersion: 7.6.1\n\n### Prawcore Version\n\nVersion: 2.3.0\n\n### Anything else?\n\nThe user will need more than 100 mod notes in order to need to paginate requests.\n", "before_files": [{"content": "\"\"\"Provide the ListingGenerator class.\"\"\"\nfrom copy import deepcopy\nfrom typing import TYPE_CHECKING, Any, Dict, Iterator, Optional, Union\n\nfrom ..base import PRAWBase\nfrom .listing import FlairListing, ModNoteListing\n\nif TYPE_CHECKING: # pragma: no cover\n import praw\n\n\nclass ListingGenerator(PRAWBase, Iterator):\n \"\"\"Instances of this class generate :class:`.RedditBase` instances.\n\n .. warning::\n\n This class should not be directly utilized. Instead, you will find a number of\n methods that return instances of the class here_.\n\n .. _here: https://praw.readthedocs.io/en/latest/search.html?q=ListingGenerator\n\n \"\"\"\n\n def __init__(\n self,\n reddit: \"praw.Reddit\",\n url: str,\n limit: int = 100,\n params: Optional[Dict[str, Union[str, int]]] = None,\n ):\n \"\"\"Initialize a :class:`.ListingGenerator` instance.\n\n :param reddit: An instance of :class:`.Reddit`.\n :param url: A URL returning a Reddit listing.\n :param limit: The number of content entries to fetch. If ``limit`` is ``None``,\n then fetch as many entries as possible. Most of Reddit's listings contain a\n maximum of 1000 items, and are returned 100 at a time. 
This class will\n automatically issue all necessary requests (default: ``100``).\n :param params: A dictionary containing additional query string parameters to\n send with the request.\n\n \"\"\"\n super().__init__(reddit, _data=None)\n self._exhausted = False\n self._listing = None\n self._list_index = None\n self.limit = limit\n self.params = deepcopy(params) if params else {}\n self.params[\"limit\"] = limit or 1024\n self.url = url\n self.yielded = 0\n\n def __iter__(self) -> Iterator[Any]:\n \"\"\"Permit :class:`.ListingGenerator` to operate as an iterator.\"\"\"\n return self\n\n def __next__(self) -> Any:\n \"\"\"Permit :class:`.ListingGenerator` to operate as a generator.\"\"\"\n if self.limit is not None and self.yielded >= self.limit:\n raise StopIteration()\n\n if self._listing is None or self._list_index >= len(self._listing):\n self._next_batch()\n\n self._list_index += 1\n self.yielded += 1\n return self._listing[self._list_index - 1]\n\n def _extract_sublist(self, listing):\n if isinstance(listing, list):\n return listing[1] # for submission duplicates\n elif isinstance(listing, dict):\n classes = [FlairListing, ModNoteListing]\n\n for listing_type in classes:\n if listing_type.CHILD_ATTRIBUTE in listing:\n return listing_type(self._reddit, listing)\n else:\n raise ValueError(\n \"The generator returned a dictionary PRAW didn't recognize.\"\n \" File a bug report at PRAW.\"\n )\n return listing\n\n def _next_batch(self):\n if self._exhausted:\n raise StopIteration()\n\n self._listing = self._reddit.get(self.url, params=self.params)\n self._listing = self._extract_sublist(self._listing)\n self._list_index = 0\n\n if not self._listing:\n raise StopIteration()\n\n if self._listing.after and self._listing.after != self.params.get(\"after\"):\n self.params[\"after\"] = self._listing.after\n else:\n self._exhausted = True\n", "path": "praw/models/listing/generator.py"}, {"content": "\"\"\"Provide the Listing class.\"\"\"\nfrom typing import Any, Optional\n\nfrom ..base import PRAWBase\n\n\nclass Listing(PRAWBase):\n \"\"\"A listing is a collection of :class:`.RedditBase` instances.\"\"\"\n\n CHILD_ATTRIBUTE = \"children\"\n\n def __len__(self) -> int:\n \"\"\"Return the number of items in the Listing.\"\"\"\n return len(getattr(self, self.CHILD_ATTRIBUTE))\n\n def __getitem__(self, index: int) -> Any:\n \"\"\"Return the item at position index in the list.\"\"\"\n return getattr(self, self.CHILD_ATTRIBUTE)[index]\n\n def __setattr__(self, attribute: str, value: Any):\n \"\"\"Objectify the ``CHILD_ATTRIBUTE`` attribute.\"\"\"\n if attribute == self.CHILD_ATTRIBUTE:\n value = self._reddit._objector.objectify(value)\n super().__setattr__(attribute, value)\n\n\nclass FlairListing(Listing):\n \"\"\"Special Listing for handling flair lists.\"\"\"\n\n CHILD_ATTRIBUTE = \"users\"\n\n @property\n def after(self) -> Optional[Any]:\n \"\"\"Return the next attribute or ``None``.\"\"\"\n return getattr(self, \"next\", None)\n\n\nclass ModeratorListing(Listing):\n \"\"\"Special Listing for handling moderator lists.\"\"\"\n\n CHILD_ATTRIBUTE = \"moderators\"\n\n\nclass ModNoteListing(Listing):\n \"\"\"Special Listing for handling :class:`.ModNote` lists.\"\"\"\n\n CHILD_ATTRIBUTE = \"mod_notes\"\n\n @property\n def after(self) -> Optional[Any]:\n \"\"\"Return the next attribute or None.\"\"\"\n if not getattr(self, \"has_next_page\", True):\n return None\n return getattr(self, \"end_cursor\", None)\n\n\nclass ModmailConversationsListing(Listing):\n \"\"\"Special Listing for handling 
:class:`.ModmailConversation` lists.\"\"\"\n\n CHILD_ATTRIBUTE = \"conversations\"\n\n @property\n def after(self) -> Optional[str]:\n \"\"\"Return the next attribute or ``None``.\"\"\"\n try:\n return self.conversations[-1].id\n except IndexError:\n return None\n", "path": "praw/models/listing/listing.py"}]}
3,040
319
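The golden diff above keeps a single generator and instead lets each listing class name the query parameter its cursor travels in. The sketch below restates that contract with invented names so the pagination flow is easy to follow; it is a simplified pattern, not PRAW's actual implementation.

```python
# Pattern sketch only -- names are illustrative, not PRAW's real classes.
class Listing:
    AFTER_PARAM = "after"                 # classic listings page on "after"

    def __init__(self, cursor):
        self.after = cursor               # cursor reported by the last response


class ModNoteListing(Listing):
    AFTER_PARAM = "before"                # /api/mod/notes expects its cursor here


def advance(listing, params):
    """Return params for the next request, or None once the listing is done."""
    cursor = listing.after                # for mod notes this wraps end_cursor
    if cursor and cursor != params.get(listing.AFTER_PARAM):
        return {**params, listing.AFTER_PARAM: cursor}
    return None                           # same cursor seen twice -> exhausted


# e.g. advance(ModNoteListing("MTY2MDIzMTM3MDk5Mw=="), {"limit": 100})
# -> {"limit": 100, "before": "MTY2MDIzMTM3MDk5Mw=="}
```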
gh_patches_debug_1415
rasdani/github-patches
git_diff
bokeh__bokeh-1434
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Tools get lost on Grid Plots JS logic error prevents all tools from showing up in the toolbar. (cf. comment in #1342) </issue> <code> [start of examples/plotting/file/grid.py] 1 import numpy as np 2 3 from bokeh.plotting import * 4 5 N = 50 6 7 x = np.linspace(0, 4*np.pi, N) 8 y = np.sin(x) 9 10 TOOLS = "pan,wheel_zoom,box_zoom,reset,save" 11 12 l = figure(title="line", tools=TOOLS) 13 l.line(x,y, line_width=3, color="gold") 14 15 aw = figure(title="annular wedge", tools=TOOLS) 16 aw.annular_wedge(x, y, 10, 20, 0.6, 4.1, color="navy", alpha=0.5, 17 inner_radius_units="screen", outer_radius_units="screen") 18 19 bez = figure(title="bezier", tools=TOOLS) 20 bez.bezier(x, y, x+0.4, y, x+0.1, y+0.2, x-0.1, y-0.2, 21 line_width=2, color="olive") 22 23 q = figure(title="quad", tools=TOOLS) 24 q.quad(x, x-0.2, y, y-0.2, color="tomato", alpha=0.4) 25 26 p = gridplot([[l,aw],[bez,q]]) 27 28 output_file("grid.html", title="grid.py example") 29 show(p) 30 [end of examples/plotting/file/grid.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/examples/plotting/file/grid.py b/examples/plotting/file/grid.py --- a/examples/plotting/file/grid.py +++ b/examples/plotting/file/grid.py @@ -7,7 +7,7 @@ x = np.linspace(0, 4*np.pi, N) y = np.sin(x) -TOOLS = "pan,wheel_zoom,box_zoom,reset,save" +TOOLS = "pan,wheel_zoom,box_zoom,reset,save,crosshair" l = figure(title="line", tools=TOOLS) l.line(x,y, line_width=3, color="gold")
{"golden_diff": "diff --git a/examples/plotting/file/grid.py b/examples/plotting/file/grid.py\n--- a/examples/plotting/file/grid.py\n+++ b/examples/plotting/file/grid.py\n@@ -7,7 +7,7 @@\n x = np.linspace(0, 4*np.pi, N)\n y = np.sin(x)\n \n-TOOLS = \"pan,wheel_zoom,box_zoom,reset,save\"\n+TOOLS = \"pan,wheel_zoom,box_zoom,reset,save,crosshair\"\n \n l = figure(title=\"line\", tools=TOOLS)\n l.line(x,y, line_width=3, color=\"gold\")\n", "issue": "Tools get lost on Grid Plots\nJS logic error prevents all tools from showing up in the toolbar. (cf. comment in #1342)\n\n", "before_files": [{"content": "import numpy as np\n\nfrom bokeh.plotting import *\n\nN = 50\n\nx = np.linspace(0, 4*np.pi, N)\ny = np.sin(x)\n\nTOOLS = \"pan,wheel_zoom,box_zoom,reset,save\"\n\nl = figure(title=\"line\", tools=TOOLS)\nl.line(x,y, line_width=3, color=\"gold\")\n\naw = figure(title=\"annular wedge\", tools=TOOLS)\naw.annular_wedge(x, y, 10, 20, 0.6, 4.1, color=\"navy\", alpha=0.5,\n inner_radius_units=\"screen\", outer_radius_units=\"screen\")\n\nbez = figure(title=\"bezier\", tools=TOOLS)\nbez.bezier(x, y, x+0.4, y, x+0.1, y+0.2, x-0.1, y-0.2,\n line_width=2, color=\"olive\")\n\nq = figure(title=\"quad\", tools=TOOLS)\nq.quad(x, x-0.2, y, y-0.2, color=\"tomato\", alpha=0.4)\n\np = gridplot([[l,aw],[bez,q]])\n\noutput_file(\"grid.html\", title=\"grid.py example\")\nshow(p)\n", "path": "examples/plotting/file/grid.py"}]}
906
132
gh_patches_debug_24860
rasdani/github-patches
git_diff
pyro-ppl__numpyro-267
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> fori_collect does not work with non-float arrays The issue is ravel_pytree which calls `vjp` and does not work with int32 or bool arrays. As such, `fori_collect` will fail if the default identity transform is used. </issue> <code> [start of numpyro/util.py] 1 from contextlib import contextmanager 2 import random 3 4 import numpy as onp 5 import tqdm 6 7 from jax import jit, lax, ops, vmap 8 from jax.flatten_util import ravel_pytree 9 import jax.numpy as np 10 11 _DATA_TYPES = {} 12 _DISABLE_CONTROL_FLOW_PRIM = False 13 14 15 def set_rng_seed(rng_seed): 16 random.seed(rng_seed) 17 onp.random.seed(rng_seed) 18 19 20 @contextmanager 21 def optional(condition, context_manager): 22 """ 23 Optionally wrap inside `context_manager` if condition is `True`. 24 """ 25 if condition: 26 with context_manager: 27 yield 28 else: 29 yield 30 31 32 @contextmanager 33 def control_flow_prims_disabled(): 34 global _DISABLE_CONTROL_FLOW_PRIM 35 stored_flag = _DISABLE_CONTROL_FLOW_PRIM 36 try: 37 _DISABLE_CONTROL_FLOW_PRIM = True 38 yield 39 finally: 40 _DISABLE_CONTROL_FLOW_PRIM = stored_flag 41 42 43 def cond(pred, true_operand, true_fun, false_operand, false_fun): 44 if _DISABLE_CONTROL_FLOW_PRIM: 45 if pred: 46 return true_fun(true_operand) 47 else: 48 return false_fun(false_operand) 49 else: 50 return lax.cond(pred, true_operand, true_fun, false_operand, false_fun) 51 52 53 def while_loop(cond_fun, body_fun, init_val): 54 if _DISABLE_CONTROL_FLOW_PRIM: 55 val = init_val 56 while cond_fun(val): 57 val = body_fun(val) 58 return val 59 else: 60 # TODO: consider jitting while_loop similar to fori_loop 61 return lax.while_loop(cond_fun, body_fun, init_val) 62 63 64 def fori_loop(lower, upper, body_fun, init_val): 65 if _DISABLE_CONTROL_FLOW_PRIM: 66 val = init_val 67 for i in range(int(lower), int(upper)): 68 val = body_fun(i, val) 69 return val 70 else: 71 return jit(lax.fori_loop, static_argnums=(2,))(lower, upper, body_fun, init_val) 72 73 74 def identity(x): 75 return x 76 77 78 def fori_collect(lower, upper, body_fun, init_val, transform=identity, progbar=True, **progbar_opts): 79 """ 80 This looping construct works like :func:`~jax.lax.fori_loop` but with the additional 81 effect of collecting values from the loop body. In addition, this allows for 82 post-processing of these samples via `transform`, and progress bar updates. 83 Note that, `progbar=False` will be faster, especially when collecting a 84 lot of samples. Refer to example usage in :func:`~numpyro.mcmc.hmc`. 85 86 :param int lower: the index to start the collective work. In other words, 87 we will skip collecting the first `lower` values. 88 :param int upper: number of times to run the loop body. 89 :param body_fun: a callable that takes a collection of 90 `np.ndarray` and returns a collection with the same shape and 91 `dtype`. 92 :param init_val: initial value to pass as argument to `body_fun`. Can 93 be any Python collection type containing `np.ndarray` objects. 94 :param transform: a callable to post-process the values returned by `body_fn`. 95 :param progbar: whether to post progress bar updates. 96 :param `**progbar_opts`: optional additional progress bar arguments. A 97 `diagnostics_fn` can be supplied which when passed the current value 98 from `body_fun` returns a string that is used to update the progress 99 bar postfix. Also a `progbar_desc` keyword argument can be supplied 100 which is used to label the progress bar. 
101 :return: collection with the same type as `init_val` with values 102 collected along the leading axis of `np.ndarray` objects. 103 """ 104 assert lower < upper 105 init_val_flat, unravel_fn = ravel_pytree(transform(init_val)) 106 ravel_fn = lambda x: ravel_pytree(transform(x))[0] # noqa: E731 107 108 if not progbar: 109 collection = np.zeros((upper - lower,) + init_val_flat.shape) 110 111 def _body_fn(i, vals): 112 val, collection = vals 113 val = body_fun(val) 114 i = np.where(i >= lower, i - lower, 0) 115 collection = ops.index_update(collection, i, ravel_fn(val)) 116 return val, collection 117 118 _, collection = fori_loop(0, upper, _body_fn, (init_val, collection)) 119 else: 120 diagnostics_fn = progbar_opts.pop('diagnostics_fn', None) 121 progbar_desc = progbar_opts.pop('progbar_desc', '') 122 collection = [] 123 124 val = init_val 125 with tqdm.trange(upper, desc=progbar_desc) as t: 126 for i in t: 127 val = body_fun(val) 128 if i >= lower: 129 collection.append(jit(ravel_fn)(val)) 130 if diagnostics_fn: 131 t.set_postfix_str(diagnostics_fn(val), refresh=False) 132 133 collection = np.stack(collection) 134 135 return vmap(unravel_fn)(collection) 136 137 138 def copy_docs_from(source_class, full_text=False): 139 """ 140 Decorator to copy class and method docs from source to destin class. 141 """ 142 143 def decorator(destin_class): 144 # This works only in python 3.3+: 145 # if not destin_class.__doc__: 146 # destin_class.__doc__ = source_class.__doc__ 147 for name in dir(destin_class): 148 if name.startswith('_'): 149 continue 150 destin_attr = getattr(destin_class, name) 151 destin_attr = getattr(destin_attr, '__func__', destin_attr) 152 source_attr = getattr(source_class, name, None) 153 source_doc = getattr(source_attr, '__doc__', None) 154 if source_doc and not getattr(destin_attr, '__doc__', None): 155 if full_text or source_doc.startswith('See '): 156 destin_doc = source_doc 157 else: 158 destin_doc = 'See :meth:`{}.{}.{}`'.format( 159 source_class.__module__, source_class.__name__, name) 160 if isinstance(destin_attr, property): 161 # Set docs for object properties. 162 # Since __doc__ is read-only, we need to reset the property 163 # with the updated doc. 164 updated_property = property(destin_attr.fget, 165 destin_attr.fset, 166 destin_attr.fdel, 167 destin_doc) 168 setattr(destin_class, name, updated_property) 169 else: 170 destin_attr.__doc__ = destin_doc 171 return destin_class 172 173 return decorator 174 [end of numpyro/util.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/numpyro/util.py b/numpyro/util.py --- a/numpyro/util.py +++ b/numpyro/util.py @@ -1,3 +1,4 @@ +from collections import namedtuple from contextlib import contextmanager import random @@ -5,8 +6,8 @@ import tqdm from jax import jit, lax, ops, vmap -from jax.flatten_util import ravel_pytree import jax.numpy as np +from jax.tree_util import tree_flatten, tree_unflatten, tree_map _DATA_TYPES = {} _DISABLE_CONTROL_FLOW_PRIM = False @@ -171,3 +172,29 @@ return destin_class return decorator + + +pytree_metadata = namedtuple('pytree_metadata', ['flat', 'shape', 'size', 'dtype']) + + +def _ravel_list(*leaves): + leaves_metadata = tree_map(lambda l: pytree_metadata(np.ravel(l), np.shape(l), np.size(l), lax.dtype(l)), + leaves) + leaves_idx = np.cumsum(np.array((0,) + tuple(d.size for d in leaves_metadata))) + + def unravel_list(arr): + return [np.reshape(lax.dynamic_slice_in_dim(arr, leaves_idx[i], m.size), + m.shape).astype(m.dtype) + for i, m in enumerate(leaves_metadata)] + + return np.concatenate([m.flat for m in leaves_metadata]), unravel_list + + +def ravel_pytree(pytree): + leaves, treedef = tree_flatten(pytree) + flat, unravel_list = _ravel_list(*leaves) + + def unravel_pytree(arr): + return tree_unflatten(treedef, unravel_list(arr)) + + return flat, unravel_pytree
{"golden_diff": "diff --git a/numpyro/util.py b/numpyro/util.py\n--- a/numpyro/util.py\n+++ b/numpyro/util.py\n@@ -1,3 +1,4 @@\n+from collections import namedtuple\n from contextlib import contextmanager\n import random\n \n@@ -5,8 +6,8 @@\n import tqdm\n \n from jax import jit, lax, ops, vmap\n-from jax.flatten_util import ravel_pytree\n import jax.numpy as np\n+from jax.tree_util import tree_flatten, tree_unflatten, tree_map\n \n _DATA_TYPES = {}\n _DISABLE_CONTROL_FLOW_PRIM = False\n@@ -171,3 +172,29 @@\n return destin_class\n \n return decorator\n+\n+\n+pytree_metadata = namedtuple('pytree_metadata', ['flat', 'shape', 'size', 'dtype'])\n+\n+\n+def _ravel_list(*leaves):\n+ leaves_metadata = tree_map(lambda l: pytree_metadata(np.ravel(l), np.shape(l), np.size(l), lax.dtype(l)),\n+ leaves)\n+ leaves_idx = np.cumsum(np.array((0,) + tuple(d.size for d in leaves_metadata)))\n+\n+ def unravel_list(arr):\n+ return [np.reshape(lax.dynamic_slice_in_dim(arr, leaves_idx[i], m.size),\n+ m.shape).astype(m.dtype)\n+ for i, m in enumerate(leaves_metadata)]\n+\n+ return np.concatenate([m.flat for m in leaves_metadata]), unravel_list\n+\n+\n+def ravel_pytree(pytree):\n+ leaves, treedef = tree_flatten(pytree)\n+ flat, unravel_list = _ravel_list(*leaves)\n+\n+ def unravel_pytree(arr):\n+ return tree_unflatten(treedef, unravel_list(arr))\n+\n+ return flat, unravel_pytree\n", "issue": "fori_collect does not work with non-float arrays\nThe issue is ravel_pytree which calls `vjp` and does not work with int32 or bool arrays. As such, `fori_collect` will fail if the default identity transform is used.\n", "before_files": [{"content": "from contextlib import contextmanager\nimport random\n\nimport numpy as onp\nimport tqdm\n\nfrom jax import jit, lax, ops, vmap\nfrom jax.flatten_util import ravel_pytree\nimport jax.numpy as np\n\n_DATA_TYPES = {}\n_DISABLE_CONTROL_FLOW_PRIM = False\n\n\ndef set_rng_seed(rng_seed):\n random.seed(rng_seed)\n onp.random.seed(rng_seed)\n\n\n@contextmanager\ndef optional(condition, context_manager):\n \"\"\"\n Optionally wrap inside `context_manager` if condition is `True`.\n \"\"\"\n if condition:\n with context_manager:\n yield\n else:\n yield\n\n\n@contextmanager\ndef control_flow_prims_disabled():\n global _DISABLE_CONTROL_FLOW_PRIM\n stored_flag = _DISABLE_CONTROL_FLOW_PRIM\n try:\n _DISABLE_CONTROL_FLOW_PRIM = True\n yield\n finally:\n _DISABLE_CONTROL_FLOW_PRIM = stored_flag\n\n\ndef cond(pred, true_operand, true_fun, false_operand, false_fun):\n if _DISABLE_CONTROL_FLOW_PRIM:\n if pred:\n return true_fun(true_operand)\n else:\n return false_fun(false_operand)\n else:\n return lax.cond(pred, true_operand, true_fun, false_operand, false_fun)\n\n\ndef while_loop(cond_fun, body_fun, init_val):\n if _DISABLE_CONTROL_FLOW_PRIM:\n val = init_val\n while cond_fun(val):\n val = body_fun(val)\n return val\n else:\n # TODO: consider jitting while_loop similar to fori_loop\n return lax.while_loop(cond_fun, body_fun, init_val)\n\n\ndef fori_loop(lower, upper, body_fun, init_val):\n if _DISABLE_CONTROL_FLOW_PRIM:\n val = init_val\n for i in range(int(lower), int(upper)):\n val = body_fun(i, val)\n return val\n else:\n return jit(lax.fori_loop, static_argnums=(2,))(lower, upper, body_fun, init_val)\n\n\ndef identity(x):\n return x\n\n\ndef fori_collect(lower, upper, body_fun, init_val, transform=identity, progbar=True, **progbar_opts):\n \"\"\"\n This looping construct works like :func:`~jax.lax.fori_loop` but with the additional\n effect of collecting values from the loop body. 
In addition, this allows for\n post-processing of these samples via `transform`, and progress bar updates.\n Note that, `progbar=False` will be faster, especially when collecting a\n lot of samples. Refer to example usage in :func:`~numpyro.mcmc.hmc`.\n\n :param int lower: the index to start the collective work. In other words,\n we will skip collecting the first `lower` values.\n :param int upper: number of times to run the loop body.\n :param body_fun: a callable that takes a collection of\n `np.ndarray` and returns a collection with the same shape and\n `dtype`.\n :param init_val: initial value to pass as argument to `body_fun`. Can\n be any Python collection type containing `np.ndarray` objects.\n :param transform: a callable to post-process the values returned by `body_fn`.\n :param progbar: whether to post progress bar updates.\n :param `**progbar_opts`: optional additional progress bar arguments. A\n `diagnostics_fn` can be supplied which when passed the current value\n from `body_fun` returns a string that is used to update the progress\n bar postfix. Also a `progbar_desc` keyword argument can be supplied\n which is used to label the progress bar.\n :return: collection with the same type as `init_val` with values\n collected along the leading axis of `np.ndarray` objects.\n \"\"\"\n assert lower < upper\n init_val_flat, unravel_fn = ravel_pytree(transform(init_val))\n ravel_fn = lambda x: ravel_pytree(transform(x))[0] # noqa: E731\n\n if not progbar:\n collection = np.zeros((upper - lower,) + init_val_flat.shape)\n\n def _body_fn(i, vals):\n val, collection = vals\n val = body_fun(val)\n i = np.where(i >= lower, i - lower, 0)\n collection = ops.index_update(collection, i, ravel_fn(val))\n return val, collection\n\n _, collection = fori_loop(0, upper, _body_fn, (init_val, collection))\n else:\n diagnostics_fn = progbar_opts.pop('diagnostics_fn', None)\n progbar_desc = progbar_opts.pop('progbar_desc', '')\n collection = []\n\n val = init_val\n with tqdm.trange(upper, desc=progbar_desc) as t:\n for i in t:\n val = body_fun(val)\n if i >= lower:\n collection.append(jit(ravel_fn)(val))\n if diagnostics_fn:\n t.set_postfix_str(diagnostics_fn(val), refresh=False)\n\n collection = np.stack(collection)\n\n return vmap(unravel_fn)(collection)\n\n\ndef copy_docs_from(source_class, full_text=False):\n \"\"\"\n Decorator to copy class and method docs from source to destin class.\n \"\"\"\n\n def decorator(destin_class):\n # This works only in python 3.3+:\n # if not destin_class.__doc__:\n # destin_class.__doc__ = source_class.__doc__\n for name in dir(destin_class):\n if name.startswith('_'):\n continue\n destin_attr = getattr(destin_class, name)\n destin_attr = getattr(destin_attr, '__func__', destin_attr)\n source_attr = getattr(source_class, name, None)\n source_doc = getattr(source_attr, '__doc__', None)\n if source_doc and not getattr(destin_attr, '__doc__', None):\n if full_text or source_doc.startswith('See '):\n destin_doc = source_doc\n else:\n destin_doc = 'See :meth:`{}.{}.{}`'.format(\n source_class.__module__, source_class.__name__, name)\n if isinstance(destin_attr, property):\n # Set docs for object properties.\n # Since __doc__ is read-only, we need to reset the property\n # with the updated doc.\n updated_property = property(destin_attr.fget,\n destin_attr.fset,\n destin_attr.fdel,\n destin_doc)\n setattr(destin_class, name, updated_property)\n else:\n destin_attr.__doc__ = destin_doc\n return destin_class\n\n return decorator\n", "path": "numpyro/util.py"}]}
2,420
391
gh_patches_debug_2061
rasdani/github-patches
git_diff
plotly__dash-565
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> New version of dash_renderer is not automatically installed with Dash 0.36.0 Deploying apps on Dash Deployment Server results in `dash-renderer` not being updated if it is already installed (even if that version is `0.16.x` and the Dash version is specified as `0.36.0`. This causes an `Error loading dependencies`, as `dash-renderer` attempts to attach event handlers to Dash events, which don't exist any more. </issue> <code> [start of setup.py] 1 import io 2 from setuptools import setup, find_packages 3 4 main_ns = {} 5 exec(open('dash/version.py').read(), main_ns) # pylint: disable=exec-used 6 7 setup( 8 name='dash', 9 version=main_ns['__version__'], 10 author='chris p', 11 author_email='[email protected]', 12 packages=find_packages(exclude=['tests*']), 13 include_package_data=True, 14 license='MIT', 15 description=('A Python framework for building reactive web-apps. ' 16 'Developed by Plotly.'), 17 long_description=io.open('README.md', encoding='utf-8').read(), 18 long_description_content_type='text/markdown', 19 install_requires=[ 20 'Flask>=0.12', 21 'flask-compress', 22 'plotly', 23 'dash_renderer', 24 ], 25 entry_points={ 26 'console_scripts': [ 27 'dash-generate-components =' 28 ' dash.development.component_generator:cli' 29 ] 30 }, 31 url='https://plot.ly/dash', 32 classifiers=[ 33 'Development Status :: 5 - Production/Stable', 34 'Environment :: Web Environment', 35 'Framework :: Flask', 36 'Intended Audience :: Developers', 37 'Intended Audience :: Education', 38 'Intended Audience :: Financial and Insurance Industry', 39 'Intended Audience :: Healthcare Industry', 40 'Intended Audience :: Manufacturing', 41 'Intended Audience :: Science/Research', 42 'License :: OSI Approved :: MIT License', 43 'Programming Language :: Python', 44 'Programming Language :: Python :: 2', 45 'Programming Language :: Python :: 2.7', 46 'Programming Language :: Python :: 3', 47 'Programming Language :: Python :: 3.3', 48 'Programming Language :: Python :: 3.4', 49 'Programming Language :: Python :: 3.5', 50 'Programming Language :: Python :: 3.6', 51 'Programming Language :: Python :: 3.7', 52 'Topic :: Database :: Front-Ends', 53 'Topic :: Office/Business :: Financial :: Spreadsheet', 54 'Topic :: Scientific/Engineering :: Visualization', 55 'Topic :: Software Development :: Libraries :: Application Frameworks', 56 'Topic :: Software Development :: Widget Sets' 57 ] 58 ) 59 [end of setup.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -20,7 +20,10 @@ 'Flask>=0.12', 'flask-compress', 'plotly', - 'dash_renderer', + 'dash_renderer==0.17.0', + 'dash-core-components==0.43.0', + 'dash-html-components==0.13.5', + 'dash-table==3.3.0' ], entry_points={ 'console_scripts': [
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -20,7 +20,10 @@\n 'Flask>=0.12',\n 'flask-compress',\n 'plotly',\n- 'dash_renderer',\n+ 'dash_renderer==0.17.0',\n+ 'dash-core-components==0.43.0',\n+ 'dash-html-components==0.13.5',\n+ 'dash-table==3.3.0'\n ],\n entry_points={\n 'console_scripts': [\n", "issue": "New version of dash_renderer is not automatically installed with Dash 0.36.0\nDeploying apps on Dash Deployment Server results in `dash-renderer` not being updated if it is already installed (even if that version is `0.16.x` and the Dash version is specified as `0.36.0`. This causes an `Error loading dependencies`, as `dash-renderer` attempts to attach event handlers to Dash events, which don't exist any more. \n", "before_files": [{"content": "import io\nfrom setuptools import setup, find_packages\n\nmain_ns = {}\nexec(open('dash/version.py').read(), main_ns) # pylint: disable=exec-used\n\nsetup(\n name='dash',\n version=main_ns['__version__'],\n author='chris p',\n author_email='[email protected]',\n packages=find_packages(exclude=['tests*']),\n include_package_data=True,\n license='MIT',\n description=('A Python framework for building reactive web-apps. '\n 'Developed by Plotly.'),\n long_description=io.open('README.md', encoding='utf-8').read(),\n long_description_content_type='text/markdown',\n install_requires=[\n 'Flask>=0.12',\n 'flask-compress',\n 'plotly',\n 'dash_renderer',\n ],\n entry_points={\n 'console_scripts': [\n 'dash-generate-components ='\n ' dash.development.component_generator:cli'\n ]\n },\n url='https://plot.ly/dash',\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Web Environment',\n 'Framework :: Flask',\n 'Intended Audience :: Developers',\n 'Intended Audience :: Education',\n 'Intended Audience :: Financial and Insurance Industry',\n 'Intended Audience :: Healthcare Industry',\n 'Intended Audience :: Manufacturing',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Topic :: Database :: Front-Ends',\n 'Topic :: Office/Business :: Financial :: Spreadsheet',\n 'Topic :: Scientific/Engineering :: Visualization',\n 'Topic :: Software Development :: Libraries :: Application Frameworks',\n 'Topic :: Software Development :: Widget Sets'\n ]\n)\n", "path": "setup.py"}]}
1,205
126
gh_patches_debug_29025
rasdani/github-patches
git_diff
angr__angr-1073
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Avoiding Infinite Loops with loopseer technique Hello, I'm trying to run some code which I know is just a loop running infinitely but I do want to exit this loop (hard coded limit) after it iterated 10 times. I thought loopseer suppose to do just that but it doesn't seems to stop after 10 iterations but keep going non stop. My example code: ``` void loop_forever(int a) { int i; for(i=0;;i++) { printf("Test infinite loop"); } } ``` Example usage: ``` p = angr.Project(loop_forever, load_options={"auto_load_libs": False}) cfg = p.analyses.CFGFast(normalize = True) state = p.factory.call_state(addr=0x10438) # 0x10438 = loop_forever function simgr = p.factory.simulation_manager(state) simgr.use_technique(angr.exploration_techniques.LoopSeer(cfg=cfg, bound=10)) while len(simgr.active) != 0: simgr.step() print simgr.active ``` The binary (elf, 32bit arm) can be download here: https://ufile.io/9nym2 </issue> <code> [start of angr/exploration_techniques/loop_seer.py] 1 import logging 2 3 from . import ExplorationTechnique 4 from ..analyses.loopfinder import Loop 5 from ..knowledge_base import KnowledgeBase 6 from ..knowledge_plugins.functions import Function 7 8 9 l = logging.getLogger("angr.exploration_techniques.loop_seer") 10 11 12 class LoopSeer(ExplorationTechnique): 13 """ 14 This exploration technique monitors exploration and maintains all 15 loop-related data (well, currently it is just the loop trip counts, but feel 16 free to add something else). 17 """ 18 19 def __init__(self, cfg=None, functions=None, loops=None, bound=None, bound_reached=None, discard_stash='spinning'): 20 """ 21 :param cfg: Normalized CFG is required. 22 :param functions: Function(s) containing the loop(s) to be analyzed. 23 :param loops: Loop(s) to be analyzed. 24 :param bound: Limit the number of iteration a loop may be executed. 25 :param bound_reached: If provided, should be a function that takes a SimulationManager and returns 26 a SimulationManager. Will be called when loop execution reach the given bound. 27 Default to moving states that exceed the loop limit to a discard stash. 28 :param discard_stash: Name of the stash containing states exceeding the loop limit. 29 """ 30 31 super(LoopSeer, self).__init__() 32 self.cfg = cfg 33 self.functions = functions 34 self.bound = bound 35 self.bound_reached = bound_reached 36 self.discard_stash = discard_stash 37 38 self.loops = {} 39 40 if type(loops) is Loop: 41 loops = [loops] 42 43 if type(loops) in (list, tuple) and all(type(l) is Loop for l in loops): 44 for loop in loops: 45 self.loops[loop.entry_edges[0][0].addr] = loop 46 47 elif loops is not None: 48 raise TypeError('What type of loop is it?') 49 50 def setup(self, simgr): 51 if self.cfg is None: 52 cfg_kb = KnowledgeBase(self.project, self.project.loader.main_object) 53 self.cfg = self.project.analyses.CFGFast(kb=cfg_kb, normalize=True) 54 elif not self.cfg.normalized: 55 l.warning("LoopSeer uses normalized CFG. 
Recomputing the CFG...") 56 self.cfg.normalize() 57 58 if type(self.functions) is str: 59 func = [self.cfg.kb.functions.function(name=self.functions)] 60 61 elif type(self.functions) is int: 62 func = [self.cfg.kb.functions.function(addr=self.functions)] 63 64 elif type(self.functions) is Function: 65 func = [self.functions] 66 67 elif type(self.functions) in (list, tuple): 68 func = [] 69 for f in self.functions: 70 if type(f) is str: 71 func.append(self.cfg.kb.functions.function(name=f)) 72 73 elif type(f) is int: 74 func.append(self.cfg.kb.functions.function(addr=f)) 75 76 elif type(f) is Function: 77 func.append(f) 78 79 else: 80 raise TypeError("What type of function is it?") 81 elif self.functions is None: 82 func = None 83 84 else: 85 raise TypeError("What type of function is it?") 86 87 if not self.loops or func is not None: 88 loop_finder = self.project.analyses.LoopFinder(kb=self.cfg.kb, normalize=True, functions=func) 89 90 for loop in loop_finder.loops: 91 entry = loop.entry_edges[0][0] 92 self.loops[entry.addr] = loop 93 94 def step(self, simgr, stash=None, **kwargs): 95 kwargs['successor_func'] = self.normalized_step 96 97 simgr.step(stash=stash, **kwargs) 98 99 for state in simgr.stashes[stash]: 100 # Processing a currently running loop 101 if state.loop_data.current_loop: 102 loop = state.loop_data.current_loop[-1][0] 103 header = loop.entry.addr 104 105 if state.addr == header: 106 state.loop_data.trip_counts[state.addr][-1] += 1 107 108 elif state.addr in state.loop_data.current_loop[-1][1]: 109 # This is for unoptimized while/for loops. 110 # 111 # 0x10812: movs r3, #0 -> this block dominates the loop 112 # 0x10814: str r3, [r7, #20] 113 # 0x10816: b 0x10868 114 # 0x10818: movs r3, #0 -> the real loop body starts here 115 # ... 116 # 0x10868: ldr r3, [r7, #20] -> the loop header is executed the first time without executing the loop body 117 # 0x1086a: cmp r3, #3 118 # 0x1086c: ble 0x10818 119 120 back_edge_src = loop.continue_edges[0][0].addr 121 back_edge_dst = loop.continue_edges[0][1].addr 122 block = self.project.factory.block(back_edge_src) 123 if back_edge_src != back_edge_dst and back_edge_dst in block.instruction_addrs: 124 state.loop_data.trip_counts[header][-1] -= 1 125 126 state.loop_data.current_loop.pop() 127 128 if self.bound is not None: 129 if state.loop_data.trip_counts[header][-1] >= self.bound: 130 if self.bound_reached is not None: 131 simgr = self.bound_reached(simgr) 132 else: 133 simgr.stashes[stash].remove(state) 134 simgr.stashes[self.discard_stash].append(state) 135 136 l.debug("%s trip counts %s", state, state.loop_data.trip_counts) 137 138 # Loop entry detected. This test is put here because in case of 139 # nested loops, we want to handle the outer loop before proceeding 140 # the inner loop. 141 if state.addr in self.loops: 142 loop = self.loops[state.addr] 143 header = loop.entry.addr 144 exits = [e[1].addr for e in loop.break_edges] 145 146 state.loop_data.trip_counts[header].append(0) 147 state.loop_data.current_loop.append((loop, exits)) 148 149 return simgr 150 151 def normalized_step(self, state): 152 node = self.cfg.get_any_node(state.addr) 153 return state.step(num_inst=len(node.instruction_addrs) if node is not None else None) 154 [end of angr/exploration_techniques/loop_seer.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/angr/exploration_techniques/loop_seer.py b/angr/exploration_techniques/loop_seer.py --- a/angr/exploration_techniques/loop_seer.py +++ b/angr/exploration_techniques/loop_seer.py @@ -91,11 +91,9 @@ entry = loop.entry_edges[0][0] self.loops[entry.addr] = loop - def step(self, simgr, stash=None, **kwargs): + def step(self, simgr, stash='active', **kwargs): kwargs['successor_func'] = self.normalized_step - simgr.step(stash=stash, **kwargs) - for state in simgr.stashes[stash]: # Processing a currently running loop if state.loop_data.current_loop: @@ -126,7 +124,7 @@ state.loop_data.current_loop.pop() if self.bound is not None: - if state.loop_data.trip_counts[header][-1] >= self.bound: + if state.loop_data.trip_counts[header][-1] > self.bound: if self.bound_reached is not None: simgr = self.bound_reached(simgr) else: @@ -146,6 +144,8 @@ state.loop_data.trip_counts[header].append(0) state.loop_data.current_loop.append((loop, exits)) + simgr.step(stash=stash, **kwargs) + return simgr def normalized_step(self, state):
{"golden_diff": "diff --git a/angr/exploration_techniques/loop_seer.py b/angr/exploration_techniques/loop_seer.py\n--- a/angr/exploration_techniques/loop_seer.py\n+++ b/angr/exploration_techniques/loop_seer.py\n@@ -91,11 +91,9 @@\n entry = loop.entry_edges[0][0]\n self.loops[entry.addr] = loop\n \n- def step(self, simgr, stash=None, **kwargs):\n+ def step(self, simgr, stash='active', **kwargs):\n kwargs['successor_func'] = self.normalized_step\n \n- simgr.step(stash=stash, **kwargs)\n-\n for state in simgr.stashes[stash]:\n # Processing a currently running loop\n if state.loop_data.current_loop:\n@@ -126,7 +124,7 @@\n state.loop_data.current_loop.pop()\n \n if self.bound is not None:\n- if state.loop_data.trip_counts[header][-1] >= self.bound:\n+ if state.loop_data.trip_counts[header][-1] > self.bound:\n if self.bound_reached is not None:\n simgr = self.bound_reached(simgr)\n else:\n@@ -146,6 +144,8 @@\n state.loop_data.trip_counts[header].append(0)\n state.loop_data.current_loop.append((loop, exits))\n \n+ simgr.step(stash=stash, **kwargs)\n+\n return simgr\n \n def normalized_step(self, state):\n", "issue": "Avoiding Infinite Loops with loopseer technique\nHello,\r\n\r\nI'm trying to run some code which I know is just a loop running infinitely but I do want to exit this loop (hard coded limit) after it iterated 10 times.\r\nI thought loopseer suppose to do just that but it doesn't seems to stop after 10 iterations but keep going non stop.\r\n\r\nMy example code:\r\n```\r\nvoid loop_forever(int a) {\r\n int i;\r\n for(i=0;;i++) {\r\n printf(\"Test infinite loop\");\r\n }\r\n}\r\n```\r\n\r\nExample usage:\r\n```\r\np = angr.Project(loop_forever, load_options={\"auto_load_libs\": False})\r\ncfg = p.analyses.CFGFast(normalize = True)\r\nstate = p.factory.call_state(addr=0x10438) # 0x10438 = loop_forever function\r\nsimgr = p.factory.simulation_manager(state)\r\nsimgr.use_technique(angr.exploration_techniques.LoopSeer(cfg=cfg, bound=10))\r\nwhile len(simgr.active) != 0:\r\n simgr.step()\r\n print simgr.active\r\n```\r\n\r\nThe binary (elf, 32bit arm) can be download here: https://ufile.io/9nym2\n", "before_files": [{"content": "import logging\n\nfrom . import ExplorationTechnique\nfrom ..analyses.loopfinder import Loop\nfrom ..knowledge_base import KnowledgeBase\nfrom ..knowledge_plugins.functions import Function\n\n\nl = logging.getLogger(\"angr.exploration_techniques.loop_seer\")\n\n\nclass LoopSeer(ExplorationTechnique):\n \"\"\"\n This exploration technique monitors exploration and maintains all\n loop-related data (well, currently it is just the loop trip counts, but feel\n free to add something else).\n \"\"\"\n\n def __init__(self, cfg=None, functions=None, loops=None, bound=None, bound_reached=None, discard_stash='spinning'):\n \"\"\"\n :param cfg: Normalized CFG is required.\n :param functions: Function(s) containing the loop(s) to be analyzed.\n :param loops: Loop(s) to be analyzed.\n :param bound: Limit the number of iteration a loop may be executed.\n :param bound_reached: If provided, should be a function that takes a SimulationManager and returns\n a SimulationManager. 
Will be called when loop execution reach the given bound.\n Default to moving states that exceed the loop limit to a discard stash.\n :param discard_stash: Name of the stash containing states exceeding the loop limit.\n \"\"\"\n\n super(LoopSeer, self).__init__()\n self.cfg = cfg\n self.functions = functions\n self.bound = bound\n self.bound_reached = bound_reached\n self.discard_stash = discard_stash\n\n self.loops = {}\n\n if type(loops) is Loop:\n loops = [loops]\n\n if type(loops) in (list, tuple) and all(type(l) is Loop for l in loops):\n for loop in loops:\n self.loops[loop.entry_edges[0][0].addr] = loop\n\n elif loops is not None:\n raise TypeError('What type of loop is it?')\n\n def setup(self, simgr):\n if self.cfg is None:\n cfg_kb = KnowledgeBase(self.project, self.project.loader.main_object)\n self.cfg = self.project.analyses.CFGFast(kb=cfg_kb, normalize=True)\n elif not self.cfg.normalized:\n l.warning(\"LoopSeer uses normalized CFG. Recomputing the CFG...\")\n self.cfg.normalize()\n\n if type(self.functions) is str:\n func = [self.cfg.kb.functions.function(name=self.functions)]\n\n elif type(self.functions) is int:\n func = [self.cfg.kb.functions.function(addr=self.functions)]\n\n elif type(self.functions) is Function:\n func = [self.functions]\n\n elif type(self.functions) in (list, tuple):\n func = []\n for f in self.functions:\n if type(f) is str:\n func.append(self.cfg.kb.functions.function(name=f))\n\n elif type(f) is int:\n func.append(self.cfg.kb.functions.function(addr=f))\n\n elif type(f) is Function:\n func.append(f)\n\n else:\n raise TypeError(\"What type of function is it?\")\n elif self.functions is None:\n func = None\n\n else:\n raise TypeError(\"What type of function is it?\")\n\n if not self.loops or func is not None:\n loop_finder = self.project.analyses.LoopFinder(kb=self.cfg.kb, normalize=True, functions=func)\n\n for loop in loop_finder.loops:\n entry = loop.entry_edges[0][0]\n self.loops[entry.addr] = loop\n\n def step(self, simgr, stash=None, **kwargs):\n kwargs['successor_func'] = self.normalized_step\n\n simgr.step(stash=stash, **kwargs)\n\n for state in simgr.stashes[stash]:\n # Processing a currently running loop\n if state.loop_data.current_loop:\n loop = state.loop_data.current_loop[-1][0]\n header = loop.entry.addr\n\n if state.addr == header:\n state.loop_data.trip_counts[state.addr][-1] += 1\n\n elif state.addr in state.loop_data.current_loop[-1][1]:\n # This is for unoptimized while/for loops.\n #\n # 0x10812: movs r3, #0 -> this block dominates the loop\n # 0x10814: str r3, [r7, #20]\n # 0x10816: b 0x10868\n # 0x10818: movs r3, #0 -> the real loop body starts here\n # ...\n # 0x10868: ldr r3, [r7, #20] -> the loop header is executed the first time without executing the loop body\n # 0x1086a: cmp r3, #3\n # 0x1086c: ble 0x10818\n\n back_edge_src = loop.continue_edges[0][0].addr\n back_edge_dst = loop.continue_edges[0][1].addr\n block = self.project.factory.block(back_edge_src)\n if back_edge_src != back_edge_dst and back_edge_dst in block.instruction_addrs:\n state.loop_data.trip_counts[header][-1] -= 1\n\n state.loop_data.current_loop.pop()\n\n if self.bound is not None:\n if state.loop_data.trip_counts[header][-1] >= self.bound:\n if self.bound_reached is not None:\n simgr = self.bound_reached(simgr)\n else:\n simgr.stashes[stash].remove(state)\n simgr.stashes[self.discard_stash].append(state)\n\n l.debug(\"%s trip counts %s\", state, state.loop_data.trip_counts)\n\n # Loop entry detected. 
This test is put here because in case of\n # nested loops, we want to handle the outer loop before proceeding\n # the inner loop.\n if state.addr in self.loops:\n loop = self.loops[state.addr]\n header = loop.entry.addr\n exits = [e[1].addr for e in loop.break_edges]\n\n state.loop_data.trip_counts[header].append(0)\n state.loop_data.current_loop.append((loop, exits))\n\n return simgr\n\n def normalized_step(self, state):\n node = self.cfg.get_any_node(state.addr)\n return state.step(num_inst=len(node.instruction_addrs) if node is not None else None)\n", "path": "angr/exploration_techniques/loop_seer.py"}]}
2,600
348
gh_patches_debug_10715
rasdani/github-patches
git_diff
akvo__akvo-rsr-2711
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Custom stylesheets for Akvo pages missing The custom CSS that can be uploaded to an Akvo page site is currently not served. This is due to the link to it accidentally being removed when the main stylesheet for RSR was supplemented with a second sheet for the new Results. </issue> <code> [start of akvo/rsr/context_processors.py] 1 # -*- coding: utf-8 -*- 2 """ 3 Akvo RSR is covered by the GNU Affero General Public License. 4 5 See more details in the license.txt file located at the root folder of the 6 Akvo RSR module. For additional details on the GNU license please see 7 < http://www.gnu.org/licenses/agpl.html >. 8 """ 9 10 import django 11 12 from django.conf import settings 13 from django.core.exceptions import DisallowedHost 14 from django.contrib.sites.models import get_current_site 15 16 17 def extra_context(request, protocol="http"): 18 """Add information to the request context.""" 19 try: 20 current_site = get_current_site(request) 21 except DisallowedHost: 22 current_site = None 23 24 django_version = django.get_version() 25 debug = getattr(settings, 'DEBUG', False) 26 deploy_tag = getattr(settings, 'DEPLOY_TAG', 'Unknown') 27 deploy_branch = getattr(settings, 'DEPLOY_BRANCH', 'Unknown') 28 deploy_commit_id = getattr(settings, 'DEPLOY_COMMIT_ID', 'Unknown') 29 deploy_commit_full_id = getattr(settings, 'DEPLOY_COMMIT_FULL_ID', 'Unknown') 30 31 return dict( 32 current_site=current_site, 33 django_version=django_version, 34 debug=debug, 35 deploy_tag=deploy_tag, 36 deploy_branch=deploy_branch, 37 deploy_commit_id=deploy_commit_id, 38 deploy_commit_full_id=deploy_commit_full_id 39 ) 40 41 42 def get_current_path_without_lang(request): 43 """Return current path without lang.""" 44 path = request.get_full_path() 45 path_bits = path.split('/') 46 path = '/'.join(path_bits[2:]) 47 return {'current_path_without_lang': path} 48 49 50 def extra_pages_context(request): 51 """Add context information of an RSR Page.""" 52 if request.rsr_page: 53 page = request.rsr_page 54 return { 55 'rsr_page': page, 56 'favicon': page.favicon, 57 'logo': page.logo, 58 'organisation': page.organisation, 59 'return_url': page.return_url, 60 'return_url_text': page.custom_return_url_text, 61 'stylesheet': page.stylesheet, 62 'akvoapp_root_url': '//{}'.format(settings.AKVOAPP_DOMAIN), 63 'domain_url': '//{}'.format(settings.RSR_DOMAIN), 64 'no_facebook': not page.facebook_button, 65 'facebook_app_id': page.facebook_app_id, 66 'no_twitter': not page.twitter_button, 67 } 68 69 return {} 70 [end of akvo/rsr/context_processors.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/akvo/rsr/context_processors.py b/akvo/rsr/context_processors.py --- a/akvo/rsr/context_processors.py +++ b/akvo/rsr/context_processors.py @@ -58,7 +58,7 @@ 'organisation': page.organisation, 'return_url': page.return_url, 'return_url_text': page.custom_return_url_text, - 'stylesheet': page.stylesheet, + 'page_stylesheet': page.stylesheet, 'akvoapp_root_url': '//{}'.format(settings.AKVOAPP_DOMAIN), 'domain_url': '//{}'.format(settings.RSR_DOMAIN), 'no_facebook': not page.facebook_button,
{"golden_diff": "diff --git a/akvo/rsr/context_processors.py b/akvo/rsr/context_processors.py\n--- a/akvo/rsr/context_processors.py\n+++ b/akvo/rsr/context_processors.py\n@@ -58,7 +58,7 @@\n 'organisation': page.organisation,\n 'return_url': page.return_url,\n 'return_url_text': page.custom_return_url_text,\n- 'stylesheet': page.stylesheet,\n+ 'page_stylesheet': page.stylesheet,\n 'akvoapp_root_url': '//{}'.format(settings.AKVOAPP_DOMAIN),\n 'domain_url': '//{}'.format(settings.RSR_DOMAIN),\n 'no_facebook': not page.facebook_button,\n", "issue": "Custom stylesheets for Akvo pages missing\nThe custom CSS that can be uploaded to an Akvo page site is currently not served.\r\n\r\nThis is due to the link to it accidentally being removed when the main stylesheet for RSR was supplemented with a second sheet for the new Results.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"\nAkvo RSR is covered by the GNU Affero General Public License.\n\nSee more details in the license.txt file located at the root folder of the\nAkvo RSR module. For additional details on the GNU license please see\n< http://www.gnu.org/licenses/agpl.html >.\n\"\"\"\n\nimport django\n\nfrom django.conf import settings\nfrom django.core.exceptions import DisallowedHost\nfrom django.contrib.sites.models import get_current_site\n\n\ndef extra_context(request, protocol=\"http\"):\n \"\"\"Add information to the request context.\"\"\"\n try:\n current_site = get_current_site(request)\n except DisallowedHost:\n current_site = None\n\n django_version = django.get_version()\n debug = getattr(settings, 'DEBUG', False)\n deploy_tag = getattr(settings, 'DEPLOY_TAG', 'Unknown')\n deploy_branch = getattr(settings, 'DEPLOY_BRANCH', 'Unknown')\n deploy_commit_id = getattr(settings, 'DEPLOY_COMMIT_ID', 'Unknown')\n deploy_commit_full_id = getattr(settings, 'DEPLOY_COMMIT_FULL_ID', 'Unknown')\n\n return dict(\n current_site=current_site,\n django_version=django_version,\n debug=debug,\n deploy_tag=deploy_tag,\n deploy_branch=deploy_branch,\n deploy_commit_id=deploy_commit_id,\n deploy_commit_full_id=deploy_commit_full_id\n )\n\n\ndef get_current_path_without_lang(request):\n \"\"\"Return current path without lang.\"\"\"\n path = request.get_full_path()\n path_bits = path.split('/')\n path = '/'.join(path_bits[2:])\n return {'current_path_without_lang': path}\n\n\ndef extra_pages_context(request):\n \"\"\"Add context information of an RSR Page.\"\"\"\n if request.rsr_page:\n page = request.rsr_page\n return {\n 'rsr_page': page,\n 'favicon': page.favicon,\n 'logo': page.logo,\n 'organisation': page.organisation,\n 'return_url': page.return_url,\n 'return_url_text': page.custom_return_url_text,\n 'stylesheet': page.stylesheet,\n 'akvoapp_root_url': '//{}'.format(settings.AKVOAPP_DOMAIN),\n 'domain_url': '//{}'.format(settings.RSR_DOMAIN),\n 'no_facebook': not page.facebook_button,\n 'facebook_app_id': page.facebook_app_id,\n 'no_twitter': not page.twitter_button,\n }\n\n return {}\n", "path": "akvo/rsr/context_processors.py"}]}
1,234
145
gh_patches_debug_129
rasdani/github-patches
git_diff
librosa__librosa-1839
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> librosa 0.10.2 is not compatible with matplotlib <3.5 ***BEFORE POSTING A BUG REPORT*** Please look through [existing issues (both open and closed)](https://github.com/librosa/librosa/issues?q=is%3Aissue) to see if it's already been reported or fixed! **librosa 0.10.2 is not imcompatible with matplotlib.colormap** When I try to user librosa.display, it reports the following error: cannot import name 'colormaps' from 'matplotlib' ![image](https://github.com/librosa/librosa/assets/51704570/d50df74f-c345-48ba-8953-b9b1efec3ff7) **error code** <!-- Example: ``` import librosa.display import matplotlib.pyplot as plt import numpy as np --> **Expected behavior** A clear and concise description of what you expected to happen. **Screenshots** If applicable, add screenshots to help explain your problem. **Software versions*** ![image](https://github.com/librosa/librosa/assets/51704570/957530c7-9656-44f9-8b0e-c1df49c3b61f) **Additional context** I have tried to change the version of matplotlib, but it does not work. And the versions I have tried are: 2.0.0, 3.0.0 librosa 0.10.2 is not compatible with matplotlib <3.5 ***BEFORE POSTING A BUG REPORT*** Please look through [existing issues (both open and closed)](https://github.com/librosa/librosa/issues?q=is%3Aissue) to see if it's already been reported or fixed! **librosa 0.10.2 is not imcompatible with matplotlib.colormap** When I try to user librosa.display, it reports the following error: cannot import name 'colormaps' from 'matplotlib' ![image](https://github.com/librosa/librosa/assets/51704570/d50df74f-c345-48ba-8953-b9b1efec3ff7) **error code** <!-- Example: ``` import librosa.display import matplotlib.pyplot as plt import numpy as np --> **Expected behavior** A clear and concise description of what you expected to happen. **Screenshots** If applicable, add screenshots to help explain your problem. **Software versions*** ![image](https://github.com/librosa/librosa/assets/51704570/957530c7-9656-44f9-8b0e-c1df49c3b61f) **Additional context** I have tried to change the version of matplotlib, but it does not work. 
And the versions I have tried are: 2.0.0, 3.0.0 </issue> <code> [start of librosa/version.py] 1 #!/usr/bin/env python 2 # -*- coding: utf-8 -*- 3 """Version info""" 4 5 import sys 6 import importlib 7 8 short_version = "0.10" 9 version = "0.10.2" 10 11 12 def __get_mod_version(modname): 13 try: 14 if modname in sys.modules: 15 mod = sys.modules[modname] 16 else: 17 mod = importlib.import_module(modname) 18 try: 19 return mod.__version__ 20 except AttributeError: 21 return "installed, no version number available" 22 23 except ImportError: 24 return None 25 26 27 def show_versions() -> None: 28 """Return the version information for all librosa dependencies.""" 29 core_deps = [ 30 "audioread", 31 "numpy", 32 "scipy", 33 "sklearn", 34 "joblib", 35 "decorator", 36 "numba", 37 "soundfile", 38 "pooch", 39 "soxr", 40 "typing_extensions", 41 "lazy_loader", 42 "msgpack", 43 ] 44 45 extra_deps = [ 46 "numpydoc", 47 "sphinx", 48 "sphinx_rtd_theme", 49 "matplotlib", 50 "sphinx_multiversion", 51 "sphinx_gallery", 52 "mir_eval", 53 "ipython", 54 "sphinxcontrib.rsvgconverter", 55 "pytest", 56 "pytest_mpl", 57 "pytest_cov", 58 "samplerate", 59 "resampy", 60 "presets", 61 "packaging", 62 ] 63 64 print("INSTALLED VERSIONS") 65 print("------------------") 66 print(f"python: {sys.version}\n") 67 print(f"librosa: {version}\n") 68 for dep in core_deps: 69 print("{}: {}".format(dep, __get_mod_version(dep))) 70 print("") 71 for dep in extra_deps: 72 print("{}: {}".format(dep, __get_mod_version(dep))) 73 [end of librosa/version.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/librosa/version.py b/librosa/version.py --- a/librosa/version.py +++ b/librosa/version.py @@ -6,7 +6,7 @@ import importlib short_version = "0.10" -version = "0.10.2" +version = "0.10.2.post1" def __get_mod_version(modname):
{"golden_diff": "diff --git a/librosa/version.py b/librosa/version.py\n--- a/librosa/version.py\n+++ b/librosa/version.py\n@@ -6,7 +6,7 @@\n import importlib\n \n short_version = \"0.10\"\n-version = \"0.10.2\"\n+version = \"0.10.2.post1\"\n \n \n def __get_mod_version(modname):\n", "issue": "librosa 0.10.2 is not compatible with matplotlib <3.5\n***BEFORE POSTING A BUG REPORT*** Please look through [existing issues (both open and closed)](https://github.com/librosa/librosa/issues?q=is%3Aissue) to see if it's already been reported or fixed!\r\n\r\n\r\n**librosa 0.10.2 is not imcompatible with matplotlib.colormap**\r\n When I try to user librosa.display, it reports the following error: cannot import name 'colormaps' from 'matplotlib'\r\n![image](https://github.com/librosa/librosa/assets/51704570/d50df74f-c345-48ba-8953-b9b1efec3ff7)\r\n\r\n\r\n\r\n**error code**\r\n<!--\r\nExample:\r\n```\r\nimport librosa.display\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\n\r\n-->\r\n**Expected behavior**\r\nA clear and concise description of what you expected to happen.\r\n\r\n**Screenshots**\r\nIf applicable, add screenshots to help explain your problem.\r\n\r\n**Software versions***\r\n\r\n![image](https://github.com/librosa/librosa/assets/51704570/957530c7-9656-44f9-8b0e-c1df49c3b61f)\r\n\r\n**Additional context**\r\nI have tried to change the version of matplotlib, but it does not work. And the versions I have tried are: 2.0.0, 3.0.0\r\n\nlibrosa 0.10.2 is not compatible with matplotlib <3.5\n***BEFORE POSTING A BUG REPORT*** Please look through [existing issues (both open and closed)](https://github.com/librosa/librosa/issues?q=is%3Aissue) to see if it's already been reported or fixed!\r\n\r\n\r\n**librosa 0.10.2 is not imcompatible with matplotlib.colormap**\r\n When I try to user librosa.display, it reports the following error: cannot import name 'colormaps' from 'matplotlib'\r\n![image](https://github.com/librosa/librosa/assets/51704570/d50df74f-c345-48ba-8953-b9b1efec3ff7)\r\n\r\n\r\n\r\n**error code**\r\n<!--\r\nExample:\r\n```\r\nimport librosa.display\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\n\r\n-->\r\n**Expected behavior**\r\nA clear and concise description of what you expected to happen.\r\n\r\n**Screenshots**\r\nIf applicable, add screenshots to help explain your problem.\r\n\r\n**Software versions***\r\n\r\n![image](https://github.com/librosa/librosa/assets/51704570/957530c7-9656-44f9-8b0e-c1df49c3b61f)\r\n\r\n**Additional context**\r\nI have tried to change the version of matplotlib, but it does not work. 
And the versions I have tried are: 2.0.0, 3.0.0\r\n\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"Version info\"\"\"\n\nimport sys\nimport importlib\n\nshort_version = \"0.10\"\nversion = \"0.10.2\"\n\n\ndef __get_mod_version(modname):\n try:\n if modname in sys.modules:\n mod = sys.modules[modname]\n else:\n mod = importlib.import_module(modname)\n try:\n return mod.__version__\n except AttributeError:\n return \"installed, no version number available\"\n\n except ImportError:\n return None\n\n\ndef show_versions() -> None:\n \"\"\"Return the version information for all librosa dependencies.\"\"\"\n core_deps = [\n \"audioread\",\n \"numpy\",\n \"scipy\",\n \"sklearn\",\n \"joblib\",\n \"decorator\",\n \"numba\",\n \"soundfile\",\n \"pooch\",\n \"soxr\",\n \"typing_extensions\",\n \"lazy_loader\",\n \"msgpack\",\n ]\n\n extra_deps = [\n \"numpydoc\",\n \"sphinx\",\n \"sphinx_rtd_theme\",\n \"matplotlib\",\n \"sphinx_multiversion\",\n \"sphinx_gallery\",\n \"mir_eval\",\n \"ipython\",\n \"sphinxcontrib.rsvgconverter\",\n \"pytest\",\n \"pytest_mpl\",\n \"pytest_cov\",\n \"samplerate\",\n \"resampy\",\n \"presets\",\n \"packaging\",\n ]\n\n print(\"INSTALLED VERSIONS\")\n print(\"------------------\")\n print(f\"python: {sys.version}\\n\")\n print(f\"librosa: {version}\\n\")\n for dep in core_deps:\n print(\"{}: {}\".format(dep, __get_mod_version(dep)))\n print(\"\")\n for dep in extra_deps:\n print(\"{}: {}\".format(dep, __get_mod_version(dep)))\n", "path": "librosa/version.py"}]}
1,747
87
gh_patches_debug_23491
rasdani/github-patches
git_diff
ESMCI__cime-1396
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Build pFUnit on some other machines We should build pFUnit on some other machines so we can run unit tests there: - cheyenne - hobart Instructions for building pFUnit are: 1. Download pFUnit from http://sourceforge.net/projects/pfunit/files/latest/download 2. Set the PFUNIT environment variable. For example:: ``` export PFUNIT=/glade/p/cesmdata/cseg/tools/pFUnit/pFUnit3.2.8_cheyenne_Intel17.0.1_MPI_openMP ``` 3. Build pFUnit: ``` mkdir build cd build cmake -DMPI=YES -DOPENMP=YES .. make -j 4 ``` 4. Run pFUnit's own unit tests: ``` make tests ``` Note: this didn't work on yellowstone. Instead, I had to do it by hand: ``` DAV_CORES=4 execca mpirun.lsf tests/tests.x ``` 5. Install pFUnit on your system: ``` make install INSTALL_DIR=$PFUNIT ``` </issue> <code> [start of scripts/lib/CIME/BuildTools/cmakemacroswriter.py] 1 """Classes used to write build system files. 2 3 The classes here are used to write out settings for use by Makefile and CMake 4 build systems. The two relevant classes are CMakeMacroWriter and 5 MakeMacroWriter, which encapsulate the information necessary to write CMake and 6 Makefile formatted text, respectively. See the docstrings for those classes for 7 more. 8 """ 9 10 # This is not the most useful check. 11 # pylint: disable=invalid-name 12 13 from CIME.BuildTools.macrowriterbase import MacroWriterBase 14 from CIME.XML.standard_module_setup import * 15 logger = logging.getLogger(__name__) 16 17 18 class CMakeMacroWriter(MacroWriterBase): 19 20 """Macro writer for the CMake format. 21 22 For details on the provided methods, see MacroWriterBase, which this 23 class inherits from. 24 """ 25 26 def __init__(self, output): 27 """Initialize a CMake macro writer. 28 29 Arguments: 30 output - File-like object (probably an io.TextIOWrapper), which 31 will be written to. 32 """ 33 super(CMakeMacroWriter, self).__init__(output) 34 # This counter is for avoiding name conflicts in temporary 35 # variables used for shell commands. 36 self._var_num = 0 37 38 def environment_variable_string(self, name): 39 """Return an environment variable reference. 40 41 >>> import io 42 >>> s = io.StringIO() 43 >>> CMakeMacroWriter(s).environment_variable_string("foo") 44 '$ENV{foo}' 45 """ 46 return "$ENV{" + name + "}" 47 48 def shell_command_strings(self, command): 49 # pylint: disable=line-too-long 50 """Return strings used to get the output of a shell command. 51 52 >>> import io 53 >>> s = io.StringIO() 54 >>> set_up, inline, tear_down = CMakeMacroWriter(s).shell_command_strings("echo bar") 55 >>> set_up 56 'execute_process(COMMAND echo bar OUTPUT_VARIABLE CIME_TEMP_SHELL0 OUTPUT_STRIP_TRAILING_WHITESPACE)' 57 >>> inline 58 '${CIME_TEMP_SHELL0}' 59 >>> tear_down 60 'unset(CIME_TEMP_SHELL0)' 61 """ 62 # pylint: enable=line-too-long 63 # Create a unique variable name, then increment variable number 64 # counter so that we get a different value next time. 65 var_name = "CIME_TEMP_SHELL" + str(self._var_num) 66 self._var_num += 1 67 set_up = "execute_process(COMMAND " + command + \ 68 " OUTPUT_VARIABLE " + var_name + \ 69 " OUTPUT_STRIP_TRAILING_WHITESPACE)" 70 tear_down = "unset(" + var_name + ")" 71 return (set_up, "${" + var_name + "}", tear_down) 72 73 def variable_string(self, name): 74 """Return a string to refer to a variable with the given name. 
75 76 >>> import io 77 >>> s = io.StringIO() 78 >>> CMakeMacroWriter(s).variable_string("foo") 79 '${foo}' 80 """ 81 return "${" + name + "}" 82 83 def set_variable(self, name, value): 84 """Write out a statement setting a variable to some value. 85 86 >>> import io 87 >>> s = io.StringIO() 88 >>> CMakeMacroWriter(s).set_variable("foo", "bar") 89 >>> s.getvalue() 90 u'set(foo "bar")\\n' 91 """ 92 self.write_line("set(" + name + ' "' + value + '")') 93 94 def start_ifeq(self, left, right): 95 """Write out a statement to start a conditional block. 96 97 >>> import io 98 >>> s = io.StringIO() 99 >>> CMakeMacroWriter(s).start_ifeq("foo", "bar") 100 >>> s.getvalue() 101 u'if("foo" STREQUAL "bar")\\n' 102 """ 103 self.write_line('if("' + left + '" STREQUAL "' + right + '")') 104 self.indent_right() 105 106 def end_ifeq(self): 107 """Write out a statement to end a block started with start_ifeq. 108 109 >>> import io 110 >>> s = io.StringIO() 111 >>> writer = CMakeMacroWriter(s) 112 >>> writer.start_ifeq("foo", "bar") 113 >>> writer.set_variable("foo2", "bar2") 114 >>> writer.end_ifeq() 115 >>> s.getvalue() 116 u'if("foo" STREQUAL "bar")\\n set(foo2 "bar2")\\nendif()\\n' 117 """ 118 self.indent_left() 119 self.write_line("endif()") 120 [end of scripts/lib/CIME/BuildTools/cmakemacroswriter.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/scripts/lib/CIME/BuildTools/cmakemacroswriter.py b/scripts/lib/CIME/BuildTools/cmakemacroswriter.py --- a/scripts/lib/CIME/BuildTools/cmakemacroswriter.py +++ b/scripts/lib/CIME/BuildTools/cmakemacroswriter.py @@ -89,7 +89,8 @@ >>> s.getvalue() u'set(foo "bar")\\n' """ - self.write_line("set(" + name + ' "' + value + '")') + value_transformed = self._transform_value(value) + self.write_line("set(" + name + ' "' + value_transformed + '")') def start_ifeq(self, left, right): """Write out a statement to start a conditional block. @@ -117,3 +118,22 @@ """ self.indent_left() self.write_line("endif()") + + def _transform_value(self, value): + """Some elements need their values transformed in some way for CMake to handle them properly. + This method does those transformations. + + Args: + - value (str): value of element + + Returns transformed value + """ + + # Not all variables need leading & trailing whitespace removed, but some + # do. In particular, compiler variables (MPICC, MPICXX, MPIFC, SCC, + # SCXX, SFC) are only handled correctly if leading & trailing whitespace + # is removed. It doesn't seem to hurt to remove whitespace from all + # variables. + value_transformed = value.strip() + + return value_transformed
{"golden_diff": "diff --git a/scripts/lib/CIME/BuildTools/cmakemacroswriter.py b/scripts/lib/CIME/BuildTools/cmakemacroswriter.py\n--- a/scripts/lib/CIME/BuildTools/cmakemacroswriter.py\n+++ b/scripts/lib/CIME/BuildTools/cmakemacroswriter.py\n@@ -89,7 +89,8 @@\n >>> s.getvalue()\n u'set(foo \"bar\")\\\\n'\n \"\"\"\n- self.write_line(\"set(\" + name + ' \"' + value + '\")')\n+ value_transformed = self._transform_value(value)\n+ self.write_line(\"set(\" + name + ' \"' + value_transformed + '\")')\n \n def start_ifeq(self, left, right):\n \"\"\"Write out a statement to start a conditional block.\n@@ -117,3 +118,22 @@\n \"\"\"\n self.indent_left()\n self.write_line(\"endif()\")\n+\n+ def _transform_value(self, value):\n+ \"\"\"Some elements need their values transformed in some way for CMake to handle them properly.\n+ This method does those transformations.\n+\n+ Args:\n+ - value (str): value of element\n+\n+ Returns transformed value\n+ \"\"\"\n+\n+ # Not all variables need leading & trailing whitespace removed, but some\n+ # do. In particular, compiler variables (MPICC, MPICXX, MPIFC, SCC,\n+ # SCXX, SFC) are only handled correctly if leading & trailing whitespace\n+ # is removed. It doesn't seem to hurt to remove whitespace from all\n+ # variables.\n+ value_transformed = value.strip()\n+\n+ return value_transformed\n", "issue": "Build pFUnit on some other machines\nWe should build pFUnit on some other machines so we can run unit tests there:\r\n\r\n- cheyenne\r\n- hobart\r\n\r\nInstructions for building pFUnit are:\r\n\r\n1. Download pFUnit from\r\n http://sourceforge.net/projects/pfunit/files/latest/download\r\n\r\n2. Set the PFUNIT environment variable. For example::\r\n\r\n ```\r\n export PFUNIT=/glade/p/cesmdata/cseg/tools/pFUnit/pFUnit3.2.8_cheyenne_Intel17.0.1_MPI_openMP\r\n ```\r\n\r\n3. Build pFUnit:\r\n\r\n```\r\n mkdir build\r\n cd build\r\n cmake -DMPI=YES -DOPENMP=YES ..\r\n make -j 4\r\n```\r\n\r\n4. Run pFUnit's own unit tests:\r\n\r\n```\r\n make tests\r\n```\r\n\r\n Note: this didn't work on yellowstone. Instead, I had to do it by hand:\r\n\r\n```\r\n DAV_CORES=4 execca\r\n mpirun.lsf tests/tests.x\r\n```\r\n\r\n5. Install pFUnit on your system:\r\n\r\n```\r\n make install INSTALL_DIR=$PFUNIT\r\n```\n", "before_files": [{"content": "\"\"\"Classes used to write build system files.\n\nThe classes here are used to write out settings for use by Makefile and CMake\nbuild systems. The two relevant classes are CMakeMacroWriter and\nMakeMacroWriter, which encapsulate the information necessary to write CMake and\nMakefile formatted text, respectively. 
See the docstrings for those classes for\nmore.\n\"\"\"\n\n# This is not the most useful check.\n# pylint: disable=invalid-name\n\nfrom CIME.BuildTools.macrowriterbase import MacroWriterBase\nfrom CIME.XML.standard_module_setup import *\nlogger = logging.getLogger(__name__)\n\n\nclass CMakeMacroWriter(MacroWriterBase):\n\n \"\"\"Macro writer for the CMake format.\n\n For details on the provided methods, see MacroWriterBase, which this\n class inherits from.\n \"\"\"\n\n def __init__(self, output):\n \"\"\"Initialize a CMake macro writer.\n\n Arguments:\n output - File-like object (probably an io.TextIOWrapper), which\n will be written to.\n \"\"\"\n super(CMakeMacroWriter, self).__init__(output)\n # This counter is for avoiding name conflicts in temporary\n # variables used for shell commands.\n self._var_num = 0\n\n def environment_variable_string(self, name):\n \"\"\"Return an environment variable reference.\n\n >>> import io\n >>> s = io.StringIO()\n >>> CMakeMacroWriter(s).environment_variable_string(\"foo\")\n '$ENV{foo}'\n \"\"\"\n return \"$ENV{\" + name + \"}\"\n\n def shell_command_strings(self, command):\n # pylint: disable=line-too-long\n \"\"\"Return strings used to get the output of a shell command.\n\n >>> import io\n >>> s = io.StringIO()\n >>> set_up, inline, tear_down = CMakeMacroWriter(s).shell_command_strings(\"echo bar\")\n >>> set_up\n 'execute_process(COMMAND echo bar OUTPUT_VARIABLE CIME_TEMP_SHELL0 OUTPUT_STRIP_TRAILING_WHITESPACE)'\n >>> inline\n '${CIME_TEMP_SHELL0}'\n >>> tear_down\n 'unset(CIME_TEMP_SHELL0)'\n \"\"\"\n # pylint: enable=line-too-long\n # Create a unique variable name, then increment variable number\n # counter so that we get a different value next time.\n var_name = \"CIME_TEMP_SHELL\" + str(self._var_num)\n self._var_num += 1\n set_up = \"execute_process(COMMAND \" + command + \\\n \" OUTPUT_VARIABLE \" + var_name + \\\n \" OUTPUT_STRIP_TRAILING_WHITESPACE)\"\n tear_down = \"unset(\" + var_name + \")\"\n return (set_up, \"${\" + var_name + \"}\", tear_down)\n\n def variable_string(self, name):\n \"\"\"Return a string to refer to a variable with the given name.\n\n >>> import io\n >>> s = io.StringIO()\n >>> CMakeMacroWriter(s).variable_string(\"foo\")\n '${foo}'\n \"\"\"\n return \"${\" + name + \"}\"\n\n def set_variable(self, name, value):\n \"\"\"Write out a statement setting a variable to some value.\n\n >>> import io\n >>> s = io.StringIO()\n >>> CMakeMacroWriter(s).set_variable(\"foo\", \"bar\")\n >>> s.getvalue()\n u'set(foo \"bar\")\\\\n'\n \"\"\"\n self.write_line(\"set(\" + name + ' \"' + value + '\")')\n\n def start_ifeq(self, left, right):\n \"\"\"Write out a statement to start a conditional block.\n\n >>> import io\n >>> s = io.StringIO()\n >>> CMakeMacroWriter(s).start_ifeq(\"foo\", \"bar\")\n >>> s.getvalue()\n u'if(\"foo\" STREQUAL \"bar\")\\\\n'\n \"\"\"\n self.write_line('if(\"' + left + '\" STREQUAL \"' + right + '\")')\n self.indent_right()\n\n def end_ifeq(self):\n \"\"\"Write out a statement to end a block started with start_ifeq.\n\n >>> import io\n >>> s = io.StringIO()\n >>> writer = CMakeMacroWriter(s)\n >>> writer.start_ifeq(\"foo\", \"bar\")\n >>> writer.set_variable(\"foo2\", \"bar2\")\n >>> writer.end_ifeq()\n >>> s.getvalue()\n u'if(\"foo\" STREQUAL \"bar\")\\\\n set(foo2 \"bar2\")\\\\nendif()\\\\n'\n \"\"\"\n self.indent_left()\n self.write_line(\"endif()\")\n", "path": "scripts/lib/CIME/BuildTools/cmakemacroswriter.py"}]}
2,033
368
gh_patches_debug_18902
rasdani/github-patches
git_diff
fossasia__open-event-server-7197
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Stripe publishable key should be accessible to non-admin user The settings schema for non-admin user does not contain stripe publishable key and hence non-admin user cannot add stripe to its event. https://api.eventyay.com/v1/settings should contain stripe publishable key See #6277 as well </issue> <code> [start of app/api/schema/settings.py] 1 from marshmallow_jsonapi import fields 2 from marshmallow_jsonapi.flask import Schema 3 4 from app.api.helpers.utilities import dasherize 5 from app.settings import Environment 6 from utils.common import use_defaults 7 8 9 class SettingSchemaPublic(Schema): 10 """ 11 Public Api schema for settings Model 12 """ 13 14 class Meta: 15 """ 16 Meta class for setting Api Schema 17 """ 18 19 type_ = 'setting' 20 self_view = 'v1.setting_detail' 21 self_view_kwargs = {'id': '<id>'} 22 inflect = dasherize 23 24 id = fields.Str(dump_only=True) 25 26 # Name of the application. (Eg. Event Yay!, Open Event) 27 app_name = fields.Str(allow_none=True) 28 29 # Tagline for the application. (Eg. Event Management and Ticketing, Home) 30 tagline = fields.Str(allow_none=True) 31 32 # Order Expiry Time 33 order_expiry_time = fields.Integer( 34 allow_none=False, default=15, validate=lambda n: 1 <= n <= 60 35 ) 36 37 # Maximum number of complex custom fields allowed for a given form 38 max_complex_custom_fields = fields.Integer( 39 allow_none=False, default=30, validate=lambda n: 1 <= n <= 30 40 ) 41 42 # Google Analytics 43 analytics_key = fields.Str(allow_none=True) 44 45 # FB 46 fb_client_id = fields.Str(allow_none=True) 47 48 # 49 # Social links 50 # 51 google_url = fields.Str(allow_none=True) 52 github_url = fields.Str(allow_none=True) 53 twitter_url = fields.Str(allow_none=True) 54 support_url = fields.Str(allow_none=True) 55 facebook_url = fields.Str(allow_none=True) 56 instagram_url = fields.Str(allow_none=True) 57 youtube_url = fields.Str(allow_none=True) 58 59 # Url of Frontend 60 frontend_url = fields.Url(allow_none=True) 61 62 # 63 # Cookie Policy 64 # 65 cookie_policy = fields.Str(allow_none=True) 66 cookie_policy_link = fields.Str(allow_none=True) 67 68 # 69 # Online Payment Flags 70 # 71 is_paytm_activated = fields.Bool(default=False) 72 is_paypal_activated = fields.Bool(dump_only=True) 73 is_stripe_activated = fields.Bool(dump_only=True) 74 is_omise_activated = fields.Bool(dump_only=True) 75 is_alipay_activated = fields.Bool(dump_only=True) 76 77 78 class SettingSchemaNonAdmin(SettingSchemaPublic): 79 """ 80 Non Admin Api schema for settings Model 81 """ 82 83 class Meta: 84 """ 85 Meta class for setting Api Schema 86 """ 87 88 type_ = 'setting' 89 self_view = 'v1.setting_detail' 90 self_view_kwargs = {'id': '<id>'} 91 inflect = dasherize 92 93 id = fields.Str(dump_only=True) 94 95 # 96 # Generators 97 # 98 android_app_url = fields.Str(allow_none=True) 99 web_app_url = fields.Str(allow_none=True) 100 101 102 @use_defaults() 103 class SettingSchemaAdmin(SettingSchemaNonAdmin): 104 """ 105 Admin Api schema for settings Model 106 """ 107 108 class Meta: 109 """ 110 Meta class for setting Api Schema 111 """ 112 113 type_ = 'setting' 114 self_view = 'v1.setting_detail' 115 self_view_kwargs = {'id': '<id>'} 116 inflect = dasherize 117 118 id = fields.Str(dump_only=True) 119 # 120 # General 121 # 122 123 app_environment = fields.Str(default=Environment.PRODUCTION) 124 125 # Static domain 126 static_domain = fields.Str(allow_none=True) 127 128 # 129 # STORAGE 
130 # 131 132 # storage place, local, s3, .. can be more in future 133 storage_place = fields.Str(allow_none=True) 134 # S3 135 aws_key = fields.Str(allow_none=True) 136 aws_secret = fields.Str(allow_none=True) 137 aws_bucket_name = fields.Str(allow_none=True) 138 aws_region = fields.Str(allow_none=True) 139 # Google Storage 140 gs_key = fields.Str(allow_none=True) 141 gs_secret = fields.Str(allow_none=True) 142 gs_bucket_name = fields.Str(allow_none=True) 143 144 # 145 # CAPTCHA 146 # 147 148 # Google reCAPTCHA 149 is_google_recaptcha_enabled = fields.Bool(allow_none=False, default=False) 150 google_recaptcha_site = fields.Str(allow_none=True) 151 google_recaptcha_secret = fields.Str(allow_none=True) 152 153 # 154 # Social Login 155 # 156 157 # Google Auth 158 google_client_id = fields.Str(allow_none=True) 159 google_client_secret = fields.Str(allow_none=True) 160 # FB 161 fb_client_id = fields.Str(allow_none=True) 162 fb_client_secret = fields.Str(allow_none=True) 163 # Twitter 164 tw_consumer_key = fields.Str(allow_none=True) 165 tw_consumer_secret = fields.Str(allow_none=True) 166 # Instagram 167 in_client_id = fields.Str(allow_none=True) 168 in_client_secret = fields.Str(allow_none=True) 169 170 # 171 # Payment Gateways 172 # 173 174 # Stripe Credantials 175 stripe_client_id = fields.Str(allow_none=True) 176 stripe_publishable_key = fields.Str(allow_none=True) 177 stripe_secret_key = fields.Str(allow_none=True) 178 stripe_test_client_id = fields.Str(allow_none=True) 179 stripe_test_secret_key = fields.Str(allow_none=True) 180 stripe_test_publishable_key = fields.Str(allow_none=True) 181 182 # PayPal Credentials 183 paypal_mode = fields.Str(allow_none=True) 184 paypal_client = fields.Str(allow_none=True) 185 paypal_secret = fields.Str(allow_none=True) 186 paypal_sandbox_client = fields.Str(allow_none=True) 187 paypal_sandbox_secret = fields.Str(allow_none=True) 188 189 # Omise Credentials 190 omise_mode = fields.Str(allow_none=True) 191 omise_test_public = fields.Str(allow_none=True) 192 omise_test_secret = fields.Str(allow_none=True) 193 omise_live_public = fields.Str(allow_none=True) 194 omise_live_secret = fields.Str(allow_none=True) 195 196 # Alipay Credentials 197 alipay_publishable_key = fields.Str(allow_none=True) 198 alipay_secret_key = fields.Str(allow_none=True) 199 200 # payTM credentials 201 paytm_mode = fields.Str(allow_none=True) 202 paytm_live_merchant = fields.Str(allow_none=True) 203 paytm_live_secret = fields.Str(allow_none=True) 204 paytm_sandbox_merchant = fields.Str(allow_none=True) 205 paytm_sandbox_secret = fields.Str(allow_none=True) 206 # 207 # EMAIL 208 # 209 210 # Email service. 
(sendgrid,smtp) 211 email_service = fields.Str(allow_none=True) 212 email_from = fields.Str(allow_none=True) 213 email_from_name = fields.Str(allow_none=True) 214 # Sendgrid 215 sendgrid_key = fields.Str(allow_none=True) 216 # SMTP 217 smtp_host = fields.Str(allow_none=True) 218 smtp_username = fields.Str(allow_none=True) 219 smtp_password = fields.Str(allow_none=True) 220 smtp_port = fields.Integer(allow_none=True) 221 smtp_encryption = fields.Str(allow_none=True) # Can be tls, ssl, none 222 223 # Event Invoices settings 224 invoice_sending_day = fields.Integer(allow_none=False, default=1) 225 invoice_sending_timezone = fields.Str(allow_none=False, default="UTC") 226 227 # Admin Invoice Details 228 admin_billing_contact_name = fields.Str(allow_none=True) 229 admin_billing_phone = fields.Str(allow_none=True) 230 admin_billing_email = fields.Email(allow_none=True) 231 admin_billing_state = fields.Str(allow_none=True) 232 admin_billing_country = fields.Str(allow_none=True) 233 admin_billing_tax_info = fields.Str(allow_none=True) 234 admin_company = fields.Str(allow_none=True) 235 admin_billing_address = fields.Str(allow_none=True) 236 admin_billing_city = fields.Str(allow_none=True) 237 admin_billing_zip = fields.Str(allow_none=True) 238 admin_billing_additional_info = fields.Str(allow_none=True) 239 [end of app/api/schema/settings.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/app/api/schema/settings.py b/app/api/schema/settings.py --- a/app/api/schema/settings.py +++ b/app/api/schema/settings.py @@ -74,6 +74,34 @@ is_omise_activated = fields.Bool(dump_only=True) is_alipay_activated = fields.Bool(dump_only=True) + # + # Payment Gateways + # + + # Stripe Credantials + stripe_client_id = fields.Str(dump_only=True) + stripe_publishable_key = fields.Str(dump_only=True) + stripe_test_client_id = fields.Str(dump_only=True) + stripe_test_publishable_key = fields.Str(dump_only=True) + + # PayPal Credentials + paypal_mode = fields.Str(dump_only=True) + paypal_client = fields.Str(dump_only=True) + paypal_sandbox_client = fields.Str(dump_only=True) + + # Omise Credentials + omise_mode = fields.Str(dump_only=True) + omise_test_public = fields.Str(dump_only=True) + omise_live_public = fields.Str(dump_only=True) + + # Alipay Credentials + alipay_publishable_key = fields.Str(dump_only=True) + + # payTM credentials + paytm_mode = fields.Str(dump_only=True) + paytm_live_merchant = fields.Str(dump_only=True) + paytm_sandbox_merchant = fields.Str(dump_only=True) + class SettingSchemaNonAdmin(SettingSchemaPublic): """
{"golden_diff": "diff --git a/app/api/schema/settings.py b/app/api/schema/settings.py\n--- a/app/api/schema/settings.py\n+++ b/app/api/schema/settings.py\n@@ -74,6 +74,34 @@\n is_omise_activated = fields.Bool(dump_only=True)\n is_alipay_activated = fields.Bool(dump_only=True)\n \n+ #\n+ # Payment Gateways\n+ #\n+\n+ # Stripe Credantials\n+ stripe_client_id = fields.Str(dump_only=True)\n+ stripe_publishable_key = fields.Str(dump_only=True)\n+ stripe_test_client_id = fields.Str(dump_only=True)\n+ stripe_test_publishable_key = fields.Str(dump_only=True)\n+\n+ # PayPal Credentials\n+ paypal_mode = fields.Str(dump_only=True)\n+ paypal_client = fields.Str(dump_only=True)\n+ paypal_sandbox_client = fields.Str(dump_only=True)\n+\n+ # Omise Credentials\n+ omise_mode = fields.Str(dump_only=True)\n+ omise_test_public = fields.Str(dump_only=True)\n+ omise_live_public = fields.Str(dump_only=True)\n+\n+ # Alipay Credentials\n+ alipay_publishable_key = fields.Str(dump_only=True)\n+\n+ # payTM credentials\n+ paytm_mode = fields.Str(dump_only=True)\n+ paytm_live_merchant = fields.Str(dump_only=True)\n+ paytm_sandbox_merchant = fields.Str(dump_only=True)\n+\n \n class SettingSchemaNonAdmin(SettingSchemaPublic):\n \"\"\"\n", "issue": "Stripe publishable key should be accessible to non-admin user\nThe settings schema for non-admin user does not contain stripe publishable key and hence non-admin user cannot add stripe to its event.\r\n\r\nhttps://api.eventyay.com/v1/settings should contain stripe publishable key\r\n\r\nSee #6277 as well\n", "before_files": [{"content": "from marshmallow_jsonapi import fields\nfrom marshmallow_jsonapi.flask import Schema\n\nfrom app.api.helpers.utilities import dasherize\nfrom app.settings import Environment\nfrom utils.common import use_defaults\n\n\nclass SettingSchemaPublic(Schema):\n \"\"\"\n Public Api schema for settings Model\n \"\"\"\n\n class Meta:\n \"\"\"\n Meta class for setting Api Schema\n \"\"\"\n\n type_ = 'setting'\n self_view = 'v1.setting_detail'\n self_view_kwargs = {'id': '<id>'}\n inflect = dasherize\n\n id = fields.Str(dump_only=True)\n\n # Name of the application. (Eg. Event Yay!, Open Event)\n app_name = fields.Str(allow_none=True)\n\n # Tagline for the application. (Eg. 
Event Management and Ticketing, Home)\n tagline = fields.Str(allow_none=True)\n\n # Order Expiry Time\n order_expiry_time = fields.Integer(\n allow_none=False, default=15, validate=lambda n: 1 <= n <= 60\n )\n\n # Maximum number of complex custom fields allowed for a given form\n max_complex_custom_fields = fields.Integer(\n allow_none=False, default=30, validate=lambda n: 1 <= n <= 30\n )\n\n # Google Analytics\n analytics_key = fields.Str(allow_none=True)\n\n # FB\n fb_client_id = fields.Str(allow_none=True)\n\n #\n # Social links\n #\n google_url = fields.Str(allow_none=True)\n github_url = fields.Str(allow_none=True)\n twitter_url = fields.Str(allow_none=True)\n support_url = fields.Str(allow_none=True)\n facebook_url = fields.Str(allow_none=True)\n instagram_url = fields.Str(allow_none=True)\n youtube_url = fields.Str(allow_none=True)\n\n # Url of Frontend\n frontend_url = fields.Url(allow_none=True)\n\n #\n # Cookie Policy\n #\n cookie_policy = fields.Str(allow_none=True)\n cookie_policy_link = fields.Str(allow_none=True)\n\n #\n # Online Payment Flags\n #\n is_paytm_activated = fields.Bool(default=False)\n is_paypal_activated = fields.Bool(dump_only=True)\n is_stripe_activated = fields.Bool(dump_only=True)\n is_omise_activated = fields.Bool(dump_only=True)\n is_alipay_activated = fields.Bool(dump_only=True)\n\n\nclass SettingSchemaNonAdmin(SettingSchemaPublic):\n \"\"\"\n Non Admin Api schema for settings Model\n \"\"\"\n\n class Meta:\n \"\"\"\n Meta class for setting Api Schema\n \"\"\"\n\n type_ = 'setting'\n self_view = 'v1.setting_detail'\n self_view_kwargs = {'id': '<id>'}\n inflect = dasherize\n\n id = fields.Str(dump_only=True)\n\n #\n # Generators\n #\n android_app_url = fields.Str(allow_none=True)\n web_app_url = fields.Str(allow_none=True)\n\n\n@use_defaults()\nclass SettingSchemaAdmin(SettingSchemaNonAdmin):\n \"\"\"\n Admin Api schema for settings Model\n \"\"\"\n\n class Meta:\n \"\"\"\n Meta class for setting Api Schema\n \"\"\"\n\n type_ = 'setting'\n self_view = 'v1.setting_detail'\n self_view_kwargs = {'id': '<id>'}\n inflect = dasherize\n\n id = fields.Str(dump_only=True)\n #\n # General\n #\n\n app_environment = fields.Str(default=Environment.PRODUCTION)\n\n # Static domain\n static_domain = fields.Str(allow_none=True)\n\n #\n # STORAGE\n #\n\n # storage place, local, s3, .. 
can be more in future\n storage_place = fields.Str(allow_none=True)\n # S3\n aws_key = fields.Str(allow_none=True)\n aws_secret = fields.Str(allow_none=True)\n aws_bucket_name = fields.Str(allow_none=True)\n aws_region = fields.Str(allow_none=True)\n # Google Storage\n gs_key = fields.Str(allow_none=True)\n gs_secret = fields.Str(allow_none=True)\n gs_bucket_name = fields.Str(allow_none=True)\n\n #\n # CAPTCHA\n #\n\n # Google reCAPTCHA\n is_google_recaptcha_enabled = fields.Bool(allow_none=False, default=False)\n google_recaptcha_site = fields.Str(allow_none=True)\n google_recaptcha_secret = fields.Str(allow_none=True)\n\n #\n # Social Login\n #\n\n # Google Auth\n google_client_id = fields.Str(allow_none=True)\n google_client_secret = fields.Str(allow_none=True)\n # FB\n fb_client_id = fields.Str(allow_none=True)\n fb_client_secret = fields.Str(allow_none=True)\n # Twitter\n tw_consumer_key = fields.Str(allow_none=True)\n tw_consumer_secret = fields.Str(allow_none=True)\n # Instagram\n in_client_id = fields.Str(allow_none=True)\n in_client_secret = fields.Str(allow_none=True)\n\n #\n # Payment Gateways\n #\n\n # Stripe Credantials\n stripe_client_id = fields.Str(allow_none=True)\n stripe_publishable_key = fields.Str(allow_none=True)\n stripe_secret_key = fields.Str(allow_none=True)\n stripe_test_client_id = fields.Str(allow_none=True)\n stripe_test_secret_key = fields.Str(allow_none=True)\n stripe_test_publishable_key = fields.Str(allow_none=True)\n\n # PayPal Credentials\n paypal_mode = fields.Str(allow_none=True)\n paypal_client = fields.Str(allow_none=True)\n paypal_secret = fields.Str(allow_none=True)\n paypal_sandbox_client = fields.Str(allow_none=True)\n paypal_sandbox_secret = fields.Str(allow_none=True)\n\n # Omise Credentials\n omise_mode = fields.Str(allow_none=True)\n omise_test_public = fields.Str(allow_none=True)\n omise_test_secret = fields.Str(allow_none=True)\n omise_live_public = fields.Str(allow_none=True)\n omise_live_secret = fields.Str(allow_none=True)\n\n # Alipay Credentials\n alipay_publishable_key = fields.Str(allow_none=True)\n alipay_secret_key = fields.Str(allow_none=True)\n\n # payTM credentials\n paytm_mode = fields.Str(allow_none=True)\n paytm_live_merchant = fields.Str(allow_none=True)\n paytm_live_secret = fields.Str(allow_none=True)\n paytm_sandbox_merchant = fields.Str(allow_none=True)\n paytm_sandbox_secret = fields.Str(allow_none=True)\n #\n # EMAIL\n #\n\n # Email service. 
(sendgrid,smtp)\n email_service = fields.Str(allow_none=True)\n email_from = fields.Str(allow_none=True)\n email_from_name = fields.Str(allow_none=True)\n # Sendgrid\n sendgrid_key = fields.Str(allow_none=True)\n # SMTP\n smtp_host = fields.Str(allow_none=True)\n smtp_username = fields.Str(allow_none=True)\n smtp_password = fields.Str(allow_none=True)\n smtp_port = fields.Integer(allow_none=True)\n smtp_encryption = fields.Str(allow_none=True) # Can be tls, ssl, none\n\n # Event Invoices settings\n invoice_sending_day = fields.Integer(allow_none=False, default=1)\n invoice_sending_timezone = fields.Str(allow_none=False, default=\"UTC\")\n\n # Admin Invoice Details\n admin_billing_contact_name = fields.Str(allow_none=True)\n admin_billing_phone = fields.Str(allow_none=True)\n admin_billing_email = fields.Email(allow_none=True)\n admin_billing_state = fields.Str(allow_none=True)\n admin_billing_country = fields.Str(allow_none=True)\n admin_billing_tax_info = fields.Str(allow_none=True)\n admin_company = fields.Str(allow_none=True)\n admin_billing_address = fields.Str(allow_none=True)\n admin_billing_city = fields.Str(allow_none=True)\n admin_billing_zip = fields.Str(allow_none=True)\n admin_billing_additional_info = fields.Str(allow_none=True)\n", "path": "app/api/schema/settings.py"}]}
2,977
337
gh_patches_debug_30329
rasdani/github-patches
git_diff
streamlit__streamlit-2604
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> In number_input, min_value & max_value format override format string and raises no warning # Summary Regardless of the format string specified by the user, actual format used will be based on min_value and max_value # Steps to reproduce The following code presents a warning: `weight = st.number_input("Today's weight:", min_value=50.0, max_value=750.0, format="%d")` _Warning: NumberInput value below is float, but format %d displays as integer._ I believe that's intended behavior. However, the inverse condition doesn't produce a warning or error. ``` weight = st.number_input("Today's weight:", min_value=50, max_value=750, format="%4.1f") print(weight) st.write("Echoing weight: ", weight) ``` If the input value is, for example, 170.4, the weight value will be 170, shown by both print() and st.write(), based on the interpreted format of min and max values. Note that this line of code does not produce any error or warning. The user (at least in my case) assumed min_value and max_value would be raised to float rather than ignoring format. I couldn't figure out why my input was being reduced from 170.4 to 170 The correct code works fine `weight = st.number_input("Today's weight:", min_value=50.0, max_value=750.0, format="%4.1f")` ## Expected behavior: A warning in both cases, not just one or the other. Maybe: _Warning: NumberInput value below is integer, format %f lowered to match integer._ ## Actual behavior: No warning produced, seems like there should be. Sorry, already explained in the Steps to Reproduce section ## Is this a regression? No (not that I know of) # Debug info - Streamlit version: (get it with `$ streamlit version`) 0.64.0 - Python version: (get it with `$ python --version`) 3.7.7 - Using Conda - OS version: Windows 10 64bit - Browser version: Chrome Version 84.0.4147.105 (Official Build) (64-bit) # Additional information </issue> <code> [start of lib/streamlit/elements/number_input.py] 1 # Copyright 2018-2020 Streamlit Inc. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 import numbers 16 from typing import cast 17 18 import streamlit 19 from streamlit.errors import StreamlitAPIException 20 from streamlit.js_number import JSNumber, JSNumberBoundsException 21 from streamlit.proto.NumberInput_pb2 import NumberInput as NumberInputProto 22 from .utils import register_widget, NoValue 23 24 25 class NumberInputMixin: 26 def number_input( 27 self, 28 label, 29 min_value=None, 30 max_value=None, 31 value=NoValue(), 32 step=None, 33 format=None, 34 key=None, 35 ): 36 """Display a numeric input widget. 37 38 Parameters 39 ---------- 40 label : str or None 41 A short label explaining to the user what this input is for. 42 min_value : int or float or None 43 The minimum permitted value. 44 If None, there will be no minimum. 45 max_value : int or float or None 46 The maximum permitted value. 47 If None, there will be no maximum. 
48 value : int or float or None 49 The value of this widget when it first renders. 50 Defaults to min_value, or 0.0 if min_value is None 51 step : int or float or None 52 The stepping interval. 53 Defaults to 1 if the value is an int, 0.01 otherwise. 54 If the value is not specified, the format parameter will be used. 55 format : str or None 56 A printf-style format string controlling how the interface should 57 display numbers. Output must be purely numeric. This does not impact 58 the return value. Valid formatters: %d %e %f %g %i 59 key : str 60 An optional string to use as the unique key for the widget. 61 If this is omitted, a key will be generated for the widget 62 based on its content. Multiple widgets of the same type may 63 not share the same key. 64 65 Returns 66 ------- 67 int or float 68 The current value of the numeric input widget. The return type 69 will match the data type of the value parameter. 70 71 Example 72 ------- 73 >>> number = st.number_input('Insert a number') 74 >>> st.write('The current number is ', number) 75 """ 76 77 if isinstance(value, NoValue): 78 if min_value is not None: 79 value = min_value 80 else: 81 value = 0.0 # We set a float as default 82 83 int_value = isinstance(value, numbers.Integral) 84 float_value = isinstance(value, float) 85 86 if value is None: 87 raise StreamlitAPIException( 88 "Default value for number_input should be an int or a float." 89 ) 90 else: 91 if format is None: 92 format = "%d" if int_value else "%0.2f" 93 94 if format in ["%d", "%u", "%i"] and float_value: 95 # Warn user to check if displaying float as int was really intended. 96 import streamlit as st 97 98 st.warning( 99 "Warning: NumberInput value below is float, but format {} displays as integer.".format( 100 format 101 ) 102 ) 103 104 if step is None: 105 step = 1 if int_value else 0.01 106 107 try: 108 float(format % 2) 109 except (TypeError, ValueError): 110 raise StreamlitAPIException( 111 "Format string for st.number_input contains invalid characters: %s" 112 % format 113 ) 114 115 # Ensure that all arguments are of the same type. 116 args = [min_value, max_value, step] 117 118 int_args = all( 119 map( 120 lambda a: ( 121 isinstance(a, numbers.Integral) or isinstance(a, type(None)) 122 ), 123 args, 124 ) 125 ) 126 float_args = all( 127 map(lambda a: (isinstance(a, float) or isinstance(a, type(None))), args) 128 ) 129 130 if not int_args and not float_args: 131 raise StreamlitAPIException( 132 "All arguments must be of the same type." 133 "\n`value` has %(value_type)s type." 134 "\n`min_value` has %(min_type)s type." 135 "\n`max_value` has %(max_type)s type." 136 % { 137 "value_type": type(value).__name__, 138 "min_type": type(min_value).__name__, 139 "max_type": type(max_value).__name__, 140 } 141 ) 142 143 # Ensure that the value matches arguments' types. 144 all_ints = int_value and int_args 145 all_floats = float_value and float_args 146 147 if not all_ints and not all_floats: 148 raise StreamlitAPIException( 149 "All numerical arguments must be of the same type." 150 "\n`value` has %(value_type)s type." 151 "\n`min_value` has %(min_type)s type." 152 "\n`max_value` has %(max_type)s type." 153 "\n`step` has %(step_type)s type." 
154 % { 155 "value_type": type(value).__name__, 156 "min_type": type(min_value).__name__, 157 "max_type": type(max_value).__name__, 158 "step_type": type(step).__name__, 159 } 160 ) 161 162 if (min_value and min_value > value) or (max_value and max_value < value): 163 raise StreamlitAPIException( 164 "The default `value` of %(value)s " 165 "must lie between the `min_value` of %(min)s " 166 "and the `max_value` of %(max)s, inclusively." 167 % {"value": value, "min": min_value, "max": max_value} 168 ) 169 170 # Bounds checks. JSNumber produces human-readable exceptions that 171 # we simply re-package as StreamlitAPIExceptions. 172 try: 173 if all_ints: 174 if min_value is not None: 175 JSNumber.validate_int_bounds(min_value, "`min_value`") 176 if max_value is not None: 177 JSNumber.validate_int_bounds(max_value, "`max_value`") 178 if step is not None: 179 JSNumber.validate_int_bounds(step, "`step`") 180 JSNumber.validate_int_bounds(value, "`value`") 181 else: 182 if min_value is not None: 183 JSNumber.validate_float_bounds(min_value, "`min_value`") 184 if max_value is not None: 185 JSNumber.validate_float_bounds(max_value, "`max_value`") 186 if step is not None: 187 JSNumber.validate_float_bounds(step, "`step`") 188 JSNumber.validate_float_bounds(value, "`value`") 189 except JSNumberBoundsException as e: 190 raise StreamlitAPIException(str(e)) 191 192 number_input_proto = NumberInputProto() 193 number_input_proto.data_type = ( 194 NumberInputProto.INT if all_ints else NumberInputProto.FLOAT 195 ) 196 number_input_proto.label = label 197 number_input_proto.default = value 198 199 if min_value is not None: 200 number_input_proto.min = min_value 201 number_input_proto.has_min = True 202 203 if max_value is not None: 204 number_input_proto.max = max_value 205 number_input_proto.has_max = True 206 207 if step is not None: 208 number_input_proto.step = step 209 210 if format is not None: 211 number_input_proto.format = format 212 213 ui_value = register_widget("number_input", number_input_proto, user_key=key) 214 215 return_value = ui_value if ui_value is not None else value 216 return self.dg._enqueue("number_input", number_input_proto, return_value) 217 218 @property 219 def dg(self) -> "streamlit.delta_generator.DeltaGenerator": 220 """Get our DeltaGenerator.""" 221 return cast("streamlit.delta_generator.DeltaGenerator", self) 222 [end of lib/streamlit/elements/number_input.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/lib/streamlit/elements/number_input.py b/lib/streamlit/elements/number_input.py --- a/lib/streamlit/elements/number_input.py +++ b/lib/streamlit/elements/number_input.py @@ -55,7 +55,7 @@ format : str or None A printf-style format string controlling how the interface should display numbers. Output must be purely numeric. This does not impact - the return value. Valid formatters: %d %e %f %g %i + the return value. Valid formatters: %d %e %f %g %i %u key : str An optional string to use as the unique key for the widget. If this is omitted, a key will be generated for the widget @@ -91,14 +91,20 @@ if format is None: format = "%d" if int_value else "%0.2f" + # Warn user if they format an int type as a float or vice versa. if format in ["%d", "%u", "%i"] and float_value: - # Warn user to check if displaying float as int was really intended. import streamlit as st st.warning( - "Warning: NumberInput value below is float, but format {} displays as integer.".format( - format - ) + "Warning: NumberInput value below has type float," + f" but format {format} displays as integer." + ) + elif format[-1] == "f" and int_value: + import streamlit as st + + st.warning( + "Warning: NumberInput value below has type int so is" + f" displayed as int despite format string {format}." ) if step is None:
{"golden_diff": "diff --git a/lib/streamlit/elements/number_input.py b/lib/streamlit/elements/number_input.py\n--- a/lib/streamlit/elements/number_input.py\n+++ b/lib/streamlit/elements/number_input.py\n@@ -55,7 +55,7 @@\n format : str or None\n A printf-style format string controlling how the interface should\n display numbers. Output must be purely numeric. This does not impact\n- the return value. Valid formatters: %d %e %f %g %i\n+ the return value. Valid formatters: %d %e %f %g %i %u\n key : str\n An optional string to use as the unique key for the widget.\n If this is omitted, a key will be generated for the widget\n@@ -91,14 +91,20 @@\n if format is None:\n format = \"%d\" if int_value else \"%0.2f\"\n \n+ # Warn user if they format an int type as a float or vice versa.\n if format in [\"%d\", \"%u\", \"%i\"] and float_value:\n- # Warn user to check if displaying float as int was really intended.\n import streamlit as st\n \n st.warning(\n- \"Warning: NumberInput value below is float, but format {} displays as integer.\".format(\n- format\n- )\n+ \"Warning: NumberInput value below has type float,\"\n+ f\" but format {format} displays as integer.\"\n+ )\n+ elif format[-1] == \"f\" and int_value:\n+ import streamlit as st\n+\n+ st.warning(\n+ \"Warning: NumberInput value below has type int so is\"\n+ f\" displayed as int despite format string {format}.\"\n )\n \n if step is None:\n", "issue": "In number_input, min_value & max_value format override format string and raises no warning\n# Summary\r\n\r\nRegardless of the format string specified by the user, actual format used will be based on min_value and max_value\r\n\r\n# Steps to reproduce\r\n\r\nThe following code presents a warning:\r\n`weight = st.number_input(\"Today's weight:\", min_value=50.0, max_value=750.0, format=\"%d\")`\r\n_Warning: NumberInput value below is float, but format %d displays as integer._\r\n\r\nI believe that's intended behavior. However, the inverse condition doesn't produce a warning or error.\r\n```\r\nweight = st.number_input(\"Today's weight:\", min_value=50, max_value=750, format=\"%4.1f\")\r\nprint(weight)\r\nst.write(\"Echoing weight: \", weight)\r\n\r\n```\r\nIf the input value is, for example, 170.4, the weight value will be 170, shown by both print() and st.write(), based on the interpreted format of min and max values. Note that this line of code does not produce any error or warning. The user (at least in my case) assumed min_value and max_value would be raised to float rather than ignoring format. I couldn't figure out why my input was being reduced from 170.4 to 170\r\n\r\nThe correct code works fine\r\n`weight = st.number_input(\"Today's weight:\", min_value=50.0, max_value=750.0, format=\"%4.1f\")`\r\n\r\n## Expected behavior:\r\n\r\nA warning in both cases, not just one or the other. Maybe:\r\n_Warning: NumberInput value below is integer, format %f lowered to match integer._\r\n\r\n## Actual behavior:\r\n\r\nNo warning produced, seems like there should be. 
\r\nSorry, already explained in the Steps to Reproduce section\r\n\r\n## Is this a regression?\r\n\r\nNo (not that I know of)\r\n\r\n# Debug info\r\n\r\n- Streamlit version: (get it with `$ streamlit version`) 0.64.0\r\n- Python version: (get it with `$ python --version`) 3.7.7\r\n- Using Conda\r\n- OS version: Windows 10 64bit\r\n- Browser version: Chrome Version 84.0.4147.105 (Official Build) (64-bit)\r\n\r\n# Additional information\r\n\r\n\n", "before_files": [{"content": "# Copyright 2018-2020 Streamlit Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport numbers\nfrom typing import cast\n\nimport streamlit\nfrom streamlit.errors import StreamlitAPIException\nfrom streamlit.js_number import JSNumber, JSNumberBoundsException\nfrom streamlit.proto.NumberInput_pb2 import NumberInput as NumberInputProto\nfrom .utils import register_widget, NoValue\n\n\nclass NumberInputMixin:\n def number_input(\n self,\n label,\n min_value=None,\n max_value=None,\n value=NoValue(),\n step=None,\n format=None,\n key=None,\n ):\n \"\"\"Display a numeric input widget.\n\n Parameters\n ----------\n label : str or None\n A short label explaining to the user what this input is for.\n min_value : int or float or None\n The minimum permitted value.\n If None, there will be no minimum.\n max_value : int or float or None\n The maximum permitted value.\n If None, there will be no maximum.\n value : int or float or None\n The value of this widget when it first renders.\n Defaults to min_value, or 0.0 if min_value is None\n step : int or float or None\n The stepping interval.\n Defaults to 1 if the value is an int, 0.01 otherwise.\n If the value is not specified, the format parameter will be used.\n format : str or None\n A printf-style format string controlling how the interface should\n display numbers. Output must be purely numeric. This does not impact\n the return value. Valid formatters: %d %e %f %g %i\n key : str\n An optional string to use as the unique key for the widget.\n If this is omitted, a key will be generated for the widget\n based on its content. Multiple widgets of the same type may\n not share the same key.\n\n Returns\n -------\n int or float\n The current value of the numeric input widget. 
The return type\n will match the data type of the value parameter.\n\n Example\n -------\n >>> number = st.number_input('Insert a number')\n >>> st.write('The current number is ', number)\n \"\"\"\n\n if isinstance(value, NoValue):\n if min_value is not None:\n value = min_value\n else:\n value = 0.0 # We set a float as default\n\n int_value = isinstance(value, numbers.Integral)\n float_value = isinstance(value, float)\n\n if value is None:\n raise StreamlitAPIException(\n \"Default value for number_input should be an int or a float.\"\n )\n else:\n if format is None:\n format = \"%d\" if int_value else \"%0.2f\"\n\n if format in [\"%d\", \"%u\", \"%i\"] and float_value:\n # Warn user to check if displaying float as int was really intended.\n import streamlit as st\n\n st.warning(\n \"Warning: NumberInput value below is float, but format {} displays as integer.\".format(\n format\n )\n )\n\n if step is None:\n step = 1 if int_value else 0.01\n\n try:\n float(format % 2)\n except (TypeError, ValueError):\n raise StreamlitAPIException(\n \"Format string for st.number_input contains invalid characters: %s\"\n % format\n )\n\n # Ensure that all arguments are of the same type.\n args = [min_value, max_value, step]\n\n int_args = all(\n map(\n lambda a: (\n isinstance(a, numbers.Integral) or isinstance(a, type(None))\n ),\n args,\n )\n )\n float_args = all(\n map(lambda a: (isinstance(a, float) or isinstance(a, type(None))), args)\n )\n\n if not int_args and not float_args:\n raise StreamlitAPIException(\n \"All arguments must be of the same type.\"\n \"\\n`value` has %(value_type)s type.\"\n \"\\n`min_value` has %(min_type)s type.\"\n \"\\n`max_value` has %(max_type)s type.\"\n % {\n \"value_type\": type(value).__name__,\n \"min_type\": type(min_value).__name__,\n \"max_type\": type(max_value).__name__,\n }\n )\n\n # Ensure that the value matches arguments' types.\n all_ints = int_value and int_args\n all_floats = float_value and float_args\n\n if not all_ints and not all_floats:\n raise StreamlitAPIException(\n \"All numerical arguments must be of the same type.\"\n \"\\n`value` has %(value_type)s type.\"\n \"\\n`min_value` has %(min_type)s type.\"\n \"\\n`max_value` has %(max_type)s type.\"\n \"\\n`step` has %(step_type)s type.\"\n % {\n \"value_type\": type(value).__name__,\n \"min_type\": type(min_value).__name__,\n \"max_type\": type(max_value).__name__,\n \"step_type\": type(step).__name__,\n }\n )\n\n if (min_value and min_value > value) or (max_value and max_value < value):\n raise StreamlitAPIException(\n \"The default `value` of %(value)s \"\n \"must lie between the `min_value` of %(min)s \"\n \"and the `max_value` of %(max)s, inclusively.\"\n % {\"value\": value, \"min\": min_value, \"max\": max_value}\n )\n\n # Bounds checks. 
JSNumber produces human-readable exceptions that\n # we simply re-package as StreamlitAPIExceptions.\n try:\n if all_ints:\n if min_value is not None:\n JSNumber.validate_int_bounds(min_value, \"`min_value`\")\n if max_value is not None:\n JSNumber.validate_int_bounds(max_value, \"`max_value`\")\n if step is not None:\n JSNumber.validate_int_bounds(step, \"`step`\")\n JSNumber.validate_int_bounds(value, \"`value`\")\n else:\n if min_value is not None:\n JSNumber.validate_float_bounds(min_value, \"`min_value`\")\n if max_value is not None:\n JSNumber.validate_float_bounds(max_value, \"`max_value`\")\n if step is not None:\n JSNumber.validate_float_bounds(step, \"`step`\")\n JSNumber.validate_float_bounds(value, \"`value`\")\n except JSNumberBoundsException as e:\n raise StreamlitAPIException(str(e))\n\n number_input_proto = NumberInputProto()\n number_input_proto.data_type = (\n NumberInputProto.INT if all_ints else NumberInputProto.FLOAT\n )\n number_input_proto.label = label\n number_input_proto.default = value\n\n if min_value is not None:\n number_input_proto.min = min_value\n number_input_proto.has_min = True\n\n if max_value is not None:\n number_input_proto.max = max_value\n number_input_proto.has_max = True\n\n if step is not None:\n number_input_proto.step = step\n\n if format is not None:\n number_input_proto.format = format\n\n ui_value = register_widget(\"number_input\", number_input_proto, user_key=key)\n\n return_value = ui_value if ui_value is not None else value\n return self.dg._enqueue(\"number_input\", number_input_proto, return_value)\n\n @property\n def dg(self) -> \"streamlit.delta_generator.DeltaGenerator\":\n \"\"\"Get our DeltaGenerator.\"\"\"\n return cast(\"streamlit.delta_generator.DeltaGenerator\", self)\n", "path": "lib/streamlit/elements/number_input.py"}]}
3,365
396
gh_patches_debug_31153
rasdani/github-patches
git_diff
cupy__cupy-6121
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> `copyto` does not test shape for memcpy cases ### Description There should be a shape check before these: https://github.com/cupy/cupy/blob/e7f2e797216954dd152efe0169ec6a0094d7777d/cupy/_manipulation/basic.py#L54-L55 https://github.com/cupy/cupy/blob/e7f2e797216954dd152efe0169ec6a0094d7777d/cupy/_manipulation/basic.py#L76-L80 ### To Reproduce ```py >>> dst = cupy.ones((2, 3), dtype=int) >>> cupy.copyto(dst, cupy.arange(6)) >>> dst array([[0, 1, 2], [3, 4, 5]]) >>> numpy.copyto(numpy.ones((2, 3), dtype=int), numpy.arange(6)) Traceback (most recent call last): File "<stdin>", line 1, in <module> File "<__array_function__ internals>", line 5, in copyto ValueError: could not broadcast input array from shape (6,) into shape (2,3) ``` ```py >>> cupy.copyto(cupy.empty((3, 0, 4, 5)), cupy.empty((6, 7, 0))) >>> numpy.copyto(numpy.empty((3, 0, 4, 5)), numpy.empty((6, 7, 0))) Traceback (most recent call last): File "<stdin>", line 1, in <module> File "<__array_function__ internals>", line 5, in copyto ValueError: could not broadcast input array from shape (6,7,0) into shape (3,0,4,5) ``` </issue> <code> [start of cupy/_manipulation/basic.py] 1 import numpy 2 3 from cupy import _core 4 from cupy._core import _fusion_interface 5 from cupy._core import fusion 6 from cupy._sorting import search 7 from cupy_backends.cuda.api import runtime 8 9 10 def copyto(dst, src, casting='same_kind', where=None): 11 """Copies values from one array to another with broadcasting. 12 13 This function can be called for arrays on different devices. In this case, 14 casting, ``where``, and broadcasting is not supported, and an exception is 15 raised if these are used. 16 17 Args: 18 dst (cupy.ndarray): Target array. 19 src (cupy.ndarray): Source array. 20 casting (str): Casting rule. See :func:`numpy.can_cast` for detail. 21 where (cupy.ndarray of bool): If specified, this array acts as a mask, 22 and an element is copied only if the corresponding element of 23 ``where`` is True. 24 25 .. seealso:: :func:`numpy.copyto` 26 27 """ 28 29 src_type = type(src) 30 src_is_python_scalar = src_type in ( 31 int, bool, float, complex, 32 fusion._FusionVarScalar, _fusion_interface._ScalarProxy) 33 if src_is_python_scalar: 34 src_dtype = numpy.dtype(type(src)) 35 can_cast = numpy.can_cast(src, dst.dtype, casting) 36 else: 37 src_dtype = src.dtype 38 can_cast = numpy.can_cast(src_dtype, dst.dtype, casting) 39 40 if not can_cast: 41 raise TypeError('Cannot cast %s to %s in %s casting mode' % 42 (src_dtype, dst.dtype, casting)) 43 44 if not src_is_python_scalar and src.ndim > dst.ndim: 45 # NumPy allows stripping leading unit dimensions. 
46 try: 47 src = src.squeeze(tuple(range(src.ndim - dst.ndim))) 48 except ValueError: 49 # "cannot select an axis to squeeze out 50 # which has size not equal to one" 51 pass # raise an error later 52 53 if fusion._is_fusing(): 54 if where is None: 55 _core.elementwise_copy(src, dst) 56 else: 57 fusion._call_ufunc(search._where_ufunc, where, src, dst, dst) 58 return 59 60 if where is not None: 61 _core.elementwise_copy(src, dst, _where=where) 62 return 63 64 if dst.size == 0: 65 return 66 67 if src_is_python_scalar: 68 dst.fill(src) 69 return 70 71 if _can_memcpy(dst, src): 72 dst.data.copy_from_async(src.data, src.nbytes) 73 return 74 75 device = dst.device 76 prev_device = runtime.getDevice() 77 try: 78 runtime.setDevice(device.id) 79 if src.device != device: 80 src = src.copy() 81 _core.elementwise_copy(src, dst) 82 finally: 83 runtime.setDevice(prev_device) 84 85 86 def _can_memcpy(dst, src): 87 c_contiguous = dst.flags.c_contiguous and src.flags.c_contiguous 88 f_contiguous = dst.flags.f_contiguous and src.flags.f_contiguous 89 return (c_contiguous or f_contiguous) and dst.dtype == src.dtype and \ 90 dst.size == src.size 91 [end of cupy/_manipulation/basic.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/cupy/_manipulation/basic.py b/cupy/_manipulation/basic.py --- a/cupy/_manipulation/basic.py +++ b/cupy/_manipulation/basic.py @@ -1,3 +1,5 @@ +import itertools + import numpy from cupy import _core @@ -41,22 +43,35 @@ raise TypeError('Cannot cast %s to %s in %s casting mode' % (src_dtype, dst.dtype, casting)) - if not src_is_python_scalar and src.ndim > dst.ndim: - # NumPy allows stripping leading unit dimensions. - try: - src = src.squeeze(tuple(range(src.ndim - dst.ndim))) - except ValueError: - # "cannot select an axis to squeeze out - # which has size not equal to one" - pass # raise an error later - if fusion._is_fusing(): + # TODO(kataoka): NumPy allows stripping leading unit dimensions. + # But fusion array proxy does not currently support + # `shape` and `squeeze`. + if where is None: _core.elementwise_copy(src, dst) else: fusion._call_ufunc(search._where_ufunc, where, src, dst, dst) return + if not src_is_python_scalar: + # Check broadcast condition + # - for fast-paths and + # - for a better error message (than ufunc's). + # NumPy allows stripping leading unit dimensions. + if not all([ + s in (d, 1) + for s, d in itertools.zip_longest( + reversed(src.shape), reversed(dst.shape), fillvalue=1) + ]): + raise ValueError( + "could not broadcast input array " + f"from shape {src.shape} into shape {dst.shape}") + squeeze_ndim = src.ndim - dst.ndim + if squeeze_ndim > 0: + # always succeeds because broadcast conition is checked. + src = src.squeeze(tuple(range(squeeze_ndim))) + if where is not None: _core.elementwise_copy(src, dst, _where=where) return
{"golden_diff": "diff --git a/cupy/_manipulation/basic.py b/cupy/_manipulation/basic.py\n--- a/cupy/_manipulation/basic.py\n+++ b/cupy/_manipulation/basic.py\n@@ -1,3 +1,5 @@\n+import itertools\n+\n import numpy\n \n from cupy import _core\n@@ -41,22 +43,35 @@\n raise TypeError('Cannot cast %s to %s in %s casting mode' %\n (src_dtype, dst.dtype, casting))\n \n- if not src_is_python_scalar and src.ndim > dst.ndim:\n- # NumPy allows stripping leading unit dimensions.\n- try:\n- src = src.squeeze(tuple(range(src.ndim - dst.ndim)))\n- except ValueError:\n- # \"cannot select an axis to squeeze out\n- # which has size not equal to one\"\n- pass # raise an error later\n-\n if fusion._is_fusing():\n+ # TODO(kataoka): NumPy allows stripping leading unit dimensions.\n+ # But fusion array proxy does not currently support\n+ # `shape` and `squeeze`.\n+\n if where is None:\n _core.elementwise_copy(src, dst)\n else:\n fusion._call_ufunc(search._where_ufunc, where, src, dst, dst)\n return\n \n+ if not src_is_python_scalar:\n+ # Check broadcast condition\n+ # - for fast-paths and\n+ # - for a better error message (than ufunc's).\n+ # NumPy allows stripping leading unit dimensions.\n+ if not all([\n+ s in (d, 1)\n+ for s, d in itertools.zip_longest(\n+ reversed(src.shape), reversed(dst.shape), fillvalue=1)\n+ ]):\n+ raise ValueError(\n+ \"could not broadcast input array \"\n+ f\"from shape {src.shape} into shape {dst.shape}\")\n+ squeeze_ndim = src.ndim - dst.ndim\n+ if squeeze_ndim > 0:\n+ # always succeeds because broadcast conition is checked.\n+ src = src.squeeze(tuple(range(squeeze_ndim)))\n+\n if where is not None:\n _core.elementwise_copy(src, dst, _where=where)\n return\n", "issue": "`copyto` does not test shape for memcpy cases\n### Description\r\n\r\nThere should be a shape check before these:\r\nhttps://github.com/cupy/cupy/blob/e7f2e797216954dd152efe0169ec6a0094d7777d/cupy/_manipulation/basic.py#L54-L55\r\nhttps://github.com/cupy/cupy/blob/e7f2e797216954dd152efe0169ec6a0094d7777d/cupy/_manipulation/basic.py#L76-L80\r\n\r\n### To Reproduce\r\n\r\n```py\r\n>>> dst = cupy.ones((2, 3), dtype=int)\r\n>>> cupy.copyto(dst, cupy.arange(6))\r\n>>> dst\r\narray([[0, 1, 2],\r\n [3, 4, 5]])\r\n>>> numpy.copyto(numpy.ones((2, 3), dtype=int), numpy.arange(6))\r\nTraceback (most recent call last):\r\n File \"<stdin>\", line 1, in <module>\r\n File \"<__array_function__ internals>\", line 5, in copyto\r\nValueError: could not broadcast input array from shape (6,) into shape (2,3)\r\n```\r\n\r\n```py\r\n>>> cupy.copyto(cupy.empty((3, 0, 4, 5)), cupy.empty((6, 7, 0)))\r\n>>> numpy.copyto(numpy.empty((3, 0, 4, 5)), numpy.empty((6, 7, 0)))\r\nTraceback (most recent call last):\r\n File \"<stdin>\", line 1, in <module>\r\n File \"<__array_function__ internals>\", line 5, in copyto\r\nValueError: could not broadcast input array from shape (6,7,0) into shape (3,0,4,5)\r\n```\r\n\n", "before_files": [{"content": "import numpy\n\nfrom cupy import _core\nfrom cupy._core import _fusion_interface\nfrom cupy._core import fusion\nfrom cupy._sorting import search\nfrom cupy_backends.cuda.api import runtime\n\n\ndef copyto(dst, src, casting='same_kind', where=None):\n \"\"\"Copies values from one array to another with broadcasting.\n\n This function can be called for arrays on different devices. In this case,\n casting, ``where``, and broadcasting is not supported, and an exception is\n raised if these are used.\n\n Args:\n dst (cupy.ndarray): Target array.\n src (cupy.ndarray): Source array.\n casting (str): Casting rule. 
See :func:`numpy.can_cast` for detail.\n where (cupy.ndarray of bool): If specified, this array acts as a mask,\n and an element is copied only if the corresponding element of\n ``where`` is True.\n\n .. seealso:: :func:`numpy.copyto`\n\n \"\"\"\n\n src_type = type(src)\n src_is_python_scalar = src_type in (\n int, bool, float, complex,\n fusion._FusionVarScalar, _fusion_interface._ScalarProxy)\n if src_is_python_scalar:\n src_dtype = numpy.dtype(type(src))\n can_cast = numpy.can_cast(src, dst.dtype, casting)\n else:\n src_dtype = src.dtype\n can_cast = numpy.can_cast(src_dtype, dst.dtype, casting)\n\n if not can_cast:\n raise TypeError('Cannot cast %s to %s in %s casting mode' %\n (src_dtype, dst.dtype, casting))\n\n if not src_is_python_scalar and src.ndim > dst.ndim:\n # NumPy allows stripping leading unit dimensions.\n try:\n src = src.squeeze(tuple(range(src.ndim - dst.ndim)))\n except ValueError:\n # \"cannot select an axis to squeeze out\n # which has size not equal to one\"\n pass # raise an error later\n\n if fusion._is_fusing():\n if where is None:\n _core.elementwise_copy(src, dst)\n else:\n fusion._call_ufunc(search._where_ufunc, where, src, dst, dst)\n return\n\n if where is not None:\n _core.elementwise_copy(src, dst, _where=where)\n return\n\n if dst.size == 0:\n return\n\n if src_is_python_scalar:\n dst.fill(src)\n return\n\n if _can_memcpy(dst, src):\n dst.data.copy_from_async(src.data, src.nbytes)\n return\n\n device = dst.device\n prev_device = runtime.getDevice()\n try:\n runtime.setDevice(device.id)\n if src.device != device:\n src = src.copy()\n _core.elementwise_copy(src, dst)\n finally:\n runtime.setDevice(prev_device)\n\n\ndef _can_memcpy(dst, src):\n c_contiguous = dst.flags.c_contiguous and src.flags.c_contiguous\n f_contiguous = dst.flags.f_contiguous and src.flags.f_contiguous\n return (c_contiguous or f_contiguous) and dst.dtype == src.dtype and \\\n dst.size == src.size\n", "path": "cupy/_manipulation/basic.py"}]}
1,817
488
gh_patches_debug_39421
rasdani/github-patches
git_diff
googleapis__google-cloud-python-376
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Add regression test for datastore keys (object names) with / separator See https://github.com/GoogleCloudPlatform/gcloud-python/pull/371#issuecomment-62659321 </issue> <code> [start of regression/storage.py] 1 from Crypto.Hash import MD5 2 import base64 3 import httplib2 4 import tempfile 5 import time 6 import unittest2 7 8 from gcloud import storage 9 # This assumes the command is being run via tox hence the 10 # repository root is the current directory. 11 from regression import regression_utils 12 13 14 HTTP = httplib2.Http() 15 SHARED_BUCKETS = {} 16 17 18 def setUpModule(): 19 if 'test_bucket' not in SHARED_BUCKETS: 20 connection = regression_utils.get_storage_connection() 21 # %d rounds milliseconds to nearest integer. 22 bucket_name = 'new%d' % (1000 * time.time(),) 23 # In the **very** rare case the bucket name is reserved, this 24 # fails with a ConnectionError. 25 SHARED_BUCKETS['test_bucket'] = connection.create_bucket(bucket_name) 26 27 28 def tearDownModule(): 29 for bucket in SHARED_BUCKETS.values(): 30 # Passing force=True also deletes all files. 31 bucket.delete(force=True) 32 33 34 class TestStorage(unittest2.TestCase): 35 36 @classmethod 37 def setUpClass(cls): 38 cls.connection = regression_utils.get_storage_connection() 39 40 41 class TestStorageBuckets(TestStorage): 42 43 def setUp(self): 44 self.case_buckets_to_delete = [] 45 46 def tearDown(self): 47 for bucket in self.case_buckets_to_delete: 48 bucket.delete() 49 50 def test_create_bucket(self): 51 new_bucket_name = 'a-new-bucket' 52 self.assertRaises(storage.exceptions.NotFound, 53 self.connection.get_bucket, new_bucket_name) 54 created = self.connection.create_bucket(new_bucket_name) 55 self.case_buckets_to_delete.append(created) 56 self.assertEqual(created.name, new_bucket_name) 57 58 def test_get_buckets(self): 59 buckets_to_create = [ 60 'new%d' % (1000 * time.time(),), 61 'newer%d' % (1000 * time.time(),), 62 'newest%d' % (1000 * time.time(),), 63 ] 64 created_buckets = [] 65 for bucket_name in buckets_to_create: 66 bucket = self.connection.create_bucket(bucket_name) 67 self.case_buckets_to_delete.append(bucket) 68 69 # Retrieve the buckets. 
70 all_buckets = self.connection.get_all_buckets() 71 created_buckets = [bucket for bucket in all_buckets 72 if bucket.name in buckets_to_create] 73 self.assertEqual(len(created_buckets), len(buckets_to_create)) 74 75 76 class TestStorageFiles(TestStorage): 77 78 FILES = { 79 'logo': { 80 'path': 'regression/data/CloudPlatform_128px_Retina.png', 81 }, 82 'big': { 83 'path': 'regression/data/five-mb-file.zip', 84 }, 85 } 86 87 @staticmethod 88 def _get_base64_md5hash(filename): 89 with open(filename, 'rb') as file_obj: 90 hash = MD5.new(data=file_obj.read()) 91 digest_bytes = hash.digest() 92 return base64.b64encode(digest_bytes) 93 94 @classmethod 95 def setUpClass(cls): 96 super(TestStorageFiles, cls).setUpClass() 97 for file_data in cls.FILES.values(): 98 file_data['hash'] = cls._get_base64_md5hash(file_data['path']) 99 cls.bucket = SHARED_BUCKETS['test_bucket'] 100 101 def setUp(self): 102 self.case_keys_to_delete = [] 103 104 def tearDown(self): 105 for key in self.case_keys_to_delete: 106 key.delete() 107 108 109 class TestStorageWriteFiles(TestStorageFiles): 110 111 def test_large_file_write_from_stream(self): 112 key = self.bucket.new_key('LargeFile') 113 self.assertEqual(key._properties, {}) 114 115 file_data = self.FILES['big'] 116 with open(file_data['path'], 'rb') as file_obj: 117 self.bucket.upload_file_object(file_obj, key=key) 118 self.case_keys_to_delete.append(key) 119 120 key._properties.clear() # force a reload 121 self.assertEqual(key.md5_hash, file_data['hash']) 122 123 def test_write_metadata(self): 124 key = self.bucket.upload_file(self.FILES['logo']['path']) 125 self.case_keys_to_delete.append(key) 126 127 # NOTE: This should not be necessary. We should be able to pass 128 # it in to upload_file and also to upload_from_string. 129 key.content_type = 'image/png' 130 key._properties.clear() # force a reload 131 self.assertEqual(key.content_type, 'image/png') 132 133 def test_direct_write_and_read_into_file(self): 134 key = self.bucket.new_key('MyBuffer') 135 file_contents = 'Hello World' 136 key.upload_from_string(file_contents) 137 self.case_keys_to_delete.append(key) 138 139 same_key = self.bucket.new_key('MyBuffer') 140 temp_filename = tempfile.mktemp() 141 with open(temp_filename, 'w') as file_obj: 142 same_key.get_contents_to_file(file_obj) 143 144 with open(temp_filename, 'rb') as file_obj: 145 stored_contents = file_obj.read() 146 147 self.assertEqual(file_contents, stored_contents) 148 149 def test_copy_existing_file(self): 150 key = self.bucket.upload_file(self.FILES['logo']['path'], 151 key='CloudLogo') 152 self.case_keys_to_delete.append(key) 153 154 new_key = self.bucket.copy_key(key, self.bucket, 'CloudLogoCopy') 155 self.case_keys_to_delete.append(new_key) 156 157 base_contents = key.get_contents_as_string() 158 copied_contents = new_key.get_contents_as_string() 159 self.assertEqual(base_contents, copied_contents) 160 161 162 class TestStorageListFiles(TestStorageFiles): 163 164 FILENAMES = ['CloudLogo1', 'CloudLogo2', 'CloudLogo3'] 165 166 @classmethod 167 def setUpClass(cls): 168 super(TestStorageListFiles, cls).setUpClass() 169 # Make sure bucket empty before beginning. 170 for key in cls.bucket: 171 key.delete() 172 173 logo_path = cls.FILES['logo']['path'] 174 key = cls.bucket.upload_file(logo_path, key=cls.FILENAMES[0]) 175 cls.suite_keys_to_delete = [key] 176 177 # Copy main key onto remaining in FILENAMES. 
178 for filename in cls.FILENAMES[1:]: 179 new_key = cls.bucket.copy_key(key, cls.bucket, filename) 180 cls.suite_keys_to_delete.append(new_key) 181 182 @classmethod 183 def tearDownClass(cls): 184 for key in cls.suite_keys_to_delete: 185 key.delete() 186 187 def test_list_files(self): 188 all_keys = self.bucket.get_all_keys() 189 self.assertEqual(len(all_keys), len(self.FILENAMES)) 190 191 def test_paginate_files(self): 192 truncation_size = 1 193 count = len(self.FILENAMES) - truncation_size 194 iterator = self.bucket.iterator(max_results=count) 195 response = iterator.get_next_page_response() 196 keys = list(iterator.get_items_from_response(response)) 197 self.assertEqual(len(keys), count) 198 self.assertEqual(iterator.page_number, 1) 199 self.assertTrue(iterator.next_page_token is not None) 200 201 response = iterator.get_next_page_response() 202 last_keys = list(iterator.get_items_from_response(response)) 203 self.assertEqual(len(last_keys), truncation_size) 204 205 206 class TestStorageSignURLs(TestStorageFiles): 207 208 def setUp(self): 209 super(TestStorageSignURLs, self).setUp() 210 211 logo_path = self.FILES['logo']['path'] 212 with open(logo_path, 'r') as file_obj: 213 self.LOCAL_FILE = file_obj.read() 214 215 key = self.bucket.new_key('LogoToSign.jpg') 216 key.upload_from_string(self.LOCAL_FILE) 217 self.case_keys_to_delete.append(key) 218 219 def tearDown(self): 220 for key in self.case_keys_to_delete: 221 if key.exists(): 222 key.delete() 223 224 def test_create_signed_read_url(self): 225 key = self.bucket.new_key('LogoToSign.jpg') 226 expiration = int(time.time() + 5) 227 signed_url = key.generate_signed_url(expiration, method='GET') 228 229 response, content = HTTP.request(signed_url, method='GET') 230 self.assertEqual(response.status, 200) 231 self.assertEqual(content, self.LOCAL_FILE) 232 233 def test_create_signed_delete_url(self): 234 key = self.bucket.new_key('LogoToSign.jpg') 235 expiration = int(time.time() + 283473274) 236 signed_delete_url = key.generate_signed_url(expiration, 237 method='DELETE') 238 239 response, content = HTTP.request(signed_delete_url, method='DELETE') 240 self.assertEqual(response.status, 204) 241 self.assertEqual(content, '') 242 243 # Check that the key has actually been deleted. 244 self.assertFalse(key in self.bucket) 245 [end of regression/storage.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/regression/storage.py b/regression/storage.py --- a/regression/storage.py +++ b/regression/storage.py @@ -82,6 +82,9 @@ 'big': { 'path': 'regression/data/five-mb-file.zip', }, + 'simple': { + 'path': 'regression/data/simple.txt', + } } @staticmethod @@ -203,6 +206,82 @@ self.assertEqual(len(last_keys), truncation_size) +class TestStoragePseudoHierarchy(TestStorageFiles): + + FILENAMES = [ + 'file01.txt', + 'parent/file11.txt', + 'parent/child/file21.txt', + 'parent/child/file22.txt', + 'parent/child/grand/file31.txt', + 'parent/child/other/file32.txt', + ] + + @classmethod + def setUpClass(cls): + super(TestStoragePseudoHierarchy, cls).setUpClass() + # Make sure bucket empty before beginning. + for key in cls.bucket: + key.delete() + + simple_path = cls.FILES['simple']['path'] + key = cls.bucket.upload_file(simple_path, key=cls.FILENAMES[0]) + cls.suite_keys_to_delete = [key] + for filename in cls.FILENAMES[1:]: + new_key = cls.bucket.copy_key(key, cls.bucket, filename) + cls.suite_keys_to_delete.append(new_key) + + @classmethod + def tearDownClass(cls): + for key in cls.suite_keys_to_delete: + key.delete() + + def test_root_level_w_delimiter(self): + iterator = self.bucket.iterator(delimiter='/') + response = iterator.get_next_page_response() + keys = list(iterator.get_items_from_response(response)) + self.assertEqual([key.name for key in keys], ['file01.txt']) + self.assertEqual(iterator.page_number, 1) + self.assertTrue(iterator.next_page_token is None) + self.assertEqual(iterator.prefixes, ('parent/',)) + + def test_first_level(self): + iterator = self.bucket.iterator(delimiter='/', prefix='parent/') + response = iterator.get_next_page_response() + keys = list(iterator.get_items_from_response(response)) + self.assertEqual([key.name for key in keys], ['parent/file11.txt']) + self.assertEqual(iterator.page_number, 1) + self.assertTrue(iterator.next_page_token is None) + self.assertEqual(iterator.prefixes, ('parent/child/',)) + + def test_second_level(self): + iterator = self.bucket.iterator(delimiter='/', prefix='parent/child/') + response = iterator.get_next_page_response() + keys = list(iterator.get_items_from_response(response)) + self.assertEqual([key.name for key in keys], + ['parent/child/file21.txt', + 'parent/child/file22.txt']) + self.assertEqual(iterator.page_number, 1) + self.assertTrue(iterator.next_page_token is None) + self.assertEqual(iterator.prefixes, + ('parent/child/grand/', 'parent/child/other/')) + + def test_third_level(self): + # Pseudo-hierarchy can be arbitrarily deep, subject to the limit + # of 1024 characters in the UTF-8 encoded name: + # https://cloud.google.com/storage/docs/bucketnaming#objectnames + # Exercise a layer deeper to illustrate this. + iterator = self.bucket.iterator(delimiter='/', + prefix='parent/child/grand/') + response = iterator.get_next_page_response() + keys = list(iterator.get_items_from_response(response)) + self.assertEqual([key.name for key in keys], + ['parent/child/grand/file31.txt']) + self.assertEqual(iterator.page_number, 1) + self.assertTrue(iterator.next_page_token is None) + self.assertEqual(iterator.prefixes, ()) + + class TestStorageSignURLs(TestStorageFiles): def setUp(self):
{"golden_diff": "diff --git a/regression/storage.py b/regression/storage.py\n--- a/regression/storage.py\n+++ b/regression/storage.py\n@@ -82,6 +82,9 @@\n 'big': {\n 'path': 'regression/data/five-mb-file.zip',\n },\n+ 'simple': {\n+ 'path': 'regression/data/simple.txt',\n+ }\n }\n \n @staticmethod\n@@ -203,6 +206,82 @@\n self.assertEqual(len(last_keys), truncation_size)\n \n \n+class TestStoragePseudoHierarchy(TestStorageFiles):\n+\n+ FILENAMES = [\n+ 'file01.txt',\n+ 'parent/file11.txt',\n+ 'parent/child/file21.txt',\n+ 'parent/child/file22.txt',\n+ 'parent/child/grand/file31.txt',\n+ 'parent/child/other/file32.txt',\n+ ]\n+\n+ @classmethod\n+ def setUpClass(cls):\n+ super(TestStoragePseudoHierarchy, cls).setUpClass()\n+ # Make sure bucket empty before beginning.\n+ for key in cls.bucket:\n+ key.delete()\n+\n+ simple_path = cls.FILES['simple']['path']\n+ key = cls.bucket.upload_file(simple_path, key=cls.FILENAMES[0])\n+ cls.suite_keys_to_delete = [key]\n+ for filename in cls.FILENAMES[1:]:\n+ new_key = cls.bucket.copy_key(key, cls.bucket, filename)\n+ cls.suite_keys_to_delete.append(new_key)\n+\n+ @classmethod\n+ def tearDownClass(cls):\n+ for key in cls.suite_keys_to_delete:\n+ key.delete()\n+\n+ def test_root_level_w_delimiter(self):\n+ iterator = self.bucket.iterator(delimiter='/')\n+ response = iterator.get_next_page_response()\n+ keys = list(iterator.get_items_from_response(response))\n+ self.assertEqual([key.name for key in keys], ['file01.txt'])\n+ self.assertEqual(iterator.page_number, 1)\n+ self.assertTrue(iterator.next_page_token is None)\n+ self.assertEqual(iterator.prefixes, ('parent/',))\n+\n+ def test_first_level(self):\n+ iterator = self.bucket.iterator(delimiter='/', prefix='parent/')\n+ response = iterator.get_next_page_response()\n+ keys = list(iterator.get_items_from_response(response))\n+ self.assertEqual([key.name for key in keys], ['parent/file11.txt'])\n+ self.assertEqual(iterator.page_number, 1)\n+ self.assertTrue(iterator.next_page_token is None)\n+ self.assertEqual(iterator.prefixes, ('parent/child/',))\n+\n+ def test_second_level(self):\n+ iterator = self.bucket.iterator(delimiter='/', prefix='parent/child/')\n+ response = iterator.get_next_page_response()\n+ keys = list(iterator.get_items_from_response(response))\n+ self.assertEqual([key.name for key in keys],\n+ ['parent/child/file21.txt',\n+ 'parent/child/file22.txt'])\n+ self.assertEqual(iterator.page_number, 1)\n+ self.assertTrue(iterator.next_page_token is None)\n+ self.assertEqual(iterator.prefixes,\n+ ('parent/child/grand/', 'parent/child/other/'))\n+\n+ def test_third_level(self):\n+ # Pseudo-hierarchy can be arbitrarily deep, subject to the limit\n+ # of 1024 characters in the UTF-8 encoded name:\n+ # https://cloud.google.com/storage/docs/bucketnaming#objectnames\n+ # Exercise a layer deeper to illustrate this.\n+ iterator = self.bucket.iterator(delimiter='/',\n+ prefix='parent/child/grand/')\n+ response = iterator.get_next_page_response()\n+ keys = list(iterator.get_items_from_response(response))\n+ self.assertEqual([key.name for key in keys],\n+ ['parent/child/grand/file31.txt'])\n+ self.assertEqual(iterator.page_number, 1)\n+ self.assertTrue(iterator.next_page_token is None)\n+ self.assertEqual(iterator.prefixes, ())\n+\n+\n class TestStorageSignURLs(TestStorageFiles):\n \n def setUp(self):\n", "issue": "Add regression test for datastore keys (object names) with / separator\nSee https://github.com/GoogleCloudPlatform/gcloud-python/pull/371#issuecomment-62659321\n\n", "before_files": [{"content": 
"from Crypto.Hash import MD5\nimport base64\nimport httplib2\nimport tempfile\nimport time\nimport unittest2\n\nfrom gcloud import storage\n# This assumes the command is being run via tox hence the\n# repository root is the current directory.\nfrom regression import regression_utils\n\n\nHTTP = httplib2.Http()\nSHARED_BUCKETS = {}\n\n\ndef setUpModule():\n if 'test_bucket' not in SHARED_BUCKETS:\n connection = regression_utils.get_storage_connection()\n # %d rounds milliseconds to nearest integer.\n bucket_name = 'new%d' % (1000 * time.time(),)\n # In the **very** rare case the bucket name is reserved, this\n # fails with a ConnectionError.\n SHARED_BUCKETS['test_bucket'] = connection.create_bucket(bucket_name)\n\n\ndef tearDownModule():\n for bucket in SHARED_BUCKETS.values():\n # Passing force=True also deletes all files.\n bucket.delete(force=True)\n\n\nclass TestStorage(unittest2.TestCase):\n\n @classmethod\n def setUpClass(cls):\n cls.connection = regression_utils.get_storage_connection()\n\n\nclass TestStorageBuckets(TestStorage):\n\n def setUp(self):\n self.case_buckets_to_delete = []\n\n def tearDown(self):\n for bucket in self.case_buckets_to_delete:\n bucket.delete()\n\n def test_create_bucket(self):\n new_bucket_name = 'a-new-bucket'\n self.assertRaises(storage.exceptions.NotFound,\n self.connection.get_bucket, new_bucket_name)\n created = self.connection.create_bucket(new_bucket_name)\n self.case_buckets_to_delete.append(created)\n self.assertEqual(created.name, new_bucket_name)\n\n def test_get_buckets(self):\n buckets_to_create = [\n 'new%d' % (1000 * time.time(),),\n 'newer%d' % (1000 * time.time(),),\n 'newest%d' % (1000 * time.time(),),\n ]\n created_buckets = []\n for bucket_name in buckets_to_create:\n bucket = self.connection.create_bucket(bucket_name)\n self.case_buckets_to_delete.append(bucket)\n\n # Retrieve the buckets.\n all_buckets = self.connection.get_all_buckets()\n created_buckets = [bucket for bucket in all_buckets\n if bucket.name in buckets_to_create]\n self.assertEqual(len(created_buckets), len(buckets_to_create))\n\n\nclass TestStorageFiles(TestStorage):\n\n FILES = {\n 'logo': {\n 'path': 'regression/data/CloudPlatform_128px_Retina.png',\n },\n 'big': {\n 'path': 'regression/data/five-mb-file.zip',\n },\n }\n\n @staticmethod\n def _get_base64_md5hash(filename):\n with open(filename, 'rb') as file_obj:\n hash = MD5.new(data=file_obj.read())\n digest_bytes = hash.digest()\n return base64.b64encode(digest_bytes)\n\n @classmethod\n def setUpClass(cls):\n super(TestStorageFiles, cls).setUpClass()\n for file_data in cls.FILES.values():\n file_data['hash'] = cls._get_base64_md5hash(file_data['path'])\n cls.bucket = SHARED_BUCKETS['test_bucket']\n\n def setUp(self):\n self.case_keys_to_delete = []\n\n def tearDown(self):\n for key in self.case_keys_to_delete:\n key.delete()\n\n\nclass TestStorageWriteFiles(TestStorageFiles):\n\n def test_large_file_write_from_stream(self):\n key = self.bucket.new_key('LargeFile')\n self.assertEqual(key._properties, {})\n\n file_data = self.FILES['big']\n with open(file_data['path'], 'rb') as file_obj:\n self.bucket.upload_file_object(file_obj, key=key)\n self.case_keys_to_delete.append(key)\n\n key._properties.clear() # force a reload\n self.assertEqual(key.md5_hash, file_data['hash'])\n\n def test_write_metadata(self):\n key = self.bucket.upload_file(self.FILES['logo']['path'])\n self.case_keys_to_delete.append(key)\n\n # NOTE: This should not be necessary. 
We should be able to pass\n # it in to upload_file and also to upload_from_string.\n key.content_type = 'image/png'\n key._properties.clear() # force a reload\n self.assertEqual(key.content_type, 'image/png')\n\n def test_direct_write_and_read_into_file(self):\n key = self.bucket.new_key('MyBuffer')\n file_contents = 'Hello World'\n key.upload_from_string(file_contents)\n self.case_keys_to_delete.append(key)\n\n same_key = self.bucket.new_key('MyBuffer')\n temp_filename = tempfile.mktemp()\n with open(temp_filename, 'w') as file_obj:\n same_key.get_contents_to_file(file_obj)\n\n with open(temp_filename, 'rb') as file_obj:\n stored_contents = file_obj.read()\n\n self.assertEqual(file_contents, stored_contents)\n\n def test_copy_existing_file(self):\n key = self.bucket.upload_file(self.FILES['logo']['path'],\n key='CloudLogo')\n self.case_keys_to_delete.append(key)\n\n new_key = self.bucket.copy_key(key, self.bucket, 'CloudLogoCopy')\n self.case_keys_to_delete.append(new_key)\n\n base_contents = key.get_contents_as_string()\n copied_contents = new_key.get_contents_as_string()\n self.assertEqual(base_contents, copied_contents)\n\n\nclass TestStorageListFiles(TestStorageFiles):\n\n FILENAMES = ['CloudLogo1', 'CloudLogo2', 'CloudLogo3']\n\n @classmethod\n def setUpClass(cls):\n super(TestStorageListFiles, cls).setUpClass()\n # Make sure bucket empty before beginning.\n for key in cls.bucket:\n key.delete()\n\n logo_path = cls.FILES['logo']['path']\n key = cls.bucket.upload_file(logo_path, key=cls.FILENAMES[0])\n cls.suite_keys_to_delete = [key]\n\n # Copy main key onto remaining in FILENAMES.\n for filename in cls.FILENAMES[1:]:\n new_key = cls.bucket.copy_key(key, cls.bucket, filename)\n cls.suite_keys_to_delete.append(new_key)\n\n @classmethod\n def tearDownClass(cls):\n for key in cls.suite_keys_to_delete:\n key.delete()\n\n def test_list_files(self):\n all_keys = self.bucket.get_all_keys()\n self.assertEqual(len(all_keys), len(self.FILENAMES))\n\n def test_paginate_files(self):\n truncation_size = 1\n count = len(self.FILENAMES) - truncation_size\n iterator = self.bucket.iterator(max_results=count)\n response = iterator.get_next_page_response()\n keys = list(iterator.get_items_from_response(response))\n self.assertEqual(len(keys), count)\n self.assertEqual(iterator.page_number, 1)\n self.assertTrue(iterator.next_page_token is not None)\n\n response = iterator.get_next_page_response()\n last_keys = list(iterator.get_items_from_response(response))\n self.assertEqual(len(last_keys), truncation_size)\n\n\nclass TestStorageSignURLs(TestStorageFiles):\n\n def setUp(self):\n super(TestStorageSignURLs, self).setUp()\n\n logo_path = self.FILES['logo']['path']\n with open(logo_path, 'r') as file_obj:\n self.LOCAL_FILE = file_obj.read()\n\n key = self.bucket.new_key('LogoToSign.jpg')\n key.upload_from_string(self.LOCAL_FILE)\n self.case_keys_to_delete.append(key)\n\n def tearDown(self):\n for key in self.case_keys_to_delete:\n if key.exists():\n key.delete()\n\n def test_create_signed_read_url(self):\n key = self.bucket.new_key('LogoToSign.jpg')\n expiration = int(time.time() + 5)\n signed_url = key.generate_signed_url(expiration, method='GET')\n\n response, content = HTTP.request(signed_url, method='GET')\n self.assertEqual(response.status, 200)\n self.assertEqual(content, self.LOCAL_FILE)\n\n def test_create_signed_delete_url(self):\n key = self.bucket.new_key('LogoToSign.jpg')\n expiration = int(time.time() + 283473274)\n signed_delete_url = key.generate_signed_url(expiration,\n 
method='DELETE')\n\n response, content = HTTP.request(signed_delete_url, method='DELETE')\n self.assertEqual(response.status, 204)\n self.assertEqual(content, '')\n\n # Check that the key has actually been deleted.\n self.assertFalse(key in self.bucket)\n", "path": "regression/storage.py"}]}
3,030
887
gh_patches_debug_820
rasdani/github-patches
git_diff
pypi__warehouse-3568
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Set samesite=lax on session cookies This is a strong defense-in-depth mechanism for protecting against CSRF. It's currently only respected by Chrome, but Firefox will add it as well. </issue> <code> [start of warehouse/sessions.py] 1 # Licensed under the Apache License, Version 2.0 (the "License"); 2 # you may not use this file except in compliance with the License. 3 # You may obtain a copy of the License at 4 # 5 # http://www.apache.org/licenses/LICENSE-2.0 6 # 7 # Unless required by applicable law or agreed to in writing, software 8 # distributed under the License is distributed on an "AS IS" BASIS, 9 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 10 # See the License for the specific language governing permissions and 11 # limitations under the License. 12 13 import functools 14 import time 15 16 import msgpack 17 import msgpack.exceptions 18 import redis 19 20 from pyramid import viewderivers 21 from pyramid.interfaces import ISession, ISessionFactory 22 from zope.interface import implementer 23 24 from warehouse.cache.http import add_vary 25 from warehouse.utils import crypto 26 27 28 def _invalid_method(method): 29 @functools.wraps(method) 30 def wrapped(self, *args, **kwargs): 31 self._error_message() 32 return wrapped 33 34 35 @implementer(ISession) 36 class InvalidSession(dict): 37 38 __contains__ = _invalid_method(dict.__contains__) 39 __delitem__ = _invalid_method(dict.__delitem__) 40 __getitem__ = _invalid_method(dict.__getitem__) 41 __iter__ = _invalid_method(dict.__iter__) 42 __len__ = _invalid_method(dict.__len__) 43 __setitem__ = _invalid_method(dict.__setitem__) 44 clear = _invalid_method(dict.clear) 45 copy = _invalid_method(dict.copy) 46 fromkeys = _invalid_method(dict.fromkeys) 47 get = _invalid_method(dict.get) 48 items = _invalid_method(dict.items) 49 keys = _invalid_method(dict.keys) 50 pop = _invalid_method(dict.pop) 51 popitem = _invalid_method(dict.popitem) 52 setdefault = _invalid_method(dict.setdefault) 53 update = _invalid_method(dict.update) 54 values = _invalid_method(dict.values) 55 56 def _error_message(self): 57 raise RuntimeError( 58 "Cannot use request.session in a view without uses_session=True." 59 ) 60 61 def __getattr__(self, name): 62 self._error_message() 63 64 @property 65 def created(self): 66 self._error_message() 67 68 69 def _changed_method(method): 70 @functools.wraps(method) 71 def wrapped(self, *args, **kwargs): 72 self.changed() 73 return method(self, *args, **kwargs) 74 return wrapped 75 76 77 @implementer(ISession) 78 class Session(dict): 79 80 _csrf_token_key = "_csrf_token" 81 _flash_key = "_flash_messages" 82 83 # A number of our methods need to be decorated so that they also call 84 # self.changed() 85 __delitem__ = _changed_method(dict.__delitem__) 86 __setitem__ = _changed_method(dict.__setitem__) 87 clear = _changed_method(dict.clear) 88 pop = _changed_method(dict.pop) 89 popitem = _changed_method(dict.popitem) 90 setdefault = _changed_method(dict.setdefault) 91 update = _changed_method(dict.update) 92 93 def __init__(self, data=None, session_id=None, new=True): 94 # Brand new sessions don't have any data, so we'll just create an empty 95 # dictionary for them. 96 if data is None: 97 data = {} 98 99 # Initialize our actual dictionary here. 100 super().__init__(data) 101 102 # We need to track the state of our Session. 
103 self._sid = session_id 104 self._changed = False 105 self.new = new 106 self.created = int(time.time()) 107 108 # We'll track all of the IDs that have been invalidated here 109 self.invalidated = set() 110 111 @property 112 def sid(self): 113 if self._sid is None: 114 self._sid = crypto.random_token() 115 return self._sid 116 117 def changed(self): 118 self._changed = True 119 120 def invalidate(self): 121 self.clear() 122 self.new = True 123 self.created = int(time.time()) 124 self._changed = False 125 126 # If the current session id isn't None we'll want to record it as one 127 # of the ones that have been invalidated. 128 if self._sid is not None: 129 self.invalidated.add(self._sid) 130 self._sid = None 131 132 def should_save(self): 133 return self._changed 134 135 # Flash Messages Methods 136 def _get_flash_queue_key(self, queue): 137 return ".".join(filter(None, [self._flash_key, queue])) 138 139 def flash(self, msg, queue="", allow_duplicate=True): 140 queue_key = self._get_flash_queue_key(queue) 141 142 # If we're not allowing duplicates check if this message is already 143 # in the queue, and if it is just return immediately. 144 if not allow_duplicate and msg in self[queue_key]: 145 return 146 147 self.setdefault(queue_key, []).append(msg) 148 149 def peek_flash(self, queue=""): 150 return self.get(self._get_flash_queue_key(queue), []) 151 152 def pop_flash(self, queue=""): 153 queue_key = self._get_flash_queue_key(queue) 154 messages = self.get(queue_key, []) 155 self.pop(queue_key, None) 156 return messages 157 158 # CSRF Methods 159 def new_csrf_token(self): 160 self[self._csrf_token_key] = crypto.random_token() 161 return self[self._csrf_token_key] 162 163 def get_csrf_token(self): 164 token = self.get(self._csrf_token_key) 165 if token is None: 166 token = self.new_csrf_token() 167 return token 168 169 170 @implementer(ISessionFactory) 171 class SessionFactory: 172 173 cookie_name = "session_id" 174 max_age = 12 * 60 * 60 # 12 hours 175 176 def __init__(self, secret, url): 177 self.redis = redis.StrictRedis.from_url(url) 178 self.signer = crypto.TimestampSigner(secret, salt="session") 179 180 def __call__(self, request): 181 return self._process_request(request) 182 183 def _redis_key(self, session_id): 184 return "warehouse/session/data/{}".format(session_id) 185 186 def _process_request(self, request): 187 # Register a callback with the request so we can save the session once 188 # it's finished. 189 request.add_response_callback(self._process_response) 190 191 # Load our session ID from the request. 192 session_id = request.cookies.get(self.cookie_name) 193 194 # If we do not have a session ID then we'll just use a new empty 195 # session. 196 if session_id is None: 197 return Session() 198 199 # Check to make sure we have a valid session id 200 try: 201 session_id = self.signer.unsign(session_id, max_age=self.max_age) 202 session_id = session_id.decode("utf8") 203 except crypto.BadSignature: 204 return Session() 205 206 # Fetch the serialized data from redis 207 bdata = self.redis.get(self._redis_key(session_id)) 208 209 # If the session didn't exist in redis, we'll give the user a new 210 # session. 
211 if bdata is None: 212 return Session() 213 214 # De-serialize our session data 215 try: 216 data = msgpack.unpackb(bdata, encoding="utf8", use_list=True) 217 except (msgpack.exceptions.UnpackException, 218 msgpack.exceptions.ExtraData): 219 # If the session data was invalid we'll give the user a new session 220 return Session() 221 222 # If we were able to load existing session data, load it into a 223 # Session class 224 session = Session(data, session_id, False) 225 226 return session 227 228 def _process_response(self, request, response): 229 # If the request has an InvalidSession, then the view can't have 230 # accessed the session, and we can just skip all of this anyways. 231 if isinstance(request.session, InvalidSession): 232 return 233 234 # Check to see if the session has been marked to be deleted, if it has 235 # benn then we'll delete it, and tell our response to delete the 236 # session cookie as well. 237 if request.session.invalidated: 238 for session_id in request.session.invalidated: 239 self.redis.delete(self._redis_key(session_id)) 240 241 if not request.session.should_save(): 242 response.delete_cookie(self.cookie_name) 243 244 # Check to see if the session has been marked to be saved, generally 245 # this means that the session data has been modified and thus we need 246 # to store the new data. 247 if request.session.should_save(): 248 # Save our session in Redis 249 self.redis.setex( 250 self._redis_key(request.session.sid), 251 self.max_age, 252 msgpack.packb( 253 request.session, 254 encoding="utf8", 255 use_bin_type=True, 256 ), 257 ) 258 259 # Send our session cookie to the client 260 response.set_cookie( 261 self.cookie_name, 262 self.signer.sign(request.session.sid.encode("utf8")), 263 max_age=self.max_age, 264 httponly=True, 265 secure=request.scheme == "https", 266 ) 267 268 269 def session_view(view, info): 270 if info.options.get("uses_session"): 271 # If we're using the session, then we'll just return the original view 272 # with a small wrapper around it to ensure that it has a Vary: Cookie 273 # header. 274 return add_vary("Cookie")(view) 275 elif info.exception_only: 276 return view 277 else: 278 # If we're not using the session on this view, then we'll wrap the view 279 # with a wrapper that just ensures that the session cannot be used. 280 @functools.wraps(view) 281 def wrapped(context, request): 282 # This whole method is a little bit of an odd duck, we want to make 283 # sure that we don't actually *access* request.session, because 284 # doing so triggers the machinery to create a new session. So 285 # instead we will dig into the request object __dict__ to 286 # effectively do the same thing, jsut without triggering an access 287 # on request.session. 288 289 # Save the original session so that we can restore it once the 290 # inner views have been called. 291 nothing = object() 292 original_session = request.__dict__.get("session", nothing) 293 294 # This particular view hasn't been set to allow access to the 295 # session, so we'll just assign an InvalidSession to 296 # request.session 297 request.__dict__["session"] = InvalidSession() 298 299 try: 300 # Invoke the real view 301 return view(context, request) 302 finally: 303 # Restore the original session so that things like 304 # pyramid_debugtoolbar can access it. 
305 if original_session is nothing: 306 del request.__dict__["session"] 307 else: 308 request.__dict__["session"] = original_session 309 310 return wrapped 311 312 313 session_view.options = {"uses_session"} 314 315 316 def includeme(config): 317 config.set_session_factory( 318 SessionFactory( 319 config.registry.settings["sessions.secret"], 320 config.registry.settings["sessions.url"], 321 ), 322 ) 323 324 config.add_view_deriver( 325 session_view, 326 over="csrf_view", 327 under=viewderivers.INGRESS, 328 ) 329 [end of warehouse/sessions.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/warehouse/sessions.py b/warehouse/sessions.py --- a/warehouse/sessions.py +++ b/warehouse/sessions.py @@ -263,6 +263,7 @@ max_age=self.max_age, httponly=True, secure=request.scheme == "https", + samesite=b"lax" )
{"golden_diff": "diff --git a/warehouse/sessions.py b/warehouse/sessions.py\n--- a/warehouse/sessions.py\n+++ b/warehouse/sessions.py\n@@ -263,6 +263,7 @@\n max_age=self.max_age,\n httponly=True,\n secure=request.scheme == \"https\",\n+ samesite=b\"lax\"\n )\n", "issue": "Set samesite=lax on session cookies\nThis is a strong defense-in-depth mechanism for protecting against CSRF. It's currently only respected by Chrome, but Firefox will add it as well.\n", "before_files": [{"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport functools\nimport time\n\nimport msgpack\nimport msgpack.exceptions\nimport redis\n\nfrom pyramid import viewderivers\nfrom pyramid.interfaces import ISession, ISessionFactory\nfrom zope.interface import implementer\n\nfrom warehouse.cache.http import add_vary\nfrom warehouse.utils import crypto\n\n\ndef _invalid_method(method):\n @functools.wraps(method)\n def wrapped(self, *args, **kwargs):\n self._error_message()\n return wrapped\n\n\n@implementer(ISession)\nclass InvalidSession(dict):\n\n __contains__ = _invalid_method(dict.__contains__)\n __delitem__ = _invalid_method(dict.__delitem__)\n __getitem__ = _invalid_method(dict.__getitem__)\n __iter__ = _invalid_method(dict.__iter__)\n __len__ = _invalid_method(dict.__len__)\n __setitem__ = _invalid_method(dict.__setitem__)\n clear = _invalid_method(dict.clear)\n copy = _invalid_method(dict.copy)\n fromkeys = _invalid_method(dict.fromkeys)\n get = _invalid_method(dict.get)\n items = _invalid_method(dict.items)\n keys = _invalid_method(dict.keys)\n pop = _invalid_method(dict.pop)\n popitem = _invalid_method(dict.popitem)\n setdefault = _invalid_method(dict.setdefault)\n update = _invalid_method(dict.update)\n values = _invalid_method(dict.values)\n\n def _error_message(self):\n raise RuntimeError(\n \"Cannot use request.session in a view without uses_session=True.\"\n )\n\n def __getattr__(self, name):\n self._error_message()\n\n @property\n def created(self):\n self._error_message()\n\n\ndef _changed_method(method):\n @functools.wraps(method)\n def wrapped(self, *args, **kwargs):\n self.changed()\n return method(self, *args, **kwargs)\n return wrapped\n\n\n@implementer(ISession)\nclass Session(dict):\n\n _csrf_token_key = \"_csrf_token\"\n _flash_key = \"_flash_messages\"\n\n # A number of our methods need to be decorated so that they also call\n # self.changed()\n __delitem__ = _changed_method(dict.__delitem__)\n __setitem__ = _changed_method(dict.__setitem__)\n clear = _changed_method(dict.clear)\n pop = _changed_method(dict.pop)\n popitem = _changed_method(dict.popitem)\n setdefault = _changed_method(dict.setdefault)\n update = _changed_method(dict.update)\n\n def __init__(self, data=None, session_id=None, new=True):\n # Brand new sessions don't have any data, so we'll just create an empty\n # dictionary for them.\n if data is None:\n data = {}\n\n # Initialize our actual dictionary here.\n super().__init__(data)\n\n # We need to track the state of our Session.\n self._sid = session_id\n self._changed = 
False\n self.new = new\n self.created = int(time.time())\n\n # We'll track all of the IDs that have been invalidated here\n self.invalidated = set()\n\n @property\n def sid(self):\n if self._sid is None:\n self._sid = crypto.random_token()\n return self._sid\n\n def changed(self):\n self._changed = True\n\n def invalidate(self):\n self.clear()\n self.new = True\n self.created = int(time.time())\n self._changed = False\n\n # If the current session id isn't None we'll want to record it as one\n # of the ones that have been invalidated.\n if self._sid is not None:\n self.invalidated.add(self._sid)\n self._sid = None\n\n def should_save(self):\n return self._changed\n\n # Flash Messages Methods\n def _get_flash_queue_key(self, queue):\n return \".\".join(filter(None, [self._flash_key, queue]))\n\n def flash(self, msg, queue=\"\", allow_duplicate=True):\n queue_key = self._get_flash_queue_key(queue)\n\n # If we're not allowing duplicates check if this message is already\n # in the queue, and if it is just return immediately.\n if not allow_duplicate and msg in self[queue_key]:\n return\n\n self.setdefault(queue_key, []).append(msg)\n\n def peek_flash(self, queue=\"\"):\n return self.get(self._get_flash_queue_key(queue), [])\n\n def pop_flash(self, queue=\"\"):\n queue_key = self._get_flash_queue_key(queue)\n messages = self.get(queue_key, [])\n self.pop(queue_key, None)\n return messages\n\n # CSRF Methods\n def new_csrf_token(self):\n self[self._csrf_token_key] = crypto.random_token()\n return self[self._csrf_token_key]\n\n def get_csrf_token(self):\n token = self.get(self._csrf_token_key)\n if token is None:\n token = self.new_csrf_token()\n return token\n\n\n@implementer(ISessionFactory)\nclass SessionFactory:\n\n cookie_name = \"session_id\"\n max_age = 12 * 60 * 60 # 12 hours\n\n def __init__(self, secret, url):\n self.redis = redis.StrictRedis.from_url(url)\n self.signer = crypto.TimestampSigner(secret, salt=\"session\")\n\n def __call__(self, request):\n return self._process_request(request)\n\n def _redis_key(self, session_id):\n return \"warehouse/session/data/{}\".format(session_id)\n\n def _process_request(self, request):\n # Register a callback with the request so we can save the session once\n # it's finished.\n request.add_response_callback(self._process_response)\n\n # Load our session ID from the request.\n session_id = request.cookies.get(self.cookie_name)\n\n # If we do not have a session ID then we'll just use a new empty\n # session.\n if session_id is None:\n return Session()\n\n # Check to make sure we have a valid session id\n try:\n session_id = self.signer.unsign(session_id, max_age=self.max_age)\n session_id = session_id.decode(\"utf8\")\n except crypto.BadSignature:\n return Session()\n\n # Fetch the serialized data from redis\n bdata = self.redis.get(self._redis_key(session_id))\n\n # If the session didn't exist in redis, we'll give the user a new\n # session.\n if bdata is None:\n return Session()\n\n # De-serialize our session data\n try:\n data = msgpack.unpackb(bdata, encoding=\"utf8\", use_list=True)\n except (msgpack.exceptions.UnpackException,\n msgpack.exceptions.ExtraData):\n # If the session data was invalid we'll give the user a new session\n return Session()\n\n # If we were able to load existing session data, load it into a\n # Session class\n session = Session(data, session_id, False)\n\n return session\n\n def _process_response(self, request, response):\n # If the request has an InvalidSession, then the view can't have\n # accessed the session, and we 
can just skip all of this anyways.\n if isinstance(request.session, InvalidSession):\n return\n\n # Check to see if the session has been marked to be deleted, if it has\n # benn then we'll delete it, and tell our response to delete the\n # session cookie as well.\n if request.session.invalidated:\n for session_id in request.session.invalidated:\n self.redis.delete(self._redis_key(session_id))\n\n if not request.session.should_save():\n response.delete_cookie(self.cookie_name)\n\n # Check to see if the session has been marked to be saved, generally\n # this means that the session data has been modified and thus we need\n # to store the new data.\n if request.session.should_save():\n # Save our session in Redis\n self.redis.setex(\n self._redis_key(request.session.sid),\n self.max_age,\n msgpack.packb(\n request.session,\n encoding=\"utf8\",\n use_bin_type=True,\n ),\n )\n\n # Send our session cookie to the client\n response.set_cookie(\n self.cookie_name,\n self.signer.sign(request.session.sid.encode(\"utf8\")),\n max_age=self.max_age,\n httponly=True,\n secure=request.scheme == \"https\",\n )\n\n\ndef session_view(view, info):\n if info.options.get(\"uses_session\"):\n # If we're using the session, then we'll just return the original view\n # with a small wrapper around it to ensure that it has a Vary: Cookie\n # header.\n return add_vary(\"Cookie\")(view)\n elif info.exception_only:\n return view\n else:\n # If we're not using the session on this view, then we'll wrap the view\n # with a wrapper that just ensures that the session cannot be used.\n @functools.wraps(view)\n def wrapped(context, request):\n # This whole method is a little bit of an odd duck, we want to make\n # sure that we don't actually *access* request.session, because\n # doing so triggers the machinery to create a new session. So\n # instead we will dig into the request object __dict__ to\n # effectively do the same thing, jsut without triggering an access\n # on request.session.\n\n # Save the original session so that we can restore it once the\n # inner views have been called.\n nothing = object()\n original_session = request.__dict__.get(\"session\", nothing)\n\n # This particular view hasn't been set to allow access to the\n # session, so we'll just assign an InvalidSession to\n # request.session\n request.__dict__[\"session\"] = InvalidSession()\n\n try:\n # Invoke the real view\n return view(context, request)\n finally:\n # Restore the original session so that things like\n # pyramid_debugtoolbar can access it.\n if original_session is nothing:\n del request.__dict__[\"session\"]\n else:\n request.__dict__[\"session\"] = original_session\n\n return wrapped\n\n\nsession_view.options = {\"uses_session\"}\n\n\ndef includeme(config):\n config.set_session_factory(\n SessionFactory(\n config.registry.settings[\"sessions.secret\"],\n config.registry.settings[\"sessions.url\"],\n ),\n )\n\n config.add_view_deriver(\n session_view,\n over=\"csrf_view\",\n under=viewderivers.INGRESS,\n )\n", "path": "warehouse/sessions.py"}]}
3,852
79
gh_patches_debug_6652
rasdani/github-patches
git_diff
RedHatInsights__insights-core-1593
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> DeprecationWarning from qemu_xml A bunch of below warning are reported in the python3 test of insights-plugins: ~~~ /Users/liuxc/work/insights/insights-core/insights/parsers/qemu_xml.py:149: DeprecationWarning: This method will be removed in future versions. Use ' list(elem)' or iteration over elem instead. /Users/liuxc/work/insights/insights-core/insights/parsers/qemu_xml.py:149: DeprecationWarning: This method will be removed in future versions. Use ' list(elem)' or iteration over elem instead. ~~~ </issue> <code> [start of insights/parsers/qemu_xml.py] 1 """ 2 QemuXML file ``/etc/libvirt/qemu/*.xml`` 3 ---------------------------------------- 4 """ 5 from .. import XMLParser, parser 6 from insights.specs import Specs 7 8 9 @parser(Specs.qemu_xml) 10 class QemuXML(XMLParser): 11 """This class parses xml files under ``/etc/libvirt/qemu/`` using 12 ``XMLParser`` base parser. 13 14 Sample file:: 15 16 <!-- 17 WARNING: THIS IS AN AUTO-GENERATED FILE. CHANGES TO IT ARE LIKELY TO BE 18 OVERWRITTEN AND LOST. Changes to this xml configuration should be made using: 19 virsh edit 05-s00c06h0 20 or other application using the libvirt API. 21 --> 22 23 <domain type='kvm'> 24 <name>05-s00c06h0</name> 25 <uuid>02cf0bba-2bd6-11e7-8337-e4115b9a50d0</uuid> 26 <memory unit='KiB'>12582912</memory> 27 <currentMemory unit='KiB'>12582912</currentMemory> 28 <vcpu placement='static'>4</vcpu> 29 <cputune> 30 <vcpupin vcpu='0' cpuset='1'/> 31 <vcpupin vcpu='1' cpuset='2'/> 32 <vcpupin vcpu='2' cpuset='3'/> 33 <vcpupin vcpu='3' cpuset='4'/> 34 <emulatorpin cpuset='1-4'/> 35 </cputune> 36 <numatune> 37 <memory mode='strict' nodeset='0-1'/> 38 <memnode cellid='0' mode='strict' nodeset='0'/> 39 <memnode cellid='1' mode='strict' nodeset='1'/> 40 </numatune> 41 <os> 42 <type arch='x86_64' machine='pc-i440fx-rhel7.0.0'>hvm</type> 43 <boot dev='hd'/> 44 <boot dev='network'/> 45 <bootmenu enable='yes' timeout='1000'/> 46 <bios useserial='yes' rebootTimeout='0'/> 47 </os> 48 <features> 49 <acpi/> 50 <apic/> 51 <pae/> 52 </features> 53 <cpu> 54 <numa> 55 <cell id='0' cpus='0-1' memory='6291456' unit='KiB'/> 56 <cell id='1' cpus='2-3' memory='6291456' unit='KiB'/> 57 </numa> 58 </cpu> 59 <clock offset='utc'/> 60 <on_poweroff>destroy</on_poweroff> 61 <on_reboot>restart</on_reboot> 62 <on_crash>restart</on_crash> 63 <devices> 64 <emulator>/usr/libexec/qemu-kvm</emulator> 65 <disk type='file' device='disk'> 66 <driver name='qemu' type='raw' cache='none' io='threads'/> 67 <source file='/var/lib/libvirt/images/05-s00c06h0_1.img'/> 68 <target dev='vda' bus='virtio'/> 69 <address type='pci' domain='0x0000' bus='0x00' slot='0x05' function='0x0'/> 70 </disk> 71 <controller type='usb' index='0'> 72 <address type='pci' domain='0x0000' bus='0x00' slot='0x01' function='0x2'/> 73 </controller> 74 <controller type='pci' index='0' model='pci-root'/> 75 <controller type='virtio-serial' index='0'> 76 <address type='pci' domain='0x0000' bus='0x00' slot='0x03' function='0x0'/> 77 </controller> 78 <interface type='hostdev' managed='yes'> 79 <mac address='b2:59:73:15:00:00'/> 80 <source> 81 <address type='pci' domain='0x0000' bus='0x04' slot='0x10' function='0x0'/> 82 </source> 83 <rom bar='on' file='/opt/vcp/share/ipxe/808610ed.rom'/> 84 <address type='pci' domain='0x0000' bus='0x00' slot='0x04' function='0x0'/> 85 </interface> 86 <interface type='hostdev' managed='yes'> 87 <mac address='b2:59:73:15:00:01'/> 88 <source> 89 <address 
type='pci' domain='0x0000' bus='0x04' slot='0x10' function='0x1'/> 90 </source> 91 <rom bar='on' file='/opt/vcp/share/ipxe/808610ed.rom'/> 92 <address type='pci' domain='0x0000' bus='0x00' slot='0x08' function='0x0'/> 93 </interface> 94 <serial type='pty'> 95 <target port='0'/> 96 </serial> 97 <console type='pty'> 98 <target type='serial' port='0'/> 99 </console> 100 <channel type='pipe'> 101 <source path='/var/lib/libvirt/qemu/channels/FROM-05-s00c06h0'/> 102 <target type='virtio' name='virtio2host'/> 103 <address type='virtio-serial' controller='0' bus='0' port='1'/> 104 </channel> 105 <channel type='pipe'> 106 <source path='/var/lib/libvirt/qemu/channels/HGC-05-s00c06h0'/> 107 <target type='virtio' name='virtio_host_guest_check'/> 108 <address type='virtio-serial' controller='0' bus='0' port='2'/> 109 </channel> 110 <input type='mouse' bus='ps2'/> 111 <input type='keyboard' bus='ps2'/> 112 <graphics type='vnc' port='-1' autoport='yes'> 113 <listen type='address'/> 114 </graphics> 115 <video> 116 <model type='cirrus' vram='16384' heads='1' primary='yes'/> 117 <address type='pci' domain='0x0000' bus='0x00' slot='0x02' function='0x0'/> 118 </video> 119 <watchdog model='i6300esb' action='reset'> 120 <address type='pci' domain='0x0000' bus='0x00' slot='0x07' function='0x0'/> 121 </watchdog> 122 <memballoon model='virtio'> 123 <address type='pci' domain='0x0000' bus='0x00' slot='0x06' function='0x0'/> 124 </memballoon> 125 </devices> 126 </domain> 127 128 Examples: 129 >>> xml_numa.file_name == 'vm.xml' 130 True 131 >>> xml_numa.vm_name == '05-s00c06h0' 132 True 133 >>> memnode = xml_numa.get_elements('./numatune/memnode', None) 134 >>> len(memnode[0].items()) == 3 135 True 136 >>> len(memnode[1].items()) == 3 137 True 138 >>> memnode[0].get('cellid') == '0' 139 True 140 >>> memnode[1].get('mode') == 'strict' 141 True 142 """ 143 def parse_dom(self): 144 if self.dom is None: 145 return 146 else: 147 domain = {} 148 for child in self.dom: 149 if not child.getchildren(): 150 domain[child.tag] = child.text 151 else: 152 domain[child.tag] = [c.items() for c in child.getchildren()] 153 154 return domain 155 156 @property 157 def vm_name(self): 158 return self.data.get('name', None) 159 [end of insights/parsers/qemu_xml.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/insights/parsers/qemu_xml.py b/insights/parsers/qemu_xml.py --- a/insights/parsers/qemu_xml.py +++ b/insights/parsers/qemu_xml.py @@ -146,10 +146,10 @@ else: domain = {} for child in self.dom: - if not child.getchildren(): + if len(child) == 0: domain[child.tag] = child.text else: - domain[child.tag] = [c.items() for c in child.getchildren()] + domain[child.tag] = [c.items() for c in child] return domain
{"golden_diff": "diff --git a/insights/parsers/qemu_xml.py b/insights/parsers/qemu_xml.py\n--- a/insights/parsers/qemu_xml.py\n+++ b/insights/parsers/qemu_xml.py\n@@ -146,10 +146,10 @@\n else:\n domain = {}\n for child in self.dom:\n- if not child.getchildren():\n+ if len(child) == 0:\n domain[child.tag] = child.text\n else:\n- domain[child.tag] = [c.items() for c in child.getchildren()]\n+ domain[child.tag] = [c.items() for c in child]\n \n return domain\n", "issue": "DeprecationWarning from qemu_xml\nA bunch of below warning are reported in the python3 test of insights-plugins:\r\n\r\n~~~ \r\n\r\n /Users/liuxc/work/insights/insights-core/insights/parsers/qemu_xml.py:149: DeprecationWarning: This method will be removed in future versions. Use '\r\nlist(elem)' or iteration over elem instead.\r\n /Users/liuxc/work/insights/insights-core/insights/parsers/qemu_xml.py:149: DeprecationWarning: This method will be removed in future versions. Use '\r\nlist(elem)' or iteration over elem instead.\r\n~~~\n", "before_files": [{"content": "\"\"\"\nQemuXML file ``/etc/libvirt/qemu/*.xml``\n----------------------------------------\n\"\"\"\nfrom .. import XMLParser, parser\nfrom insights.specs import Specs\n\n\n@parser(Specs.qemu_xml)\nclass QemuXML(XMLParser):\n \"\"\"This class parses xml files under ``/etc/libvirt/qemu/`` using\n ``XMLParser`` base parser.\n\n Sample file::\n\n <!--\n WARNING: THIS IS AN AUTO-GENERATED FILE. CHANGES TO IT ARE LIKELY TO BE\n OVERWRITTEN AND LOST. Changes to this xml configuration should be made using:\n virsh edit 05-s00c06h0\n or other application using the libvirt API.\n -->\n\n <domain type='kvm'>\n <name>05-s00c06h0</name>\n <uuid>02cf0bba-2bd6-11e7-8337-e4115b9a50d0</uuid>\n <memory unit='KiB'>12582912</memory>\n <currentMemory unit='KiB'>12582912</currentMemory>\n <vcpu placement='static'>4</vcpu>\n <cputune>\n <vcpupin vcpu='0' cpuset='1'/>\n <vcpupin vcpu='1' cpuset='2'/>\n <vcpupin vcpu='2' cpuset='3'/>\n <vcpupin vcpu='3' cpuset='4'/>\n <emulatorpin cpuset='1-4'/>\n </cputune>\n <numatune>\n <memory mode='strict' nodeset='0-1'/>\n <memnode cellid='0' mode='strict' nodeset='0'/>\n <memnode cellid='1' mode='strict' nodeset='1'/>\n </numatune>\n <os>\n <type arch='x86_64' machine='pc-i440fx-rhel7.0.0'>hvm</type>\n <boot dev='hd'/>\n <boot dev='network'/>\n <bootmenu enable='yes' timeout='1000'/>\n <bios useserial='yes' rebootTimeout='0'/>\n </os>\n <features>\n <acpi/>\n <apic/>\n <pae/>\n </features>\n <cpu>\n <numa>\n <cell id='0' cpus='0-1' memory='6291456' unit='KiB'/>\n <cell id='1' cpus='2-3' memory='6291456' unit='KiB'/>\n </numa>\n </cpu>\n <clock offset='utc'/>\n <on_poweroff>destroy</on_poweroff>\n <on_reboot>restart</on_reboot>\n <on_crash>restart</on_crash>\n <devices>\n <emulator>/usr/libexec/qemu-kvm</emulator>\n <disk type='file' device='disk'>\n <driver name='qemu' type='raw' cache='none' io='threads'/>\n <source file='/var/lib/libvirt/images/05-s00c06h0_1.img'/>\n <target dev='vda' bus='virtio'/>\n <address type='pci' domain='0x0000' bus='0x00' slot='0x05' function='0x0'/>\n </disk>\n <controller type='usb' index='0'>\n <address type='pci' domain='0x0000' bus='0x00' slot='0x01' function='0x2'/>\n </controller>\n <controller type='pci' index='0' model='pci-root'/>\n <controller type='virtio-serial' index='0'>\n <address type='pci' domain='0x0000' bus='0x00' slot='0x03' function='0x0'/>\n </controller>\n <interface type='hostdev' managed='yes'>\n <mac address='b2:59:73:15:00:00'/>\n <source>\n <address type='pci' domain='0x0000' bus='0x04' 
slot='0x10' function='0x0'/>\n </source>\n <rom bar='on' file='/opt/vcp/share/ipxe/808610ed.rom'/>\n <address type='pci' domain='0x0000' bus='0x00' slot='0x04' function='0x0'/>\n </interface>\n <interface type='hostdev' managed='yes'>\n <mac address='b2:59:73:15:00:01'/>\n <source>\n <address type='pci' domain='0x0000' bus='0x04' slot='0x10' function='0x1'/>\n </source>\n <rom bar='on' file='/opt/vcp/share/ipxe/808610ed.rom'/>\n <address type='pci' domain='0x0000' bus='0x00' slot='0x08' function='0x0'/>\n </interface>\n <serial type='pty'>\n <target port='0'/>\n </serial>\n <console type='pty'>\n <target type='serial' port='0'/>\n </console>\n <channel type='pipe'>\n <source path='/var/lib/libvirt/qemu/channels/FROM-05-s00c06h0'/>\n <target type='virtio' name='virtio2host'/>\n <address type='virtio-serial' controller='0' bus='0' port='1'/>\n </channel>\n <channel type='pipe'>\n <source path='/var/lib/libvirt/qemu/channels/HGC-05-s00c06h0'/>\n <target type='virtio' name='virtio_host_guest_check'/>\n <address type='virtio-serial' controller='0' bus='0' port='2'/>\n </channel>\n <input type='mouse' bus='ps2'/>\n <input type='keyboard' bus='ps2'/>\n <graphics type='vnc' port='-1' autoport='yes'>\n <listen type='address'/>\n </graphics>\n <video>\n <model type='cirrus' vram='16384' heads='1' primary='yes'/>\n <address type='pci' domain='0x0000' bus='0x00' slot='0x02' function='0x0'/>\n </video>\n <watchdog model='i6300esb' action='reset'>\n <address type='pci' domain='0x0000' bus='0x00' slot='0x07' function='0x0'/>\n </watchdog>\n <memballoon model='virtio'>\n <address type='pci' domain='0x0000' bus='0x00' slot='0x06' function='0x0'/>\n </memballoon>\n </devices>\n </domain>\n\n Examples:\n >>> xml_numa.file_name == 'vm.xml'\n True\n >>> xml_numa.vm_name == '05-s00c06h0'\n True\n >>> memnode = xml_numa.get_elements('./numatune/memnode', None)\n >>> len(memnode[0].items()) == 3\n True\n >>> len(memnode[1].items()) == 3\n True\n >>> memnode[0].get('cellid') == '0'\n True\n >>> memnode[1].get('mode') == 'strict'\n True\n \"\"\"\n def parse_dom(self):\n if self.dom is None:\n return\n else:\n domain = {}\n for child in self.dom:\n if not child.getchildren():\n domain[child.tag] = child.text\n else:\n domain[child.tag] = [c.items() for c in child.getchildren()]\n\n return domain\n\n @property\n def vm_name(self):\n return self.data.get('name', None)\n", "path": "insights/parsers/qemu_xml.py"}]}
2,894
147
gh_patches_debug_19409
rasdani/github-patches
git_diff
certbot__certbot-3504
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> "No installers present" message is confusing. ``` No installers seem to be present and working on your system; fix that or try running certbot with the "certonly" command ``` This is one of the most common messages, received when running certbot on an Nginx installation, but it is confusing to a new user. What's an installer? Is it my mistake or certbot's that there are no installers? What does "certonly" do? Instead how about: ``` Certbot doesn't know how to automatically configure the web server on this system. However, it can still get a certificate for you. Please run "certbot[-auto] certonly" to do so. You'll need to manually configure your web server to use the resulting certificate. ``` </issue> <code> [start of certbot/plugins/selection.py] 1 """Decide which plugins to use for authentication & installation""" 2 from __future__ import print_function 3 4 import os 5 import logging 6 7 import six 8 import zope.component 9 10 from certbot import errors 11 from certbot import interfaces 12 13 from certbot.display import util as display_util 14 15 logger = logging.getLogger(__name__) 16 z_util = zope.component.getUtility 17 18 def pick_configurator( 19 config, default, plugins, 20 question="How would you like to authenticate and install " 21 "certificates?"): 22 """Pick configurator plugin.""" 23 return pick_plugin( 24 config, default, plugins, question, 25 (interfaces.IAuthenticator, interfaces.IInstaller)) 26 27 28 def pick_installer(config, default, plugins, 29 question="How would you like to install certificates?"): 30 """Pick installer plugin.""" 31 return pick_plugin( 32 config, default, plugins, question, (interfaces.IInstaller,)) 33 34 35 def pick_authenticator( 36 config, default, plugins, question="How would you " 37 "like to authenticate with the ACME CA?"): 38 """Pick authentication plugin.""" 39 return pick_plugin( 40 config, default, plugins, question, (interfaces.IAuthenticator,)) 41 42 43 def pick_plugin(config, default, plugins, question, ifaces): 44 """Pick plugin. 45 46 :param certbot.interfaces.IConfig: Configuration 47 :param str default: Plugin name supplied by user or ``None``. 48 :param certbot.plugins.disco.PluginsRegistry plugins: 49 All plugins registered as entry points. 50 :param str question: Question to be presented to the user in case 51 multiple candidates are found. 52 :param list ifaces: Interfaces that plugins must provide. 53 54 :returns: Initialized plugin. 55 :rtype: IPlugin 56 57 """ 58 if default is not None: 59 # throw more UX-friendly error if default not in plugins 60 filtered = plugins.filter(lambda p_ep: p_ep.name == default) 61 else: 62 if config.noninteractive_mode: 63 # it's really bad to auto-select the single available plugin in 64 # non-interactive mode, because an update could later add a second 65 # available plugin 66 raise errors.MissingCommandlineFlag( 67 "Missing command line flags. For non-interactive execution, " 68 "you will need to specify a plugin on the command line. 
Run " 69 "with '--help plugins' to see a list of options, and see " 70 "https://eff.org/letsencrypt-plugins for more detail on what " 71 "the plugins do and how to use them.") 72 73 filtered = plugins.visible().ifaces(ifaces) 74 75 filtered.init(config) 76 verified = filtered.verify(ifaces) 77 verified.prepare() 78 prepared = verified.available() 79 80 if len(prepared) > 1: 81 logger.debug("Multiple candidate plugins: %s", prepared) 82 plugin_ep = choose_plugin(list(six.itervalues(prepared)), question) 83 if plugin_ep is None: 84 return None 85 else: 86 return plugin_ep.init() 87 elif len(prepared) == 1: 88 plugin_ep = list(prepared.values())[0] 89 logger.debug("Single candidate plugin: %s", plugin_ep) 90 if plugin_ep.misconfigured: 91 return None 92 return plugin_ep.init() 93 else: 94 logger.debug("No candidate plugin") 95 return None 96 97 98 def choose_plugin(prepared, question): 99 """Allow the user to choose their plugin. 100 101 :param list prepared: List of `~.PluginEntryPoint`. 102 :param str question: Question to be presented to the user. 103 104 :returns: Plugin entry point chosen by the user. 105 :rtype: `~.PluginEntryPoint` 106 107 """ 108 opts = [plugin_ep.description_with_name + 109 (" [Misconfigured]" if plugin_ep.misconfigured else "") 110 for plugin_ep in prepared] 111 112 while True: 113 disp = z_util(interfaces.IDisplay) 114 code, index = disp.menu(question, opts, help_label="More Info") 115 116 if code == display_util.OK: 117 plugin_ep = prepared[index] 118 if plugin_ep.misconfigured: 119 z_util(interfaces.IDisplay).notification( 120 "The selected plugin encountered an error while parsing " 121 "your server configuration and cannot be used. The error " 122 "was:\n\n{0}".format(plugin_ep.prepare()), 123 height=display_util.HEIGHT, pause=False) 124 else: 125 return plugin_ep 126 elif code == display_util.HELP: 127 if prepared[index].misconfigured: 128 msg = "Reported Error: %s" % prepared[index].prepare() 129 else: 130 msg = prepared[index].init().more_info() 131 z_util(interfaces.IDisplay).notification( 132 msg, height=display_util.HEIGHT) 133 else: 134 return None 135 136 noninstaller_plugins = ["webroot", "manual", "standalone"] 137 138 def record_chosen_plugins(config, plugins, auth, inst): 139 "Update the config entries to reflect the plugins we actually selected." 140 cn = config.namespace 141 cn.authenticator = plugins.find_init(auth).name if auth else "None" 142 cn.installer = plugins.find_init(inst).name if inst else "None" 143 144 145 def choose_configurator_plugins(config, plugins, verb): 146 """ 147 Figure out which configurator we're going to use, modifies 148 config.authenticator and config.installer strings to reflect that choice if 149 necessary. 150 151 :raises errors.PluginSelectionError if there was a problem 152 153 :returns: (an `IAuthenticator` or None, an `IInstaller` or None) 154 :rtype: tuple 155 """ 156 157 req_auth, req_inst = cli_plugin_requests(config) 158 159 # Which plugins do we need? 160 if verb == "run": 161 need_inst = need_auth = True 162 from certbot.cli import cli_command 163 if req_auth in noninstaller_plugins and not req_inst: 164 msg = ('With the {0} plugin, you probably want to use the "certonly" command, eg:{1}' 165 '{1} {2} certonly --{0}{1}{1}' 166 '(Alternatively, add a --installer flag. 
See https://eff.org/letsencrypt-plugins' 167 '{1} and "--help plugins" for more information.)'.format( 168 req_auth, os.linesep, cli_command)) 169 170 raise errors.MissingCommandlineFlag(msg) 171 else: 172 need_inst = need_auth = False 173 if verb == "certonly": 174 need_auth = True 175 if verb == "install": 176 need_inst = True 177 if config.authenticator: 178 logger.warning("Specifying an authenticator doesn't make sense in install mode") 179 180 # Try to meet the user's request and/or ask them to pick plugins 181 authenticator = installer = None 182 if verb == "run" and req_auth == req_inst: 183 # Unless the user has explicitly asked for different auth/install, 184 # only consider offering a single choice 185 authenticator = installer = pick_configurator(config, req_inst, plugins) 186 else: 187 if need_inst or req_inst: 188 installer = pick_installer(config, req_inst, plugins) 189 if need_auth: 190 authenticator = pick_authenticator(config, req_auth, plugins) 191 logger.debug("Selected authenticator %s and installer %s", authenticator, installer) 192 193 # Report on any failures 194 if need_inst and not installer: 195 diagnose_configurator_problem("installer", req_inst, plugins) 196 if need_auth and not authenticator: 197 diagnose_configurator_problem("authenticator", req_auth, plugins) 198 199 record_chosen_plugins(config, plugins, authenticator, installer) 200 return installer, authenticator 201 202 203 def set_configurator(previously, now): 204 """ 205 Setting configurators multiple ways is okay, as long as they all agree 206 :param str previously: previously identified request for the installer/authenticator 207 :param str requested: the request currently being processed 208 """ 209 if not now: 210 # we're not actually setting anything 211 return previously 212 if previously: 213 if previously != now: 214 msg = "Too many flags setting configurators/installers/authenticators {0} -> {1}" 215 raise errors.PluginSelectionError(msg.format(repr(previously), repr(now))) 216 return now 217 218 219 def cli_plugin_requests(config): 220 """ 221 Figure out which plugins the user requested with CLI and config options 222 223 :returns: (requested authenticator string or None, requested installer string or None) 224 :rtype: tuple 225 """ 226 req_inst = req_auth = config.configurator 227 req_inst = set_configurator(req_inst, config.installer) 228 req_auth = set_configurator(req_auth, config.authenticator) 229 if config.nginx: 230 req_inst = set_configurator(req_inst, "nginx") 231 req_auth = set_configurator(req_auth, "nginx") 232 if config.apache: 233 req_inst = set_configurator(req_inst, "apache") 234 req_auth = set_configurator(req_auth, "apache") 235 if config.standalone: 236 req_auth = set_configurator(req_auth, "standalone") 237 if config.webroot: 238 req_auth = set_configurator(req_auth, "webroot") 239 if config.manual: 240 req_auth = set_configurator(req_auth, "manual") 241 logger.debug("Requested authenticator %s and installer %s", req_auth, req_inst) 242 return req_auth, req_inst 243 244 245 def diagnose_configurator_problem(cfg_type, requested, plugins): 246 """ 247 Raise the most helpful error message about a plugin being unavailable 248 249 :param str cfg_type: either "installer" or "authenticator" 250 :param str requested: the plugin that was requested 251 :param .PluginsRegistry plugins: available plugins 252 253 :raises error.PluginSelectionError: if there was a problem 254 """ 255 256 if requested: 257 if requested not in plugins: 258 msg = "The requested {0} plugin does not appear 
to be installed".format(requested) 259 else: 260 msg = ("The {0} plugin is not working; there may be problems with " 261 "your existing configuration.\nThe error was: {1!r}" 262 .format(requested, plugins[requested].problem)) 263 elif cfg_type == "installer": 264 msg = ('No installer plugins seem to be present and working on your system; ' 265 'fix that or try running certbot with the "certonly" command to obtain' 266 ' a certificate you can install manually') 267 else: 268 msg = "{0} could not be determined or is not installed".format(cfg_type) 269 raise errors.PluginSelectionError(msg) 270 [end of certbot/plugins/selection.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/certbot/plugins/selection.py b/certbot/plugins/selection.py --- a/certbot/plugins/selection.py +++ b/certbot/plugins/selection.py @@ -261,9 +261,12 @@ "your existing configuration.\nThe error was: {1!r}" .format(requested, plugins[requested].problem)) elif cfg_type == "installer": - msg = ('No installer plugins seem to be present and working on your system; ' - 'fix that or try running certbot with the "certonly" command to obtain' - ' a certificate you can install manually') + from certbot.cli import cli_command + msg = ('Certbot doesn\'t know how to automatically configure the web ' + 'server on this system. However, it can still get a certificate for ' + 'you. Please run "{0} certonly" to do so. You\'ll need to ' + 'manually configure your web server to use the resulting ' + 'certificate.').format(cli_command) else: msg = "{0} could not be determined or is not installed".format(cfg_type) raise errors.PluginSelectionError(msg)
{"golden_diff": "diff --git a/certbot/plugins/selection.py b/certbot/plugins/selection.py\n--- a/certbot/plugins/selection.py\n+++ b/certbot/plugins/selection.py\n@@ -261,9 +261,12 @@\n \"your existing configuration.\\nThe error was: {1!r}\"\n .format(requested, plugins[requested].problem))\n elif cfg_type == \"installer\":\n- msg = ('No installer plugins seem to be present and working on your system; '\n- 'fix that or try running certbot with the \"certonly\" command to obtain'\n- ' a certificate you can install manually')\n+ from certbot.cli import cli_command\n+ msg = ('Certbot doesn\\'t know how to automatically configure the web '\n+ 'server on this system. However, it can still get a certificate for '\n+ 'you. Please run \"{0} certonly\" to do so. You\\'ll need to '\n+ 'manually configure your web server to use the resulting '\n+ 'certificate.').format(cli_command)\n else:\n msg = \"{0} could not be determined or is not installed\".format(cfg_type)\n raise errors.PluginSelectionError(msg)\n", "issue": "\"No installers present\" message is confusing.\n```\nNo installers seem to be present and working on your system; fix that or try running certbot with the \"certonly\" command\n```\n\nThis is one of the most common messages, received when running certbot on an Nginx installation, but it is confusing to a new user. What's an installer? Is it my mistake or certbot's that there are no installers? What does \"certonly\" do?\n\nInstead how about:\n\n```\nCertbot doesn't know how to automatically configure the web server on this system. However, it can still get a certificate for you. Please run \"certbot[-auto] certonly\" to do so. You'll need to manually configure your web server to use the resulting certificate.\n```\n\n", "before_files": [{"content": "\"\"\"Decide which plugins to use for authentication & installation\"\"\"\nfrom __future__ import print_function\n\nimport os\nimport logging\n\nimport six\nimport zope.component\n\nfrom certbot import errors\nfrom certbot import interfaces\n\nfrom certbot.display import util as display_util\n\nlogger = logging.getLogger(__name__)\nz_util = zope.component.getUtility\n\ndef pick_configurator(\n config, default, plugins,\n question=\"How would you like to authenticate and install \"\n \"certificates?\"):\n \"\"\"Pick configurator plugin.\"\"\"\n return pick_plugin(\n config, default, plugins, question,\n (interfaces.IAuthenticator, interfaces.IInstaller))\n\n\ndef pick_installer(config, default, plugins,\n question=\"How would you like to install certificates?\"):\n \"\"\"Pick installer plugin.\"\"\"\n return pick_plugin(\n config, default, plugins, question, (interfaces.IInstaller,))\n\n\ndef pick_authenticator(\n config, default, plugins, question=\"How would you \"\n \"like to authenticate with the ACME CA?\"):\n \"\"\"Pick authentication plugin.\"\"\"\n return pick_plugin(\n config, default, plugins, question, (interfaces.IAuthenticator,))\n\n\ndef pick_plugin(config, default, plugins, question, ifaces):\n \"\"\"Pick plugin.\n\n :param certbot.interfaces.IConfig: Configuration\n :param str default: Plugin name supplied by user or ``None``.\n :param certbot.plugins.disco.PluginsRegistry plugins:\n All plugins registered as entry points.\n :param str question: Question to be presented to the user in case\n multiple candidates are found.\n :param list ifaces: Interfaces that plugins must provide.\n\n :returns: Initialized plugin.\n :rtype: IPlugin\n\n \"\"\"\n if default is not None:\n # throw more UX-friendly error if default not in 
plugins\n filtered = plugins.filter(lambda p_ep: p_ep.name == default)\n else:\n if config.noninteractive_mode:\n # it's really bad to auto-select the single available plugin in\n # non-interactive mode, because an update could later add a second\n # available plugin\n raise errors.MissingCommandlineFlag(\n \"Missing command line flags. For non-interactive execution, \"\n \"you will need to specify a plugin on the command line. Run \"\n \"with '--help plugins' to see a list of options, and see \"\n \"https://eff.org/letsencrypt-plugins for more detail on what \"\n \"the plugins do and how to use them.\")\n\n filtered = plugins.visible().ifaces(ifaces)\n\n filtered.init(config)\n verified = filtered.verify(ifaces)\n verified.prepare()\n prepared = verified.available()\n\n if len(prepared) > 1:\n logger.debug(\"Multiple candidate plugins: %s\", prepared)\n plugin_ep = choose_plugin(list(six.itervalues(prepared)), question)\n if plugin_ep is None:\n return None\n else:\n return plugin_ep.init()\n elif len(prepared) == 1:\n plugin_ep = list(prepared.values())[0]\n logger.debug(\"Single candidate plugin: %s\", plugin_ep)\n if plugin_ep.misconfigured:\n return None\n return plugin_ep.init()\n else:\n logger.debug(\"No candidate plugin\")\n return None\n\n\ndef choose_plugin(prepared, question):\n \"\"\"Allow the user to choose their plugin.\n\n :param list prepared: List of `~.PluginEntryPoint`.\n :param str question: Question to be presented to the user.\n\n :returns: Plugin entry point chosen by the user.\n :rtype: `~.PluginEntryPoint`\n\n \"\"\"\n opts = [plugin_ep.description_with_name +\n (\" [Misconfigured]\" if plugin_ep.misconfigured else \"\")\n for plugin_ep in prepared]\n\n while True:\n disp = z_util(interfaces.IDisplay)\n code, index = disp.menu(question, opts, help_label=\"More Info\")\n\n if code == display_util.OK:\n plugin_ep = prepared[index]\n if plugin_ep.misconfigured:\n z_util(interfaces.IDisplay).notification(\n \"The selected plugin encountered an error while parsing \"\n \"your server configuration and cannot be used. 
The error \"\n \"was:\\n\\n{0}\".format(plugin_ep.prepare()),\n height=display_util.HEIGHT, pause=False)\n else:\n return plugin_ep\n elif code == display_util.HELP:\n if prepared[index].misconfigured:\n msg = \"Reported Error: %s\" % prepared[index].prepare()\n else:\n msg = prepared[index].init().more_info()\n z_util(interfaces.IDisplay).notification(\n msg, height=display_util.HEIGHT)\n else:\n return None\n\nnoninstaller_plugins = [\"webroot\", \"manual\", \"standalone\"]\n\ndef record_chosen_plugins(config, plugins, auth, inst):\n \"Update the config entries to reflect the plugins we actually selected.\"\n cn = config.namespace\n cn.authenticator = plugins.find_init(auth).name if auth else \"None\"\n cn.installer = plugins.find_init(inst).name if inst else \"None\"\n\n\ndef choose_configurator_plugins(config, plugins, verb):\n \"\"\"\n Figure out which configurator we're going to use, modifies\n config.authenticator and config.installer strings to reflect that choice if\n necessary.\n\n :raises errors.PluginSelectionError if there was a problem\n\n :returns: (an `IAuthenticator` or None, an `IInstaller` or None)\n :rtype: tuple\n \"\"\"\n\n req_auth, req_inst = cli_plugin_requests(config)\n\n # Which plugins do we need?\n if verb == \"run\":\n need_inst = need_auth = True\n from certbot.cli import cli_command\n if req_auth in noninstaller_plugins and not req_inst:\n msg = ('With the {0} plugin, you probably want to use the \"certonly\" command, eg:{1}'\n '{1} {2} certonly --{0}{1}{1}'\n '(Alternatively, add a --installer flag. See https://eff.org/letsencrypt-plugins'\n '{1} and \"--help plugins\" for more information.)'.format(\n req_auth, os.linesep, cli_command))\n\n raise errors.MissingCommandlineFlag(msg)\n else:\n need_inst = need_auth = False\n if verb == \"certonly\":\n need_auth = True\n if verb == \"install\":\n need_inst = True\n if config.authenticator:\n logger.warning(\"Specifying an authenticator doesn't make sense in install mode\")\n\n # Try to meet the user's request and/or ask them to pick plugins\n authenticator = installer = None\n if verb == \"run\" and req_auth == req_inst:\n # Unless the user has explicitly asked for different auth/install,\n # only consider offering a single choice\n authenticator = installer = pick_configurator(config, req_inst, plugins)\n else:\n if need_inst or req_inst:\n installer = pick_installer(config, req_inst, plugins)\n if need_auth:\n authenticator = pick_authenticator(config, req_auth, plugins)\n logger.debug(\"Selected authenticator %s and installer %s\", authenticator, installer)\n\n # Report on any failures\n if need_inst and not installer:\n diagnose_configurator_problem(\"installer\", req_inst, plugins)\n if need_auth and not authenticator:\n diagnose_configurator_problem(\"authenticator\", req_auth, plugins)\n\n record_chosen_plugins(config, plugins, authenticator, installer)\n return installer, authenticator\n\n\ndef set_configurator(previously, now):\n \"\"\"\n Setting configurators multiple ways is okay, as long as they all agree\n :param str previously: previously identified request for the installer/authenticator\n :param str requested: the request currently being processed\n \"\"\"\n if not now:\n # we're not actually setting anything\n return previously\n if previously:\n if previously != now:\n msg = \"Too many flags setting configurators/installers/authenticators {0} -> {1}\"\n raise errors.PluginSelectionError(msg.format(repr(previously), repr(now)))\n return now\n\n\ndef cli_plugin_requests(config):\n \"\"\"\n 
Figure out which plugins the user requested with CLI and config options\n\n :returns: (requested authenticator string or None, requested installer string or None)\n :rtype: tuple\n \"\"\"\n req_inst = req_auth = config.configurator\n req_inst = set_configurator(req_inst, config.installer)\n req_auth = set_configurator(req_auth, config.authenticator)\n if config.nginx:\n req_inst = set_configurator(req_inst, \"nginx\")\n req_auth = set_configurator(req_auth, \"nginx\")\n if config.apache:\n req_inst = set_configurator(req_inst, \"apache\")\n req_auth = set_configurator(req_auth, \"apache\")\n if config.standalone:\n req_auth = set_configurator(req_auth, \"standalone\")\n if config.webroot:\n req_auth = set_configurator(req_auth, \"webroot\")\n if config.manual:\n req_auth = set_configurator(req_auth, \"manual\")\n logger.debug(\"Requested authenticator %s and installer %s\", req_auth, req_inst)\n return req_auth, req_inst\n\n\ndef diagnose_configurator_problem(cfg_type, requested, plugins):\n \"\"\"\n Raise the most helpful error message about a plugin being unavailable\n\n :param str cfg_type: either \"installer\" or \"authenticator\"\n :param str requested: the plugin that was requested\n :param .PluginsRegistry plugins: available plugins\n\n :raises error.PluginSelectionError: if there was a problem\n \"\"\"\n\n if requested:\n if requested not in plugins:\n msg = \"The requested {0} plugin does not appear to be installed\".format(requested)\n else:\n msg = (\"The {0} plugin is not working; there may be problems with \"\n \"your existing configuration.\\nThe error was: {1!r}\"\n .format(requested, plugins[requested].problem))\n elif cfg_type == \"installer\":\n msg = ('No installer plugins seem to be present and working on your system; '\n 'fix that or try running certbot with the \"certonly\" command to obtain'\n ' a certificate you can install manually')\n else:\n msg = \"{0} could not be determined or is not installed\".format(cfg_type)\n raise errors.PluginSelectionError(msg)\n", "path": "certbot/plugins/selection.py"}]}
3,641
262
gh_patches_debug_43847
rasdani/github-patches
git_diff
ansible__ansible-modules-core-5219
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Add update_password option to os_user module <!--- Verify first that your issue/request is not already reported in GitHub --> ##### ISSUE TYPE <!--- Pick one below and delete the rest: --> - Feature Idea ##### COMPONENT NAME <!--- Name of the plugin/module/task --> os_user ##### ANSIBLE VERSION <!--- Paste verbatim output from “ansible --version” between quotes below --> ``` $ ansible --version ansible 2.1.2.0 ``` ##### CONFIGURATION <!--- Mention any settings you have changed/added/removed in ansible.cfg (or using the ANSIBLE_* environment variables). --> ##### OS / ENVIRONMENT <!--- Mention the OS you are running Ansible from, and the OS you are managing, or say “N/A” for anything that is not platform-specific. --> N/A ##### SUMMARY <!--- Explain the problem briefly --> The `os_user` module with a password specified for a user will always report 'changed'. The conclusion of the bug report in #5183 was that in order to "fix" this we need to add another parameter like the on in the `user` module. I.e a parameter called `update_password` that has options `on_create` or `always`. ##### STEPS TO REPRODUCE <!--- For bugs, show exactly how to reproduce the problem. For new features, show how the feature would be used. --> ``` - name: "Create test user" os_user: name: test state: present password: very-secret default_project: a-existing-project update_password: on_create ``` ##### EXPECTED RESULTS <!--- What did you expect to happen when running the steps above? --> On first run, the user would be created and the password set. On the second run, given that nothing changed, the task would say `ok`. If the parameter would be `update_password: always` on the other hand, the module should always set the password and would always report `changed` </issue> <code> [start of cloud/openstack/os_user.py] 1 #!/usr/bin/python 2 # Copyright (c) 2015 Hewlett-Packard Development Company, L.P. 3 # 4 # This module is free software: you can redistribute it and/or modify 5 # it under the terms of the GNU General Public License as published by 6 # the Free Software Foundation, either version 3 of the License, or 7 # (at your option) any later version. 8 # 9 # This software is distributed in the hope that it will be useful, 10 # but WITHOUT ANY WARRANTY; without even the implied warranty of 11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 # GNU General Public License for more details. 13 # 14 # You should have received a copy of the GNU General Public License 15 # along with this software. If not, see <http://www.gnu.org/licenses/>. 16 17 18 try: 19 import shade 20 HAS_SHADE = True 21 except ImportError: 22 HAS_SHADE = False 23 24 DOCUMENTATION = ''' 25 --- 26 module: os_user 27 short_description: Manage OpenStack Identity Users 28 extends_documentation_fragment: openstack 29 version_added: "2.0" 30 description: 31 - Manage OpenStack Identity users. Users can be created, 32 updated or deleted using this module. A user will be updated 33 if I(name) matches an existing user and I(state) is present. 34 The value for I(name) cannot be updated without deleting and 35 re-creating the user. 
36 options: 37 name: 38 description: 39 - Username for the user 40 required: true 41 password: 42 description: 43 - Password for the user 44 required: false 45 default: None 46 email: 47 description: 48 - Email address for the user 49 required: false 50 default: None 51 default_project: 52 description: 53 - Project name or ID that the user should be associated with by default 54 required: false 55 default: None 56 domain: 57 description: 58 - Domain to create the user in if the cloud supports domains 59 required: false 60 default: None 61 enabled: 62 description: 63 - Is the user enabled 64 required: false 65 default: True 66 state: 67 description: 68 - Should the resource be present or absent. 69 choices: [present, absent] 70 default: present 71 requirements: 72 - "python >= 2.6" 73 - "shade" 74 ''' 75 76 EXAMPLES = ''' 77 # Create a user 78 - os_user: 79 cloud: mycloud 80 state: present 81 name: demouser 82 password: secret 83 email: [email protected] 84 domain: default 85 default_project: demo 86 87 # Delete a user 88 - os_user: 89 cloud: mycloud 90 state: absent 91 name: demouser 92 ''' 93 94 95 RETURN = ''' 96 user: 97 description: Dictionary describing the user. 98 returned: On success when I(state) is 'present' 99 type: dictionary 100 contains: 101 default_project_id: 102 description: User default project ID. Only present with Keystone >= v3. 103 type: string 104 sample: "4427115787be45f08f0ec22a03bfc735" 105 domain_id: 106 description: User domain ID. Only present with Keystone >= v3. 107 type: string 108 sample: "default" 109 email: 110 description: User email address 111 type: string 112 sample: "[email protected]" 113 id: 114 description: User ID 115 type: string 116 sample: "f59382db809c43139982ca4189404650" 117 name: 118 description: User name 119 type: string 120 sample: "demouser" 121 ''' 122 123 def _needs_update(params_dict, user): 124 for k, v in params_dict.items(): 125 if k != 'password' and user[k] != v: 126 return True 127 128 # We don't get password back in the user object, so assume any supplied 129 # password is a change. 130 if params_dict['password'] is not None: 131 return True 132 133 return False 134 135 def _get_domain_id(cloud, domain): 136 try: 137 # We assume admin is passing domain id 138 domain_id = cloud.get_domain(domain)['id'] 139 except: 140 # If we fail, maybe admin is passing a domain name. 141 # Note that domains have unique names, just like id. 
142 try: 143 domain_id = cloud.search_domains(filters={'name': domain})[0]['id'] 144 except: 145 # Ok, let's hope the user is non-admin and passing a sane id 146 domain_id = domain 147 148 return domain_id 149 150 def _get_default_project_id(cloud, default_project): 151 project = cloud.get_project(default_project) 152 if not project: 153 module.fail_json(msg='Default project %s is not valid' % default_project) 154 155 return project['id'] 156 157 def main(): 158 159 argument_spec = openstack_full_argument_spec( 160 name=dict(required=True), 161 password=dict(required=False, default=None), 162 email=dict(required=False, default=None), 163 default_project=dict(required=False, default=None), 164 domain=dict(required=False, default=None), 165 enabled=dict(default=True, type='bool'), 166 state=dict(default='present', choices=['absent', 'present']), 167 ) 168 169 module_kwargs = openstack_module_kwargs() 170 module = AnsibleModule( 171 argument_spec, 172 **module_kwargs) 173 174 if not HAS_SHADE: 175 module.fail_json(msg='shade is required for this module') 176 177 name = module.params['name'] 178 password = module.params['password'] 179 email = module.params['email'] 180 default_project = module.params['default_project'] 181 domain = module.params['domain'] 182 enabled = module.params['enabled'] 183 state = module.params['state'] 184 185 try: 186 cloud = shade.openstack_cloud(**module.params) 187 user = cloud.get_user(name) 188 189 domain_id = None 190 if domain: 191 opcloud = shade.operator_cloud(**module.params) 192 domain_id = _get_domain_id(opcloud, domain) 193 194 if state == 'present': 195 default_project_id = None 196 if default_project: 197 default_project_id = _get_default_project_id(cloud, default_project) 198 199 if user is None: 200 user = cloud.create_user( 201 name=name, password=password, email=email, 202 default_project=default_project_id, domain_id=domain_id, 203 enabled=enabled) 204 changed = True 205 else: 206 params_dict = {'email': email, 'enabled': enabled, 'password': password} 207 if domain_id is not None: 208 params_dict['domain_id'] = domain_id 209 if default_project_id is not None: 210 params_dict['default_project_id'] = default_project_id 211 212 if _needs_update(params_dict, user): 213 user = cloud.update_user( 214 user['id'], password=password, email=email, 215 default_project=default_project_id, domain_id=domain_id, 216 enabled=enabled) 217 changed = True 218 else: 219 changed = False 220 module.exit_json(changed=changed, user=user) 221 222 elif state == 'absent': 223 if user is None: 224 changed=False 225 else: 226 cloud.delete_user(user['id']) 227 changed=True 228 module.exit_json(changed=changed) 229 230 except shade.OpenStackCloudException as e: 231 module.fail_json(msg=str(e), extra_data=e.extra_data) 232 233 from ansible.module_utils.basic import * 234 from ansible.module_utils.openstack import * 235 236 237 if __name__ == '__main__': 238 main() 239 [end of cloud/openstack/os_user.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/cloud/openstack/os_user.py b/cloud/openstack/os_user.py --- a/cloud/openstack/os_user.py +++ b/cloud/openstack/os_user.py @@ -43,6 +43,14 @@ - Password for the user required: false default: None + update_password: + required: false + default: always + choices: ['always', 'on_create'] + version_added: "2.3" + description: + - C(always) will attempt to update password. C(on_create) will only + set the password for newly created users. email: description: - Email address for the user @@ -89,6 +97,17 @@ cloud: mycloud state: absent name: demouser + +# Create a user but don't update password if user exists +- os_user: + cloud: mycloud + state: present + name: demouser + password: secret + update_password: on_create + email: [email protected] + domain: default + default_project: demo ''' @@ -122,12 +141,13 @@ def _needs_update(params_dict, user): for k, v in params_dict.items(): - if k != 'password' and user[k] != v: + if k not in ('password', 'update_password') and user[k] != v: return True # We don't get password back in the user object, so assume any supplied # password is a change. - if params_dict['password'] is not None: + if (params_dict['password'] is not None and + params_dict['update_password'] == 'always'): return True return False @@ -164,11 +184,17 @@ domain=dict(required=False, default=None), enabled=dict(default=True, type='bool'), state=dict(default='present', choices=['absent', 'present']), + update_password=dict(default='always', choices=['always', + 'on_create']), ) module_kwargs = openstack_module_kwargs() module = AnsibleModule( argument_spec, + required_if=[ + ('update_password', 'always', ['password']), + ('update_password', 'on_create', ['password']), + ], **module_kwargs) if not HAS_SHADE: @@ -181,6 +207,7 @@ domain = module.params['domain'] enabled = module.params['enabled'] state = module.params['state'] + update_password = module.params['update_password'] try: cloud = shade.openstack_cloud(**module.params) @@ -203,17 +230,25 @@ enabled=enabled) changed = True else: - params_dict = {'email': email, 'enabled': enabled, 'password': password} + params_dict = {'email': email, 'enabled': enabled, + 'password': password, + 'update_password': update_password} if domain_id is not None: params_dict['domain_id'] = domain_id if default_project_id is not None: params_dict['default_project_id'] = default_project_id if _needs_update(params_dict, user): - user = cloud.update_user( - user['id'], password=password, email=email, - default_project=default_project_id, domain_id=domain_id, - enabled=enabled) + if update_password == 'always': + user = cloud.update_user( + user['id'], password=password, email=email, + default_project=default_project_id, + domain_id=domain_id, enabled=enabled) + else: + user = cloud.update_user( + user['id'], email=email, + default_project=default_project_id, + domain_id=domain_id, enabled=enabled) changed = True else: changed = False
{"golden_diff": "diff --git a/cloud/openstack/os_user.py b/cloud/openstack/os_user.py\n--- a/cloud/openstack/os_user.py\n+++ b/cloud/openstack/os_user.py\n@@ -43,6 +43,14 @@\n - Password for the user\n required: false\n default: None\n+ update_password:\n+ required: false\n+ default: always\n+ choices: ['always', 'on_create']\n+ version_added: \"2.3\"\n+ description:\n+ - C(always) will attempt to update password. C(on_create) will only\n+ set the password for newly created users.\n email:\n description:\n - Email address for the user\n@@ -89,6 +97,17 @@\n cloud: mycloud\n state: absent\n name: demouser\n+\n+# Create a user but don't update password if user exists\n+- os_user:\n+ cloud: mycloud\n+ state: present\n+ name: demouser\n+ password: secret\n+ update_password: on_create\n+ email: [email protected]\n+ domain: default\n+ default_project: demo\n '''\n \n \n@@ -122,12 +141,13 @@\n \n def _needs_update(params_dict, user):\n for k, v in params_dict.items():\n- if k != 'password' and user[k] != v:\n+ if k not in ('password', 'update_password') and user[k] != v:\n return True\n \n # We don't get password back in the user object, so assume any supplied\n # password is a change.\n- if params_dict['password'] is not None:\n+ if (params_dict['password'] is not None and\n+ params_dict['update_password'] == 'always'):\n return True\n \n return False\n@@ -164,11 +184,17 @@\n domain=dict(required=False, default=None),\n enabled=dict(default=True, type='bool'),\n state=dict(default='present', choices=['absent', 'present']),\n+ update_password=dict(default='always', choices=['always',\n+ 'on_create']),\n )\n \n module_kwargs = openstack_module_kwargs()\n module = AnsibleModule(\n argument_spec,\n+ required_if=[\n+ ('update_password', 'always', ['password']),\n+ ('update_password', 'on_create', ['password']),\n+ ],\n **module_kwargs)\n \n if not HAS_SHADE:\n@@ -181,6 +207,7 @@\n domain = module.params['domain']\n enabled = module.params['enabled']\n state = module.params['state']\n+ update_password = module.params['update_password']\n \n try:\n cloud = shade.openstack_cloud(**module.params)\n@@ -203,17 +230,25 @@\n enabled=enabled)\n changed = True\n else:\n- params_dict = {'email': email, 'enabled': enabled, 'password': password}\n+ params_dict = {'email': email, 'enabled': enabled,\n+ 'password': password,\n+ 'update_password': update_password}\n if domain_id is not None:\n params_dict['domain_id'] = domain_id\n if default_project_id is not None:\n params_dict['default_project_id'] = default_project_id\n \n if _needs_update(params_dict, user):\n- user = cloud.update_user(\n- user['id'], password=password, email=email,\n- default_project=default_project_id, domain_id=domain_id,\n- enabled=enabled)\n+ if update_password == 'always':\n+ user = cloud.update_user(\n+ user['id'], password=password, email=email,\n+ default_project=default_project_id,\n+ domain_id=domain_id, enabled=enabled)\n+ else:\n+ user = cloud.update_user(\n+ user['id'], email=email,\n+ default_project=default_project_id,\n+ domain_id=domain_id, enabled=enabled)\n changed = True\n else:\n changed = False\n", "issue": "Add update_password option to os_user module\n<!--- Verify first that your issue/request is not already reported in GitHub -->\n##### ISSUE TYPE\n\n<!--- Pick one below and delete the rest: -->\n- Feature Idea\n##### COMPONENT NAME\n\n<!--- Name of the plugin/module/task -->\n\nos_user\n##### ANSIBLE VERSION\n\n<!--- Paste verbatim output from \u201cansible --version\u201d between quotes below -->\n\n```\n$ ansible 
--version\nansible 2.1.2.0\n```\n##### CONFIGURATION\n\n<!---\nMention any settings you have changed/added/removed in ansible.cfg\n(or using the ANSIBLE_* environment variables).\n-->\n##### OS / ENVIRONMENT\n\n<!---\nMention the OS you are running Ansible from, and the OS you are\nmanaging, or say \u201cN/A\u201d for anything that is not platform-specific.\n-->\n\nN/A\n##### SUMMARY\n\n<!--- Explain the problem briefly -->\n\nThe `os_user` module with a password specified for a user will always report 'changed'.\nThe conclusion of the bug report in #5183 was that in order to \"fix\" this we need to add another parameter like the on in the `user` module.\nI.e a parameter called `update_password` that has options `on_create` or `always`.\n##### STEPS TO REPRODUCE\n\n<!---\nFor bugs, show exactly how to reproduce the problem.\nFor new features, show how the feature would be used.\n-->\n\n```\n - name: \"Create test user\"\n os_user:\n name: test\n state: present\n password: very-secret\n default_project: a-existing-project\n update_password: on_create\n```\n##### EXPECTED RESULTS\n\n<!--- What did you expect to happen when running the steps above? -->\n\nOn first run, the user would be created and the password set.\nOn the second run, given that nothing changed, the task would say `ok`.\nIf the parameter would be `update_password: always` on the other hand, the module should always set the password and would always report `changed`\n\n", "before_files": [{"content": "#!/usr/bin/python\n# Copyright (c) 2015 Hewlett-Packard Development Company, L.P.\n#\n# This module is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This software is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this software. If not, see <http://www.gnu.org/licenses/>.\n\n\ntry:\n import shade\n HAS_SHADE = True\nexcept ImportError:\n HAS_SHADE = False\n\nDOCUMENTATION = '''\n---\nmodule: os_user\nshort_description: Manage OpenStack Identity Users\nextends_documentation_fragment: openstack\nversion_added: \"2.0\"\ndescription:\n - Manage OpenStack Identity users. Users can be created,\n updated or deleted using this module. 
A user will be updated\n if I(name) matches an existing user and I(state) is present.\n The value for I(name) cannot be updated without deleting and\n re-creating the user.\noptions:\n name:\n description:\n - Username for the user\n required: true\n password:\n description:\n - Password for the user\n required: false\n default: None\n email:\n description:\n - Email address for the user\n required: false\n default: None\n default_project:\n description:\n - Project name or ID that the user should be associated with by default\n required: false\n default: None\n domain:\n description:\n - Domain to create the user in if the cloud supports domains\n required: false\n default: None\n enabled:\n description:\n - Is the user enabled\n required: false\n default: True\n state:\n description:\n - Should the resource be present or absent.\n choices: [present, absent]\n default: present\nrequirements:\n - \"python >= 2.6\"\n - \"shade\"\n'''\n\nEXAMPLES = '''\n# Create a user\n- os_user:\n cloud: mycloud\n state: present\n name: demouser\n password: secret\n email: [email protected]\n domain: default\n default_project: demo\n\n# Delete a user\n- os_user:\n cloud: mycloud\n state: absent\n name: demouser\n'''\n\n\nRETURN = '''\nuser:\n description: Dictionary describing the user.\n returned: On success when I(state) is 'present'\n type: dictionary\n contains:\n default_project_id:\n description: User default project ID. Only present with Keystone >= v3.\n type: string\n sample: \"4427115787be45f08f0ec22a03bfc735\"\n domain_id:\n description: User domain ID. Only present with Keystone >= v3.\n type: string\n sample: \"default\"\n email:\n description: User email address\n type: string\n sample: \"[email protected]\"\n id:\n description: User ID\n type: string\n sample: \"f59382db809c43139982ca4189404650\"\n name:\n description: User name\n type: string\n sample: \"demouser\"\n'''\n\ndef _needs_update(params_dict, user):\n for k, v in params_dict.items():\n if k != 'password' and user[k] != v:\n return True\n\n # We don't get password back in the user object, so assume any supplied\n # password is a change.\n if params_dict['password'] is not None:\n return True\n\n return False\n\ndef _get_domain_id(cloud, domain):\n try:\n # We assume admin is passing domain id\n domain_id = cloud.get_domain(domain)['id']\n except:\n # If we fail, maybe admin is passing a domain name.\n # Note that domains have unique names, just like id.\n try:\n domain_id = cloud.search_domains(filters={'name': domain})[0]['id']\n except:\n # Ok, let's hope the user is non-admin and passing a sane id\n domain_id = domain\n\n return domain_id\n\ndef _get_default_project_id(cloud, default_project):\n project = cloud.get_project(default_project)\n if not project:\n module.fail_json(msg='Default project %s is not valid' % default_project)\n\n return project['id']\n\ndef main():\n\n argument_spec = openstack_full_argument_spec(\n name=dict(required=True),\n password=dict(required=False, default=None),\n email=dict(required=False, default=None),\n default_project=dict(required=False, default=None),\n domain=dict(required=False, default=None),\n enabled=dict(default=True, type='bool'),\n state=dict(default='present', choices=['absent', 'present']),\n )\n\n module_kwargs = openstack_module_kwargs()\n module = AnsibleModule(\n argument_spec,\n **module_kwargs)\n\n if not HAS_SHADE:\n module.fail_json(msg='shade is required for this module')\n\n name = module.params['name']\n password = module.params['password']\n email = 
module.params['email']\n default_project = module.params['default_project']\n domain = module.params['domain']\n enabled = module.params['enabled']\n state = module.params['state']\n\n try:\n cloud = shade.openstack_cloud(**module.params)\n user = cloud.get_user(name)\n\n domain_id = None\n if domain:\n opcloud = shade.operator_cloud(**module.params)\n domain_id = _get_domain_id(opcloud, domain)\n\n if state == 'present':\n default_project_id = None\n if default_project:\n default_project_id = _get_default_project_id(cloud, default_project)\n\n if user is None:\n user = cloud.create_user(\n name=name, password=password, email=email,\n default_project=default_project_id, domain_id=domain_id,\n enabled=enabled)\n changed = True\n else:\n params_dict = {'email': email, 'enabled': enabled, 'password': password}\n if domain_id is not None:\n params_dict['domain_id'] = domain_id\n if default_project_id is not None:\n params_dict['default_project_id'] = default_project_id\n\n if _needs_update(params_dict, user):\n user = cloud.update_user(\n user['id'], password=password, email=email,\n default_project=default_project_id, domain_id=domain_id,\n enabled=enabled)\n changed = True\n else:\n changed = False\n module.exit_json(changed=changed, user=user)\n\n elif state == 'absent':\n if user is None:\n changed=False\n else:\n cloud.delete_user(user['id'])\n changed=True\n module.exit_json(changed=changed)\n\n except shade.OpenStackCloudException as e:\n module.fail_json(msg=str(e), extra_data=e.extra_data)\n\nfrom ansible.module_utils.basic import *\nfrom ansible.module_utils.openstack import *\n\n\nif __name__ == '__main__':\n main()\n", "path": "cloud/openstack/os_user.py"}]}
3,190
879
gh_patches_debug_6356
rasdani/github-patches
git_diff
sunpy__sunpy-7486
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Saving with the ".jp2" extension results in a vertically flipped image ### Describe the bug Images get flipped when saving as jp2 files. This was my fault when I added this feature 🎉 ### To Reproduce ``` from sunpy.map import Map m = Map("https://helioviewer.org/jp2/AIA/2024/03/08/304/2024_03_08__00_00_05_123__SDO_AIA_AIA_304.jp2") m.peek() m.save("test.jp2") flipped = Map("test.jp2") flipped.peek() ``` See screenshots below ### Screenshots `m.peek()` <img width="634" alt="image" src="https://github.com/sunpy/sunpy/assets/94071409/fc6f1648-9bd3-4e11-a726-9b2d4810e42e"> `flipped.peek()` <img width="634" alt="image" src="https://github.com/sunpy/sunpy/assets/94071409/04c66615-05b3-4776-81c6-7aefa3c9ca5f"> ### System Details >>> import sunpy >>> sunpy.util.system_info() ============================== sunpy Installation Information ============================== General ####### OS: Mac OS 14.3.1 Arch: 64bit, (arm) sunpy: 4.0.dev2573+gf79e5d92d Installation path: sunpy.egg-info Required Dependencies ##################### astropy: 6.0.0 numpy: 1.26.4 packaging: 23.2 parfive: 2.0.2 pyerfa: 2.0.1.1 requests: 2.31.0 Optional Dependencies ##################### sunpy: 4.0.dev2573+gf79e5d92d ### Installation method git checkout </issue> <code> [start of sunpy/io/_jp2.py] 1 """ 2 This module provides a JPEG 2000 file reader for internal use. 3 4 .. warning:: 5 6 ``sunpy.io.jp2`` is deprecated, and will be removed in sunpy 4.1. This is 7 because it was designed for internal use only. 8 """ 9 import os 10 from xml.etree import ElementTree as ET 11 12 import numpy as np 13 14 from sunpy.io.header import FileHeader 15 from sunpy.util.io import HDPair, string_is_float 16 from sunpy.util.xml import xml_to_dict 17 18 __all__ = ['read', 'get_header', 'write'] 19 20 21 def read(filepath, **kwargs): 22 """ 23 Reads a JPEG2000 file. 24 25 Parameters 26 ---------- 27 filepath : `str` 28 The file to be read. 29 **kwargs : `dict` 30 Unused. 31 32 Returns 33 ------- 34 `list` 35 A list of (data, header) tuples. 36 """ 37 # Put import here to speed up sunpy.io import time 38 from glymur import Jp2k 39 40 header = get_header(filepath) 41 data = Jp2k(filepath)[...][::-1] 42 return [HDPair(data, header[0])] 43 44 45 def get_header(filepath): 46 """ 47 Reads the header from the file. 48 49 Parameters 50 ---------- 51 filepath : `str` 52 The file to be read. 53 54 Returns 55 ------- 56 `list` 57 A list of one header read from the file. 58 """ 59 # Put import here to speed up sunpy.io import time 60 from glymur import Jp2k 61 jp2 = Jp2k(filepath) 62 xml_box = [box for box in jp2.box if box.box_id == 'xml '] 63 xmlstring = ET.tostring(xml_box[0].xml.find('fits')) 64 pydict = xml_to_dict(xmlstring)["fits"] 65 66 # Fix types 67 for k, v in pydict.items(): 68 if v.isdigit(): 69 pydict[k] = int(v) 70 elif string_is_float(v): 71 pydict[k] = float(v) 72 73 # Remove newlines from comment 74 if 'comment' in pydict: 75 pydict['comment'] = pydict['comment'].replace("\n", "") 76 77 # Is this file a Helioviewer Project JPEG2000 file? 78 pydict['helioviewer'] = xml_box[0].xml.find('helioviewer') is not None 79 80 return [FileHeader(pydict)] 81 82 83 def header_to_xml(header): 84 """ 85 Converts image header metadata into an XML Tree that can be inserted into 86 a JP2 file header. 87 88 Parameters 89 ---------- 90 header : `MetaDict` 91 A header dictionary to convert to xml. 
92 93 Returns 94 ---------- 95 `lxml.etree._Element` 96 A fits element where each child is an xml element 97 in the form <key>value</key> derived from the key/value 98 pairs in the given header dictionary 99 """ 100 # glymur uses lxml and will crash if trying to use 101 # python's builtin xml.etree 102 import lxml.etree as ET 103 104 fits = ET.Element("fits") 105 106 already_added = set() 107 for key in header: 108 # Some headers span multiple lines and get duplicated as keys 109 # header.get will appropriately return all data, so if we see 110 # a key again, we can assume it was already added to the xml tree. 111 if (key in already_added): 112 continue 113 114 # Add to the set so we don't duplicate entries 115 already_added.add(key) 116 117 el = ET.SubElement(fits, key) 118 data = header.get(key) 119 if isinstance(data, bool): 120 data = "1" if data else "0" 121 else: 122 data = str(data) 123 124 el.text = data 125 126 return fits 127 128 129 def generate_jp2_xmlbox(header): 130 """ 131 Generates the JP2 XML box to be inserted into the jp2 file. 132 133 Parameters 134 ---------- 135 header : `MetaDict` 136 A header dictionary. 137 138 Returns 139 ---------- 140 `XMLBox` 141 XML box containing FITS metadata to be used in jp2 headers 142 """ 143 # glymur uses lxml and will crash if trying to use 144 # python's builtin xml.etree 145 import lxml.etree as ET 146 from glymur import jp2box 147 148 header_xml = header_to_xml(header) 149 meta = ET.Element("meta") 150 meta.append(header_xml) 151 tree = ET.ElementTree(meta) 152 return jp2box.XMLBox(xml=tree) 153 154 155 def write(fname, data, header, **kwargs): 156 """ 157 Take a data header pair and write a JP2 file. 158 159 Parameters 160 ---------- 161 fname : `str` 162 File name, with extension. 163 data : `numpy.ndarray` 164 n-dimensional data array. 165 header : `dict` 166 A header dictionary. 167 kwargs : 168 Additional keyword args are passed to the glymur.Jp2k constructor 169 170 Notes 171 ----- 172 Saving as a JPEG2000 will cast the data array to 173 uint8 values to support the JPEG2000 format. 174 """ 175 from glymur import Jp2k 176 177 tmpname = fname + "tmp.jp2" 178 jp2_data = np.uint8(data) 179 jp2 = Jp2k(tmpname, jp2_data, **kwargs) 180 181 # Append the XML data to the header information stored in jp2.box 182 meta_boxes = jp2.box 183 target_index = len(meta_boxes) - 1 184 fits_box = generate_jp2_xmlbox(header) 185 meta_boxes.insert(target_index, fits_box) 186 187 # Rewrites the jp2 file on disk with the xml data in the header 188 jp2.wrap(fname, boxes=meta_boxes) 189 190 os.remove(tmpname) 191 [end of sunpy/io/_jp2.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/sunpy/io/_jp2.py b/sunpy/io/_jp2.py --- a/sunpy/io/_jp2.py +++ b/sunpy/io/_jp2.py @@ -176,7 +176,11 @@ tmpname = fname + "tmp.jp2" jp2_data = np.uint8(data) - jp2 = Jp2k(tmpname, jp2_data, **kwargs) + + # The jp2 data is flipped when read in, so we have to flip it back before + # saving. See https://github.com/sunpy/sunpy/pull/768 for context. + flipped = np.flip(jp2_data, 0) + jp2 = Jp2k(tmpname, flipped, **kwargs) # Append the XML data to the header information stored in jp2.box meta_boxes = jp2.box
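One quick way to sanity-check the patch above is to repeat the round trip from the issue's reproduction steps after applying it. The sketch below assumes glymur is installed and uses its default lossless encoding, so the reloaded pixel values should equal the uint8-cast originals; the output file name is arbitrary.

```
import numpy as np
from sunpy.map import Map

url = ("https://helioviewer.org/jp2/AIA/2024/03/08/304/"
       "2024_03_08__00_00_05_123__SDO_AIA_AIA_304.jp2")
m = Map(url)
m.save("roundtrip.jp2")
reloaded = Map("roundtrip.jp2")

# write() now flips the uint8 array before handing it to Jp2k, so the
# [::-1] applied in read() restores the original orientation.
print(np.array_equal(np.uint8(m.data), reloaded.data))  # expected: True
reloaded.peek()  # should match m.peek() instead of appearing upside down
```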
{"golden_diff": "diff --git a/sunpy/io/_jp2.py b/sunpy/io/_jp2.py\n--- a/sunpy/io/_jp2.py\n+++ b/sunpy/io/_jp2.py\n@@ -176,7 +176,11 @@\n \n tmpname = fname + \"tmp.jp2\"\n jp2_data = np.uint8(data)\n- jp2 = Jp2k(tmpname, jp2_data, **kwargs)\n+\n+ # The jp2 data is flipped when read in, so we have to flip it back before\n+ # saving. See https://github.com/sunpy/sunpy/pull/768 for context.\n+ flipped = np.flip(jp2_data, 0)\n+ jp2 = Jp2k(tmpname, flipped, **kwargs)\n \n # Append the XML data to the header information stored in jp2.box\n meta_boxes = jp2.box\n", "issue": "Saving with the \".jp2\" extension results in a vertically flipped image\n### Describe the bug\n\nImages get flipped when saving as jp2 files.\r\nThis was my fault when I added this feature \ud83c\udf89\n\n### To Reproduce\n\n```\r\nfrom sunpy.map import Map\r\nm = Map(\"https://helioviewer.org/jp2/AIA/2024/03/08/304/2024_03_08__00_00_05_123__SDO_AIA_AIA_304.jp2\")\r\nm.peek()\r\nm.save(\"test.jp2\")\r\nflipped = Map(\"test.jp2\")\r\nflipped.peek()\r\n```\r\n\r\nSee screenshots below\r\n\n\n### Screenshots\n\n`m.peek()`\r\n<img width=\"634\" alt=\"image\" src=\"https://github.com/sunpy/sunpy/assets/94071409/fc6f1648-9bd3-4e11-a726-9b2d4810e42e\">\r\n\r\n`flipped.peek()`\r\n<img width=\"634\" alt=\"image\" src=\"https://github.com/sunpy/sunpy/assets/94071409/04c66615-05b3-4776-81c6-7aefa3c9ca5f\">\r\n\n\n### System Details\n\n\r\n>>> import sunpy\r\n>>> sunpy.util.system_info()\r\n==============================\r\nsunpy Installation Information\r\n==============================\r\n\r\nGeneral\r\n#######\r\nOS: Mac OS 14.3.1\r\nArch: 64bit, (arm)\r\nsunpy: 4.0.dev2573+gf79e5d92d\r\nInstallation path: sunpy.egg-info\r\n\r\nRequired Dependencies\r\n#####################\r\nastropy: 6.0.0\r\nnumpy: 1.26.4\r\npackaging: 23.2\r\nparfive: 2.0.2\r\npyerfa: 2.0.1.1\r\nrequests: 2.31.0\r\n\r\nOptional Dependencies\r\n#####################\r\nsunpy: 4.0.dev2573+gf79e5d92d\n\n### Installation method\n\ngit checkout\n", "before_files": [{"content": "\"\"\"\nThis module provides a JPEG 2000 file reader for internal use.\n\n.. warning::\n\n ``sunpy.io.jp2`` is deprecated, and will be removed in sunpy 4.1. 
This is\n because it was designed for internal use only.\n\"\"\"\nimport os\nfrom xml.etree import ElementTree as ET\n\nimport numpy as np\n\nfrom sunpy.io.header import FileHeader\nfrom sunpy.util.io import HDPair, string_is_float\nfrom sunpy.util.xml import xml_to_dict\n\n__all__ = ['read', 'get_header', 'write']\n\n\ndef read(filepath, **kwargs):\n \"\"\"\n Reads a JPEG2000 file.\n\n Parameters\n ----------\n filepath : `str`\n The file to be read.\n **kwargs : `dict`\n Unused.\n\n Returns\n -------\n `list`\n A list of (data, header) tuples.\n \"\"\"\n # Put import here to speed up sunpy.io import time\n from glymur import Jp2k\n\n header = get_header(filepath)\n data = Jp2k(filepath)[...][::-1]\n return [HDPair(data, header[0])]\n\n\ndef get_header(filepath):\n \"\"\"\n Reads the header from the file.\n\n Parameters\n ----------\n filepath : `str`\n The file to be read.\n\n Returns\n -------\n `list`\n A list of one header read from the file.\n \"\"\"\n # Put import here to speed up sunpy.io import time\n from glymur import Jp2k\n jp2 = Jp2k(filepath)\n xml_box = [box for box in jp2.box if box.box_id == 'xml ']\n xmlstring = ET.tostring(xml_box[0].xml.find('fits'))\n pydict = xml_to_dict(xmlstring)[\"fits\"]\n\n # Fix types\n for k, v in pydict.items():\n if v.isdigit():\n pydict[k] = int(v)\n elif string_is_float(v):\n pydict[k] = float(v)\n\n # Remove newlines from comment\n if 'comment' in pydict:\n pydict['comment'] = pydict['comment'].replace(\"\\n\", \"\")\n\n # Is this file a Helioviewer Project JPEG2000 file?\n pydict['helioviewer'] = xml_box[0].xml.find('helioviewer') is not None\n\n return [FileHeader(pydict)]\n\n\ndef header_to_xml(header):\n \"\"\"\n Converts image header metadata into an XML Tree that can be inserted into\n a JP2 file header.\n\n Parameters\n ----------\n header : `MetaDict`\n A header dictionary to convert to xml.\n\n Returns\n ----------\n `lxml.etree._Element`\n A fits element where each child is an xml element\n in the form <key>value</key> derived from the key/value\n pairs in the given header dictionary\n \"\"\"\n # glymur uses lxml and will crash if trying to use\n # python's builtin xml.etree\n import lxml.etree as ET\n\n fits = ET.Element(\"fits\")\n\n already_added = set()\n for key in header:\n # Some headers span multiple lines and get duplicated as keys\n # header.get will appropriately return all data, so if we see\n # a key again, we can assume it was already added to the xml tree.\n if (key in already_added):\n continue\n\n # Add to the set so we don't duplicate entries\n already_added.add(key)\n\n el = ET.SubElement(fits, key)\n data = header.get(key)\n if isinstance(data, bool):\n data = \"1\" if data else \"0\"\n else:\n data = str(data)\n\n el.text = data\n\n return fits\n\n\ndef generate_jp2_xmlbox(header):\n \"\"\"\n Generates the JP2 XML box to be inserted into the jp2 file.\n\n Parameters\n ----------\n header : `MetaDict`\n A header dictionary.\n\n Returns\n ----------\n `XMLBox`\n XML box containing FITS metadata to be used in jp2 headers\n \"\"\"\n # glymur uses lxml and will crash if trying to use\n # python's builtin xml.etree\n import lxml.etree as ET\n from glymur import jp2box\n\n header_xml = header_to_xml(header)\n meta = ET.Element(\"meta\")\n meta.append(header_xml)\n tree = ET.ElementTree(meta)\n return jp2box.XMLBox(xml=tree)\n\n\ndef write(fname, data, header, **kwargs):\n \"\"\"\n Take a data header pair and write a JP2 file.\n\n Parameters\n ----------\n fname : `str`\n File name, with extension.\n data : 
`numpy.ndarray`\n n-dimensional data array.\n header : `dict`\n A header dictionary.\n kwargs :\n Additional keyword args are passed to the glymur.Jp2k constructor\n\n Notes\n -----\n Saving as a JPEG2000 will cast the data array to\n uint8 values to support the JPEG2000 format.\n \"\"\"\n from glymur import Jp2k\n\n tmpname = fname + \"tmp.jp2\"\n jp2_data = np.uint8(data)\n jp2 = Jp2k(tmpname, jp2_data, **kwargs)\n\n # Append the XML data to the header information stored in jp2.box\n meta_boxes = jp2.box\n target_index = len(meta_boxes) - 1\n fits_box = generate_jp2_xmlbox(header)\n meta_boxes.insert(target_index, fits_box)\n\n # Rewrites the jp2 file on disk with the xml data in the header\n jp2.wrap(fname, boxes=meta_boxes)\n\n os.remove(tmpname)\n", "path": "sunpy/io/_jp2.py"}]}
2764
204
gh_patches_debug_37822
rasdani/github-patches
git_diff
comic__grand-challenge.org-2196
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Seperate external challenges from internal challenges It would be nice to seperate internal from external challenges a little more in the list view. The simplest (?) way would be to only include internal challenges in the list view and add a seperate list view for external challenges. This seperate list view could be linked to through a button on the normal challenge list view. </issue> <code> [start of app/grandchallenge/challenges/urls.py] 1 from django.urls import path 2 3 from grandchallenge.challenges.views import ( 4 ChallengeCreate, 5 ChallengeList, 6 ExternalChallengeCreate, 7 ExternalChallengeList, 8 ExternalChallengeUpdate, 9 UsersChallengeList, 10 ) 11 12 app_name = "challenges" 13 14 urlpatterns = [ 15 path("", ChallengeList.as_view(), name="list"), 16 path("my-challenges/", UsersChallengeList.as_view(), name="users-list"), 17 path("create/", ChallengeCreate.as_view(), name="create"), 18 path("external/", ExternalChallengeList.as_view(), name="external-list"), 19 path( 20 "external/create/", 21 ExternalChallengeCreate.as_view(), 22 name="external-create", 23 ), 24 path( 25 "external/<slug:short_name>/update/", 26 ExternalChallengeUpdate.as_view(), 27 name="external-update", 28 ), 29 ] 30 [end of app/grandchallenge/challenges/urls.py] [start of app/grandchallenge/challenges/views.py] 1 from django.contrib.auth.mixins import PermissionRequiredMixin 2 from django.contrib.messages.views import SuccessMessageMixin 3 from django.core.paginator import EmptyPage, Paginator 4 from django.db.models import Q 5 from django.utils.html import format_html 6 from django.views.generic import ( 7 CreateView, 8 ListView, 9 TemplateView, 10 UpdateView, 11 ) 12 from guardian.mixins import ( 13 LoginRequiredMixin, 14 PermissionRequiredMixin as ObjectPermissionRequiredMixin, 15 ) 16 17 from grandchallenge.challenges.filters import ChallengeFilter 18 from grandchallenge.challenges.forms import ( 19 ChallengeCreateForm, 20 ChallengeUpdateForm, 21 ExternalChallengeUpdateForm, 22 ) 23 from grandchallenge.challenges.models import ( 24 Challenge, 25 ExternalChallenge, 26 ) 27 from grandchallenge.core.templatetags.random_encode import random_encode 28 from grandchallenge.datatables.views import Column, PaginatedTableListView 29 from grandchallenge.subdomains.mixins import ChallengeSubdomainObjectMixin 30 from grandchallenge.subdomains.utils import reverse, reverse_lazy 31 32 33 class ChallengeCreate(LoginRequiredMixin, SuccessMessageMixin, CreateView): 34 model = Challenge 35 form_class = ChallengeCreateForm 36 success_message = "Challenge successfully created" 37 38 def form_valid(self, form): 39 form.instance.creator = self.request.user 40 return super().form_valid(form) 41 42 def get_form_kwargs(self, *args, **kwargs): 43 form_kwargs = super().get_form_kwargs(*args, **kwargs) 44 form_kwargs.update({"creator": self.request.user}) 45 return form_kwargs 46 47 48 class ChallengeList(TemplateView): 49 paginate_by = 40 50 template_name = "challenges/challenge_list.html" 51 52 @property 53 def _current_page(self): 54 return int(self.request.GET.get("page", 1)) 55 56 @property 57 def _filters_applied(self): 58 return any(k for k in self.request.GET if k.lower() != "page") 59 60 def _get_page(self): 61 int_qs = ( 62 Challenge.objects.filter(hidden=False) 63 .prefetch_related("phase_set", "publications") 64 .order_by("-created") 65 ) 66 self.int_filter = ChallengeFilter(self.request.GET, int_qs,) 67 ext_qs 
= ( 68 ExternalChallenge.objects.filter(hidden=False) 69 .prefetch_related("publications") 70 .order_by("-created") 71 ) 72 self.ext_filter = ChallengeFilter(self.request.GET, ext_qs,) 73 74 total_count = int_qs.count() + ext_qs.count() 75 76 int_paginator = Paginator(self.int_filter.qs, self.paginate_by // 2) 77 ext_paginator = Paginator(self.ext_filter.qs, self.paginate_by // 2) 78 79 num_pages = max(int_paginator.num_pages, ext_paginator.num_pages) 80 num_results = int_paginator.count + ext_paginator.count 81 82 try: 83 int_page = int_paginator.page(self._current_page) 84 except EmptyPage: 85 int_page = [] 86 87 try: 88 ext_page = ext_paginator.page(self._current_page) 89 except EmptyPage: 90 ext_page = [] 91 92 return [*int_page, *ext_page], num_pages, num_results, total_count 93 94 def get_context_data(self, *, object_list=None, **kwargs): 95 context = super().get_context_data(**kwargs) 96 97 page_obj, num_pages, num_results, total_count = self._get_page() 98 99 context.update( 100 { 101 "filter": self.int_filter, 102 "filters_applied": self._filters_applied, 103 "page_obj": page_obj, 104 "num_pages": num_pages, 105 "num_results": num_results, 106 "total_count": total_count, 107 "current_page": self._current_page, 108 "next_page": self._current_page + 1, 109 "previous_page": self._current_page - 1, 110 "jumbotron_title": "Challenges", 111 "jumbotron_description": format_html( 112 ( 113 "Here is an overview of all challenges that have been " 114 "organised within the area of medical image analysis " 115 "that we are aware of. Please <a href='{}'>contact " 116 "us</a> if you want to advertise your challenge or " 117 "know of any study that would fit in this overview." 118 ), 119 random_encode("mailto:[email protected]"), 120 ), 121 } 122 ) 123 124 return context 125 126 127 class UsersChallengeList(LoginRequiredMixin, PaginatedTableListView): 128 model = Challenge 129 template_name = "challenges/challenge_users_list.html" 130 row_template = "challenges/challenge_users_row.html" 131 search_fields = [ 132 "title", 133 "short_name", 134 "description", 135 ] 136 columns = [ 137 Column(title="Name", sort_field="short_name"), 138 Column(title="Created", sort_field="created"), 139 Column(title="Admins", sort_field="created"), 140 Column(title="Description", sort_field="description"), 141 Column(title="Automated Evaluation", sort_field="use_evaluation"), 142 ] 143 default_sort_column = 1 144 145 def get_queryset(self): 146 queryset = ( 147 super() 148 .get_queryset() 149 .prefetch_related( 150 "admins_group__user_set__user_profile", 151 "admins_group__user_set__verification", 152 ) 153 ) 154 if not self.request.user.is_superuser: 155 queryset = queryset.filter( 156 Q(participants_group__in=self.request.user.groups.all()) 157 | Q(admins_group__in=self.request.user.groups.all()) 158 ) 159 return queryset 160 161 162 class ChallengeUpdate( 163 LoginRequiredMixin, 164 ObjectPermissionRequiredMixin, 165 SuccessMessageMixin, 166 ChallengeSubdomainObjectMixin, 167 UpdateView, 168 ): 169 model = Challenge 170 slug_field = "short_name__iexact" 171 slug_url_kwarg = "challenge_short_name" 172 form_class = ChallengeUpdateForm 173 success_message = "Challenge successfully updated" 174 template_name_suffix = "_update" 175 permission_required = "change_challenge" 176 raise_exception = True 177 login_url = reverse_lazy("account_login") 178 179 def get_success_url(self): 180 return reverse( 181 "update", 182 kwargs={"challenge_short_name": self.request.challenge.short_name}, 183 ) 184 185 186 class 
ExternalChallengeCreate( 187 LoginRequiredMixin, 188 PermissionRequiredMixin, 189 SuccessMessageMixin, 190 CreateView, 191 ): 192 model = ExternalChallenge 193 form_class = ExternalChallengeUpdateForm 194 success_message = ( 195 "Your challenge has been successfully submitted. " 196 "An admin will review your challenge before it is published." 197 ) 198 raise_exception = True 199 permission_required = "challenges.add_externalchallenge" 200 201 def form_valid(self, form): 202 form.instance.creator = self.request.user 203 return super().form_valid(form) 204 205 def get_success_url(self): 206 return reverse("challenges:list") 207 208 209 class ExternalChallengeUpdate( 210 LoginRequiredMixin, 211 PermissionRequiredMixin, 212 SuccessMessageMixin, 213 UpdateView, 214 ): 215 model = ExternalChallenge 216 slug_field = "short_name__iexact" 217 slug_url_kwarg = "short_name" 218 form_class = ExternalChallengeUpdateForm 219 template_name_suffix = "_update" 220 success_message = "Challenge updated" 221 raise_exception = True 222 permission_required = "challenges.change_externalchallenge" 223 224 def get_success_url(self): 225 return reverse("challenges:list") 226 227 228 class ExternalChallengeList( 229 LoginRequiredMixin, PermissionRequiredMixin, ListView 230 ): 231 model = ExternalChallenge 232 raise_exception = True 233 permission_required = "challenges.view_externalchallenge" 234 [end of app/grandchallenge/challenges/views.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/app/grandchallenge/challenges/urls.py b/app/grandchallenge/challenges/urls.py --- a/app/grandchallenge/challenges/urls.py +++ b/app/grandchallenge/challenges/urls.py @@ -3,6 +3,7 @@ from grandchallenge.challenges.views import ( ChallengeCreate, ChallengeList, + CombinedChallengeList, ExternalChallengeCreate, ExternalChallengeList, ExternalChallengeUpdate, @@ -13,6 +14,11 @@ urlpatterns = [ path("", ChallengeList.as_view(), name="list"), + path( + "all-challenges/", + CombinedChallengeList.as_view(), + name="combined-list", + ), path("my-challenges/", UsersChallengeList.as_view(), name="users-list"), path("create/", ChallengeCreate.as_view(), name="create"), path("external/", ExternalChallengeList.as_view(), name="external-list"), diff --git a/app/grandchallenge/challenges/views.py b/app/grandchallenge/challenges/views.py --- a/app/grandchallenge/challenges/views.py +++ b/app/grandchallenge/challenges/views.py @@ -24,6 +24,7 @@ Challenge, ExternalChallenge, ) +from grandchallenge.core.filters import FilterMixin from grandchallenge.core.templatetags.random_encode import random_encode from grandchallenge.datatables.views import Column, PaginatedTableListView from grandchallenge.subdomains.mixins import ChallengeSubdomainObjectMixin @@ -45,9 +46,44 @@ return form_kwargs -class ChallengeList(TemplateView): +class ChallengeList(FilterMixin, ListView): + model = Challenge + ordering = "-created" + filter_class = ChallengeFilter + paginate_by = 40 + + def get_queryset(self): + return ( + super() + .get_queryset() + .filter(hidden=False) + .prefetch_related("phase_set", "publications") + .order_by("-created") + ) + + def get_context_data(self, *args, **kwargs): + context = super().get_context_data(*args, **kwargs) + + context.update( + { + "jumbotron_title": "Challenges", + "jumbotron_description": format_html( + ( + "Here is an overview over the medical image analysis" + " challenges that have been hosted on Grand Challenge." + "<br>Please <a href='{}'>contact us</a> if you would like " + "to host your own challenge." + ), + random_encode("mailto:[email protected]"), + ), + } + ) + return context + + +class CombinedChallengeList(TemplateView): paginate_by = 40 - template_name = "challenges/challenge_list.html" + template_name = "challenges/combined_challenge_list.html" @property def _current_page(self):
{"golden_diff": "diff --git a/app/grandchallenge/challenges/urls.py b/app/grandchallenge/challenges/urls.py\n--- a/app/grandchallenge/challenges/urls.py\n+++ b/app/grandchallenge/challenges/urls.py\n@@ -3,6 +3,7 @@\n from grandchallenge.challenges.views import (\n ChallengeCreate,\n ChallengeList,\n+ CombinedChallengeList,\n ExternalChallengeCreate,\n ExternalChallengeList,\n ExternalChallengeUpdate,\n@@ -13,6 +14,11 @@\n \n urlpatterns = [\n path(\"\", ChallengeList.as_view(), name=\"list\"),\n+ path(\n+ \"all-challenges/\",\n+ CombinedChallengeList.as_view(),\n+ name=\"combined-list\",\n+ ),\n path(\"my-challenges/\", UsersChallengeList.as_view(), name=\"users-list\"),\n path(\"create/\", ChallengeCreate.as_view(), name=\"create\"),\n path(\"external/\", ExternalChallengeList.as_view(), name=\"external-list\"),\ndiff --git a/app/grandchallenge/challenges/views.py b/app/grandchallenge/challenges/views.py\n--- a/app/grandchallenge/challenges/views.py\n+++ b/app/grandchallenge/challenges/views.py\n@@ -24,6 +24,7 @@\n Challenge,\n ExternalChallenge,\n )\n+from grandchallenge.core.filters import FilterMixin\n from grandchallenge.core.templatetags.random_encode import random_encode\n from grandchallenge.datatables.views import Column, PaginatedTableListView\n from grandchallenge.subdomains.mixins import ChallengeSubdomainObjectMixin\n@@ -45,9 +46,44 @@\n return form_kwargs\n \n \n-class ChallengeList(TemplateView):\n+class ChallengeList(FilterMixin, ListView):\n+ model = Challenge\n+ ordering = \"-created\"\n+ filter_class = ChallengeFilter\n+ paginate_by = 40\n+\n+ def get_queryset(self):\n+ return (\n+ super()\n+ .get_queryset()\n+ .filter(hidden=False)\n+ .prefetch_related(\"phase_set\", \"publications\")\n+ .order_by(\"-created\")\n+ )\n+\n+ def get_context_data(self, *args, **kwargs):\n+ context = super().get_context_data(*args, **kwargs)\n+\n+ context.update(\n+ {\n+ \"jumbotron_title\": \"Challenges\",\n+ \"jumbotron_description\": format_html(\n+ (\n+ \"Here is an overview over the medical image analysis\"\n+ \" challenges that have been hosted on Grand Challenge.\"\n+ \"<br>Please <a href='{}'>contact us</a> if you would like \"\n+ \"to host your own challenge.\"\n+ ),\n+ random_encode(\"mailto:[email protected]\"),\n+ ),\n+ }\n+ )\n+ return context\n+\n+\n+class CombinedChallengeList(TemplateView):\n paginate_by = 40\n- template_name = \"challenges/challenge_list.html\"\n+ template_name = \"challenges/combined_challenge_list.html\"\n \n @property\n def _current_page(self):\n", "issue": "Seperate external challenges from internal challenges\nIt would be nice to seperate internal from external challenges a little more in the list view. \r\n\r\nThe simplest (?) way would be to only include internal challenges in the list view and add a seperate list view for external challenges. This seperate list view could be linked to through a button on the normal challenge list view. 
\r\n\r\n\n", "before_files": [{"content": "from django.urls import path\n\nfrom grandchallenge.challenges.views import (\n ChallengeCreate,\n ChallengeList,\n ExternalChallengeCreate,\n ExternalChallengeList,\n ExternalChallengeUpdate,\n UsersChallengeList,\n)\n\napp_name = \"challenges\"\n\nurlpatterns = [\n path(\"\", ChallengeList.as_view(), name=\"list\"),\n path(\"my-challenges/\", UsersChallengeList.as_view(), name=\"users-list\"),\n path(\"create/\", ChallengeCreate.as_view(), name=\"create\"),\n path(\"external/\", ExternalChallengeList.as_view(), name=\"external-list\"),\n path(\n \"external/create/\",\n ExternalChallengeCreate.as_view(),\n name=\"external-create\",\n ),\n path(\n \"external/<slug:short_name>/update/\",\n ExternalChallengeUpdate.as_view(),\n name=\"external-update\",\n ),\n]\n", "path": "app/grandchallenge/challenges/urls.py"}, {"content": "from django.contrib.auth.mixins import PermissionRequiredMixin\nfrom django.contrib.messages.views import SuccessMessageMixin\nfrom django.core.paginator import EmptyPage, Paginator\nfrom django.db.models import Q\nfrom django.utils.html import format_html\nfrom django.views.generic import (\n CreateView,\n ListView,\n TemplateView,\n UpdateView,\n)\nfrom guardian.mixins import (\n LoginRequiredMixin,\n PermissionRequiredMixin as ObjectPermissionRequiredMixin,\n)\n\nfrom grandchallenge.challenges.filters import ChallengeFilter\nfrom grandchallenge.challenges.forms import (\n ChallengeCreateForm,\n ChallengeUpdateForm,\n ExternalChallengeUpdateForm,\n)\nfrom grandchallenge.challenges.models import (\n Challenge,\n ExternalChallenge,\n)\nfrom grandchallenge.core.templatetags.random_encode import random_encode\nfrom grandchallenge.datatables.views import Column, PaginatedTableListView\nfrom grandchallenge.subdomains.mixins import ChallengeSubdomainObjectMixin\nfrom grandchallenge.subdomains.utils import reverse, reverse_lazy\n\n\nclass ChallengeCreate(LoginRequiredMixin, SuccessMessageMixin, CreateView):\n model = Challenge\n form_class = ChallengeCreateForm\n success_message = \"Challenge successfully created\"\n\n def form_valid(self, form):\n form.instance.creator = self.request.user\n return super().form_valid(form)\n\n def get_form_kwargs(self, *args, **kwargs):\n form_kwargs = super().get_form_kwargs(*args, **kwargs)\n form_kwargs.update({\"creator\": self.request.user})\n return form_kwargs\n\n\nclass ChallengeList(TemplateView):\n paginate_by = 40\n template_name = \"challenges/challenge_list.html\"\n\n @property\n def _current_page(self):\n return int(self.request.GET.get(\"page\", 1))\n\n @property\n def _filters_applied(self):\n return any(k for k in self.request.GET if k.lower() != \"page\")\n\n def _get_page(self):\n int_qs = (\n Challenge.objects.filter(hidden=False)\n .prefetch_related(\"phase_set\", \"publications\")\n .order_by(\"-created\")\n )\n self.int_filter = ChallengeFilter(self.request.GET, int_qs,)\n ext_qs = (\n ExternalChallenge.objects.filter(hidden=False)\n .prefetch_related(\"publications\")\n .order_by(\"-created\")\n )\n self.ext_filter = ChallengeFilter(self.request.GET, ext_qs,)\n\n total_count = int_qs.count() + ext_qs.count()\n\n int_paginator = Paginator(self.int_filter.qs, self.paginate_by // 2)\n ext_paginator = Paginator(self.ext_filter.qs, self.paginate_by // 2)\n\n num_pages = max(int_paginator.num_pages, ext_paginator.num_pages)\n num_results = int_paginator.count + ext_paginator.count\n\n try:\n int_page = int_paginator.page(self._current_page)\n except EmptyPage:\n int_page = []\n\n 
try:\n ext_page = ext_paginator.page(self._current_page)\n except EmptyPage:\n ext_page = []\n\n return [*int_page, *ext_page], num_pages, num_results, total_count\n\n def get_context_data(self, *, object_list=None, **kwargs):\n context = super().get_context_data(**kwargs)\n\n page_obj, num_pages, num_results, total_count = self._get_page()\n\n context.update(\n {\n \"filter\": self.int_filter,\n \"filters_applied\": self._filters_applied,\n \"page_obj\": page_obj,\n \"num_pages\": num_pages,\n \"num_results\": num_results,\n \"total_count\": total_count,\n \"current_page\": self._current_page,\n \"next_page\": self._current_page + 1,\n \"previous_page\": self._current_page - 1,\n \"jumbotron_title\": \"Challenges\",\n \"jumbotron_description\": format_html(\n (\n \"Here is an overview of all challenges that have been \"\n \"organised within the area of medical image analysis \"\n \"that we are aware of. Please <a href='{}'>contact \"\n \"us</a> if you want to advertise your challenge or \"\n \"know of any study that would fit in this overview.\"\n ),\n random_encode(\"mailto:[email protected]\"),\n ),\n }\n )\n\n return context\n\n\nclass UsersChallengeList(LoginRequiredMixin, PaginatedTableListView):\n model = Challenge\n template_name = \"challenges/challenge_users_list.html\"\n row_template = \"challenges/challenge_users_row.html\"\n search_fields = [\n \"title\",\n \"short_name\",\n \"description\",\n ]\n columns = [\n Column(title=\"Name\", sort_field=\"short_name\"),\n Column(title=\"Created\", sort_field=\"created\"),\n Column(title=\"Admins\", sort_field=\"created\"),\n Column(title=\"Description\", sort_field=\"description\"),\n Column(title=\"Automated Evaluation\", sort_field=\"use_evaluation\"),\n ]\n default_sort_column = 1\n\n def get_queryset(self):\n queryset = (\n super()\n .get_queryset()\n .prefetch_related(\n \"admins_group__user_set__user_profile\",\n \"admins_group__user_set__verification\",\n )\n )\n if not self.request.user.is_superuser:\n queryset = queryset.filter(\n Q(participants_group__in=self.request.user.groups.all())\n | Q(admins_group__in=self.request.user.groups.all())\n )\n return queryset\n\n\nclass ChallengeUpdate(\n LoginRequiredMixin,\n ObjectPermissionRequiredMixin,\n SuccessMessageMixin,\n ChallengeSubdomainObjectMixin,\n UpdateView,\n):\n model = Challenge\n slug_field = \"short_name__iexact\"\n slug_url_kwarg = \"challenge_short_name\"\n form_class = ChallengeUpdateForm\n success_message = \"Challenge successfully updated\"\n template_name_suffix = \"_update\"\n permission_required = \"change_challenge\"\n raise_exception = True\n login_url = reverse_lazy(\"account_login\")\n\n def get_success_url(self):\n return reverse(\n \"update\",\n kwargs={\"challenge_short_name\": self.request.challenge.short_name},\n )\n\n\nclass ExternalChallengeCreate(\n LoginRequiredMixin,\n PermissionRequiredMixin,\n SuccessMessageMixin,\n CreateView,\n):\n model = ExternalChallenge\n form_class = ExternalChallengeUpdateForm\n success_message = (\n \"Your challenge has been successfully submitted. 
\"\n \"An admin will review your challenge before it is published.\"\n )\n raise_exception = True\n permission_required = \"challenges.add_externalchallenge\"\n\n def form_valid(self, form):\n form.instance.creator = self.request.user\n return super().form_valid(form)\n\n def get_success_url(self):\n return reverse(\"challenges:list\")\n\n\nclass ExternalChallengeUpdate(\n LoginRequiredMixin,\n PermissionRequiredMixin,\n SuccessMessageMixin,\n UpdateView,\n):\n model = ExternalChallenge\n slug_field = \"short_name__iexact\"\n slug_url_kwarg = \"short_name\"\n form_class = ExternalChallengeUpdateForm\n template_name_suffix = \"_update\"\n success_message = \"Challenge updated\"\n raise_exception = True\n permission_required = \"challenges.change_externalchallenge\"\n\n def get_success_url(self):\n return reverse(\"challenges:list\")\n\n\nclass ExternalChallengeList(\n LoginRequiredMixin, PermissionRequiredMixin, ListView\n):\n model = ExternalChallenge\n raise_exception = True\n permission_required = \"challenges.view_externalchallenge\"\n", "path": "app/grandchallenge/challenges/views.py"}]}
3012
641
gh_patches_debug_10098
rasdani/github-patches
git_diff
liqd__a4-opin-2485
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Community debate module error messages **expected behaviour:** Error message only shown for document upload if file is incorrect **behaviour:** Error message is also shown when title is not added </issue> <code> [start of euth/communitydebate/views.py] 1 from django.contrib import messages 2 from django.shortcuts import render 3 from django.urls import reverse 4 from django.utils.translation import gettext as _ 5 from django.views import generic 6 from rules.contrib.views import PermissionRequiredMixin 7 8 from adhocracy4.filters import views as filter_views 9 from adhocracy4.modules.models import Module 10 from euth.projects import mixins as prj_mixins 11 12 from . import forms 13 from . import models as communitydebate_models 14 from .filters import TopicFilterSet 15 16 17 class TopicListView(prj_mixins.ProjectPhaseMixin, 18 filter_views.FilteredListView): 19 model = communitydebate_models.Topic 20 paginate_by = 15 21 filter_set = TopicFilterSet 22 23 def get_queryset(self): 24 return super().get_queryset().filter(module=self.module) 25 26 27 class TopicDetailView(PermissionRequiredMixin, generic.DetailView): 28 model = communitydebate_models.Topic 29 queryset = \ 30 communitydebate_models.Topic.objects\ 31 .annotate_positive_rating_count() \ 32 .annotate_negative_rating_count() 33 permission_required = 'euth_communitydebate.view_topic' 34 35 def get_context_data(self, **kwargs): 36 context = super().get_context_data() 37 upload_files = communitydebate_models.TopicFileUpload.objects\ 38 .filter(topic=self.object) 39 context['upload_files'] = upload_files 40 return context 41 42 43 class TopicCreateView(PermissionRequiredMixin, generic.CreateView): 44 model = communitydebate_models.Topic 45 form_class = forms.TopicForm 46 permission_required = 'euth_communitydebate.propose_topic' 47 template_name = 'euth_communitydebate/topic_form.html' 48 49 @property 50 def raise_exception(self): 51 return self.request.user.is_authenticated 52 53 def dispatch(self, *args, **kwargs): 54 mod_slug = self.kwargs[self.slug_url_kwarg] 55 self.module = Module.objects.get(slug=mod_slug) 56 self.project = self.module.project 57 return super().dispatch(*args, **kwargs) 58 59 def get_permission_object(self, *args, **kwargs): 60 return self.module 61 62 def get_context_data(self, upload_forms=None, **kwargs): 63 context = super().get_context_data(**kwargs) 64 context['project'] = self.project 65 context['mode'] = 'create' 66 if not upload_forms: 67 upload_forms = forms.TopicFileUploadFormset() 68 context['upload_forms'] = upload_forms 69 return context 70 71 def form_valid(self, form): 72 form.instance.creator = self.request.user 73 form.instance.module = self.module 74 return super().form_valid(form) 75 76 def get_form_kwargs(self): 77 kwargs = super().get_form_kwargs() 78 kwargs['module'] = self.module 79 return kwargs 80 81 def post(self, request, *args, **kwargs): 82 self.object = None 83 form = self.get_form() 84 if form.is_valid(): 85 topic = form.save(commit=False) 86 upload_forms = forms.TopicFileUploadFormset(request.POST, 87 request.FILES, 88 instance=topic) 89 if upload_forms.is_valid(): 90 response = self.form_valid(form) 91 upload_forms.save() 92 messages.add_message(request, 93 messages.SUCCESS, 94 _('Topic ' 95 'successfully created')) 96 return response 97 98 upload_forms = forms.TopicFileUploadFormset(request.POST, 99 request.FILES) 100 return render(request, self.template_name, 101 
self.get_context_data(upload_forms=upload_forms)) 102 103 104 class TopicUpdateView(PermissionRequiredMixin, generic.UpdateView): 105 model = communitydebate_models.Topic 106 form_class = forms.TopicForm 107 permission_required = 'euth_communitydebate.modify_topic' 108 template_name = 'euth_communitydebate/topic_form.html' 109 110 @property 111 def raise_exception(self): 112 return self.request.user.is_authenticated 113 114 def dispatch(self, *args, **kwargs): 115 self.object = self.get_object() 116 return super().dispatch(*args, **kwargs) 117 118 def get_context_data(self, upload_forms=None, **kwargs): 119 context = super().get_context_data(**kwargs) 120 context['project'] = self.object.project 121 context['mode'] = 'update' 122 if not upload_forms: 123 upload_forms = forms.TopicFileUploadFormset( 124 instance=self.get_object()) 125 context['upload_forms'] = upload_forms 126 return context 127 128 def get_form_kwargs(self): 129 kwargs = super().get_form_kwargs() 130 kwargs['module'] = kwargs.get('instance').module 131 return kwargs 132 133 def post(self, request, *args, **kwargs): 134 form = self.get_form() 135 upload_forms = forms.TopicFileUploadFormset(request.POST, 136 request.FILES, 137 instance=self.object) 138 if upload_forms.is_valid() and form.is_valid(): 139 response = self.form_valid(form) 140 upload_forms.save() 141 messages.add_message(request, 142 messages.SUCCESS, 143 _('Topic successfully ' 144 'updated')) 145 else: 146 response = render(request, 147 self.template_name, 148 self.get_context_data(upload_forms=upload_forms)) 149 return response 150 151 152 class TopicDeleteView(PermissionRequiredMixin, generic.DeleteView): 153 model = communitydebate_models.Topic 154 success_message = _("Your topic has been deleted") 155 permission_required = 'euth_communitydebate.modify_topic' 156 157 @property 158 def raise_exception(self): 159 return self.request.user.is_authenticated 160 161 def delete(self, request, *args, **kwargs): 162 messages.success(self.request, self.success_message) 163 return super(TopicDeleteView, self).delete(request, *args, **kwargs) 164 165 def get_success_url(self): 166 return reverse('project-detail', 167 kwargs={'slug': self.object.project.slug}) 168 [end of euth/communitydebate/views.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/euth/communitydebate/views.py b/euth/communitydebate/views.py --- a/euth/communitydebate/views.py +++ b/euth/communitydebate/views.py @@ -95,8 +95,9 @@ 'successfully created')) return response - upload_forms = forms.TopicFileUploadFormset(request.POST, - request.FILES) + else: + upload_forms = forms.TopicFileUploadFormset(request.POST, + request.FILES) return render(request, self.template_name, self.get_context_data(upload_forms=upload_forms))
{"golden_diff": "diff --git a/euth/communitydebate/views.py b/euth/communitydebate/views.py\n--- a/euth/communitydebate/views.py\n+++ b/euth/communitydebate/views.py\n@@ -95,8 +95,9 @@\n 'successfully created'))\n return response\n \n- upload_forms = forms.TopicFileUploadFormset(request.POST,\n- request.FILES)\n+ else:\n+ upload_forms = forms.TopicFileUploadFormset(request.POST,\n+ request.FILES)\n return render(request, self.template_name,\n self.get_context_data(upload_forms=upload_forms))\n", "issue": "Community debate module error messages\n**expected behaviour:** Error message only shown for document upload if file is incorrect\r\n**behaviour:** Error message is also shown when title is not added\r\n\r\n\n", "before_files": [{"content": "from django.contrib import messages\nfrom django.shortcuts import render\nfrom django.urls import reverse\nfrom django.utils.translation import gettext as _\nfrom django.views import generic\nfrom rules.contrib.views import PermissionRequiredMixin\n\nfrom adhocracy4.filters import views as filter_views\nfrom adhocracy4.modules.models import Module\nfrom euth.projects import mixins as prj_mixins\n\nfrom . import forms\nfrom . import models as communitydebate_models\nfrom .filters import TopicFilterSet\n\n\nclass TopicListView(prj_mixins.ProjectPhaseMixin,\n filter_views.FilteredListView):\n model = communitydebate_models.Topic\n paginate_by = 15\n filter_set = TopicFilterSet\n\n def get_queryset(self):\n return super().get_queryset().filter(module=self.module)\n\n\nclass TopicDetailView(PermissionRequiredMixin, generic.DetailView):\n model = communitydebate_models.Topic\n queryset = \\\n communitydebate_models.Topic.objects\\\n .annotate_positive_rating_count() \\\n .annotate_negative_rating_count()\n permission_required = 'euth_communitydebate.view_topic'\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data()\n upload_files = communitydebate_models.TopicFileUpload.objects\\\n .filter(topic=self.object)\n context['upload_files'] = upload_files\n return context\n\n\nclass TopicCreateView(PermissionRequiredMixin, generic.CreateView):\n model = communitydebate_models.Topic\n form_class = forms.TopicForm\n permission_required = 'euth_communitydebate.propose_topic'\n template_name = 'euth_communitydebate/topic_form.html'\n\n @property\n def raise_exception(self):\n return self.request.user.is_authenticated\n\n def dispatch(self, *args, **kwargs):\n mod_slug = self.kwargs[self.slug_url_kwarg]\n self.module = Module.objects.get(slug=mod_slug)\n self.project = self.module.project\n return super().dispatch(*args, **kwargs)\n\n def get_permission_object(self, *args, **kwargs):\n return self.module\n\n def get_context_data(self, upload_forms=None, **kwargs):\n context = super().get_context_data(**kwargs)\n context['project'] = self.project\n context['mode'] = 'create'\n if not upload_forms:\n upload_forms = forms.TopicFileUploadFormset()\n context['upload_forms'] = upload_forms\n return context\n\n def form_valid(self, form):\n form.instance.creator = self.request.user\n form.instance.module = self.module\n return super().form_valid(form)\n\n def get_form_kwargs(self):\n kwargs = super().get_form_kwargs()\n kwargs['module'] = self.module\n return kwargs\n\n def post(self, request, *args, **kwargs):\n self.object = None\n form = self.get_form()\n if form.is_valid():\n topic = form.save(commit=False)\n upload_forms = forms.TopicFileUploadFormset(request.POST,\n request.FILES,\n instance=topic)\n if upload_forms.is_valid():\n response = 
self.form_valid(form)\n upload_forms.save()\n messages.add_message(request,\n messages.SUCCESS,\n _('Topic '\n 'successfully created'))\n return response\n\n upload_forms = forms.TopicFileUploadFormset(request.POST,\n request.FILES)\n return render(request, self.template_name,\n self.get_context_data(upload_forms=upload_forms))\n\n\nclass TopicUpdateView(PermissionRequiredMixin, generic.UpdateView):\n model = communitydebate_models.Topic\n form_class = forms.TopicForm\n permission_required = 'euth_communitydebate.modify_topic'\n template_name = 'euth_communitydebate/topic_form.html'\n\n @property\n def raise_exception(self):\n return self.request.user.is_authenticated\n\n def dispatch(self, *args, **kwargs):\n self.object = self.get_object()\n return super().dispatch(*args, **kwargs)\n\n def get_context_data(self, upload_forms=None, **kwargs):\n context = super().get_context_data(**kwargs)\n context['project'] = self.object.project\n context['mode'] = 'update'\n if not upload_forms:\n upload_forms = forms.TopicFileUploadFormset(\n instance=self.get_object())\n context['upload_forms'] = upload_forms\n return context\n\n def get_form_kwargs(self):\n kwargs = super().get_form_kwargs()\n kwargs['module'] = kwargs.get('instance').module\n return kwargs\n\n def post(self, request, *args, **kwargs):\n form = self.get_form()\n upload_forms = forms.TopicFileUploadFormset(request.POST,\n request.FILES,\n instance=self.object)\n if upload_forms.is_valid() and form.is_valid():\n response = self.form_valid(form)\n upload_forms.save()\n messages.add_message(request,\n messages.SUCCESS,\n _('Topic successfully '\n 'updated'))\n else:\n response = render(request,\n self.template_name,\n self.get_context_data(upload_forms=upload_forms))\n return response\n\n\nclass TopicDeleteView(PermissionRequiredMixin, generic.DeleteView):\n model = communitydebate_models.Topic\n success_message = _(\"Your topic has been deleted\")\n permission_required = 'euth_communitydebate.modify_topic'\n\n @property\n def raise_exception(self):\n return self.request.user.is_authenticated\n\n def delete(self, request, *args, **kwargs):\n messages.success(self.request, self.success_message)\n return super(TopicDeleteView, self).delete(request, *args, **kwargs)\n\n def get_success_url(self):\n return reverse('project-detail',\n kwargs={'slug': self.object.project.slug})\n", "path": "euth/communitydebate/views.py"}]}
2159
123
gh_patches_debug_13913
rasdani/github-patches
git_diff
cookiecutter__cookiecutter-655
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Fix the repo regex to handle + in URLs The documentation said that you could do git+https:// or git+ssh:// but that was not, in fact, true. </issue> <code> [start of cookiecutter/main.py] 1 #!/usr/bin/env python 2 # -*- coding: utf-8 -*- 3 4 """ 5 cookiecutter.main 6 ----------------- 7 8 Main entry point for the `cookiecutter` command. 9 10 The code in this module is also a good example of how to use Cookiecutter as a 11 library rather than a script. 12 """ 13 14 from __future__ import unicode_literals 15 import logging 16 import os 17 import re 18 19 from .config import get_user_config, USER_CONFIG_PATH 20 from .exceptions import InvalidModeException, RepositoryNotFound 21 from .prompt import prompt_for_config 22 from .generate import generate_context, generate_files 23 from .vcs import clone 24 from .replay import dump, load 25 26 logger = logging.getLogger(__name__) 27 28 builtin_abbreviations = { 29 'gh': 'https://github.com/{0}.git', 30 'bb': 'https://bitbucket.org/{0}', 31 } 32 33 REPO_REGEX = """ 34 ( 35 ((git|ssh|https|http):(//)?) # something like git:// ssh:// etc. 36 | # or 37 (\w+@[\w\.]+) # something like user@... 38 ) 39 .* 40 """ 41 42 43 def is_repo_url(value): 44 """Return True if value is a repository URL.""" 45 return bool(re.match(REPO_REGEX, value, re.VERBOSE)) 46 47 48 def expand_abbreviations(template, config_dict): 49 """ 50 Expand abbreviations in a template name. 51 52 :param template: The project template name. 53 :param config_dict: The user config, which will contain abbreviation 54 definitions. 55 """ 56 57 abbreviations = builtin_abbreviations.copy() 58 abbreviations.update(config_dict.get('abbreviations', {})) 59 60 if template in abbreviations: 61 return abbreviations[template] 62 63 # Split on colon. If there is no colon, rest will be empty 64 # and prefix will be the whole template 65 prefix, sep, rest = template.partition(':') 66 if prefix in abbreviations: 67 return abbreviations[prefix].format(rest) 68 69 return template 70 71 72 def cookiecutter( 73 template, checkout=None, no_input=False, extra_context=None, 74 replay=False, overwrite_if_exists=False, output_dir='.', 75 config_file=USER_CONFIG_PATH): 76 """ 77 API equivalent to using Cookiecutter at the command line. 78 79 :param template: A directory containing a project template directory, 80 or a URL to a git repository. 81 :param checkout: The branch, tag or commit ID to checkout after clone. 82 :param no_input: Prompt the user at command line for manual configuration? 83 :param extra_context: A dictionary of context that overrides default 84 and user configuration. 85 :param: overwrite_if_exists: Overwrite the contents of output directory 86 if it exists 87 :param output_dir: Where to output the generated project dir into. 88 :param config_file: User configuration file path. 89 """ 90 if replay and ((no_input is not False) or (extra_context is not None)): 91 err_msg = ( 92 "You can not use both replay and no_input or extra_context " 93 "at the same time." 
94 ) 95 raise InvalidModeException(err_msg) 96 97 # Get user config from ~/.cookiecutterrc or equivalent 98 # If no config file, sensible defaults from config.DEFAULT_CONFIG are used 99 config_dict = get_user_config(config_file=config_file) 100 101 template = expand_abbreviations(template, config_dict) 102 103 if is_repo_url(template): 104 repo_dir = clone( 105 repo_url=template, 106 checkout=checkout, 107 clone_to_dir=config_dict['cookiecutters_dir'], 108 no_input=no_input 109 ) 110 else: 111 # If it's a local repo, no need to clone or copy to your 112 # cookiecutters_dir 113 repo_dir = template 114 115 if not os.path.isdir(repo_dir): 116 raise RepositoryNotFound( 117 'The repository {0} could not be located.'.format(template) 118 ) 119 120 template_name = os.path.basename(template) 121 122 if replay: 123 context = load(config_dict['replay_dir'], template_name) 124 else: 125 context_file = os.path.join(repo_dir, 'cookiecutter.json') 126 logging.debug('context_file is {0}'.format(context_file)) 127 128 context = generate_context( 129 context_file=context_file, 130 default_context=config_dict['default_context'], 131 extra_context=extra_context, 132 ) 133 134 # prompt the user to manually configure at the command line. 135 # except when 'no-input' flag is set 136 context['cookiecutter'] = prompt_for_config(context, no_input) 137 138 dump(config_dict['replay_dir'], template_name, context) 139 140 # Create project from local context and project template. 141 return generate_files( 142 repo_dir=repo_dir, 143 context=context, 144 overwrite_if_exists=overwrite_if_exists, 145 output_dir=output_dir 146 ) 147 [end of cookiecutter/main.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/cookiecutter/main.py b/cookiecutter/main.py --- a/cookiecutter/main.py +++ b/cookiecutter/main.py @@ -30,19 +30,18 @@ 'bb': 'https://bitbucket.org/{0}', } -REPO_REGEX = """ -( -((git|ssh|https|http):(//)?) # something like git:// ssh:// etc. - | # or - (\w+@[\w\.]+) # something like user@... +REPO_REGEX = re.compile(r""" +(?x) +((((git|hg)\+)?(git|ssh|https?):(//)?) # something like git:// ssh:// etc. + | # or + (\w+@[\w\.]+) # something like user@... ) -.* -""" +""") def is_repo_url(value): """Return True if value is a repository URL.""" - return bool(re.match(REPO_REGEX, value, re.VERBOSE)) + return bool(REPO_REGEX.match(value)) def expand_abbreviations(template, config_dict):
{"golden_diff": "diff --git a/cookiecutter/main.py b/cookiecutter/main.py\n--- a/cookiecutter/main.py\n+++ b/cookiecutter/main.py\n@@ -30,19 +30,18 @@\n 'bb': 'https://bitbucket.org/{0}',\n }\n \n-REPO_REGEX = \"\"\"\n-(\n-((git|ssh|https|http):(//)?) # something like git:// ssh:// etc.\n- | # or\n- (\\w+@[\\w\\.]+) # something like user@...\n+REPO_REGEX = re.compile(r\"\"\"\n+(?x)\n+((((git|hg)\\+)?(git|ssh|https?):(//)?) # something like git:// ssh:// etc.\n+ | # or\n+ (\\w+@[\\w\\.]+) # something like user@...\n )\n-.*\n-\"\"\"\n+\"\"\")\n \n \n def is_repo_url(value):\n \"\"\"Return True if value is a repository URL.\"\"\"\n- return bool(re.match(REPO_REGEX, value, re.VERBOSE))\n+ return bool(REPO_REGEX.match(value))\n \n \n def expand_abbreviations(template, config_dict):\n", "issue": "Fix the repo regex to handle + in URLs\nThe documentation said that you could do git+https:// or git+ssh:// but that was not, in fact, true.\n\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\ncookiecutter.main\n-----------------\n\nMain entry point for the `cookiecutter` command.\n\nThe code in this module is also a good example of how to use Cookiecutter as a\nlibrary rather than a script.\n\"\"\"\n\nfrom __future__ import unicode_literals\nimport logging\nimport os\nimport re\n\nfrom .config import get_user_config, USER_CONFIG_PATH\nfrom .exceptions import InvalidModeException, RepositoryNotFound\nfrom .prompt import prompt_for_config\nfrom .generate import generate_context, generate_files\nfrom .vcs import clone\nfrom .replay import dump, load\n\nlogger = logging.getLogger(__name__)\n\nbuiltin_abbreviations = {\n 'gh': 'https://github.com/{0}.git',\n 'bb': 'https://bitbucket.org/{0}',\n}\n\nREPO_REGEX = \"\"\"\n(\n((git|ssh|https|http):(//)?) # something like git:// ssh:// etc.\n | # or\n (\\w+@[\\w\\.]+) # something like user@...\n)\n.*\n\"\"\"\n\n\ndef is_repo_url(value):\n \"\"\"Return True if value is a repository URL.\"\"\"\n return bool(re.match(REPO_REGEX, value, re.VERBOSE))\n\n\ndef expand_abbreviations(template, config_dict):\n \"\"\"\n Expand abbreviations in a template name.\n\n :param template: The project template name.\n :param config_dict: The user config, which will contain abbreviation\n definitions.\n \"\"\"\n\n abbreviations = builtin_abbreviations.copy()\n abbreviations.update(config_dict.get('abbreviations', {}))\n\n if template in abbreviations:\n return abbreviations[template]\n\n # Split on colon. 
If there is no colon, rest will be empty\n # and prefix will be the whole template\n prefix, sep, rest = template.partition(':')\n if prefix in abbreviations:\n return abbreviations[prefix].format(rest)\n\n return template\n\n\ndef cookiecutter(\n template, checkout=None, no_input=False, extra_context=None,\n replay=False, overwrite_if_exists=False, output_dir='.',\n config_file=USER_CONFIG_PATH):\n \"\"\"\n API equivalent to using Cookiecutter at the command line.\n\n :param template: A directory containing a project template directory,\n or a URL to a git repository.\n :param checkout: The branch, tag or commit ID to checkout after clone.\n :param no_input: Prompt the user at command line for manual configuration?\n :param extra_context: A dictionary of context that overrides default\n and user configuration.\n :param: overwrite_if_exists: Overwrite the contents of output directory\n if it exists\n :param output_dir: Where to output the generated project dir into.\n :param config_file: User configuration file path.\n \"\"\"\n if replay and ((no_input is not False) or (extra_context is not None)):\n err_msg = (\n \"You can not use both replay and no_input or extra_context \"\n \"at the same time.\"\n )\n raise InvalidModeException(err_msg)\n\n # Get user config from ~/.cookiecutterrc or equivalent\n # If no config file, sensible defaults from config.DEFAULT_CONFIG are used\n config_dict = get_user_config(config_file=config_file)\n\n template = expand_abbreviations(template, config_dict)\n\n if is_repo_url(template):\n repo_dir = clone(\n repo_url=template,\n checkout=checkout,\n clone_to_dir=config_dict['cookiecutters_dir'],\n no_input=no_input\n )\n else:\n # If it's a local repo, no need to clone or copy to your\n # cookiecutters_dir\n repo_dir = template\n\n if not os.path.isdir(repo_dir):\n raise RepositoryNotFound(\n 'The repository {0} could not be located.'.format(template)\n )\n\n template_name = os.path.basename(template)\n\n if replay:\n context = load(config_dict['replay_dir'], template_name)\n else:\n context_file = os.path.join(repo_dir, 'cookiecutter.json')\n logging.debug('context_file is {0}'.format(context_file))\n\n context = generate_context(\n context_file=context_file,\n default_context=config_dict['default_context'],\n extra_context=extra_context,\n )\n\n # prompt the user to manually configure at the command line.\n # except when 'no-input' flag is set\n context['cookiecutter'] = prompt_for_config(context, no_input)\n\n dump(config_dict['replay_dir'], template_name, context)\n\n # Create project from local context and project template.\n return generate_files(\n repo_dir=repo_dir,\n context=context,\n overwrite_if_exists=overwrite_if_exists,\n output_dir=output_dir\n )\n", "path": "cookiecutter/main.py"}]}
1931
246
gh_patches_debug_20096
rasdani/github-patches
git_diff
liqd__adhocracy4-1005
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> comments: make sure blocked comments are not searched or filtered for their category **URL:** **user:** **expected behaviour:** when I search for sth. in the comments, I do not expect to find blocked comments, of which I cannot see the comment text **behaviour:** because the search happens in the backend, blocked comments are also searched, while the comment list from the serializer does not show the comment text of searched comments **important screensize:** **device & browser:** **Comment/Question:** Screenshot? </issue> <code> [start of adhocracy4/comments_async/filters.py] 1 from django.db import models 2 from rest_framework.filters import BaseFilterBackend 3 from rest_framework.filters import SearchFilter 4 5 6 class CommentCategoryFilterBackend(BaseFilterBackend): 7 """Filter the comments for the categories.""" 8 9 def filter_queryset(self, request, queryset, view): 10 11 if 'comment_category' in request.GET: 12 category = request.GET['comment_category'] 13 return queryset.filter(comment_categories__contains=category) 14 15 return queryset 16 17 18 class CommentOrderingFilterBackend(BaseFilterBackend): 19 """Order the comments.""" 20 21 def filter_queryset(self, request, queryset, view): 22 23 if 'ordering' in request.GET: 24 ordering = request.GET['ordering'] 25 26 if ordering == 'new': 27 return queryset.order_by('-created') 28 elif ordering == 'ans': 29 queryset = queryset\ 30 .annotate(comment_count=models.Count( 31 'child_comments', distinct=True)) 32 return queryset.order_by('-comment_count', '-created') 33 elif ordering == 'pos': 34 queryset = queryset\ 35 .annotate(positive_rating_count=models.Count( 36 models.Case( 37 models.When( 38 ratings__value=1, 39 then=models.F('ratings__id') 40 ), 41 output_field=models.IntegerField() 42 ), 43 distinct=True)) 44 return queryset.order_by('-positive_rating_count', '-created') 45 elif ordering == 'neg': 46 queryset = queryset\ 47 .annotate(negative_rating_count=models.Count( 48 models.Case( 49 models.When( 50 ratings__value=-1, 51 then=models.F('ratings__id') 52 ), 53 output_field=models.IntegerField() 54 ), 55 distinct=True)) 56 return queryset.order_by('-negative_rating_count', '-created') 57 elif ordering == 'dis': 58 return queryset.order_by( 59 models.F('last_discussed').desc(nulls_last=True), 60 '-created' 61 ) 62 elif ordering == 'mom': 63 return queryset.order_by('-is_moderator_marked', '-created') 64 65 return queryset 66 67 68 class CustomSearchFilter(SearchFilter): 69 70 def filter_queryset(self, request, queryset, view): 71 qs = super().filter_queryset(request, queryset, view) 72 if self.get_search_terms(request): 73 return qs.filter(is_removed=False, is_censored=False) 74 return qs 75 [end of adhocracy4/comments_async/filters.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/adhocracy4/comments_async/filters.py b/adhocracy4/comments_async/filters.py --- a/adhocracy4/comments_async/filters.py +++ b/adhocracy4/comments_async/filters.py @@ -8,9 +8,12 @@ def filter_queryset(self, request, queryset, view): - if 'comment_category' in request.GET: + if ('comment_category' in request.GET + and request.GET['comment_category'] != ''): category = request.GET['comment_category'] - return queryset.filter(comment_categories__contains=category) + return queryset.filter( + comment_categories__contains=category, + is_blocked=False) return queryset @@ -70,5 +73,6 @@ def filter_queryset(self, request, queryset, view): qs = super().filter_queryset(request, queryset, view) if self.get_search_terms(request): - return qs.filter(is_removed=False, is_censored=False) + return qs.filter(is_removed=False, is_censored=False, + is_blocked=False) return qs
{"golden_diff": "diff --git a/adhocracy4/comments_async/filters.py b/adhocracy4/comments_async/filters.py\n--- a/adhocracy4/comments_async/filters.py\n+++ b/adhocracy4/comments_async/filters.py\n@@ -8,9 +8,12 @@\n \n def filter_queryset(self, request, queryset, view):\n \n- if 'comment_category' in request.GET:\n+ if ('comment_category' in request.GET\n+ and request.GET['comment_category'] != ''):\n category = request.GET['comment_category']\n- return queryset.filter(comment_categories__contains=category)\n+ return queryset.filter(\n+ comment_categories__contains=category,\n+ is_blocked=False)\n \n return queryset\n \n@@ -70,5 +73,6 @@\n def filter_queryset(self, request, queryset, view):\n qs = super().filter_queryset(request, queryset, view)\n if self.get_search_terms(request):\n- return qs.filter(is_removed=False, is_censored=False)\n+ return qs.filter(is_removed=False, is_censored=False,\n+ is_blocked=False)\n return qs\n", "issue": "comments: make sure blocked comments are not searched or filtered for their category\n**URL:** \r\n**user:** \r\n**expected behaviour:** when I search for sth. in the comments, I do not expect to find blocked comments, of which I cannot see the comment text\r\n**behaviour:** because the search happens in the backend, blocked comments are also searched, while the comment list from the serializer does not show the comment text of searched comments\r\n**important screensize:**\r\n**device & browser:** \r\n**Comment/Question:** \r\n\r\nScreenshot?\r\n\n", "before_files": [{"content": "from django.db import models\nfrom rest_framework.filters import BaseFilterBackend\nfrom rest_framework.filters import SearchFilter\n\n\nclass CommentCategoryFilterBackend(BaseFilterBackend):\n \"\"\"Filter the comments for the categories.\"\"\"\n\n def filter_queryset(self, request, queryset, view):\n\n if 'comment_category' in request.GET:\n category = request.GET['comment_category']\n return queryset.filter(comment_categories__contains=category)\n\n return queryset\n\n\nclass CommentOrderingFilterBackend(BaseFilterBackend):\n \"\"\"Order the comments.\"\"\"\n\n def filter_queryset(self, request, queryset, view):\n\n if 'ordering' in request.GET:\n ordering = request.GET['ordering']\n\n if ordering == 'new':\n return queryset.order_by('-created')\n elif ordering == 'ans':\n queryset = queryset\\\n .annotate(comment_count=models.Count(\n 'child_comments', distinct=True))\n return queryset.order_by('-comment_count', '-created')\n elif ordering == 'pos':\n queryset = queryset\\\n .annotate(positive_rating_count=models.Count(\n models.Case(\n models.When(\n ratings__value=1,\n then=models.F('ratings__id')\n ),\n output_field=models.IntegerField()\n ),\n distinct=True))\n return queryset.order_by('-positive_rating_count', '-created')\n elif ordering == 'neg':\n queryset = queryset\\\n .annotate(negative_rating_count=models.Count(\n models.Case(\n models.When(\n ratings__value=-1,\n then=models.F('ratings__id')\n ),\n output_field=models.IntegerField()\n ),\n distinct=True))\n return queryset.order_by('-negative_rating_count', '-created')\n elif ordering == 'dis':\n return queryset.order_by(\n models.F('last_discussed').desc(nulls_last=True),\n '-created'\n )\n elif ordering == 'mom':\n return queryset.order_by('-is_moderator_marked', '-created')\n\n return queryset\n\n\nclass CustomSearchFilter(SearchFilter):\n\n def filter_queryset(self, request, queryset, view):\n qs = super().filter_queryset(request, queryset, view)\n if self.get_search_terms(request):\n return 
qs.filter(is_removed=False, is_censored=False)\n return qs\n", "path": "adhocracy4/comments_async/filters.py"}]}
1,263
237
gh_patches_debug_10648
rasdani/github-patches
git_diff
zestedesavoir__zds-site-5449
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> L'export à la demande ne fonctionne pas Plus précisement, l'export Markdown ne fonctionne pas quand on clique sur le bouton "Exporter le contenu" alors qu'il fonctionne lors de la publication. </issue> <code> [start of zds/tutorialv2/api/views.py] 1 import contextlib 2 from pathlib import Path 3 4 from django.http import Http404 5 from django.utils import translation 6 from django.utils.translation import gettext as _ 7 from rest_framework import status 8 from rest_framework.fields import empty 9 from rest_framework.generics import UpdateAPIView, ListCreateAPIView, get_object_or_404 10 from rest_framework.response import Response 11 from rest_framework.serializers import Serializer, CharField, BooleanField 12 from rest_framework.permissions import IsAuthenticatedOrReadOnly 13 14 from zds.member.api.permissions import CanReadAndWriteNowOrReadOnly, IsNotOwnerOrReadOnly, IsAuthorOrStaff 15 from zds.tutorialv2.publication_utils import PublicatorRegistry 16 from zds.tutorialv2.utils import search_container_or_404 17 from zds.utils.api.views import KarmaView 18 from zds.tutorialv2.models.database import ContentReaction, PublishableContent, PublicationEvent 19 20 21 class ContainerReadinessSerializer(Serializer): 22 parent_container_slug = CharField(allow_blank=True, allow_null=True, required=False) 23 container_slug = CharField(required=True) 24 ready_to_publish = BooleanField(required=True) 25 26 def run_validation(self, data=empty): 27 init = super().run_validation(data) 28 if not init: 29 return init 30 if not data.get('parent_container_slug', ''): 31 init.pop('parent_container_slug', '') 32 return init 33 34 def save(self, **kwargs): 35 if not self.validated_data: 36 self.is_valid(True) 37 versioned = self.instance.load_version() 38 container = search_container_or_404(versioned, self.validated_data) 39 container.ready_to_publish = self.validated_data['ready_to_publish'] 40 sha = versioned.repo_update(versioned.title, versioned.get_introduction(), versioned.get_conclusion(), 41 commit_message=_('{} est {} à la publication.').format( 42 container.get_path(True), 43 _('prêt') if container.ready_to_publish else _('ignoré'))) 44 PublishableContent.objects.filter(pk=self.instance.pk).update(sha_draft=sha) 45 46 def to_representation(self, instance): 47 return {} 48 49 50 class ContentReactionKarmaView(KarmaView): 51 queryset = ContentReaction.objects.all() 52 permission_classes = (IsAuthenticatedOrReadOnly, CanReadAndWriteNowOrReadOnly, IsNotOwnerOrReadOnly) 53 54 55 class ContainerPublicationReadinessView(UpdateAPIView): 56 permission_classes = (IsAuthorOrStaff, ) 57 serializer_class = ContainerReadinessSerializer 58 59 def get_object(self): 60 content = PublishableContent.objects.prefetch_related('authors')\ 61 .filter(pk=int(self.kwargs.get('pk', 0)))\ 62 .first() 63 if not content: 64 raise Http404() 65 self.check_object_permissions(self.request, object) 66 return content 67 68 69 class ExportView(ListCreateAPIView): 70 permission_classes = (IsAuthorOrStaff,) 71 serializer_class = Serializer 72 73 def get_queryset(self): 74 return PublicationEvent.objects.filter(published_object__content__pk=self.kwargs.get('pk', 0)) 75 76 def ensure_directories(self, content: PublishableContent): 77 final_directory = Path(content.public_version.get_extra_contents_directory()) 78 building_directory = Path(str(final_directory.parent) + '__building', final_directory.name) 79 with 
contextlib.suppress(FileExistsError): 80 final_directory.mkdir(parents=True) 81 with contextlib.suppress(FileExistsError): 82 building_directory.mkdir(parents=True) 83 return building_directory, final_directory 84 85 def create(self, request, *args, **kwargs): 86 try: 87 publishable_content = get_object_or_404(PublishableContent.objects, pk=int(kwargs.get('pk'))) 88 if not publishable_content.public_version: 89 raise Http404('Not public content') 90 tmp_dir, _ = self.ensure_directories(publishable_content) 91 versioned = publishable_content.public_version.load_public_version() 92 base_name = str(Path(tmp_dir, versioned.slug)) 93 md_file_path = str(Path(tmp_dir, versioned.slug + '.md')) 94 95 PublicatorRegistry.get('md').publish(md_file_path, base_name, 96 versioned=versioned, 97 cur_language=translation.get_language()) 98 PublicatorRegistry.get('watchdog').publish_from_published_content(publishable_content.public_version) 99 except ValueError: 100 return Response({}, status=status.HTTP_400_BAD_REQUEST, headers={}) 101 else: 102 return Response({}, status=status.HTTP_201_CREATED, headers={}) 103 [end of zds/tutorialv2/api/views.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/zds/tutorialv2/api/views.py b/zds/tutorialv2/api/views.py --- a/zds/tutorialv2/api/views.py +++ b/zds/tutorialv2/api/views.py @@ -88,7 +88,7 @@ if not publishable_content.public_version: raise Http404('Not public content') tmp_dir, _ = self.ensure_directories(publishable_content) - versioned = publishable_content.public_version.load_public_version() + versioned = publishable_content.load_version(public=True) base_name = str(Path(tmp_dir, versioned.slug)) md_file_path = str(Path(tmp_dir, versioned.slug + '.md'))
{"golden_diff": "diff --git a/zds/tutorialv2/api/views.py b/zds/tutorialv2/api/views.py\n--- a/zds/tutorialv2/api/views.py\n+++ b/zds/tutorialv2/api/views.py\n@@ -88,7 +88,7 @@\n if not publishable_content.public_version:\n raise Http404('Not public content')\n tmp_dir, _ = self.ensure_directories(publishable_content)\n- versioned = publishable_content.public_version.load_public_version()\n+ versioned = publishable_content.load_version(public=True)\n base_name = str(Path(tmp_dir, versioned.slug))\n md_file_path = str(Path(tmp_dir, versioned.slug + '.md'))\n", "issue": "L'export \u00e0 la demande ne fonctionne pas\nPlus pr\u00e9cisement, l'export Markdown ne fonctionne pas quand on clique sur le bouton \"Exporter le contenu\" alors qu'il fonctionne lors de la publication.\n", "before_files": [{"content": "import contextlib\nfrom pathlib import Path\n\nfrom django.http import Http404\nfrom django.utils import translation\nfrom django.utils.translation import gettext as _\nfrom rest_framework import status\nfrom rest_framework.fields import empty\nfrom rest_framework.generics import UpdateAPIView, ListCreateAPIView, get_object_or_404\nfrom rest_framework.response import Response\nfrom rest_framework.serializers import Serializer, CharField, BooleanField\nfrom rest_framework.permissions import IsAuthenticatedOrReadOnly\n\nfrom zds.member.api.permissions import CanReadAndWriteNowOrReadOnly, IsNotOwnerOrReadOnly, IsAuthorOrStaff\nfrom zds.tutorialv2.publication_utils import PublicatorRegistry\nfrom zds.tutorialv2.utils import search_container_or_404\nfrom zds.utils.api.views import KarmaView\nfrom zds.tutorialv2.models.database import ContentReaction, PublishableContent, PublicationEvent\n\n\nclass ContainerReadinessSerializer(Serializer):\n parent_container_slug = CharField(allow_blank=True, allow_null=True, required=False)\n container_slug = CharField(required=True)\n ready_to_publish = BooleanField(required=True)\n\n def run_validation(self, data=empty):\n init = super().run_validation(data)\n if not init:\n return init\n if not data.get('parent_container_slug', ''):\n init.pop('parent_container_slug', '')\n return init\n\n def save(self, **kwargs):\n if not self.validated_data:\n self.is_valid(True)\n versioned = self.instance.load_version()\n container = search_container_or_404(versioned, self.validated_data)\n container.ready_to_publish = self.validated_data['ready_to_publish']\n sha = versioned.repo_update(versioned.title, versioned.get_introduction(), versioned.get_conclusion(),\n commit_message=_('{} est {} \u00e0 la publication.').format(\n container.get_path(True),\n _('pr\u00eat') if container.ready_to_publish else _('ignor\u00e9')))\n PublishableContent.objects.filter(pk=self.instance.pk).update(sha_draft=sha)\n\n def to_representation(self, instance):\n return {}\n\n\nclass ContentReactionKarmaView(KarmaView):\n queryset = ContentReaction.objects.all()\n permission_classes = (IsAuthenticatedOrReadOnly, CanReadAndWriteNowOrReadOnly, IsNotOwnerOrReadOnly)\n\n\nclass ContainerPublicationReadinessView(UpdateAPIView):\n permission_classes = (IsAuthorOrStaff, )\n serializer_class = ContainerReadinessSerializer\n\n def get_object(self):\n content = PublishableContent.objects.prefetch_related('authors')\\\n .filter(pk=int(self.kwargs.get('pk', 0)))\\\n .first()\n if not content:\n raise Http404()\n self.check_object_permissions(self.request, object)\n return content\n\n\nclass ExportView(ListCreateAPIView):\n permission_classes = (IsAuthorOrStaff,)\n serializer_class = Serializer\n\n def 
get_queryset(self):\n return PublicationEvent.objects.filter(published_object__content__pk=self.kwargs.get('pk', 0))\n\n def ensure_directories(self, content: PublishableContent):\n final_directory = Path(content.public_version.get_extra_contents_directory())\n building_directory = Path(str(final_directory.parent) + '__building', final_directory.name)\n with contextlib.suppress(FileExistsError):\n final_directory.mkdir(parents=True)\n with contextlib.suppress(FileExistsError):\n building_directory.mkdir(parents=True)\n return building_directory, final_directory\n\n def create(self, request, *args, **kwargs):\n try:\n publishable_content = get_object_or_404(PublishableContent.objects, pk=int(kwargs.get('pk')))\n if not publishable_content.public_version:\n raise Http404('Not public content')\n tmp_dir, _ = self.ensure_directories(publishable_content)\n versioned = publishable_content.public_version.load_public_version()\n base_name = str(Path(tmp_dir, versioned.slug))\n md_file_path = str(Path(tmp_dir, versioned.slug + '.md'))\n\n PublicatorRegistry.get('md').publish(md_file_path, base_name,\n versioned=versioned,\n cur_language=translation.get_language())\n PublicatorRegistry.get('watchdog').publish_from_published_content(publishable_content.public_version)\n except ValueError:\n return Response({}, status=status.HTTP_400_BAD_REQUEST, headers={})\n else:\n return Response({}, status=status.HTTP_201_CREATED, headers={})\n", "path": "zds/tutorialv2/api/views.py"}]}
1,725
148
gh_patches_debug_10271
rasdani/github-patches
git_diff
PaddlePaddle__Paddle2ONNX-15
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Need check whether protoc existed. </issue> <code> [start of variables.py] 1 # Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 from onnx import helper, onnx_pb2, TensorProto 16 import paddle.fluid.core as core 17 18 19 def paddle_variable_to_onnx_tensor(paddle_var_name, block): 20 # TODO(varunarora): Need to do this only in the case of VarType.LOD_TENSOR. 21 paddle_var = block.var(paddle_var_name) 22 return helper.make_tensor_value_info( 23 paddle_var_name, PADDLE_TO_ONNX_DTYPE[paddle_var.dtype], 24 paddle_var.shape) 25 26 27 PADDLE_TO_ONNX_DTYPE = { 28 core.VarDesc.VarType.FP32: onnx_pb2.TensorProto.FLOAT, 29 core.VarDesc.VarType.FP64: onnx_pb2.TensorProto.FLOAT16, 30 # '': onnx_pb2.TensorProto.DOUBLE, 31 core.VarDesc.VarType.INT32: onnx_pb2.TensorProto.INT32, 32 core.VarDesc.VarType.INT16: onnx_pb2.TensorProto.INT16, 33 # '': onnx_pb2.TensorProto.INT8, 34 # '': onnx_pb2.TensorProto.UINT8, 35 core.VarDesc.VarType.INT16: onnx_pb2.TensorProto.UINT16, 36 core.VarDesc.VarType.INT64: onnx_pb2.TensorProto.INT64, 37 # '': onnx_pb2.TensorProto.STRING, 38 # '': onnx_pb2.TensorProto.COMPLEX64, 39 # '': onnx_pb2.TensorProto.COMPLEX128, 40 core.VarDesc.VarType.BOOL: onnx_pb2.TensorProto.BOOL 41 } 42 [end of variables.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/variables.py b/variables.py --- a/variables.py +++ b/variables.py @@ -19,9 +19,9 @@ def paddle_variable_to_onnx_tensor(paddle_var_name, block): # TODO(varunarora): Need to do this only in the case of VarType.LOD_TENSOR. paddle_var = block.var(paddle_var_name) - return helper.make_tensor_value_info( - paddle_var_name, PADDLE_TO_ONNX_DTYPE[paddle_var.dtype], - paddle_var.shape) + return helper.make_tensor_value_info(paddle_var_name, + PADDLE_TO_ONNX_DTYPE[paddle_var.dtype], + paddle_var.shape) PADDLE_TO_ONNX_DTYPE = {
{"golden_diff": "diff --git a/variables.py b/variables.py\n--- a/variables.py\n+++ b/variables.py\n@@ -19,9 +19,9 @@\n def paddle_variable_to_onnx_tensor(paddle_var_name, block):\n # TODO(varunarora): Need to do this only in the case of VarType.LOD_TENSOR.\n paddle_var = block.var(paddle_var_name)\n- return helper.make_tensor_value_info(\n- paddle_var_name, PADDLE_TO_ONNX_DTYPE[paddle_var.dtype],\n- paddle_var.shape)\n+ return helper.make_tensor_value_info(paddle_var_name,\n+ PADDLE_TO_ONNX_DTYPE[paddle_var.dtype],\n+ paddle_var.shape)\n \n \n PADDLE_TO_ONNX_DTYPE = {\n", "issue": "Need check whether protoc existed.\n\n", "before_files": [{"content": "# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom onnx import helper, onnx_pb2, TensorProto\nimport paddle.fluid.core as core\n\n\ndef paddle_variable_to_onnx_tensor(paddle_var_name, block):\n # TODO(varunarora): Need to do this only in the case of VarType.LOD_TENSOR.\n paddle_var = block.var(paddle_var_name)\n return helper.make_tensor_value_info(\n paddle_var_name, PADDLE_TO_ONNX_DTYPE[paddle_var.dtype],\n paddle_var.shape)\n\n\nPADDLE_TO_ONNX_DTYPE = {\n core.VarDesc.VarType.FP32: onnx_pb2.TensorProto.FLOAT,\n core.VarDesc.VarType.FP64: onnx_pb2.TensorProto.FLOAT16,\n # '': onnx_pb2.TensorProto.DOUBLE,\n core.VarDesc.VarType.INT32: onnx_pb2.TensorProto.INT32,\n core.VarDesc.VarType.INT16: onnx_pb2.TensorProto.INT16,\n # '': onnx_pb2.TensorProto.INT8,\n # '': onnx_pb2.TensorProto.UINT8,\n core.VarDesc.VarType.INT16: onnx_pb2.TensorProto.UINT16,\n core.VarDesc.VarType.INT64: onnx_pb2.TensorProto.INT64,\n # '': onnx_pb2.TensorProto.STRING,\n # '': onnx_pb2.TensorProto.COMPLEX64,\n # '': onnx_pb2.TensorProto.COMPLEX128,\n core.VarDesc.VarType.BOOL: onnx_pb2.TensorProto.BOOL\n}\n", "path": "variables.py"}]}
1,090
163
gh_patches_debug_63368
rasdani/github-patches
git_diff
mkdocs__mkdocs-347
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Hostname for bitbucket is incorrect The host name used in the automatic `repo_name` detection is wrong. It should be using `bitbucket.org`, but instead it is `bitbucket.com`. This is found in config.py line 140. </issue> <code> [start of mkdocs/config.py] 1 # coding: utf-8 2 3 from mkdocs import utils 4 from mkdocs.compat import urlparse 5 from mkdocs.exceptions import ConfigurationError 6 7 import logging 8 import os 9 import yaml 10 11 log = logging.getLogger(__name__) 12 13 DEFAULT_CONFIG = { 14 'site_name': None, 15 'pages': None, 16 17 'site_url': None, 18 'site_description': None, 19 'site_author': None, 20 'site_favicon': None, 21 22 'theme': 'mkdocs', 23 'docs_dir': 'docs', 24 'site_dir': 'site', 25 'theme_dir': None, 26 27 'copyright': None, 28 'google_analytics': None, 29 30 # The address on which to serve the livereloading docs server. 31 'dev_addr': '127.0.0.1:8000', 32 33 # If `True`, use `<page_name>/index.hmtl` style files with hyperlinks to the directory. 34 # If `False`, use `<page_name>.html style file with hyperlinks to the file. 35 # True generates nicer URLs, but False is useful if browsing the output on a filesystem. 36 'use_directory_urls': True, 37 38 # Specify a link to the project source repo to be included 39 # in the documentation pages. 40 'repo_url': None, 41 42 # A name to use for the link to the project source repo. 43 # Default: If repo_url is unset then None, otherwise 44 # "GitHub" or "Bitbucket" for known url or Hostname for unknown urls. 45 'repo_name': None, 46 47 # Specify which css or javascript files from the docs 48 # directionary should be additionally included in the site. 49 # Default: List of all .css and .js files in the docs dir. 50 'extra_css': None, 51 'extra_javascript': None, 52 53 # Determine if the site should include the nav and next/prev elements. 54 # Default: True if the site has more than one page, False otherwise. 55 'include_nav': None, 56 'include_next_prev': None, 57 58 # PyMarkdown extension names. 59 'markdown_extensions': (), 60 61 # Determine if the site should generate a json search index and include 62 # search elements in the theme. - TODO 63 'include_search': False, 64 65 # Determine if the site should include a 404.html page. 66 # TODO: Implment this. Make this None, have it True if a 404.html 67 # template exists in the theme or docs dir. 68 'include_404': False, 69 70 # enabling strict mode causes MkDocs to stop the build when a problem is 71 # encountered rather than display an error. 72 'strict': False, 73 } 74 75 76 def load_config(filename='mkdocs.yml', options=None): 77 options = options or {} 78 if 'config' in options: 79 filename = options.pop('config') 80 if not os.path.exists(filename): 81 raise ConfigurationError("Config file '%s' does not exist." % filename) 82 with open(filename, 'r') as fp: 83 user_config = yaml.load(fp) 84 if not isinstance(user_config, dict): 85 raise ConfigurationError("The mkdocs.yml file is invalid. 
See http://www.mkdocs.org/user-guide/configuration/ for more information.") 86 user_config.update(options) 87 return validate_config(user_config) 88 89 90 def validate_config(user_config): 91 config = DEFAULT_CONFIG.copy() 92 config.update(user_config) 93 94 if not config['site_name']: 95 raise ConfigurationError("Config must contain 'site_name' setting.") 96 97 # If not specified, then the 'pages' config simply includes all 98 # markdown files in the docs dir, without generating any header items 99 # for them. 100 pages = [] 101 extra_css = [] 102 extra_javascript = [] 103 for (dirpath, dirnames, filenames) in os.walk(config['docs_dir']): 104 for filename in sorted(filenames): 105 fullpath = os.path.join(dirpath, filename) 106 relpath = os.path.relpath(fullpath, config['docs_dir']) 107 108 if utils.is_markdown_file(filename): 109 # index pages should always be the first listed page. 110 if os.path.splitext(relpath)[0] == 'index': 111 pages.insert(0, relpath) 112 else: 113 pages.append(relpath) 114 elif utils.is_css_file(filename): 115 extra_css.append(relpath) 116 elif utils.is_javascript_file(filename): 117 extra_javascript.append(relpath) 118 119 if config['pages'] is None: 120 config['pages'] = pages 121 122 if config['extra_css'] is None: 123 config['extra_css'] = extra_css 124 125 if config['extra_javascript'] is None: 126 config['extra_javascript'] = extra_javascript 127 128 package_dir = os.path.dirname(__file__) 129 theme_dir = [os.path.join(package_dir, 'themes', config['theme'])] 130 131 if config['theme_dir'] is not None: 132 theme_dir.insert(0, config['theme_dir']) 133 134 config['theme_dir'] = theme_dir 135 136 if config['repo_url'] is not None and config['repo_name'] is None: 137 repo_host = urlparse(config['repo_url']).netloc.lower() 138 if repo_host == 'github.com': 139 config['repo_name'] = 'GitHub' 140 elif repo_host == 'bitbucket.com': 141 config['repo_name'] = 'Bitbucket' 142 else: 143 config['repo_name'] = repo_host.split('.')[0].title() 144 145 if config['include_next_prev'] is None: 146 config['include_next_prev'] = len(config['pages']) > 1 147 148 if config['include_nav'] is None: 149 config['include_nav'] = len(config['pages']) > 1 150 151 # To Do: 152 153 # The docs dir must exist. 154 # The theme dir must exist. 155 # Ensure 'theme' is one of 'mkdocs', 'readthedocs', 'custom' 156 # A homepage 'index' must exist. 157 # The theme 'base.html' file must exist. 158 # Cannot set repo_name without setting repo_url. 159 # Cannot set 'include_next_prev: true' when only one page exists. 160 # Cannot set 'include_nav: true' when only one page exists. 161 # Error if any config keys provided that are not in the DEFAULT_CONFIG. 162 163 return config 164 [end of mkdocs/config.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/mkdocs/config.py b/mkdocs/config.py --- a/mkdocs/config.py +++ b/mkdocs/config.py @@ -137,7 +137,7 @@ repo_host = urlparse(config['repo_url']).netloc.lower() if repo_host == 'github.com': config['repo_name'] = 'GitHub' - elif repo_host == 'bitbucket.com': + elif repo_host == 'bitbucket.org': config['repo_name'] = 'Bitbucket' else: config['repo_name'] = repo_host.split('.')[0].title()
{"golden_diff": "diff --git a/mkdocs/config.py b/mkdocs/config.py\n--- a/mkdocs/config.py\n+++ b/mkdocs/config.py\n@@ -137,7 +137,7 @@\n repo_host = urlparse(config['repo_url']).netloc.lower()\n if repo_host == 'github.com':\n config['repo_name'] = 'GitHub'\n- elif repo_host == 'bitbucket.com':\n+ elif repo_host == 'bitbucket.org':\n config['repo_name'] = 'Bitbucket'\n else:\n config['repo_name'] = repo_host.split('.')[0].title()\n", "issue": "Hostname for bitbucket is incorrect\nThe host name used in the automatic `repo_name` detection is wrong. It should be using `bitbucket.org`, but instead it is `bitbucket.com`. This is found in config.py line 140.\n\n", "before_files": [{"content": "# coding: utf-8\n\nfrom mkdocs import utils\nfrom mkdocs.compat import urlparse\nfrom mkdocs.exceptions import ConfigurationError\n\nimport logging\nimport os\nimport yaml\n\nlog = logging.getLogger(__name__)\n\nDEFAULT_CONFIG = {\n 'site_name': None,\n 'pages': None,\n\n 'site_url': None,\n 'site_description': None,\n 'site_author': None,\n 'site_favicon': None,\n\n 'theme': 'mkdocs',\n 'docs_dir': 'docs',\n 'site_dir': 'site',\n 'theme_dir': None,\n\n 'copyright': None,\n 'google_analytics': None,\n\n # The address on which to serve the livereloading docs server.\n 'dev_addr': '127.0.0.1:8000',\n\n # If `True`, use `<page_name>/index.hmtl` style files with hyperlinks to the directory.\n # If `False`, use `<page_name>.html style file with hyperlinks to the file.\n # True generates nicer URLs, but False is useful if browsing the output on a filesystem.\n 'use_directory_urls': True,\n\n # Specify a link to the project source repo to be included\n # in the documentation pages.\n 'repo_url': None,\n\n # A name to use for the link to the project source repo.\n # Default: If repo_url is unset then None, otherwise\n # \"GitHub\" or \"Bitbucket\" for known url or Hostname for unknown urls.\n 'repo_name': None,\n\n # Specify which css or javascript files from the docs\n # directionary should be additionally included in the site.\n # Default: List of all .css and .js files in the docs dir.\n 'extra_css': None,\n 'extra_javascript': None,\n\n # Determine if the site should include the nav and next/prev elements.\n # Default: True if the site has more than one page, False otherwise.\n 'include_nav': None,\n 'include_next_prev': None,\n\n # PyMarkdown extension names.\n 'markdown_extensions': (),\n\n # Determine if the site should generate a json search index and include\n # search elements in the theme. - TODO\n 'include_search': False,\n\n # Determine if the site should include a 404.html page.\n # TODO: Implment this. Make this None, have it True if a 404.html\n # template exists in the theme or docs dir.\n 'include_404': False,\n\n # enabling strict mode causes MkDocs to stop the build when a problem is\n # encountered rather than display an error.\n 'strict': False,\n}\n\n\ndef load_config(filename='mkdocs.yml', options=None):\n options = options or {}\n if 'config' in options:\n filename = options.pop('config')\n if not os.path.exists(filename):\n raise ConfigurationError(\"Config file '%s' does not exist.\" % filename)\n with open(filename, 'r') as fp:\n user_config = yaml.load(fp)\n if not isinstance(user_config, dict):\n raise ConfigurationError(\"The mkdocs.yml file is invalid. 
See http://www.mkdocs.org/user-guide/configuration/ for more information.\")\n user_config.update(options)\n return validate_config(user_config)\n\n\ndef validate_config(user_config):\n config = DEFAULT_CONFIG.copy()\n config.update(user_config)\n\n if not config['site_name']:\n raise ConfigurationError(\"Config must contain 'site_name' setting.\")\n\n # If not specified, then the 'pages' config simply includes all\n # markdown files in the docs dir, without generating any header items\n # for them.\n pages = []\n extra_css = []\n extra_javascript = []\n for (dirpath, dirnames, filenames) in os.walk(config['docs_dir']):\n for filename in sorted(filenames):\n fullpath = os.path.join(dirpath, filename)\n relpath = os.path.relpath(fullpath, config['docs_dir'])\n\n if utils.is_markdown_file(filename):\n # index pages should always be the first listed page.\n if os.path.splitext(relpath)[0] == 'index':\n pages.insert(0, relpath)\n else:\n pages.append(relpath)\n elif utils.is_css_file(filename):\n extra_css.append(relpath)\n elif utils.is_javascript_file(filename):\n extra_javascript.append(relpath)\n\n if config['pages'] is None:\n config['pages'] = pages\n\n if config['extra_css'] is None:\n config['extra_css'] = extra_css\n\n if config['extra_javascript'] is None:\n config['extra_javascript'] = extra_javascript\n\n package_dir = os.path.dirname(__file__)\n theme_dir = [os.path.join(package_dir, 'themes', config['theme'])]\n\n if config['theme_dir'] is not None:\n theme_dir.insert(0, config['theme_dir'])\n\n config['theme_dir'] = theme_dir\n\n if config['repo_url'] is not None and config['repo_name'] is None:\n repo_host = urlparse(config['repo_url']).netloc.lower()\n if repo_host == 'github.com':\n config['repo_name'] = 'GitHub'\n elif repo_host == 'bitbucket.com':\n config['repo_name'] = 'Bitbucket'\n else:\n config['repo_name'] = repo_host.split('.')[0].title()\n\n if config['include_next_prev'] is None:\n config['include_next_prev'] = len(config['pages']) > 1\n\n if config['include_nav'] is None:\n config['include_nav'] = len(config['pages']) > 1\n\n # To Do:\n\n # The docs dir must exist.\n # The theme dir must exist.\n # Ensure 'theme' is one of 'mkdocs', 'readthedocs', 'custom'\n # A homepage 'index' must exist.\n # The theme 'base.html' file must exist.\n # Cannot set repo_name without setting repo_url.\n # Cannot set 'include_next_prev: true' when only one page exists.\n # Cannot set 'include_nav: true' when only one page exists.\n # Error if any config keys provided that are not in the DEFAULT_CONFIG.\n\n return config\n", "path": "mkdocs/config.py"}]}
2,332
130
gh_patches_debug_9971
rasdani/github-patches
git_diff
pretix__pretix-882
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> rich_text: tel schema When providing email links using the mailto schema in richtext description like `[Email us!](mailto:[email protected])`, resulting in [Email us!](mailto:[email protected]), Pretix creates the correct `<a>` tag. However, users also use their mobile phone. It would be awesome to also be able to use the `tel` schema like `[Call us!](tel:+1-202-555-0102)`. At the moment, pretix just creates an `<a>` tag without an `href`, so the Telephone app is not opened. </issue> <code> [start of src/pretix/base/templatetags/rich_text.py] 1 import urllib.parse 2 3 import bleach 4 import markdown 5 from bleach import DEFAULT_CALLBACKS 6 from django import template 7 from django.conf import settings 8 from django.core import signing 9 from django.urls import reverse 10 from django.utils.http import is_safe_url 11 from django.utils.safestring import mark_safe 12 13 register = template.Library() 14 15 ALLOWED_TAGS = [ 16 'a', 17 'abbr', 18 'acronym', 19 'b', 20 'blockquote', 21 'br', 22 'code', 23 'em', 24 'i', 25 'li', 26 'ol', 27 'strong', 28 'ul', 29 'p', 30 'table', 31 'tbody', 32 'thead', 33 'tr', 34 'td', 35 'th', 36 'div', 37 'span', 38 'hr', 39 'h1', 40 'h2', 41 'h3', 42 'h4', 43 'h5', 44 'h6', 45 'pre', 46 # Update doc/user/markdown.rst if you change this! 47 ] 48 49 ALLOWED_ATTRIBUTES = { 50 'a': ['href', 'title'], 51 'abbr': ['title'], 52 'acronym': ['title'], 53 'table': ['width'], 54 'td': ['width', 'align'], 55 'div': ['class'], 56 'p': ['class'], 57 'span': ['class'], 58 # Update doc/user/markdown.rst if you change this! 59 } 60 61 62 def safelink_callback(attrs, new=False): 63 url = attrs.get((None, 'href'), '/') 64 if not is_safe_url(url) and not url.startswith('mailto:'): 65 signer = signing.Signer(salt='safe-redirect') 66 attrs[None, 'href'] = reverse('redirect') + '?url=' + urllib.parse.quote(signer.sign(url)) 67 attrs[None, 'target'] = '_blank' 68 attrs[None, 'rel'] = 'noopener' 69 return attrs 70 71 72 def abslink_callback(attrs, new=False): 73 attrs[None, 'href'] = urllib.parse.urljoin(settings.SITE_URL, attrs.get((None, 'href'), '/')) 74 attrs[None, 'target'] = '_blank' 75 attrs[None, 'rel'] = 'noopener' 76 return attrs 77 78 79 def markdown_compile(source): 80 return bleach.clean( 81 markdown.markdown( 82 source, 83 extensions=[ 84 'markdown.extensions.sane_lists', 85 # 'markdown.extensions.nl2br', # TODO: Enable, but check backwards-compatibility issues e.g. with mails 86 ] 87 ), 88 tags=ALLOWED_TAGS, 89 attributes=ALLOWED_ATTRIBUTES 90 ) 91 92 93 @register.filter 94 def rich_text(text: str, **kwargs): 95 """ 96 Processes markdown and cleans HTML in a text input. 97 """ 98 text = str(text) 99 body_md = bleach.linkify( 100 markdown_compile(text), 101 callbacks=DEFAULT_CALLBACKS + ([safelink_callback] if kwargs.get('safelinks', True) else [abslink_callback]) 102 ) 103 return mark_safe(body_md) 104 [end of src/pretix/base/templatetags/rich_text.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/src/pretix/base/templatetags/rich_text.py b/src/pretix/base/templatetags/rich_text.py --- a/src/pretix/base/templatetags/rich_text.py +++ b/src/pretix/base/templatetags/rich_text.py @@ -58,6 +58,8 @@ # Update doc/user/markdown.rst if you change this! } +ALLOWED_PROTOCOLS = ['http', 'https', 'mailto', 'tel'] + def safelink_callback(attrs, new=False): url = attrs.get((None, 'href'), '/') @@ -86,7 +88,8 @@ ] ), tags=ALLOWED_TAGS, - attributes=ALLOWED_ATTRIBUTES + attributes=ALLOWED_ATTRIBUTES, + protocols=ALLOWED_PROTOCOLS, )
{"golden_diff": "diff --git a/src/pretix/base/templatetags/rich_text.py b/src/pretix/base/templatetags/rich_text.py\n--- a/src/pretix/base/templatetags/rich_text.py\n+++ b/src/pretix/base/templatetags/rich_text.py\n@@ -58,6 +58,8 @@\n # Update doc/user/markdown.rst if you change this!\n }\n \n+ALLOWED_PROTOCOLS = ['http', 'https', 'mailto', 'tel']\n+\n \n def safelink_callback(attrs, new=False):\n url = attrs.get((None, 'href'), '/')\n@@ -86,7 +88,8 @@\n ]\n ),\n tags=ALLOWED_TAGS,\n- attributes=ALLOWED_ATTRIBUTES\n+ attributes=ALLOWED_ATTRIBUTES,\n+ protocols=ALLOWED_PROTOCOLS,\n )\n", "issue": "rich_text: tel schema\nWhen providing email links using the mailto schema in richtext description like `[Email us!](mailto:[email protected])`, resulting in [Email us!](mailto:[email protected]), Pretix creates the correct `<a>` tag. However, users also use their mobile phone. It would be awesome to also be able to use the `tel` schema like `[Call us!](tel:+1-202-555-0102)`. At the moment, pretix just creates an `<a>` tag without an `href`, so the Telephone app is not opened.\n", "before_files": [{"content": "import urllib.parse\n\nimport bleach\nimport markdown\nfrom bleach import DEFAULT_CALLBACKS\nfrom django import template\nfrom django.conf import settings\nfrom django.core import signing\nfrom django.urls import reverse\nfrom django.utils.http import is_safe_url\nfrom django.utils.safestring import mark_safe\n\nregister = template.Library()\n\nALLOWED_TAGS = [\n 'a',\n 'abbr',\n 'acronym',\n 'b',\n 'blockquote',\n 'br',\n 'code',\n 'em',\n 'i',\n 'li',\n 'ol',\n 'strong',\n 'ul',\n 'p',\n 'table',\n 'tbody',\n 'thead',\n 'tr',\n 'td',\n 'th',\n 'div',\n 'span',\n 'hr',\n 'h1',\n 'h2',\n 'h3',\n 'h4',\n 'h5',\n 'h6',\n 'pre',\n # Update doc/user/markdown.rst if you change this!\n]\n\nALLOWED_ATTRIBUTES = {\n 'a': ['href', 'title'],\n 'abbr': ['title'],\n 'acronym': ['title'],\n 'table': ['width'],\n 'td': ['width', 'align'],\n 'div': ['class'],\n 'p': ['class'],\n 'span': ['class'],\n # Update doc/user/markdown.rst if you change this!\n}\n\n\ndef safelink_callback(attrs, new=False):\n url = attrs.get((None, 'href'), '/')\n if not is_safe_url(url) and not url.startswith('mailto:'):\n signer = signing.Signer(salt='safe-redirect')\n attrs[None, 'href'] = reverse('redirect') + '?url=' + urllib.parse.quote(signer.sign(url))\n attrs[None, 'target'] = '_blank'\n attrs[None, 'rel'] = 'noopener'\n return attrs\n\n\ndef abslink_callback(attrs, new=False):\n attrs[None, 'href'] = urllib.parse.urljoin(settings.SITE_URL, attrs.get((None, 'href'), '/'))\n attrs[None, 'target'] = '_blank'\n attrs[None, 'rel'] = 'noopener'\n return attrs\n\n\ndef markdown_compile(source):\n return bleach.clean(\n markdown.markdown(\n source,\n extensions=[\n 'markdown.extensions.sane_lists',\n # 'markdown.extensions.nl2br', # TODO: Enable, but check backwards-compatibility issues e.g. with mails\n ]\n ),\n tags=ALLOWED_TAGS,\n attributes=ALLOWED_ATTRIBUTES\n )\n\n\[email protected]\ndef rich_text(text: str, **kwargs):\n \"\"\"\n Processes markdown and cleans HTML in a text input.\n \"\"\"\n text = str(text)\n body_md = bleach.linkify(\n markdown_compile(text),\n callbacks=DEFAULT_CALLBACKS + ([safelink_callback] if kwargs.get('safelinks', True) else [abslink_callback])\n )\n return mark_safe(body_md)\n", "path": "src/pretix/base/templatetags/rich_text.py"}]}
1,532
188
gh_patches_debug_56378
rasdani/github-patches
git_diff
qutebrowser__qutebrowser-3063
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Auto-use the first match in the completion Requested by @Earnestly in IRC: Typing, e.g. `:do`<kbd>Enter</kbd> should invoke `:download` automatically as that's the first-best match instead of showing an invalid command error. (of course configurable) </issue> <code> [start of qutebrowser/commands/runners.py] 1 # vim: ft=python fileencoding=utf-8 sts=4 sw=4 et: 2 3 # Copyright 2014-2017 Florian Bruhin (The Compiler) <[email protected]> 4 # 5 # This file is part of qutebrowser. 6 # 7 # qutebrowser is free software: you can redistribute it and/or modify 8 # it under the terms of the GNU General Public License as published by 9 # the Free Software Foundation, either version 3 of the License, or 10 # (at your option) any later version. 11 # 12 # qutebrowser is distributed in the hope that it will be useful, 13 # but WITHOUT ANY WARRANTY; without even the implied warranty of 14 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 15 # GNU General Public License for more details. 16 # 17 # You should have received a copy of the GNU General Public License 18 # along with qutebrowser. If not, see <http://www.gnu.org/licenses/>. 19 20 """Module containing command managers (SearchRunner and CommandRunner).""" 21 22 import traceback 23 import re 24 25 import attr 26 from PyQt5.QtCore import pyqtSlot, QUrl, QObject 27 28 from qutebrowser.config import config 29 from qutebrowser.commands import cmdexc, cmdutils 30 from qutebrowser.utils import message, objreg, qtutils, usertypes, utils 31 from qutebrowser.misc import split 32 33 34 last_command = {} 35 36 37 @attr.s 38 class ParseResult: 39 40 """The result of parsing a commandline.""" 41 42 cmd = attr.ib() 43 args = attr.ib() 44 cmdline = attr.ib() 45 46 47 def _current_url(tabbed_browser): 48 """Convenience method to get the current url.""" 49 try: 50 return tabbed_browser.current_url() 51 except qtutils.QtValueError as e: 52 msg = "Current URL is invalid" 53 if e.reason: 54 msg += " ({})".format(e.reason) 55 msg += "!" 56 raise cmdexc.CommandError(msg) 57 58 59 def replace_variables(win_id, arglist): 60 """Utility function to replace variables like {url} in a list of args.""" 61 variables = { 62 'url': lambda: _current_url(tabbed_browser).toString( 63 QUrl.FullyEncoded | QUrl.RemovePassword), 64 'url:pretty': lambda: _current_url(tabbed_browser).toString( 65 QUrl.DecodeReserved | QUrl.RemovePassword), 66 'clipboard': utils.get_clipboard, 67 'primary': lambda: utils.get_clipboard(selection=True), 68 } 69 values = {} 70 args = [] 71 tabbed_browser = objreg.get('tabbed-browser', scope='window', 72 window=win_id) 73 74 def repl_cb(matchobj): 75 """Return replacement for given match.""" 76 var = matchobj.group("var") 77 if var not in values: 78 values[var] = variables[var]() 79 return values[var] 80 repl_pattern = re.compile("{(?P<var>" + "|".join(variables.keys()) + ")}") 81 82 try: 83 for arg in arglist: 84 # using re.sub with callback function replaces all variables in a 85 # single pass and avoids expansion of nested variables (e.g. 86 # "{url}" from clipboard is not expanded) 87 args.append(repl_pattern.sub(repl_cb, arg)) 88 except utils.ClipboardError as e: 89 raise cmdexc.CommandError(e) 90 return args 91 92 93 class CommandParser: 94 95 """Parse qutebrowser commandline commands. 96 97 Attributes: 98 99 _partial_match: Whether to allow partial command matches. 
100 """ 101 102 def __init__(self, partial_match=False): 103 self._partial_match = partial_match 104 105 def _get_alias(self, text, default=None): 106 """Get an alias from the config. 107 108 Args: 109 text: The text to parse. 110 default : Default value to return when alias was not found. 111 112 Return: 113 The new command string if an alias was found. Default value 114 otherwise. 115 """ 116 parts = text.strip().split(maxsplit=1) 117 try: 118 alias = config.val.aliases[parts[0]] 119 except KeyError: 120 return default 121 122 try: 123 new_cmd = '{} {}'.format(alias, parts[1]) 124 except IndexError: 125 new_cmd = alias 126 if text.endswith(' '): 127 new_cmd += ' ' 128 return new_cmd 129 130 def _parse_all_gen(self, text, aliases=True, *args, **kwargs): 131 """Split a command on ;; and parse all parts. 132 133 If the first command in the commandline is a non-split one, it only 134 returns that. 135 136 Args: 137 text: Text to parse. 138 aliases: Whether to handle aliases. 139 *args/**kwargs: Passed to parse(). 140 141 Yields: 142 ParseResult tuples. 143 """ 144 text = text.strip().lstrip(':').strip() 145 if not text: 146 raise cmdexc.NoSuchCommandError("No command given") 147 148 if aliases: 149 text = self._get_alias(text, text) 150 151 if ';;' in text: 152 # Get the first command and check if it doesn't want to have ;; 153 # split. 154 first = text.split(';;')[0] 155 result = self.parse(first, *args, **kwargs) 156 if result.cmd.no_cmd_split: 157 sub_texts = [text] 158 else: 159 sub_texts = [e.strip() for e in text.split(';;')] 160 else: 161 sub_texts = [text] 162 for sub in sub_texts: 163 yield self.parse(sub, *args, **kwargs) 164 165 def parse_all(self, *args, **kwargs): 166 """Wrapper over parse_all.""" 167 return list(self._parse_all_gen(*args, **kwargs)) 168 169 def parse(self, text, *, fallback=False, keep=False): 170 """Split the commandline text into command and arguments. 171 172 Args: 173 text: Text to parse. 174 fallback: Whether to do a fallback splitting when the command was 175 unknown. 176 keep: Whether to keep special chars and whitespace 177 178 Return: 179 A ParseResult tuple. 180 """ 181 cmdstr, sep, argstr = text.partition(' ') 182 183 if not cmdstr and not fallback: 184 raise cmdexc.NoSuchCommandError("No command given") 185 186 if self._partial_match: 187 cmdstr = self._completion_match(cmdstr) 188 189 try: 190 cmd = cmdutils.cmd_dict[cmdstr] 191 except KeyError: 192 if not fallback: 193 raise cmdexc.NoSuchCommandError( 194 '{}: no such command'.format(cmdstr)) 195 cmdline = split.split(text, keep=keep) 196 return ParseResult(cmd=None, args=None, cmdline=cmdline) 197 198 args = self._split_args(cmd, argstr, keep) 199 if keep and args: 200 cmdline = [cmdstr, sep + args[0]] + args[1:] 201 elif keep: 202 cmdline = [cmdstr, sep] 203 else: 204 cmdline = [cmdstr] + args[:] 205 206 return ParseResult(cmd=cmd, args=args, cmdline=cmdline) 207 208 def _completion_match(self, cmdstr): 209 """Replace cmdstr with a matching completion if there's only one match. 210 211 Args: 212 cmdstr: The string representing the entered command so far 213 214 Return: 215 cmdstr modified to the matching completion or unmodified 216 """ 217 matches = [] 218 for valid_command in cmdutils.cmd_dict: 219 if valid_command.find(cmdstr) == 0: 220 matches.append(valid_command) 221 if len(matches) == 1: 222 cmdstr = matches[0] 223 return cmdstr 224 225 def _split_args(self, cmd, argstr, keep): 226 """Split the arguments from an arg string. 227 228 Args: 229 cmd: The command we're currently handling. 
230 argstr: An argument string. 231 keep: Whether to keep special chars and whitespace 232 233 Return: 234 A list containing the split strings. 235 """ 236 if not argstr: 237 return [] 238 elif cmd.maxsplit is None: 239 return split.split(argstr, keep=keep) 240 else: 241 # If split=False, we still want to split the flags, but not 242 # everything after that. 243 # We first split the arg string and check the index of the first 244 # non-flag args, then we re-split again properly. 245 # example: 246 # 247 # input: "--foo -v bar baz" 248 # first split: ['--foo', '-v', 'bar', 'baz'] 249 # 0 1 2 3 250 # second split: ['--foo', '-v', 'bar baz'] 251 # (maxsplit=2) 252 split_args = split.simple_split(argstr, keep=keep) 253 flag_arg_count = 0 254 for i, arg in enumerate(split_args): 255 arg = arg.strip() 256 if arg.startswith('-'): 257 if arg in cmd.flags_with_args: 258 flag_arg_count += 1 259 else: 260 maxsplit = i + cmd.maxsplit + flag_arg_count 261 return split.simple_split(argstr, keep=keep, 262 maxsplit=maxsplit) 263 264 # If there are only flags, we got it right on the first try 265 # already. 266 return split_args 267 268 269 class CommandRunner(QObject): 270 271 """Parse and run qutebrowser commandline commands. 272 273 Attributes: 274 _win_id: The window this CommandRunner is associated with. 275 """ 276 277 def __init__(self, win_id, partial_match=False, parent=None): 278 super().__init__(parent) 279 self._parser = CommandParser(partial_match=partial_match) 280 self._win_id = win_id 281 282 def run(self, text, count=None): 283 """Parse a command from a line of text and run it. 284 285 Args: 286 text: The text to parse. 287 count: The count to pass to the command. 288 """ 289 record_last_command = True 290 record_macro = True 291 292 mode_manager = objreg.get('mode-manager', scope='window', 293 window=self._win_id) 294 cur_mode = mode_manager.mode 295 296 for result in self._parser.parse_all(text): 297 if result.cmd.no_replace_variables: 298 args = result.args 299 else: 300 args = replace_variables(self._win_id, result.args) 301 result.cmd.run(self._win_id, args, count=count) 302 303 if result.cmdline[0] == 'repeat-command': 304 record_last_command = False 305 306 if result.cmdline[0] in ['record-macro', 'run-macro', 307 'set-cmd-text']: 308 record_macro = False 309 310 if record_last_command: 311 last_command[cur_mode] = (text, count) 312 313 if record_macro and cur_mode == usertypes.KeyMode.normal: 314 macro_recorder = objreg.get('macro-recorder') 315 macro_recorder.record_command(text, count) 316 317 @pyqtSlot(str, int) 318 @pyqtSlot(str) 319 def run_safely(self, text, count=None): 320 """Run a command and display exceptions in the statusbar.""" 321 try: 322 self.run(text, count) 323 except cmdexc.Error as e: 324 message.error(str(e), stack=traceback.format_exc()) 325 [end of qutebrowser/commands/runners.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/qutebrowser/commands/runners.py b/qutebrowser/commands/runners.py --- a/qutebrowser/commands/runners.py +++ b/qutebrowser/commands/runners.py @@ -220,6 +220,8 @@ matches.append(valid_command) if len(matches) == 1: cmdstr = matches[0] + elif len(matches) > 1 and config.val.completion.use_best_match: + cmdstr = matches[0] return cmdstr def _split_args(self, cmd, argstr, keep):
{"golden_diff": "diff --git a/qutebrowser/commands/runners.py b/qutebrowser/commands/runners.py\n--- a/qutebrowser/commands/runners.py\n+++ b/qutebrowser/commands/runners.py\n@@ -220,6 +220,8 @@\n matches.append(valid_command)\n if len(matches) == 1:\n cmdstr = matches[0]\n+ elif len(matches) > 1 and config.val.completion.use_best_match:\n+ cmdstr = matches[0]\n return cmdstr\n \n def _split_args(self, cmd, argstr, keep):\n", "issue": "Auto-use the first match in the completion\nRequested by @Earnestly in IRC:\n\nTyping, e.g. `:do`<kbd>Enter</kbd> should invoke `:download` automatically as that's the first-best match instead of showing an invalid command error.\n\n(of course configurable)\n\n", "before_files": [{"content": "# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:\n\n# Copyright 2014-2017 Florian Bruhin (The Compiler) <[email protected]>\n#\n# This file is part of qutebrowser.\n#\n# qutebrowser is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# qutebrowser is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.\n\n\"\"\"Module containing command managers (SearchRunner and CommandRunner).\"\"\"\n\nimport traceback\nimport re\n\nimport attr\nfrom PyQt5.QtCore import pyqtSlot, QUrl, QObject\n\nfrom qutebrowser.config import config\nfrom qutebrowser.commands import cmdexc, cmdutils\nfrom qutebrowser.utils import message, objreg, qtutils, usertypes, utils\nfrom qutebrowser.misc import split\n\n\nlast_command = {}\n\n\[email protected]\nclass ParseResult:\n\n \"\"\"The result of parsing a commandline.\"\"\"\n\n cmd = attr.ib()\n args = attr.ib()\n cmdline = attr.ib()\n\n\ndef _current_url(tabbed_browser):\n \"\"\"Convenience method to get the current url.\"\"\"\n try:\n return tabbed_browser.current_url()\n except qtutils.QtValueError as e:\n msg = \"Current URL is invalid\"\n if e.reason:\n msg += \" ({})\".format(e.reason)\n msg += \"!\"\n raise cmdexc.CommandError(msg)\n\n\ndef replace_variables(win_id, arglist):\n \"\"\"Utility function to replace variables like {url} in a list of args.\"\"\"\n variables = {\n 'url': lambda: _current_url(tabbed_browser).toString(\n QUrl.FullyEncoded | QUrl.RemovePassword),\n 'url:pretty': lambda: _current_url(tabbed_browser).toString(\n QUrl.DecodeReserved | QUrl.RemovePassword),\n 'clipboard': utils.get_clipboard,\n 'primary': lambda: utils.get_clipboard(selection=True),\n }\n values = {}\n args = []\n tabbed_browser = objreg.get('tabbed-browser', scope='window',\n window=win_id)\n\n def repl_cb(matchobj):\n \"\"\"Return replacement for given match.\"\"\"\n var = matchobj.group(\"var\")\n if var not in values:\n values[var] = variables[var]()\n return values[var]\n repl_pattern = re.compile(\"{(?P<var>\" + \"|\".join(variables.keys()) + \")}\")\n\n try:\n for arg in arglist:\n # using re.sub with callback function replaces all variables in a\n # single pass and avoids expansion of nested variables (e.g.\n # \"{url}\" from clipboard is not expanded)\n args.append(repl_pattern.sub(repl_cb, arg))\n except utils.ClipboardError as e:\n raise 
cmdexc.CommandError(e)\n return args\n\n\nclass CommandParser:\n\n \"\"\"Parse qutebrowser commandline commands.\n\n Attributes:\n\n _partial_match: Whether to allow partial command matches.\n \"\"\"\n\n def __init__(self, partial_match=False):\n self._partial_match = partial_match\n\n def _get_alias(self, text, default=None):\n \"\"\"Get an alias from the config.\n\n Args:\n text: The text to parse.\n default : Default value to return when alias was not found.\n\n Return:\n The new command string if an alias was found. Default value\n otherwise.\n \"\"\"\n parts = text.strip().split(maxsplit=1)\n try:\n alias = config.val.aliases[parts[0]]\n except KeyError:\n return default\n\n try:\n new_cmd = '{} {}'.format(alias, parts[1])\n except IndexError:\n new_cmd = alias\n if text.endswith(' '):\n new_cmd += ' '\n return new_cmd\n\n def _parse_all_gen(self, text, aliases=True, *args, **kwargs):\n \"\"\"Split a command on ;; and parse all parts.\n\n If the first command in the commandline is a non-split one, it only\n returns that.\n\n Args:\n text: Text to parse.\n aliases: Whether to handle aliases.\n *args/**kwargs: Passed to parse().\n\n Yields:\n ParseResult tuples.\n \"\"\"\n text = text.strip().lstrip(':').strip()\n if not text:\n raise cmdexc.NoSuchCommandError(\"No command given\")\n\n if aliases:\n text = self._get_alias(text, text)\n\n if ';;' in text:\n # Get the first command and check if it doesn't want to have ;;\n # split.\n first = text.split(';;')[0]\n result = self.parse(first, *args, **kwargs)\n if result.cmd.no_cmd_split:\n sub_texts = [text]\n else:\n sub_texts = [e.strip() for e in text.split(';;')]\n else:\n sub_texts = [text]\n for sub in sub_texts:\n yield self.parse(sub, *args, **kwargs)\n\n def parse_all(self, *args, **kwargs):\n \"\"\"Wrapper over parse_all.\"\"\"\n return list(self._parse_all_gen(*args, **kwargs))\n\n def parse(self, text, *, fallback=False, keep=False):\n \"\"\"Split the commandline text into command and arguments.\n\n Args:\n text: Text to parse.\n fallback: Whether to do a fallback splitting when the command was\n unknown.\n keep: Whether to keep special chars and whitespace\n\n Return:\n A ParseResult tuple.\n \"\"\"\n cmdstr, sep, argstr = text.partition(' ')\n\n if not cmdstr and not fallback:\n raise cmdexc.NoSuchCommandError(\"No command given\")\n\n if self._partial_match:\n cmdstr = self._completion_match(cmdstr)\n\n try:\n cmd = cmdutils.cmd_dict[cmdstr]\n except KeyError:\n if not fallback:\n raise cmdexc.NoSuchCommandError(\n '{}: no such command'.format(cmdstr))\n cmdline = split.split(text, keep=keep)\n return ParseResult(cmd=None, args=None, cmdline=cmdline)\n\n args = self._split_args(cmd, argstr, keep)\n if keep and args:\n cmdline = [cmdstr, sep + args[0]] + args[1:]\n elif keep:\n cmdline = [cmdstr, sep]\n else:\n cmdline = [cmdstr] + args[:]\n\n return ParseResult(cmd=cmd, args=args, cmdline=cmdline)\n\n def _completion_match(self, cmdstr):\n \"\"\"Replace cmdstr with a matching completion if there's only one match.\n\n Args:\n cmdstr: The string representing the entered command so far\n\n Return:\n cmdstr modified to the matching completion or unmodified\n \"\"\"\n matches = []\n for valid_command in cmdutils.cmd_dict:\n if valid_command.find(cmdstr) == 0:\n matches.append(valid_command)\n if len(matches) == 1:\n cmdstr = matches[0]\n return cmdstr\n\n def _split_args(self, cmd, argstr, keep):\n \"\"\"Split the arguments from an arg string.\n\n Args:\n cmd: The command we're currently handling.\n argstr: An argument string.\n 
keep: Whether to keep special chars and whitespace\n\n Return:\n A list containing the split strings.\n \"\"\"\n if not argstr:\n return []\n elif cmd.maxsplit is None:\n return split.split(argstr, keep=keep)\n else:\n # If split=False, we still want to split the flags, but not\n # everything after that.\n # We first split the arg string and check the index of the first\n # non-flag args, then we re-split again properly.\n # example:\n #\n # input: \"--foo -v bar baz\"\n # first split: ['--foo', '-v', 'bar', 'baz']\n # 0 1 2 3\n # second split: ['--foo', '-v', 'bar baz']\n # (maxsplit=2)\n split_args = split.simple_split(argstr, keep=keep)\n flag_arg_count = 0\n for i, arg in enumerate(split_args):\n arg = arg.strip()\n if arg.startswith('-'):\n if arg in cmd.flags_with_args:\n flag_arg_count += 1\n else:\n maxsplit = i + cmd.maxsplit + flag_arg_count\n return split.simple_split(argstr, keep=keep,\n maxsplit=maxsplit)\n\n # If there are only flags, we got it right on the first try\n # already.\n return split_args\n\n\nclass CommandRunner(QObject):\n\n \"\"\"Parse and run qutebrowser commandline commands.\n\n Attributes:\n _win_id: The window this CommandRunner is associated with.\n \"\"\"\n\n def __init__(self, win_id, partial_match=False, parent=None):\n super().__init__(parent)\n self._parser = CommandParser(partial_match=partial_match)\n self._win_id = win_id\n\n def run(self, text, count=None):\n \"\"\"Parse a command from a line of text and run it.\n\n Args:\n text: The text to parse.\n count: The count to pass to the command.\n \"\"\"\n record_last_command = True\n record_macro = True\n\n mode_manager = objreg.get('mode-manager', scope='window',\n window=self._win_id)\n cur_mode = mode_manager.mode\n\n for result in self._parser.parse_all(text):\n if result.cmd.no_replace_variables:\n args = result.args\n else:\n args = replace_variables(self._win_id, result.args)\n result.cmd.run(self._win_id, args, count=count)\n\n if result.cmdline[0] == 'repeat-command':\n record_last_command = False\n\n if result.cmdline[0] in ['record-macro', 'run-macro',\n 'set-cmd-text']:\n record_macro = False\n\n if record_last_command:\n last_command[cur_mode] = (text, count)\n\n if record_macro and cur_mode == usertypes.KeyMode.normal:\n macro_recorder = objreg.get('macro-recorder')\n macro_recorder.record_command(text, count)\n\n @pyqtSlot(str, int)\n @pyqtSlot(str)\n def run_safely(self, text, count=None):\n \"\"\"Run a command and display exceptions in the statusbar.\"\"\"\n try:\n self.run(text, count)\n except cmdexc.Error as e:\n message.error(str(e), stack=traceback.format_exc())\n", "path": "qutebrowser/commands/runners.py"}]}
3,854
127
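For reference, the completion behavior that the qutebrowser patch above introduces can be reproduced as a small standalone prefix-matching helper. This is an illustrative sketch only: the `commands` dict and the `use_best_match` flag stand in for `cmdutils.cmd_dict` and `config.val.completion.use_best_match`, and the example command names are made up.

```python
def completion_match(cmdstr, commands, use_best_match=False):
    """Expand a typed prefix to a full command name, mirroring the patched logic."""
    # Collect every known command that starts with the typed prefix.
    matches = [name for name in commands if name.startswith(cmdstr)]
    if len(matches) == 1:
        # Unambiguous prefix: expand it.
        return matches[0]
    if len(matches) > 1 and use_best_match:
        # Ambiguous prefix: the new option falls back to the first match.
        return matches[0]
    # No match, or ambiguous without the option: leave the input as typed.
    return cmdstr


# ":do" expands to "download" when the best-match option is enabled.
commands = {"download": None, "download-cancel": None, "open": None}
assert completion_match("do", commands, use_best_match=True) == "download"
assert completion_match("op", commands) == "open"
assert completion_match("do", commands) == "do"
```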
gh_patches_debug_28992
rasdani/github-patches
git_diff
HypothesisWorks__hypothesis-624
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> False positive HypothesisDeprecationWarning Just upgraded to Hypothesis 3.8.2, and I'm getting the following warning: ``` .venv/lib/python3.6/site-packages/hypothesis/strategies.py:416: HypothesisDeprecationWarning: Cannot sample from <enum 'AnEnum'>, not a sequence. Hypothesis goes to some length to ensure that sampling an element from a collection (with `sampled_from` or `choices`) is replayable and can be minimised. To replay a saved example, the sampled values must have the same iteration order on every run - ruling out sets, dicts, etc due to hash randomisation. Most cases can simply use `sorted(values)`, but mixed types or special values such as math.nan require careful handling - and note that when simplifying an example, Hypothesis treats earlier values as simpler. elements = check_sample(elements) ``` It's a great warning but I think I'm getting a false positive: ``` from hypothesis import given from hypothesis.strategies import sampled_from from enum import Enum class AnEnum(Enum): A = "A" B = "B" @given(sampled_from(AnEnum)) def test_enum(e): print(e) ``` According to https://docs.python.org/3/library/enum.html, "Enumerations support iteration, in definition order", so this should be fine, right? </issue> <code> [start of src/hypothesis/version.py] 1 # coding=utf-8 2 # 3 # This file is part of Hypothesis, which may be found at 4 # https://github.com/HypothesisWorks/hypothesis-python 5 # 6 # Most of this work is copyright (C) 2013-2017 David R. MacIver 7 # ([email protected]), but it contains contributions by others. See 8 # CONTRIBUTING.rst for a full list of people who may hold copyright, and 9 # consult the git log if you need to determine who owns an individual 10 # contribution. 11 # 12 # This Source Code Form is subject to the terms of the Mozilla Public License, 13 # v. 2.0. If a copy of the MPL was not distributed with this file, You can 14 # obtain one at http://mozilla.org/MPL/2.0/. 15 # 16 # END HEADER 17 18 from __future__ import division, print_function, absolute_import 19 20 __version_info__ = (3, 8, 3) 21 __version__ = '.'.join(map(str, __version_info__)) 22 [end of src/hypothesis/version.py] [start of src/hypothesis/internal/conjecture/utils.py] 1 # coding=utf-8 2 # 3 # This file is part of Hypothesis, which may be found at 4 # https://github.com/HypothesisWorks/hypothesis-python 5 # 6 # Most of this work is copyright (C) 2013-2017 David R. MacIver 7 # ([email protected]), but it contains contributions by others. See 8 # CONTRIBUTING.rst for a full list of people who may hold copyright, and 9 # consult the git log if you need to determine who owns an individual 10 # contribution. 11 # 12 # This Source Code Form is subject to the terms of the Mozilla Public License, 13 # v. 2.0. If a copy of the MPL was not distributed with this file, You can 14 # obtain one at http://mozilla.org/MPL/2.0/. 
15 # 16 # END HEADER 17 18 from __future__ import division, print_function, absolute_import 19 20 import math 21 from collections import Sequence 22 23 from hypothesis._settings import note_deprecation 24 from hypothesis.internal.compat import hbytes, bit_length, int_to_bytes, \ 25 int_from_bytes 26 27 28 def n_byte_unsigned(data, n): 29 return int_from_bytes(data.draw_bytes(n)) 30 31 32 def saturate(n): 33 bits = bit_length(n) 34 k = 1 35 while k < bits: 36 n |= (n >> k) 37 k *= 2 38 return n 39 40 41 def integer_range(data, lower, upper, center=None, distribution=None): 42 assert lower <= upper 43 if lower == upper: 44 return int(lower) 45 46 if center is None: 47 center = lower 48 center = min(max(center, lower), upper) 49 if distribution is None: 50 if lower < center < upper: 51 def distribution(random): 52 if random.randint(0, 1): 53 return random.randint(center, upper) 54 else: 55 return random.randint(lower, center) 56 else: 57 def distribution(random): 58 return random.randint(lower, upper) 59 60 gap = upper - lower 61 bits = bit_length(gap) 62 nbytes = bits // 8 + int(bits % 8 != 0) 63 mask = saturate(gap) 64 65 def byte_distribution(random, n): 66 assert n == nbytes 67 v = distribution(random) 68 if v >= center: 69 probe = v - center 70 else: 71 probe = upper - v 72 return int_to_bytes(probe, n) 73 74 probe = gap + 1 75 76 while probe > gap: 77 probe = int_from_bytes( 78 data.draw_bytes(nbytes, byte_distribution) 79 ) & mask 80 81 if center == upper: 82 result = upper - probe 83 elif center == lower: 84 result = lower + probe 85 else: 86 if center + probe <= upper: 87 result = center + probe 88 else: 89 result = upper - probe 90 assert lower <= result <= upper 91 return int(result) 92 93 94 def integer_range_with_distribution(data, lower, upper, nums): 95 return integer_range( 96 data, lower, upper, distribution=nums 97 ) 98 99 100 def centered_integer_range(data, lower, upper, center): 101 return integer_range( 102 data, lower, upper, center=center 103 ) 104 105 106 def check_sample(values): 107 if not isinstance(values, Sequence): 108 note_deprecation( 109 ('Cannot sample from %r, not a sequence. ' % (values,)) + 110 'Hypothesis goes to some length to ensure that sampling an ' 111 'element from a collection (with `sampled_from` or `choices`) is ' 112 'replayable and can be minimised. To replay a saved example, ' 113 'the sampled values must have the same iteration order on every ' 114 'run - ruling out sets, dicts, etc due to hash randomisation. ' 115 'Most cases can simply use `sorted(values)`, but mixed types or ' 116 'special values such as math.nan require careful handling - and ' 117 'note that when simplifying an example, Hypothesis treats ' 118 'earlier values as simpler.') 119 return tuple(values) 120 121 122 def choice(data, values): 123 return values[integer_range(data, 0, len(values) - 1)] 124 125 126 def geometric(data, p): 127 denom = math.log1p(-p) 128 n_bytes = 8 129 130 def distribution(random, n): 131 assert n == n_bytes 132 for _ in range(100): 133 try: 134 return int_to_bytes(int( 135 math.log1p(-random.random()) / denom), n) 136 # This is basically impossible to hit but is required for 137 # correctness 138 except OverflowError: # pragma: no cover 139 pass 140 # We got a one in a million chance 100 times in a row. Something is up. 
141 assert False # pragma: no cover 142 return int_from_bytes(data.draw_bytes(n_bytes, distribution)) 143 144 145 def boolean(data): 146 return bool(n_byte_unsigned(data, 1) & 1) 147 148 149 def biased_coin(data, p): 150 def distribution(random, n): 151 assert n == 1 152 return hbytes([int(random.random() <= p)]) 153 return bool( 154 data.draw_bytes(1, distribution)[0] & 1 155 ) 156 157 158 def write(data, string): 159 assert isinstance(string, hbytes) 160 161 def distribution(random, n): 162 assert n == len(string) 163 return string 164 x = data.draw_bytes(len(string), distribution) 165 if x != string: 166 data.mark_invalid() 167 [end of src/hypothesis/internal/conjecture/utils.py] [start of docs/conf.py] 1 # coding=utf-8 2 # 3 # This file is part of Hypothesis, which may be found at 4 # https://github.com/HypothesisWorks/hypothesis-python 5 # 6 # Most of this work is copyright (C) 2013-2017 David R. MacIver 7 # ([email protected]), but it contains contributions by others. See 8 # CONTRIBUTING.rst for a full list of people who may hold copyright, and 9 # consult the git log if you need to determine who owns an individual 10 # contribution. 11 # 12 # This Source Code Form is subject to the terms of the Mozilla Public License, 13 # v. 2.0. If a copy of the MPL was not distributed with this file, You can 14 # obtain one at http://mozilla.org/MPL/2.0/. 15 # 16 # END HEADER 17 18 # -*- coding: utf-8 -*- 19 20 from __future__ import division, print_function, absolute_import 21 22 # on_rtd is whether we are on readthedocs.org 23 import os 24 import sys 25 26 from hypothesis import __version__ 27 28 on_rtd = os.environ.get('READTHEDOCS', None) == 'True' 29 30 sys.path.append( 31 os.path.join(os.path.dirname(__file__), '..', 'src') 32 ) 33 34 35 autodoc_member_order = 'bysource' 36 37 extensions = [ 38 'sphinx.ext.autodoc', 39 'sphinx.ext.doctest', 40 'sphinx.ext.extlinks', 41 'sphinx.ext.viewcode', 42 'sphinx.ext.intersphinx', 43 ] 44 45 templates_path = ['_templates'] 46 47 source_suffix = '.rst' 48 49 # The master toctree document. 50 master_doc = 'index' 51 52 # General information about the project. 53 project = u'Hypothesis' 54 copyright = u'2015, David R. MacIver' 55 author = u'David R. MacIver' 56 57 version = __version__ 58 release = __version__ 59 60 language = None 61 62 exclude_patterns = ['_build'] 63 64 pygments_style = 'sphinx' 65 66 todo_include_todos = False 67 68 intersphinx_mapping = { 69 'python': ('http://docs.python.org/', None), 70 } 71 72 autodoc_mock_imports = ['numpy'] 73 74 doctest_global_setup = ''' 75 # Some standard imports 76 from hypothesis import * 77 from hypothesis.strategies import * 78 # Ensure that output (including from strategies) is deterministic 79 import random 80 random.seed(0) 81 ''' 82 83 # This config value must be a dictionary of external sites, mapping unique 84 # short alias names to a base URL and a prefix. 
85 # See http://sphinx-doc.org/ext/extlinks.html 86 extlinks = { 87 'commit': ('https://github.com/HypothesisWorks/hypothesis-python/commit/%s', 'commit '), 88 'gh-file': ('https://github.com/HypothesisWorks/hypothesis-python/blob/master/%s', ''), 89 'gh-link': ('https://github.com/HypothesisWorks/hypothesis-python/%s', ''), 90 'issue': ('https://github.com/HypothesisWorks/hypothesis-python/issues/%s', 'issue #'), 91 'pull': ('https://github.com/HypothesisWorks/hypothesis-python/pulls/%s', 'pull request #'), 92 } 93 94 # -- Options for HTML output ---------------------------------------------- 95 96 if not on_rtd: # only import and set the theme if we're building docs locally 97 import sphinx_rtd_theme 98 html_theme = 'sphinx_rtd_theme' 99 html_theme_path = [sphinx_rtd_theme.get_html_theme_path()] 100 101 html_static_path = ['_static'] 102 103 htmlhelp_basename = 'Hypothesisdoc' 104 105 # -- Options for LaTeX output --------------------------------------------- 106 107 latex_elements = { 108 } 109 110 latex_documents = [ 111 (master_doc, 'Hypothesis.tex', u'Hypothesis Documentation', 112 u'David R. MacIver', 'manual'), 113 ] 114 115 man_pages = [ 116 (master_doc, 'hypothesis', u'Hypothesis Documentation', 117 [author], 1) 118 ] 119 120 texinfo_documents = [ 121 (master_doc, 'Hypothesis', u'Hypothesis Documentation', 122 author, 'Hypothesis', 'One line description of project.', 123 'Miscellaneous'), 124 ] 125 [end of docs/conf.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/docs/conf.py b/docs/conf.py
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -22,6 +22,7 @@
 # on_rtd is whether we are on readthedocs.org
 import os
 import sys
+import datetime
 
 from hypothesis import __version__
 
@@ -51,7 +52,7 @@
 
 # General information about the project.
 project = u'Hypothesis'
-copyright = u'2015, David R. MacIver'
+copyright = u'2013-%s, David R. MacIver' % datetime.datetime.utcnow().year
 author = u'David R. MacIver'
 
 version = __version__
diff --git a/src/hypothesis/internal/conjecture/utils.py b/src/hypothesis/internal/conjecture/utils.py
--- a/src/hypothesis/internal/conjecture/utils.py
+++ b/src/hypothesis/internal/conjecture/utils.py
@@ -17,6 +17,7 @@
 
 from __future__ import division, print_function, absolute_import
 
+import enum
 import math
 from collections import Sequence
 
@@ -104,7 +105,7 @@
 
 
 def check_sample(values):
-    if not isinstance(values, Sequence):
+    if not isinstance(values, (Sequence, enum.EnumMeta)):
         note_deprecation(
             ('Cannot sample from %r, not a sequence. ' % (values,)) +
             'Hypothesis goes to some length to ensure that sampling an '
diff --git a/src/hypothesis/version.py b/src/hypothesis/version.py
--- a/src/hypothesis/version.py
+++ b/src/hypothesis/version.py
@@ -17,5 +17,5 @@
 
 from __future__ import division, print_function, absolute_import
 
-__version_info__ = (3, 8, 3)
+__version_info__ = (3, 8, 4)
 __version__ = '.'.join(map(str, __version_info__))
{"golden_diff": "diff --git a/docs/conf.py b/docs/conf.py\n--- a/docs/conf.py\n+++ b/docs/conf.py\n@@ -22,6 +22,7 @@\n # on_rtd is whether we are on readthedocs.org\n import os\n import sys\n+import datetime\n \n from hypothesis import __version__\n \n@@ -51,7 +52,7 @@\n \n # General information about the project.\n project = u'Hypothesis'\n-copyright = u'2015, David R. MacIver'\n+copyright = u'2013-%s, David R. MacIver' % datetime.datetime.utcnow().year\n author = u'David R. MacIver'\n \n version = __version__\ndiff --git a/src/hypothesis/internal/conjecture/utils.py b/src/hypothesis/internal/conjecture/utils.py\n--- a/src/hypothesis/internal/conjecture/utils.py\n+++ b/src/hypothesis/internal/conjecture/utils.py\n@@ -17,6 +17,7 @@\n \n from __future__ import division, print_function, absolute_import\n \n+import enum\n import math\n from collections import Sequence\n \n@@ -104,7 +105,7 @@\n \n \n def check_sample(values):\n- if not isinstance(values, Sequence):\n+ if not isinstance(values, (Sequence, enum.EnumMeta)):\n note_deprecation(\n ('Cannot sample from %r, not a sequence. ' % (values,)) +\n 'Hypothesis goes to some length to ensure that sampling an '\ndiff --git a/src/hypothesis/version.py b/src/hypothesis/version.py\n--- a/src/hypothesis/version.py\n+++ b/src/hypothesis/version.py\n@@ -17,5 +17,5 @@\n \n from __future__ import division, print_function, absolute_import\n \n-__version_info__ = (3, 8, 3)\n+__version_info__ = (3, 8, 4)\n __version__ = '.'.join(map(str, __version_info__))\n", "issue": "False positive HypothesisDeprecationWarning\nJust upgraded to Hypothesis 3.8.2, and I'm getting the following warning:\r\n\r\n```\r\n.venv/lib/python3.6/site-packages/hypothesis/strategies.py:416: HypothesisDeprecationWarning: Cannot sample from <enum 'AnEnum'>, not a sequence. Hypothesis goes to some length to ensure that sampling an element from a collection (with `sampled_from` or `choices`) is replayable and can be minimised. To replay a saved example, the sampled values must have the same iteration order on every run - ruling out sets, dicts, etc due to hash randomisation. Most cases can simply use `sorted(values)`, but mixed types or special values such as math.nan require careful handling - and note that when simplifying an example, Hypothesis treats earlier values as simpler.\r\n elements = check_sample(elements)\r\n```\r\nIt's a great warning but I think I'm getting a false positive:\r\n```\r\nfrom hypothesis import given\r\nfrom hypothesis.strategies import sampled_from\r\n\r\nfrom enum import Enum\r\n\r\n\r\nclass AnEnum(Enum):\r\n A = \"A\"\r\n B = \"B\"\r\n\r\n\r\n@given(sampled_from(AnEnum))\r\ndef test_enum(e):\r\n print(e)\r\n```\r\nAccording to https://docs.python.org/3/library/enum.html, \"Enumerations support iteration, in definition order\", so this should be fine, right?\n", "before_files": [{"content": "# coding=utf-8\n#\n# This file is part of Hypothesis, which may be found at\n# https://github.com/HypothesisWorks/hypothesis-python\n#\n# Most of this work is copyright (C) 2013-2017 David R. MacIver\n# ([email protected]), but it contains contributions by others. See\n# CONTRIBUTING.rst for a full list of people who may hold copyright, and\n# consult the git log if you need to determine who owns an individual\n# contribution.\n#\n# This Source Code Form is subject to the terms of the Mozilla Public License,\n# v. 2.0. 
If a copy of the MPL was not distributed with this file, You can\n# obtain one at http://mozilla.org/MPL/2.0/.\n#\n# END HEADER\n\nfrom __future__ import division, print_function, absolute_import\n\n__version_info__ = (3, 8, 3)\n__version__ = '.'.join(map(str, __version_info__))\n", "path": "src/hypothesis/version.py"}, {"content": "# coding=utf-8\n#\n# This file is part of Hypothesis, which may be found at\n# https://github.com/HypothesisWorks/hypothesis-python\n#\n# Most of this work is copyright (C) 2013-2017 David R. MacIver\n# ([email protected]), but it contains contributions by others. See\n# CONTRIBUTING.rst for a full list of people who may hold copyright, and\n# consult the git log if you need to determine who owns an individual\n# contribution.\n#\n# This Source Code Form is subject to the terms of the Mozilla Public License,\n# v. 2.0. If a copy of the MPL was not distributed with this file, You can\n# obtain one at http://mozilla.org/MPL/2.0/.\n#\n# END HEADER\n\nfrom __future__ import division, print_function, absolute_import\n\nimport math\nfrom collections import Sequence\n\nfrom hypothesis._settings import note_deprecation\nfrom hypothesis.internal.compat import hbytes, bit_length, int_to_bytes, \\\n int_from_bytes\n\n\ndef n_byte_unsigned(data, n):\n return int_from_bytes(data.draw_bytes(n))\n\n\ndef saturate(n):\n bits = bit_length(n)\n k = 1\n while k < bits:\n n |= (n >> k)\n k *= 2\n return n\n\n\ndef integer_range(data, lower, upper, center=None, distribution=None):\n assert lower <= upper\n if lower == upper:\n return int(lower)\n\n if center is None:\n center = lower\n center = min(max(center, lower), upper)\n if distribution is None:\n if lower < center < upper:\n def distribution(random):\n if random.randint(0, 1):\n return random.randint(center, upper)\n else:\n return random.randint(lower, center)\n else:\n def distribution(random):\n return random.randint(lower, upper)\n\n gap = upper - lower\n bits = bit_length(gap)\n nbytes = bits // 8 + int(bits % 8 != 0)\n mask = saturate(gap)\n\n def byte_distribution(random, n):\n assert n == nbytes\n v = distribution(random)\n if v >= center:\n probe = v - center\n else:\n probe = upper - v\n return int_to_bytes(probe, n)\n\n probe = gap + 1\n\n while probe > gap:\n probe = int_from_bytes(\n data.draw_bytes(nbytes, byte_distribution)\n ) & mask\n\n if center == upper:\n result = upper - probe\n elif center == lower:\n result = lower + probe\n else:\n if center + probe <= upper:\n result = center + probe\n else:\n result = upper - probe\n assert lower <= result <= upper\n return int(result)\n\n\ndef integer_range_with_distribution(data, lower, upper, nums):\n return integer_range(\n data, lower, upper, distribution=nums\n )\n\n\ndef centered_integer_range(data, lower, upper, center):\n return integer_range(\n data, lower, upper, center=center\n )\n\n\ndef check_sample(values):\n if not isinstance(values, Sequence):\n note_deprecation(\n ('Cannot sample from %r, not a sequence. ' % (values,)) +\n 'Hypothesis goes to some length to ensure that sampling an '\n 'element from a collection (with `sampled_from` or `choices`) is '\n 'replayable and can be minimised. To replay a saved example, '\n 'the sampled values must have the same iteration order on every '\n 'run - ruling out sets, dicts, etc due to hash randomisation. 
'\n 'Most cases can simply use `sorted(values)`, but mixed types or '\n 'special values such as math.nan require careful handling - and '\n 'note that when simplifying an example, Hypothesis treats '\n 'earlier values as simpler.')\n return tuple(values)\n\n\ndef choice(data, values):\n return values[integer_range(data, 0, len(values) - 1)]\n\n\ndef geometric(data, p):\n denom = math.log1p(-p)\n n_bytes = 8\n\n def distribution(random, n):\n assert n == n_bytes\n for _ in range(100):\n try:\n return int_to_bytes(int(\n math.log1p(-random.random()) / denom), n)\n # This is basically impossible to hit but is required for\n # correctness\n except OverflowError: # pragma: no cover\n pass\n # We got a one in a million chance 100 times in a row. Something is up.\n assert False # pragma: no cover\n return int_from_bytes(data.draw_bytes(n_bytes, distribution))\n\n\ndef boolean(data):\n return bool(n_byte_unsigned(data, 1) & 1)\n\n\ndef biased_coin(data, p):\n def distribution(random, n):\n assert n == 1\n return hbytes([int(random.random() <= p)])\n return bool(\n data.draw_bytes(1, distribution)[0] & 1\n )\n\n\ndef write(data, string):\n assert isinstance(string, hbytes)\n\n def distribution(random, n):\n assert n == len(string)\n return string\n x = data.draw_bytes(len(string), distribution)\n if x != string:\n data.mark_invalid()\n", "path": "src/hypothesis/internal/conjecture/utils.py"}, {"content": "# coding=utf-8\n#\n# This file is part of Hypothesis, which may be found at\n# https://github.com/HypothesisWorks/hypothesis-python\n#\n# Most of this work is copyright (C) 2013-2017 David R. MacIver\n# ([email protected]), but it contains contributions by others. See\n# CONTRIBUTING.rst for a full list of people who may hold copyright, and\n# consult the git log if you need to determine who owns an individual\n# contribution.\n#\n# This Source Code Form is subject to the terms of the Mozilla Public License,\n# v. 2.0. If a copy of the MPL was not distributed with this file, You can\n# obtain one at http://mozilla.org/MPL/2.0/.\n#\n# END HEADER\n\n# -*- coding: utf-8 -*-\n\nfrom __future__ import division, print_function, absolute_import\n\n# on_rtd is whether we are on readthedocs.org\nimport os\nimport sys\n\nfrom hypothesis import __version__\n\non_rtd = os.environ.get('READTHEDOCS', None) == 'True'\n\nsys.path.append(\n os.path.join(os.path.dirname(__file__), '..', 'src')\n)\n\n\nautodoc_member_order = 'bysource'\n\nextensions = [\n 'sphinx.ext.autodoc',\n 'sphinx.ext.doctest',\n 'sphinx.ext.extlinks',\n 'sphinx.ext.viewcode',\n 'sphinx.ext.intersphinx',\n]\n\ntemplates_path = ['_templates']\n\nsource_suffix = '.rst'\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# General information about the project.\nproject = u'Hypothesis'\ncopyright = u'2015, David R. MacIver'\nauthor = u'David R. 
MacIver'\n\nversion = __version__\nrelease = __version__\n\nlanguage = None\n\nexclude_patterns = ['_build']\n\npygments_style = 'sphinx'\n\ntodo_include_todos = False\n\nintersphinx_mapping = {\n 'python': ('http://docs.python.org/', None),\n}\n\nautodoc_mock_imports = ['numpy']\n\ndoctest_global_setup = '''\n# Some standard imports\nfrom hypothesis import *\nfrom hypothesis.strategies import *\n# Ensure that output (including from strategies) is deterministic\nimport random\nrandom.seed(0)\n'''\n\n# This config value must be a dictionary of external sites, mapping unique\n# short alias names to a base URL and a prefix.\n# See http://sphinx-doc.org/ext/extlinks.html\nextlinks = {\n 'commit': ('https://github.com/HypothesisWorks/hypothesis-python/commit/%s', 'commit '),\n 'gh-file': ('https://github.com/HypothesisWorks/hypothesis-python/blob/master/%s', ''),\n 'gh-link': ('https://github.com/HypothesisWorks/hypothesis-python/%s', ''),\n 'issue': ('https://github.com/HypothesisWorks/hypothesis-python/issues/%s', 'issue #'),\n 'pull': ('https://github.com/HypothesisWorks/hypothesis-python/pulls/%s', 'pull request #'),\n}\n\n# -- Options for HTML output ----------------------------------------------\n\nif not on_rtd: # only import and set the theme if we're building docs locally\n import sphinx_rtd_theme\n html_theme = 'sphinx_rtd_theme'\n html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]\n\nhtml_static_path = ['_static']\n\nhtmlhelp_basename = 'Hypothesisdoc'\n\n# -- Options for LaTeX output ---------------------------------------------\n\nlatex_elements = {\n}\n\nlatex_documents = [\n (master_doc, 'Hypothesis.tex', u'Hypothesis Documentation',\n u'David R. MacIver', 'manual'),\n]\n\nman_pages = [\n (master_doc, 'hypothesis', u'Hypothesis Documentation',\n [author], 1)\n]\n\ntexinfo_documents = [\n (master_doc, 'Hypothesis', u'Hypothesis Documentation',\n author, 'Hypothesis', 'One line description of project.',\n 'Miscellaneous'),\n]\n", "path": "docs/conf.py"}]}
3,877
433
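As a sanity check of the reasoning in this record, the snippet below shows why an `Enum` subclass trips the original `Sequence` test even though it iterates deterministically, which is exactly the case the added `enum.EnumMeta` branch accounts for. It is a standalone illustration, not Hypothesis code, and it imports `Sequence` from `collections.abc` (the modern location of the ABC that the 2017 code imported from `collections`).

```python
import enum
from collections.abc import Sequence


class AnEnum(enum.Enum):
    A = "A"
    B = "B"


# An Enum class is not a Sequence, which is what triggered the false warning...
print(isinstance(AnEnum, Sequence))       # False
print(isinstance(AnEnum, enum.EnumMeta))  # True, the case the patch whitelists

# ...yet its members iterate in definition order, so sampling is replayable.
print([member.name for member in AnEnum])  # ['A', 'B']
```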
gh_patches_debug_30999
rasdani/github-patches
git_diff
apache__airflow-28953
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Support telegram-bot v20+ ### Body Currently our telegram integration uses Telegram v13 telegram-bot library. On 1st of Jan 2023 a new, backwards incompatible version of Telegram-bot has been released : https://pypi.org/project/python-telegram-bot/20.0/#history and at least as reported by MyPy and our test suite test failures, Telegram 20 needs some changes to work: Here is a transition guide that might be helpful. Transition guide is here: https://github.com/python-telegram-bot/python-telegram-bot/wiki/Transition-guide-to-Version-20.0 In the meantime we limit telegram to < 20.0.0 ### Committer - [X] I acknowledge that I am a maintainer/committer of the Apache Airflow project. </issue> <code> [start of airflow/providers/telegram/hooks/telegram.py] 1 # 2 # Licensed to the Apache Software Foundation (ASF) under one 3 # or more contributor license agreements. See the NOTICE file 4 # distributed with this work for additional information 5 # regarding copyright ownership. The ASF licenses this file 6 # to you under the Apache License, Version 2.0 (the 7 # "License"); you may not use this file except in compliance 8 # with the License. You may obtain a copy of the License at 9 # 10 # http://www.apache.org/licenses/LICENSE-2.0 11 # 12 # Unless required by applicable law or agreed to in writing, 13 # software distributed under the License is distributed on an 14 # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY 15 # KIND, either express or implied. See the License for the 16 # specific language governing permissions and limitations 17 # under the License. 18 """Hook for Telegram""" 19 from __future__ import annotations 20 21 import telegram 22 import tenacity 23 24 from airflow.exceptions import AirflowException 25 from airflow.hooks.base import BaseHook 26 27 28 class TelegramHook(BaseHook): 29 """ 30 This hook allows you to post messages to Telegram using the telegram python-telegram-bot library. 31 32 The library can be found here: https://github.com/python-telegram-bot/python-telegram-bot 33 It accepts both telegram bot API token directly or connection that has telegram bot API token. 34 If both supplied, token parameter will be given precedence, otherwise 'password' field in the connection 35 from telegram_conn_id will be used. 36 chat_id can also be provided in the connection using 'host' field in connection. 37 Following is the details of a telegram_connection: 38 name: 'telegram-connection-name' 39 conn_type: 'http' 40 password: 'TELEGRAM_TOKEN' 41 host: 'chat_id' (optional) 42 Examples: 43 .. 
code-block:: python 44 45 # Create hook 46 telegram_hook = TelegramHook(telegram_conn_id="telegram_default") 47 # or telegram_hook = TelegramHook(telegram_conn_id='telegram_default', chat_id='-1xxx') 48 # or telegram_hook = TelegramHook(token='xxx:xxx', chat_id='-1xxx') 49 50 # Call method from telegram bot client 51 telegram_hook.send_message(None, {"text": "message", "chat_id": "-1xxx"}) 52 # or telegram_hook.send_message(None', {"text": "message"}) 53 54 :param telegram_conn_id: connection that optionally has Telegram API token in the password field 55 :param token: optional telegram API token 56 :param chat_id: optional chat_id of the telegram chat/channel/group 57 """ 58 59 def __init__( 60 self, 61 telegram_conn_id: str | None = None, 62 token: str | None = None, 63 chat_id: str | None = None, 64 ) -> None: 65 super().__init__() 66 self.token = self.__get_token(token, telegram_conn_id) 67 self.chat_id = self.__get_chat_id(chat_id, telegram_conn_id) 68 self.connection = self.get_conn() 69 70 def get_conn(self) -> telegram.bot.Bot: 71 """ 72 Returns the telegram bot client 73 74 :return: telegram bot client 75 """ 76 return telegram.bot.Bot(token=self.token) 77 78 def __get_token(self, token: str | None, telegram_conn_id: str | None) -> str: 79 """ 80 Returns the telegram API token 81 82 :param token: telegram API token 83 :param telegram_conn_id: telegram connection name 84 :return: telegram API token 85 """ 86 if token is not None: 87 return token 88 89 if telegram_conn_id is not None: 90 conn = self.get_connection(telegram_conn_id) 91 92 if not conn.password: 93 raise AirflowException("Missing token(password) in Telegram connection") 94 95 return conn.password 96 97 raise AirflowException("Cannot get token: No valid Telegram connection supplied.") 98 99 def __get_chat_id(self, chat_id: str | None, telegram_conn_id: str | None) -> str | None: 100 """ 101 Returns the telegram chat ID for a chat/channel/group 102 103 :param chat_id: optional chat ID 104 :param telegram_conn_id: telegram connection name 105 :return: telegram chat ID 106 """ 107 if chat_id is not None: 108 return chat_id 109 110 if telegram_conn_id is not None: 111 conn = self.get_connection(telegram_conn_id) 112 return conn.host 113 114 return None 115 116 @tenacity.retry( 117 retry=tenacity.retry_if_exception_type(telegram.error.TelegramError), 118 stop=tenacity.stop_after_attempt(5), 119 wait=tenacity.wait_fixed(1), 120 ) 121 def send_message(self, api_params: dict) -> None: 122 """ 123 Sends the message to a telegram channel or chat. 124 125 :param api_params: params for telegram_instance.send_message. It can also be used to override chat_id 126 """ 127 kwargs = { 128 "chat_id": self.chat_id, 129 "parse_mode": telegram.parsemode.ParseMode.HTML, 130 "disable_web_page_preview": True, 131 } 132 kwargs.update(api_params) 133 134 if "text" not in kwargs or kwargs["text"] is None: 135 raise AirflowException("'text' must be provided for telegram message") 136 137 if kwargs["chat_id"] is None: 138 raise AirflowException("'chat_id' must be provided for telegram message") 139 140 response = self.connection.send_message(**kwargs) 141 self.log.debug(response) 142 [end of airflow/providers/telegram/hooks/telegram.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/airflow/providers/telegram/hooks/telegram.py b/airflow/providers/telegram/hooks/telegram.py
--- a/airflow/providers/telegram/hooks/telegram.py
+++ b/airflow/providers/telegram/hooks/telegram.py
@@ -18,6 +18,8 @@
 """Hook for Telegram"""
 from __future__ import annotations
 
+import asyncio
+
 import telegram
 import tenacity
 
@@ -67,13 +69,13 @@
         self.chat_id = self.__get_chat_id(chat_id, telegram_conn_id)
         self.connection = self.get_conn()
 
-    def get_conn(self) -> telegram.bot.Bot:
+    def get_conn(self) -> telegram.Bot:
         """
         Returns the telegram bot client
 
         :return: telegram bot client
         """
-        return telegram.bot.Bot(token=self.token)
+        return telegram.Bot(self.token)
 
     def __get_token(self, token: str | None, telegram_conn_id: str | None) -> str:
         """
@@ -126,7 +128,7 @@
         """
         kwargs = {
             "chat_id": self.chat_id,
-            "parse_mode": telegram.parsemode.ParseMode.HTML,
+            "parse_mode": telegram.constants.ParseMode.HTML,
             "disable_web_page_preview": True,
         }
         kwargs.update(api_params)
@@ -137,5 +139,5 @@
         if kwargs["chat_id"] is None:
             raise AirflowException("'chat_id' must be provided for telegram message")
 
-        response = self.connection.send_message(**kwargs)
+        response = asyncio.run(self.connection.send_message(**kwargs))
         self.log.debug(response)
{"golden_diff": "diff --git a/airflow/providers/telegram/hooks/telegram.py b/airflow/providers/telegram/hooks/telegram.py\n--- a/airflow/providers/telegram/hooks/telegram.py\n+++ b/airflow/providers/telegram/hooks/telegram.py\n@@ -18,6 +18,8 @@\n \"\"\"Hook for Telegram\"\"\"\n from __future__ import annotations\n \n+import asyncio\n+\n import telegram\n import tenacity\n \n@@ -67,13 +69,13 @@\n self.chat_id = self.__get_chat_id(chat_id, telegram_conn_id)\n self.connection = self.get_conn()\n \n- def get_conn(self) -> telegram.bot.Bot:\n+ def get_conn(self) -> telegram.Bot:\n \"\"\"\n Returns the telegram bot client\n \n :return: telegram bot client\n \"\"\"\n- return telegram.bot.Bot(token=self.token)\n+ return telegram.Bot(self.token)\n \n def __get_token(self, token: str | None, telegram_conn_id: str | None) -> str:\n \"\"\"\n@@ -126,7 +128,7 @@\n \"\"\"\n kwargs = {\n \"chat_id\": self.chat_id,\n- \"parse_mode\": telegram.parsemode.ParseMode.HTML,\n+ \"parse_mode\": telegram.constants.ParseMode.HTML,\n \"disable_web_page_preview\": True,\n }\n kwargs.update(api_params)\n@@ -137,5 +139,5 @@\n if kwargs[\"chat_id\"] is None:\n raise AirflowException(\"'chat_id' must be provided for telegram message\")\n \n- response = self.connection.send_message(**kwargs)\n+ response = asyncio.run(self.connection.send_message(**kwargs))\n self.log.debug(response)\n", "issue": "Support telegram-bot v20+\n### Body\n\nCurrently our telegram integration uses Telegram v13 telegram-bot library. On 1st of Jan 2023 a new, backwards incompatible version of Telegram-bot has been released : https://pypi.org/project/python-telegram-bot/20.0/#history and at least as reported by MyPy and our test suite test failures, Telegram 20 needs some changes to work:\r\n\r\nHere is a transition guide that might be helpful. \r\n\r\nTransition guide is here: https://github.com/python-telegram-bot/python-telegram-bot/wiki/Transition-guide-to-Version-20.0\r\n\r\nIn the meantime we limit telegram to < 20.0.0\n\n### Committer\n\n- [X] I acknowledge that I am a maintainer/committer of the Apache Airflow project.\n", "before_files": [{"content": "#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. 
See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\"\"\"Hook for Telegram\"\"\"\nfrom __future__ import annotations\n\nimport telegram\nimport tenacity\n\nfrom airflow.exceptions import AirflowException\nfrom airflow.hooks.base import BaseHook\n\n\nclass TelegramHook(BaseHook):\n \"\"\"\n This hook allows you to post messages to Telegram using the telegram python-telegram-bot library.\n\n The library can be found here: https://github.com/python-telegram-bot/python-telegram-bot\n It accepts both telegram bot API token directly or connection that has telegram bot API token.\n If both supplied, token parameter will be given precedence, otherwise 'password' field in the connection\n from telegram_conn_id will be used.\n chat_id can also be provided in the connection using 'host' field in connection.\n Following is the details of a telegram_connection:\n name: 'telegram-connection-name'\n conn_type: 'http'\n password: 'TELEGRAM_TOKEN'\n host: 'chat_id' (optional)\n Examples:\n .. code-block:: python\n\n # Create hook\n telegram_hook = TelegramHook(telegram_conn_id=\"telegram_default\")\n # or telegram_hook = TelegramHook(telegram_conn_id='telegram_default', chat_id='-1xxx')\n # or telegram_hook = TelegramHook(token='xxx:xxx', chat_id='-1xxx')\n\n # Call method from telegram bot client\n telegram_hook.send_message(None, {\"text\": \"message\", \"chat_id\": \"-1xxx\"})\n # or telegram_hook.send_message(None', {\"text\": \"message\"})\n\n :param telegram_conn_id: connection that optionally has Telegram API token in the password field\n :param token: optional telegram API token\n :param chat_id: optional chat_id of the telegram chat/channel/group\n \"\"\"\n\n def __init__(\n self,\n telegram_conn_id: str | None = None,\n token: str | None = None,\n chat_id: str | None = None,\n ) -> None:\n super().__init__()\n self.token = self.__get_token(token, telegram_conn_id)\n self.chat_id = self.__get_chat_id(chat_id, telegram_conn_id)\n self.connection = self.get_conn()\n\n def get_conn(self) -> telegram.bot.Bot:\n \"\"\"\n Returns the telegram bot client\n\n :return: telegram bot client\n \"\"\"\n return telegram.bot.Bot(token=self.token)\n\n def __get_token(self, token: str | None, telegram_conn_id: str | None) -> str:\n \"\"\"\n Returns the telegram API token\n\n :param token: telegram API token\n :param telegram_conn_id: telegram connection name\n :return: telegram API token\n \"\"\"\n if token is not None:\n return token\n\n if telegram_conn_id is not None:\n conn = self.get_connection(telegram_conn_id)\n\n if not conn.password:\n raise AirflowException(\"Missing token(password) in Telegram connection\")\n\n return conn.password\n\n raise AirflowException(\"Cannot get token: No valid Telegram connection supplied.\")\n\n def __get_chat_id(self, chat_id: str | None, telegram_conn_id: str | None) -> str | None:\n \"\"\"\n Returns the telegram chat ID for a chat/channel/group\n\n :param chat_id: optional chat ID\n :param telegram_conn_id: telegram connection name\n :return: telegram chat ID\n \"\"\"\n if chat_id is not None:\n return chat_id\n\n if telegram_conn_id is not None:\n conn = self.get_connection(telegram_conn_id)\n return conn.host\n\n return None\n\n @tenacity.retry(\n retry=tenacity.retry_if_exception_type(telegram.error.TelegramError),\n stop=tenacity.stop_after_attempt(5),\n wait=tenacity.wait_fixed(1),\n )\n def send_message(self, api_params: dict) -> None:\n \"\"\"\n Sends the message to a telegram channel or chat.\n\n :param 
api_params: params for telegram_instance.send_message. It can also be used to override chat_id\n \"\"\"\n kwargs = {\n \"chat_id\": self.chat_id,\n \"parse_mode\": telegram.parsemode.ParseMode.HTML,\n \"disable_web_page_preview\": True,\n }\n kwargs.update(api_params)\n\n if \"text\" not in kwargs or kwargs[\"text\"] is None:\n raise AirflowException(\"'text' must be provided for telegram message\")\n\n if kwargs[\"chat_id\"] is None:\n raise AirflowException(\"'chat_id' must be provided for telegram message\")\n\n response = self.connection.send_message(**kwargs)\n self.log.debug(response)\n", "path": "airflow/providers/telegram/hooks/telegram.py"}]}
2,176
357
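The three API changes the Airflow patch relies on (the `Bot` class moving to the package root, `ParseMode` moving to `telegram.constants`, and `send_message` becoming a coroutine) can be seen in isolation in the sketch below. It assumes python-telegram-bot >= 20 is installed; `TOKEN` and `CHAT_ID` are placeholders, and the function is a minimal illustration rather than the hook's actual implementation.

```python
import asyncio

import telegram

TOKEN = "xxx:xxx"            # placeholder bot token
CHAT_ID = "-1001234567890"   # placeholder chat id


def send(text: str) -> None:
    # v13 spelling was telegram.bot.Bot(token=...)
    bot = telegram.Bot(TOKEN)
    # send_message is now a coroutine, so it must be awaited or driven by a loop.
    message = asyncio.run(
        bot.send_message(
            chat_id=CHAT_ID,
            text=text,
            # v13 spelling was telegram.parsemode.ParseMode.HTML
            parse_mode=telegram.constants.ParseMode.HTML,
            disable_web_page_preview=True,
        )
    )
    print(message.message_id)
```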
gh_patches_debug_9859
rasdani/github-patches
git_diff
aimhubio__aim-3112
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> [fix] Serve files linked into the static files directory In certain conda environments, the files in the static files root directory can be symlinked from a different place. The fixed implementation only resolves relative .. segments in the request path without resolving any symlinks. This way, it still prevents reading arbitrary files through the web server while allowing the reading of symlinked files. </issue> <code> [start of aim/web/api/views.py] 1 import os 2 from pathlib import Path 3 4 from fastapi import HTTPException, Request 5 from fastapi.responses import FileResponse, HTMLResponse 6 7 from aim.web.api.utils import APIRouter # wrapper for fastapi.APIRouter 8 from aim.web.configs import AIM_UI_BASE_PATH 9 10 statics_router = APIRouter() 11 12 13 @statics_router.get('/static-files/{path:path}/') 14 async def serve_static_files(path): 15 import aim_ui 16 17 static_files_root = Path(aim_ui.__file__).parent / 'build' 18 # Normalize to resolve any .. segments 19 static_file_name = os.path.normpath(static_files_root / path) 20 21 # Ensure that no paths outside the root directory are accessed by checking that the 22 # root directory is a prefix of the file path 23 common_prefix = Path(os.path.commonpath([static_files_root, static_file_name])) 24 if common_prefix == static_files_root: 25 raise HTTPException(status_code=404) 26 27 compressed_file_name = Path(f'{static_file_name}.gz') 28 if compressed_file_name.exists(): 29 return FileResponse(compressed_file_name, headers={'Content-Encoding': 'gzip'}) 30 return FileResponse(static_file_name) 31 32 33 # do not change the placement of this method 34 # as it also serves as a fallback for wrong url routes 35 @statics_router.get('/{path:path}/', response_class=HTMLResponse) 36 async def serve_index_html(request: Request): 37 import aim_ui 38 from jinja2 import Environment, FileSystemLoader 39 40 template_files_dir = os.path.join(os.path.dirname(aim_ui.__file__), 'build') 41 env = Environment( 42 loader=FileSystemLoader(template_files_dir), 43 autoescape=True 44 ) 45 template = env.get_template('index-template.html') 46 base_path = os.environ.get(AIM_UI_BASE_PATH, '') 47 return template.render(base_path=base_path) 48 [end of aim/web/api/views.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/aim/web/api/views.py b/aim/web/api/views.py
--- a/aim/web/api/views.py
+++ b/aim/web/api/views.py
@@ -21,7 +21,7 @@
     # Ensure that no paths outside the root directory are accessed by checking that the
     # root directory is a prefix of the file path
     common_prefix = Path(os.path.commonpath([static_files_root, static_file_name]))
-    if common_prefix == static_files_root:
+    if common_prefix != static_files_root:
         raise HTTPException(status_code=404)
 
     compressed_file_name = Path(f'{static_file_name}.gz')
{"golden_diff": "diff --git a/aim/web/api/views.py b/aim/web/api/views.py\n--- a/aim/web/api/views.py\n+++ b/aim/web/api/views.py\n@@ -21,7 +21,7 @@\n # Ensure that no paths outside the root directory are accessed by checking that the\n # root directory is a prefix of the file path\n common_prefix = Path(os.path.commonpath([static_files_root, static_file_name]))\n- if common_prefix == static_files_root:\n+ if common_prefix != static_files_root:\n raise HTTPException(status_code=404)\n \n compressed_file_name = Path(f'{static_file_name}.gz')\n", "issue": "[fix] Serve files linked into the static files directory\nIn certain conda environments, the files in the static files root directory can be symlinked from a different place. The fixed implementation only resolves relative .. segments in the request path without resolving any symlinks. This way, it still prevents reading arbitrary files through the web server while allowing the reading of symlinked files.\n", "before_files": [{"content": "import os\nfrom pathlib import Path\n\nfrom fastapi import HTTPException, Request\nfrom fastapi.responses import FileResponse, HTMLResponse\n\nfrom aim.web.api.utils import APIRouter # wrapper for fastapi.APIRouter\nfrom aim.web.configs import AIM_UI_BASE_PATH\n\nstatics_router = APIRouter()\n\n\n@statics_router.get('/static-files/{path:path}/')\nasync def serve_static_files(path):\n import aim_ui\n\n static_files_root = Path(aim_ui.__file__).parent / 'build'\n # Normalize to resolve any .. segments\n static_file_name = os.path.normpath(static_files_root / path)\n\n # Ensure that no paths outside the root directory are accessed by checking that the\n # root directory is a prefix of the file path\n common_prefix = Path(os.path.commonpath([static_files_root, static_file_name]))\n if common_prefix == static_files_root:\n raise HTTPException(status_code=404)\n\n compressed_file_name = Path(f'{static_file_name}.gz')\n if compressed_file_name.exists():\n return FileResponse(compressed_file_name, headers={'Content-Encoding': 'gzip'})\n return FileResponse(static_file_name)\n\n\n# do not change the placement of this method\n# as it also serves as a fallback for wrong url routes\n@statics_router.get('/{path:path}/', response_class=HTMLResponse)\nasync def serve_index_html(request: Request):\n import aim_ui\n from jinja2 import Environment, FileSystemLoader\n\n template_files_dir = os.path.join(os.path.dirname(aim_ui.__file__), 'build')\n env = Environment(\n loader=FileSystemLoader(template_files_dir),\n autoescape=True\n )\n template = env.get_template('index-template.html')\n base_path = os.environ.get(AIM_UI_BASE_PATH, '')\n return template.render(base_path=base_path)\n", "path": "aim/web/api/views.py"}]}
1,092
141
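The path check that the Aim patch corrects can be exercised on its own with standard-library calls only. The sketch below keeps the same structure as the patched handler: `..` segments are normalised away without resolving symlinks, and any candidate whose common prefix with the root differs from the root is rejected. The root directory here is made up, and a plain `ValueError` stands in for FastAPI's `HTTPException`.

```python
import os
from pathlib import Path

static_files_root = Path("/opt/aim_ui/build")  # hypothetical static root


def resolve_static_file(path: str) -> str:
    # Normalise to collapse any ".." segments; symlinks are left untouched.
    candidate = os.path.normpath(static_files_root / path)
    # Reject anything that escaped the root (the fix flips == to !=).
    common_prefix = Path(os.path.commonpath([static_files_root, candidate]))
    if common_prefix != static_files_root:
        raise ValueError("path escapes the static files root")
    return candidate


print(resolve_static_file("index.html"))      # /opt/aim_ui/build/index.html
# resolve_static_file("../../etc/passwd")     # would raise ValueError
```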
gh_patches_debug_15906
rasdani/github-patches
git_diff
cowrie__cowrie-1563
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> 9may </issue> <code> [start of src/cowrie/output/mysql.py] 1 """ 2 MySQL output connector. Writes audit logs to MySQL database 3 """ 4 5 6 import MySQLdb 7 8 from twisted.enterprise import adbapi 9 from twisted.internet import defer 10 from twisted.python import log 11 12 import cowrie.core.output 13 from cowrie.core.config import CowrieConfig 14 15 16 class ReconnectingConnectionPool(adbapi.ConnectionPool): 17 """ 18 Reconnecting adbapi connection pool for MySQL. 19 20 This class improves on the solution posted at 21 http://www.gelens.org/2008/09/12/reinitializing-twisted-connectionpool/ 22 by checking exceptions by error code and only disconnecting the current 23 connection instead of all of them. 24 25 Also see: 26 http://twistedmatrix.com/pipermail/twisted-python/2009-July/020007.html 27 """ 28 29 def _runInteraction(self, interaction, *args, **kw): 30 try: 31 return adbapi.ConnectionPool._runInteraction(self, interaction, *args, **kw) 32 except (MySQLdb.OperationalError, MySQLdb._exceptions.OperationalError) as e: 33 if e.args[0] not in (2003, 2006, 2013): 34 raise e 35 log.msg(f"RCP: got error {e}, retrying operation") 36 conn = self.connections.get(self.threadID()) 37 self.disconnect(conn) 38 # Try the interaction again 39 return adbapi.ConnectionPool._runInteraction(self, interaction, *args, **kw) 40 41 42 class Output(cowrie.core.output.Output): 43 """ 44 mysql output 45 """ 46 47 db = None 48 debug: bool = False 49 50 def start(self): 51 self.debug = CowrieConfig.getboolean("output_mysql", "debug", fallback=False) 52 port = CowrieConfig.getint("output_mysql", "port", fallback=3306) 53 try: 54 self.db = ReconnectingConnectionPool( 55 "MySQLdb", 56 host=CowrieConfig.get("output_mysql", "host"), 57 db=CowrieConfig.get("output_mysql", "database"), 58 user=CowrieConfig.get("output_mysql", "username"), 59 passwd=CowrieConfig.get("output_mysql", "password", raw=True), 60 port=port, 61 cp_min=1, 62 cp_max=1, 63 charset="utf8mb4", 64 cp_reconnect=True, 65 use_unicode=True, 66 ) 67 except (MySQLdb.Error, MySQLdb._exceptions.Error) as e: 68 log.msg(f"output_mysql: Error {e.args[0]}: {e.args[1]}") 69 70 def stop(self): 71 self.db.commit() 72 self.db.close() 73 74 def sqlerror(self, error): 75 """ 76 1146, "Table '...' doesn't exist" 77 1406, "Data too long for column '...' at row ..." 
78 """ 79 if error.value.args[0] in (1146, 1406): 80 log.msg(f"output_mysql: MySQL Error: {error.value.args!r}") 81 log.msg("MySQL schema maybe misconfigured, doublecheck database!") 82 else: 83 log.msg(f"output_mysql: MySQL Error: {error.value.args!r}") 84 85 def simpleQuery(self, sql, args): 86 """ 87 Just run a deferred sql query, only care about errors 88 """ 89 if self.debug: 90 log.msg(f"output_mysql: MySQL query: {sql} {args!r}") 91 d = self.db.runQuery(sql, args) 92 d.addErrback(self.sqlerror) 93 94 @defer.inlineCallbacks 95 def write(self, entry): 96 if entry["eventid"] == "cowrie.session.connect": 97 r = yield self.db.runQuery( 98 f"SELECT `id`\" \"FROM `sensors`\" \"WHERE `ip` = {self.sensor}" 99 ) 100 101 if r: 102 sensorid = r[0][0] 103 else: 104 yield self.db.runQuery( 105 f"INSERT INTO `sensors` (`ip`) \" \"VALUES ({self.sensor})" 106 ) 107 108 r = yield self.db.runQuery("SELECT LAST_INSERT_ID()") 109 sensorid = int(r[0][0]) 110 self.simpleQuery( 111 "INSERT INTO `sessions` (`id`, `starttime`, `sensor`, `ip`) " 112 "VALUES (%s, FROM_UNIXTIME(%s), %s, %s)", 113 (entry["session"], entry["time"], sensorid, entry["src_ip"]), 114 ) 115 116 elif entry["eventid"] == "cowrie.login.success": 117 self.simpleQuery( 118 "INSERT INTO `auth` (`session`, `success`, `username`, `password`, `timestamp`) " 119 "VALUES (%s, %s, %s, %s, FROM_UNIXTIME(%s))", 120 ( 121 entry["session"], 122 1, 123 entry["username"], 124 entry["password"], 125 entry["time"], 126 ), 127 ) 128 129 elif entry["eventid"] == "cowrie.login.failed": 130 self.simpleQuery( 131 "INSERT INTO `auth` (`session`, `success`, `username`, `password`, `timestamp`) " 132 "VALUES (%s, %s, %s, %s, FROM_UNIXTIME(%s))", 133 ( 134 entry["session"], 135 0, 136 entry["username"], 137 entry["password"], 138 entry["time"], 139 ), 140 ) 141 142 elif entry["eventid"] == "cowrie.session.params": 143 self.simpleQuery( 144 "INSERT INTO `params` (`session`, `arch`) " "VALUES (%s, %s)", 145 (entry["session"], entry["arch"]), 146 ) 147 148 elif entry["eventid"] == "cowrie.command.input": 149 self.simpleQuery( 150 "INSERT INTO `input` (`session`, `timestamp`, `success`, `input`) " 151 "VALUES (%s, FROM_UNIXTIME(%s), %s , %s)", 152 (entry["session"], entry["time"], 1, entry["input"]), 153 ) 154 155 elif entry["eventid"] == "cowrie.command.failed": 156 self.simpleQuery( 157 "INSERT INTO `input` (`session`, `timestamp`, `success`, `input`) " 158 "VALUES (%s, FROM_UNIXTIME(%s), %s , %s)", 159 (entry["session"], entry["time"], 0, entry["input"]), 160 ) 161 162 elif entry["eventid"] == "cowrie.session.file_download": 163 self.simpleQuery( 164 "INSERT INTO `downloads` (`session`, `timestamp`, `url`, `outfile`, `shasum`) " 165 "VALUES (%s, FROM_UNIXTIME(%s), %s, %s, %s)", 166 ( 167 entry["session"], 168 entry["time"], 169 entry.get("url", ""), 170 entry["outfile"], 171 entry["shasum"], 172 ), 173 ) 174 175 elif entry["eventid"] == "cowrie.session.file_download.failed": 176 self.simpleQuery( 177 "INSERT INTO `downloads` (`session`, `timestamp`, `url`, `outfile`, `shasum`) " 178 "VALUES (%s, FROM_UNIXTIME(%s), %s, %s, %s)", 179 (entry["session"], entry["time"], entry.get("url", ""), "NULL", "NULL"), 180 ) 181 182 elif entry["eventid"] == "cowrie.session.file_upload": 183 self.simpleQuery( 184 "INSERT INTO `downloads` (`session`, `timestamp`, `url`, `outfile`, `shasum`) " 185 "VALUES (%s, FROM_UNIXTIME(%s), %s, %s, %s)", 186 ( 187 entry["session"], 188 entry["time"], 189 "", 190 entry["outfile"], 191 entry["shasum"], 192 ), 193 ) 194 195 elif entry["eventid"] 
== "cowrie.session.input": 196 self.simpleQuery( 197 "INSERT INTO `input` (`session`, `timestamp`, `realm`, `input`) " 198 "VALUES (%s, FROM_UNIXTIME(%s), %s , %s)", 199 (entry["session"], entry["time"], entry["realm"], entry["input"]), 200 ) 201 202 elif entry["eventid"] == "cowrie.client.version": 203 r = yield self.db.runQuery( 204 "SELECT `id` FROM `clients` " "WHERE `version` = %s", 205 (entry["version"],), 206 ) 207 208 if r: 209 id = int(r[0][0]) 210 else: 211 yield self.db.runQuery( 212 "INSERT INTO `clients` (`version`) " "VALUES (%s)", 213 (entry["version"],), 214 ) 215 216 r = yield self.db.runQuery("SELECT LAST_INSERT_ID()") 217 id = int(r[0][0]) 218 self.simpleQuery( 219 "UPDATE `sessions` " "SET `client` = %s " "WHERE `id` = %s", 220 (id, entry["session"]), 221 ) 222 223 elif entry["eventid"] == "cowrie.client.size": 224 self.simpleQuery( 225 "UPDATE `sessions` " "SET `termsize` = %s " "WHERE `id` = %s", 226 ("{}x{}".format(entry["width"], entry["height"]), entry["session"]), 227 ) 228 229 elif entry["eventid"] == "cowrie.session.closed": 230 self.simpleQuery( 231 "UPDATE `sessions` " 232 "SET `endtime` = FROM_UNIXTIME(%s) " 233 "WHERE `id` = %s", 234 (entry["time"], entry["session"]), 235 ) 236 237 elif entry["eventid"] == "cowrie.log.closed": 238 self.simpleQuery( 239 "INSERT INTO `ttylog` (`session`, `ttylog`, `size`) " 240 "VALUES (%s, %s, %s)", 241 (entry["session"], entry["ttylog"], entry["size"]), 242 ) 243 244 elif entry["eventid"] == "cowrie.client.fingerprint": 245 self.simpleQuery( 246 "INSERT INTO `keyfingerprints` (`session`, `username`, `fingerprint`) " 247 "VALUES (%s, %s, %s)", 248 (entry["session"], entry["username"], entry["fingerprint"]), 249 ) 250 251 elif entry["eventid"] == "cowrie.direct-tcpip.request": 252 self.simpleQuery( 253 "INSERT INTO `ipforwards` (`session`, `timestamp`, `dst_ip`, `dst_port`) " 254 "VALUES (%s, FROM_UNIXTIME(%s), %s, %s)", 255 (entry["session"], entry["time"], entry["dst_ip"], entry["dst_port"]), 256 ) 257 258 elif entry["eventid"] == "cowrie.direct-tcpip.data": 259 self.simpleQuery( 260 "INSERT INTO `ipforwardsdata` (`session`, `timestamp`, `dst_ip`, `dst_port`, `data`) " 261 "VALUES (%s, FROM_UNIXTIME(%s), %s, %s, %s)", 262 ( 263 entry["session"], 264 entry["time"], 265 entry["dst_ip"], 266 entry["dst_port"], 267 entry["data"], 268 ), 269 ) 270 [end of src/cowrie/output/mysql.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/src/cowrie/output/mysql.py b/src/cowrie/output/mysql.py --- a/src/cowrie/output/mysql.py +++ b/src/cowrie/output/mysql.py @@ -95,14 +95,14 @@ def write(self, entry): if entry["eventid"] == "cowrie.session.connect": r = yield self.db.runQuery( - f"SELECT `id`\" \"FROM `sensors`\" \"WHERE `ip` = {self.sensor}" + "SELECT `id`" "FROM `sensors`" f"WHERE `ip` = {self.sensor}" ) if r: sensorid = r[0][0] else: yield self.db.runQuery( - f"INSERT INTO `sensors` (`ip`) \" \"VALUES ({self.sensor})" + "INSERT INTO `sensors` (`ip`) " f"VALUES ({self.sensor})" ) r = yield self.db.runQuery("SELECT LAST_INSERT_ID()")
{"golden_diff": "diff --git a/src/cowrie/output/mysql.py b/src/cowrie/output/mysql.py\n--- a/src/cowrie/output/mysql.py\n+++ b/src/cowrie/output/mysql.py\n@@ -95,14 +95,14 @@\n def write(self, entry):\n if entry[\"eventid\"] == \"cowrie.session.connect\":\n r = yield self.db.runQuery(\n- f\"SELECT `id`\\\" \\\"FROM `sensors`\\\" \\\"WHERE `ip` = {self.sensor}\"\n+ \"SELECT `id`\" \"FROM `sensors`\" f\"WHERE `ip` = {self.sensor}\"\n )\n \n if r:\n sensorid = r[0][0]\n else:\n yield self.db.runQuery(\n- f\"INSERT INTO `sensors` (`ip`) \\\" \\\"VALUES ({self.sensor})\"\n+ \"INSERT INTO `sensors` (`ip`) \" f\"VALUES ({self.sensor})\"\n )\n \n r = yield self.db.runQuery(\"SELECT LAST_INSERT_ID()\")\n", "issue": "9may\n\n", "before_files": [{"content": "\"\"\"\nMySQL output connector. Writes audit logs to MySQL database\n\"\"\"\n\n\nimport MySQLdb\n\nfrom twisted.enterprise import adbapi\nfrom twisted.internet import defer\nfrom twisted.python import log\n\nimport cowrie.core.output\nfrom cowrie.core.config import CowrieConfig\n\n\nclass ReconnectingConnectionPool(adbapi.ConnectionPool):\n \"\"\"\n Reconnecting adbapi connection pool for MySQL.\n\n This class improves on the solution posted at\n http://www.gelens.org/2008/09/12/reinitializing-twisted-connectionpool/\n by checking exceptions by error code and only disconnecting the current\n connection instead of all of them.\n\n Also see:\n http://twistedmatrix.com/pipermail/twisted-python/2009-July/020007.html\n \"\"\"\n\n def _runInteraction(self, interaction, *args, **kw):\n try:\n return adbapi.ConnectionPool._runInteraction(self, interaction, *args, **kw)\n except (MySQLdb.OperationalError, MySQLdb._exceptions.OperationalError) as e:\n if e.args[0] not in (2003, 2006, 2013):\n raise e\n log.msg(f\"RCP: got error {e}, retrying operation\")\n conn = self.connections.get(self.threadID())\n self.disconnect(conn)\n # Try the interaction again\n return adbapi.ConnectionPool._runInteraction(self, interaction, *args, **kw)\n\n\nclass Output(cowrie.core.output.Output):\n \"\"\"\n mysql output\n \"\"\"\n\n db = None\n debug: bool = False\n\n def start(self):\n self.debug = CowrieConfig.getboolean(\"output_mysql\", \"debug\", fallback=False)\n port = CowrieConfig.getint(\"output_mysql\", \"port\", fallback=3306)\n try:\n self.db = ReconnectingConnectionPool(\n \"MySQLdb\",\n host=CowrieConfig.get(\"output_mysql\", \"host\"),\n db=CowrieConfig.get(\"output_mysql\", \"database\"),\n user=CowrieConfig.get(\"output_mysql\", \"username\"),\n passwd=CowrieConfig.get(\"output_mysql\", \"password\", raw=True),\n port=port,\n cp_min=1,\n cp_max=1,\n charset=\"utf8mb4\",\n cp_reconnect=True,\n use_unicode=True,\n )\n except (MySQLdb.Error, MySQLdb._exceptions.Error) as e:\n log.msg(f\"output_mysql: Error {e.args[0]}: {e.args[1]}\")\n\n def stop(self):\n self.db.commit()\n self.db.close()\n\n def sqlerror(self, error):\n \"\"\"\n 1146, \"Table '...' doesn't exist\"\n 1406, \"Data too long for column '...' 
at row ...\"\n \"\"\"\n if error.value.args[0] in (1146, 1406):\n log.msg(f\"output_mysql: MySQL Error: {error.value.args!r}\")\n log.msg(\"MySQL schema maybe misconfigured, doublecheck database!\")\n else:\n log.msg(f\"output_mysql: MySQL Error: {error.value.args!r}\")\n\n def simpleQuery(self, sql, args):\n \"\"\"\n Just run a deferred sql query, only care about errors\n \"\"\"\n if self.debug:\n log.msg(f\"output_mysql: MySQL query: {sql} {args!r}\")\n d = self.db.runQuery(sql, args)\n d.addErrback(self.sqlerror)\n\n @defer.inlineCallbacks\n def write(self, entry):\n if entry[\"eventid\"] == \"cowrie.session.connect\":\n r = yield self.db.runQuery(\n f\"SELECT `id`\\\" \\\"FROM `sensors`\\\" \\\"WHERE `ip` = {self.sensor}\"\n )\n\n if r:\n sensorid = r[0][0]\n else:\n yield self.db.runQuery(\n f\"INSERT INTO `sensors` (`ip`) \\\" \\\"VALUES ({self.sensor})\"\n )\n\n r = yield self.db.runQuery(\"SELECT LAST_INSERT_ID()\")\n sensorid = int(r[0][0])\n self.simpleQuery(\n \"INSERT INTO `sessions` (`id`, `starttime`, `sensor`, `ip`) \"\n \"VALUES (%s, FROM_UNIXTIME(%s), %s, %s)\",\n (entry[\"session\"], entry[\"time\"], sensorid, entry[\"src_ip\"]),\n )\n\n elif entry[\"eventid\"] == \"cowrie.login.success\":\n self.simpleQuery(\n \"INSERT INTO `auth` (`session`, `success`, `username`, `password`, `timestamp`) \"\n \"VALUES (%s, %s, %s, %s, FROM_UNIXTIME(%s))\",\n (\n entry[\"session\"],\n 1,\n entry[\"username\"],\n entry[\"password\"],\n entry[\"time\"],\n ),\n )\n\n elif entry[\"eventid\"] == \"cowrie.login.failed\":\n self.simpleQuery(\n \"INSERT INTO `auth` (`session`, `success`, `username`, `password`, `timestamp`) \"\n \"VALUES (%s, %s, %s, %s, FROM_UNIXTIME(%s))\",\n (\n entry[\"session\"],\n 0,\n entry[\"username\"],\n entry[\"password\"],\n entry[\"time\"],\n ),\n )\n\n elif entry[\"eventid\"] == \"cowrie.session.params\":\n self.simpleQuery(\n \"INSERT INTO `params` (`session`, `arch`) \" \"VALUES (%s, %s)\",\n (entry[\"session\"], entry[\"arch\"]),\n )\n\n elif entry[\"eventid\"] == \"cowrie.command.input\":\n self.simpleQuery(\n \"INSERT INTO `input` (`session`, `timestamp`, `success`, `input`) \"\n \"VALUES (%s, FROM_UNIXTIME(%s), %s , %s)\",\n (entry[\"session\"], entry[\"time\"], 1, entry[\"input\"]),\n )\n\n elif entry[\"eventid\"] == \"cowrie.command.failed\":\n self.simpleQuery(\n \"INSERT INTO `input` (`session`, `timestamp`, `success`, `input`) \"\n \"VALUES (%s, FROM_UNIXTIME(%s), %s , %s)\",\n (entry[\"session\"], entry[\"time\"], 0, entry[\"input\"]),\n )\n\n elif entry[\"eventid\"] == \"cowrie.session.file_download\":\n self.simpleQuery(\n \"INSERT INTO `downloads` (`session`, `timestamp`, `url`, `outfile`, `shasum`) \"\n \"VALUES (%s, FROM_UNIXTIME(%s), %s, %s, %s)\",\n (\n entry[\"session\"],\n entry[\"time\"],\n entry.get(\"url\", \"\"),\n entry[\"outfile\"],\n entry[\"shasum\"],\n ),\n )\n\n elif entry[\"eventid\"] == \"cowrie.session.file_download.failed\":\n self.simpleQuery(\n \"INSERT INTO `downloads` (`session`, `timestamp`, `url`, `outfile`, `shasum`) \"\n \"VALUES (%s, FROM_UNIXTIME(%s), %s, %s, %s)\",\n (entry[\"session\"], entry[\"time\"], entry.get(\"url\", \"\"), \"NULL\", \"NULL\"),\n )\n\n elif entry[\"eventid\"] == \"cowrie.session.file_upload\":\n self.simpleQuery(\n \"INSERT INTO `downloads` (`session`, `timestamp`, `url`, `outfile`, `shasum`) \"\n \"VALUES (%s, FROM_UNIXTIME(%s), %s, %s, %s)\",\n (\n entry[\"session\"],\n entry[\"time\"],\n \"\",\n entry[\"outfile\"],\n entry[\"shasum\"],\n ),\n )\n\n elif entry[\"eventid\"] == 
\"cowrie.session.input\":\n self.simpleQuery(\n \"INSERT INTO `input` (`session`, `timestamp`, `realm`, `input`) \"\n \"VALUES (%s, FROM_UNIXTIME(%s), %s , %s)\",\n (entry[\"session\"], entry[\"time\"], entry[\"realm\"], entry[\"input\"]),\n )\n\n elif entry[\"eventid\"] == \"cowrie.client.version\":\n r = yield self.db.runQuery(\n \"SELECT `id` FROM `clients` \" \"WHERE `version` = %s\",\n (entry[\"version\"],),\n )\n\n if r:\n id = int(r[0][0])\n else:\n yield self.db.runQuery(\n \"INSERT INTO `clients` (`version`) \" \"VALUES (%s)\",\n (entry[\"version\"],),\n )\n\n r = yield self.db.runQuery(\"SELECT LAST_INSERT_ID()\")\n id = int(r[0][0])\n self.simpleQuery(\n \"UPDATE `sessions` \" \"SET `client` = %s \" \"WHERE `id` = %s\",\n (id, entry[\"session\"]),\n )\n\n elif entry[\"eventid\"] == \"cowrie.client.size\":\n self.simpleQuery(\n \"UPDATE `sessions` \" \"SET `termsize` = %s \" \"WHERE `id` = %s\",\n (\"{}x{}\".format(entry[\"width\"], entry[\"height\"]), entry[\"session\"]),\n )\n\n elif entry[\"eventid\"] == \"cowrie.session.closed\":\n self.simpleQuery(\n \"UPDATE `sessions` \"\n \"SET `endtime` = FROM_UNIXTIME(%s) \"\n \"WHERE `id` = %s\",\n (entry[\"time\"], entry[\"session\"]),\n )\n\n elif entry[\"eventid\"] == \"cowrie.log.closed\":\n self.simpleQuery(\n \"INSERT INTO `ttylog` (`session`, `ttylog`, `size`) \"\n \"VALUES (%s, %s, %s)\",\n (entry[\"session\"], entry[\"ttylog\"], entry[\"size\"]),\n )\n\n elif entry[\"eventid\"] == \"cowrie.client.fingerprint\":\n self.simpleQuery(\n \"INSERT INTO `keyfingerprints` (`session`, `username`, `fingerprint`) \"\n \"VALUES (%s, %s, %s)\",\n (entry[\"session\"], entry[\"username\"], entry[\"fingerprint\"]),\n )\n\n elif entry[\"eventid\"] == \"cowrie.direct-tcpip.request\":\n self.simpleQuery(\n \"INSERT INTO `ipforwards` (`session`, `timestamp`, `dst_ip`, `dst_port`) \"\n \"VALUES (%s, FROM_UNIXTIME(%s), %s, %s)\",\n (entry[\"session\"], entry[\"time\"], entry[\"dst_ip\"], entry[\"dst_port\"]),\n )\n\n elif entry[\"eventid\"] == \"cowrie.direct-tcpip.data\":\n self.simpleQuery(\n \"INSERT INTO `ipforwardsdata` (`session`, `timestamp`, `dst_ip`, `dst_port`, `data`) \"\n \"VALUES (%s, FROM_UNIXTIME(%s), %s, %s, %s)\",\n (\n entry[\"session\"],\n entry[\"time\"],\n entry[\"dst_ip\"],\n entry[\"dst_port\"],\n entry[\"data\"],\n ),\n )\n", "path": "src/cowrie/output/mysql.py"}]}
3,605
220
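The golden diff in this record is essentially a string-literal repair: the old f-strings carried escaped double quotes (`\" \"`) straight into the SQL text sent to MySQL, while the patched lines rely on Python's adjacent-literal concatenation and keep the f-prefix only on the fragment that interpolates `self.sensor`. The short sketch below is illustrative only — plain Python with a made-up `sensor` value, not cowrie code — and prints what each form actually evaluates to.

```python
# Illustrative only: compare what the pre- and post-patch string forms evaluate to.
# `sensor` is a stand-in for self.sensor; no database connection is involved.
sensor = "'192.0.2.1'"

broken = f"SELECT `id`\" \"FROM `sensors`\" \"WHERE `ip` = {sensor}"
print(broken)
# SELECT `id`" "FROM `sensors`" "WHERE `ip` = '192.0.2.1'   <- stray quote marks reach MySQL

fixed = "SELECT `id`" "FROM `sensors`" f"WHERE `ip` = {sensor}"
print(fixed)
# SELECT `id`FROM `sensors`WHERE `ip` = '192.0.2.1'
# The adjacent literals are still joined without spaces (mirroring the upstream
# patch); the point of the fix is removing the literal quote characters.
```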
gh_patches_debug_2972
rasdani/github-patches
git_diff
pyodide__pyodide-325
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> ValueError: invalid __array_struct__ when using js arrays of arrays and numpy When using a matrix (array of array of numbers) in javascript and trying to convert that to a numpy array, it fails with the error `ValueError: invalid __array_struct__` To reproduce: JavaScript: ``` window.A = [[1,2,3],[4,5,6]]; ``` Python: ``` import numpy from js import A m = numpy.array(A) ``` </issue> <code> [start of src/pyodide.py] 1 """ 2 A library of helper utilities for connecting Python to the browser environment. 3 """ 4 5 import ast 6 import io 7 from textwrap import dedent 8 9 __version__ = '0.8.2' 10 11 12 def open_url(url): 13 """ 14 Fetches a given *url* and returns a io.StringIO to access its contents. 15 """ 16 from js import XMLHttpRequest 17 18 req = XMLHttpRequest.new() 19 req.open('GET', url, False) 20 req.send(None) 21 return io.StringIO(req.response) 22 23 24 def eval_code(code, ns): 25 """ 26 Runs a string of code, the last part of which may be an expression. 27 """ 28 # handle mis-indented input from multi-line strings 29 code = dedent(code) 30 31 mod = ast.parse(code) 32 if len(mod.body) == 0: 33 return None 34 35 if isinstance(mod.body[-1], ast.Expr): 36 expr = ast.Expression(mod.body[-1].value) 37 del mod.body[-1] 38 else: 39 expr = None 40 41 if len(mod.body): 42 exec(compile(mod, '<exec>', mode='exec'), ns, ns) 43 if expr is not None: 44 return eval(compile(expr, '<eval>', mode='eval'), ns, ns) 45 else: 46 return None 47 48 49 def find_imports(code): 50 """ 51 Finds the imports in a string of code and returns a list of their package 52 names. 53 """ 54 # handle mis-indented input from multi-line strings 55 code = dedent(code) 56 57 mod = ast.parse(code) 58 imports = set() 59 for node in ast.walk(mod): 60 if isinstance(node, ast.Import): 61 for name in node.names: 62 name = name.name 63 imports.add(name.split('.')[0]) 64 elif isinstance(node, ast.ImportFrom): 65 name = node.module 66 imports.add(name.split('.')[0]) 67 return list(imports) 68 69 70 __all__ = ['open_url', 'eval_code', 'find_imports'] 71 [end of src/pyodide.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/src/pyodide.py b/src/pyodide.py --- a/src/pyodide.py +++ b/src/pyodide.py @@ -67,4 +67,16 @@ return list(imports) -__all__ = ['open_url', 'eval_code', 'find_imports'] +def as_nested_list(obj): + """ + Assumes a Javascript object is made of (possibly nested) arrays and + converts them to nested Python lists. + """ + try: + it = iter(obj) + return [as_nested_list(x) for x in it] + except TypeError: + return obj + + +__all__ = ['open_url', 'eval_code', 'find_imports', 'as_nested_list']
{"golden_diff": "diff --git a/src/pyodide.py b/src/pyodide.py\n--- a/src/pyodide.py\n+++ b/src/pyodide.py\n@@ -67,4 +67,16 @@\n return list(imports)\n \n \n-__all__ = ['open_url', 'eval_code', 'find_imports']\n+def as_nested_list(obj):\n+ \"\"\"\n+ Assumes a Javascript object is made of (possibly nested) arrays and\n+ converts them to nested Python lists.\n+ \"\"\"\n+ try:\n+ it = iter(obj)\n+ return [as_nested_list(x) for x in it]\n+ except TypeError:\n+ return obj\n+\n+\n+__all__ = ['open_url', 'eval_code', 'find_imports', 'as_nested_list']\n", "issue": "ValueError: invalid __array_struct__ when using js arrays of arrays and numpy\nWhen using a matrix (array of array of numbers) in javascript and trying to convert that to a numpy array, it fails with the error `ValueError: invalid __array_struct__`\r\n\r\nTo reproduce:\r\nJavaScript:\r\n```\r\nwindow.A = [[1,2,3],[4,5,6]];\r\n```\r\nPython:\r\n```\r\nimport numpy\r\nfrom js import A\r\nm = numpy.array(A)\r\n```\n", "before_files": [{"content": "\"\"\"\nA library of helper utilities for connecting Python to the browser environment.\n\"\"\"\n\nimport ast\nimport io\nfrom textwrap import dedent\n\n__version__ = '0.8.2'\n\n\ndef open_url(url):\n \"\"\"\n Fetches a given *url* and returns a io.StringIO to access its contents.\n \"\"\"\n from js import XMLHttpRequest\n\n req = XMLHttpRequest.new()\n req.open('GET', url, False)\n req.send(None)\n return io.StringIO(req.response)\n\n\ndef eval_code(code, ns):\n \"\"\"\n Runs a string of code, the last part of which may be an expression.\n \"\"\"\n # handle mis-indented input from multi-line strings\n code = dedent(code)\n\n mod = ast.parse(code)\n if len(mod.body) == 0:\n return None\n\n if isinstance(mod.body[-1], ast.Expr):\n expr = ast.Expression(mod.body[-1].value)\n del mod.body[-1]\n else:\n expr = None\n\n if len(mod.body):\n exec(compile(mod, '<exec>', mode='exec'), ns, ns)\n if expr is not None:\n return eval(compile(expr, '<eval>', mode='eval'), ns, ns)\n else:\n return None\n\n\ndef find_imports(code):\n \"\"\"\n Finds the imports in a string of code and returns a list of their package\n names.\n \"\"\"\n # handle mis-indented input from multi-line strings\n code = dedent(code)\n\n mod = ast.parse(code)\n imports = set()\n for node in ast.walk(mod):\n if isinstance(node, ast.Import):\n for name in node.names:\n name = name.name\n imports.add(name.split('.')[0])\n elif isinstance(node, ast.ImportFrom):\n name = node.module\n imports.add(name.split('.')[0])\n return list(imports)\n\n\n__all__ = ['open_url', 'eval_code', 'find_imports']\n", "path": "src/pyodide.py"}]}
1,192
168
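The pyodide patch above sidesteps the `invalid __array_struct__` error by adding an `as_nested_list` helper that walks a JavaScript array-of-arrays proxy and returns plain nested Python lists, which NumPy can consume. A hedged usage sketch follows — it assumes a Pyodide build that ships the patched helper (importable from the `pyodide` module, as the `__all__` change suggests) and that `window.A = [[1,2,3],[4,5,6]]` was set on the JavaScript side.

```python
# Usage sketch only — requires a Pyodide runtime with the patched helper;
# the JS side is assumed to have run: window.A = [[1, 2, 3], [4, 5, 6]];
import numpy
from js import A
from pyodide import as_nested_list

m = numpy.array(as_nested_list(A))  # JS proxy -> nested Python lists -> ndarray
print(m.shape)  # expected: (2, 3)
```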
gh_patches_debug_37105
rasdani/github-patches
git_diff
microsoft__AzureTRE-670
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> [BUG] Failed workspace deployment: only one positional argument may be specified, the installation name, but multiple were received **Describe the bug** Workspace deployment failed with: ``` only one positional argument may be specified, the installation name, but multiple were received [mrtre17-9c1d X secure project] ``` **Steps to reproduce** Deployed a workspace with following values: ``` "azure_location":"westeurope", "workspace_id":"9c1d", "tre_id":"mrtre17", "address_space":"192.168.10.0/24", "display_name":"Project X", "description":"Very secure project" ``` </issue> <code> [start of resource_processor/vmss_porter/runner.py] 1 import os 2 import sys 3 import json 4 import socket 5 import asyncio 6 import logging 7 8 from shared.logging import disable_unwanted_loggers, initialize_logging, get_message_id_logger # pylint: disable=import-error # noqa 9 from resources import strings # pylint: disable=import-error # noqa 10 from contextlib import asynccontextmanager 11 from azure.servicebus import ServiceBusMessage 12 from azure.servicebus.aio import ServiceBusClient, AutoLockRenewer 13 from azure.identity.aio import DefaultAzureCredential 14 15 logger_adapter = initialize_logging(logging.INFO, socket.gethostname()) 16 disable_unwanted_loggers() 17 18 19 @asynccontextmanager 20 async def default_credentials(msi_id): 21 """ 22 Context manager which yields the default credentials. 23 """ 24 credential = DefaultAzureCredential(managed_identity_client_id=msi_id) if msi_id else DefaultAzureCredential() 25 yield credential 26 await credential.close() 27 28 29 async def receive_message(env_vars, service_bus_client): 30 """ 31 This method is an async generator which receives messages from service bus 32 and yields those messages. If the yielded function return True the message is 33 marked complete. 
34 """ 35 async with service_bus_client: 36 q_name = env_vars["resource_request_queue"] 37 renewer = AutoLockRenewer(max_lock_renewal_duration=1800) 38 receiver = service_bus_client.get_queue_receiver(queue_name=q_name, auto_lock_renewer=renewer) 39 40 async with receiver: 41 received_msgs = await receiver.receive_messages(max_message_count=10, max_wait_time=5) 42 43 for msg in received_msgs: 44 result = True 45 message = "" 46 47 try: 48 message = json.loads(str(msg)) 49 result = (yield message) 50 except (json.JSONDecodeError) as e: 51 logging.error(f"Received bad service bus resource request message: {e}") 52 if result: 53 logging.info(f"Resource request for {message} is complete") 54 else: 55 logging.error('Message processing failed!') 56 logger_adapter.info(f"Message with id = {message['id']} processed as {result} and marked complete.") 57 await receiver.complete_message(msg) 58 59 60 def azure_login_command(env_vars): 61 local_login = f"az login --service-principal --username {env_vars['arm_client_id']} --password {env_vars['arm_client_secret']} --tenant {env_vars['arm_tenant_id']}" 62 vmss_login = f"az login --identity -u {env_vars['vmss_msi_id']}" 63 command = vmss_login if env_vars['vmss_msi_id'] else local_login 64 return command 65 66 67 async def filter_parameters_not_needed_by_porter(msg_body, env_vars): 68 parameters = msg_body["parameters"] 69 command = [f"{azure_login_command(env_vars)} >/dev/null && \ 70 az acr login --name {env_vars['registry_server'].replace('.azurecr.io','')} >/dev/null && \ 71 porter explain --reference {env_vars['registry_server']}/{msg_body['name']}:v{msg_body['version']} -ojson"] 72 proc = await asyncio.create_subprocess_shell( 73 ''.join(command), 74 stdout=asyncio.subprocess.PIPE, 75 stderr=asyncio.subprocess.PIPE, 76 env=porter_envs(env_vars)) 77 78 stdout, stderr = await proc.communicate() 79 logging.info(f'[{command!r} exited with {proc.returncode}]') 80 result_stdout = None 81 result_stderr = None 82 if stdout: 83 result_stdout = stdout.decode() 84 porter_explain_parameters = json.loads(result_stdout)["parameters"] 85 items = [item["name"] for item in porter_explain_parameters] 86 porter_keys = set(items).intersection(set(parameters.keys())) 87 return porter_keys 88 if stderr: 89 result_stderr = stderr.decode() 90 logger_adapter.info('[stderr]') 91 for string in result_stderr.split('\n'): 92 logger_adapter.info(str(string)) 93 94 return parameters.keys() 95 96 97 async def build_porter_command(msg_body, env_vars): 98 porter_parameters = "" 99 100 porter_keys = await filter_parameters_not_needed_by_porter(msg_body, env_vars) 101 for parameter in porter_keys: 102 porter_parameters = porter_parameters + f" --param {parameter}={msg_body['parameters'][parameter]}" 103 104 installation_id = msg_body['parameters']['tre_id'] + "-" + msg_body['parameters']['workspace_id'] 105 106 porter_parameters = porter_parameters + f" --param tfstate_container_name={env_vars['tfstate_container_name']}" 107 porter_parameters = porter_parameters + f" --param tfstate_resource_group_name={env_vars['tfstate_resource_group_name']}" 108 porter_parameters = porter_parameters + f" --param tfstate_storage_account_name={env_vars['tfstate_storage_account_name']}" 109 porter_parameters = porter_parameters + f" --param arm_use_msi={env_vars['arm_use_msi']}" 110 111 command_line = [f"{azure_login_command(env_vars)} && az acr login --name {env_vars['registry_server'].replace('.azurecr.io','')} && porter " 112 f"{msg_body['action']} {installation_id} " 113 f" --reference 
{env_vars['registry_server']}/{msg_body['name']}:v{msg_body['version']}" 114 f" {porter_parameters} --cred ./vmss_porter/azure.json --allow-docker-host-access" 115 f" && porter show {installation_id}"] 116 return command_line 117 118 119 def porter_envs(env_var): 120 porter_env_vars = {} 121 porter_env_vars["HOME"] = os.environ['HOME'] 122 porter_env_vars["PATH"] = os.environ['PATH'] 123 porter_env_vars["ARM_CLIENT_ID"] = env_var["arm_client_id"] 124 porter_env_vars["ARM_CLIENT_SECRET"] = env_var["arm_client_secret"] 125 porter_env_vars["ARM_SUBSCRIPTION_ID"] = env_var["arm_subscription_id"] 126 porter_env_vars["ARM_TENANT_ID"] = env_var["arm_tenant_id"] 127 128 return porter_env_vars 129 130 131 async def run_porter(command, env_vars): 132 proc = await asyncio.create_subprocess_shell( 133 ''.join(command), 134 stdout=asyncio.subprocess.PIPE, 135 stderr=asyncio.subprocess.PIPE, 136 env=porter_envs(env_vars)) 137 138 stdout, stderr = await proc.communicate() 139 logging.info(f'[{command!r} exited with {proc.returncode}]') 140 result_stdout = None 141 result_stderr = None 142 if stdout: 143 result_stdout = stdout.decode() 144 logger_adapter.info('[stdout]') 145 for string in result_stdout.split('\n'): 146 if len(string) != 0: 147 logger_adapter.info(str(string)) 148 if stderr: 149 result_stderr = stderr.decode() 150 logger_adapter.info('[stderr]') 151 for string in result_stderr.split('\n'): 152 if len(string) != 0: 153 logger_adapter.info(str(string)) 154 155 return (proc.returncode, result_stdout, result_stderr) 156 157 158 def service_bus_message_generator(sb_message, status, deployment_message): 159 installation_id = sb_message['parameters']['tre_id'] + "-" + sb_message['parameters']['workspace_id'] 160 resource_request_message = json.dumps({ 161 "id": sb_message["id"], 162 "status": status, 163 "message": f"{installation_id}: {deployment_message}" 164 }) 165 return resource_request_message 166 167 168 async def deploy_porter_bundle(msg_body, sb_client, env_vars, message_logger_adapter): 169 installation_id = msg_body['parameters']['tre_id'] + "-" + msg_body['parameters']['workspace_id'] 170 message_logger_adapter.info(f"{installation_id}: Deployment job configuration starting") 171 sb_sender = sb_client.get_queue_sender(queue_name=env_vars["deployment_status_queue"]) 172 resource_request_message = service_bus_message_generator(msg_body, strings.RESOURCE_STATUS_DEPLOYING, "Deployment job starting") 173 await sb_sender.send_messages(ServiceBusMessage(body=resource_request_message, correlation_id=msg_body["id"])) 174 porter_command = await build_porter_command(msg_body, env_vars) 175 returncode, _, err = await run_porter(porter_command, env_vars) 176 if returncode != 0: 177 error_message = "Error context message = " + " ".join(err.split('\n')) 178 resource_request_message = service_bus_message_generator(msg_body, strings.RESOURCE_STATUS_FAILED, error_message) 179 await sb_sender.send_messages(ServiceBusMessage(body=resource_request_message, correlation_id=msg_body["id"])) 180 message_logger_adapter.info(f"{installation_id}: Deployment job configuration failed error = {error_message}") 181 return False 182 else: 183 success_message = "Workspace was deployed successfully..." 
184 resource_request_message = service_bus_message_generator(msg_body, strings.RESOURCE_STATUS_DEPLOYED, success_message) 185 await sb_sender.send_messages(ServiceBusMessage(body=resource_request_message, correlation_id=msg_body["id"])) 186 message_logger_adapter.info(f"{installation_id}: {success_message}") 187 return True 188 189 190 async def runner(env_vars): 191 msi_id = env_vars["vmss_msi_id"] 192 service_bus_namespace = env_vars["service_bus_namespace"] 193 async with default_credentials(msi_id) as credential: 194 service_bus_client = ServiceBusClient(service_bus_namespace, credential) 195 logger_adapter.info("Starting message receiving loop...") 196 while True: 197 logger_adapter.info("Checking for new messages...") 198 receive_message_gen = receive_message(env_vars, service_bus_client) 199 try: 200 async for message in receive_message_gen: 201 logger_adapter.info(f"Message received for id={message['id']}") 202 message_logger_adapter = get_message_id_logger(message['id']) # logger includes message id in every entry. 203 result = await deploy_porter_bundle(message, service_bus_client, env_vars, message_logger_adapter) 204 await receive_message_gen.asend(result) 205 except StopAsyncIteration: # the async generator when finished signals end with this exception. 206 pass 207 logger_adapter.info("All messages done sleeping...") 208 await asyncio.sleep(60) 209 210 211 def read_env_vars(): 212 env_vars = { 213 # Needed for local dev 214 "app_id": os.environ.get("AZURE_CLIENT_ID", None), 215 "app_password": os.environ.get("AZURE_CLIENT_SECRET", None), 216 217 "registry_server": os.environ["REGISTRY_SERVER"], 218 "tfstate_container_name": os.environ['TERRAFORM_STATE_CONTAINER_NAME'], 219 "tfstate_resource_group_name": os.environ['MGMT_RESOURCE_GROUP_NAME'], 220 "tfstate_storage_account_name": os.environ['MGMT_STORAGE_ACCOUNT_NAME'], 221 "deployment_status_queue": os.environ['SERVICE_BUS_DEPLOYMENT_STATUS_UPDATE_QUEUE'], 222 "resource_request_queue": os.environ['SERVICE_BUS_RESOURCE_REQUEST_QUEUE'], 223 "service_bus_namespace": os.environ['SERVICE_BUS_FULLY_QUALIFIED_NAMESPACE'], 224 "vmss_msi_id": os.environ.get('VMSS_MSI_ID', None), 225 226 # Needed for running porter 227 "arm_use_msi": os.environ["ARM_USE_MSI"], 228 "arm_subscription_id": os.environ['ARM_SUBSCRIPTION_ID'], 229 "arm_client_id": os.environ["ARM_CLIENT_ID"], 230 "arm_tenant_id": os.environ["ARM_TENANT_ID"] 231 } 232 233 env_vars["arm_client_secret"] = os.environ["ARM_CLIENT_SECRET"] if env_vars["arm_use_msi"] == "false" else "" 234 235 return env_vars 236 237 238 if __name__ == "__main__": 239 try: 240 env_vars = read_env_vars() 241 except KeyError as e: 242 logger_adapter.error(f"Environment variable {e} is not set correctly...Exiting") 243 sys.exit(1) 244 logger_adapter.info("Started processor") 245 asyncio.run(runner(env_vars)) 246 [end of resource_processor/vmss_porter/runner.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/resource_processor/vmss_porter/runner.py b/resource_processor/vmss_porter/runner.py --- a/resource_processor/vmss_porter/runner.py +++ b/resource_processor/vmss_porter/runner.py @@ -99,7 +99,7 @@ porter_keys = await filter_parameters_not_needed_by_porter(msg_body, env_vars) for parameter in porter_keys: - porter_parameters = porter_parameters + f" --param {parameter}={msg_body['parameters'][parameter]}" + porter_parameters = porter_parameters + f" --param {parameter}=\"{msg_body['parameters'][parameter]}\"" installation_id = msg_body['parameters']['tre_id'] + "-" + msg_body['parameters']['workspace_id'] @@ -109,7 +109,7 @@ porter_parameters = porter_parameters + f" --param arm_use_msi={env_vars['arm_use_msi']}" command_line = [f"{azure_login_command(env_vars)} && az acr login --name {env_vars['registry_server'].replace('.azurecr.io','')} && porter " - f"{msg_body['action']} {installation_id} " + f"{msg_body['action']} \"{installation_id}\" " f" --reference {env_vars['registry_server']}/{msg_body['name']}:v{msg_body['version']}" f" {porter_parameters} --cred ./vmss_porter/azure.json --allow-docker-host-access" f" && porter show {installation_id}"] @@ -174,7 +174,7 @@ porter_command = await build_porter_command(msg_body, env_vars) returncode, _, err = await run_porter(porter_command, env_vars) if returncode != 0: - error_message = "Error context message = " + " ".join(err.split('\n')) + error_message = "Error context message = " + " ".join(err.split('\n')) + " ; Command executed: ".join(porter_command) resource_request_message = service_bus_message_generator(msg_body, strings.RESOURCE_STATUS_FAILED, error_message) await sb_sender.send_messages(ServiceBusMessage(body=resource_request_message, correlation_id=msg_body["id"])) message_logger_adapter.info(f"{installation_id}: Deployment job configuration failed error = {error_message}")
{"golden_diff": "diff --git a/resource_processor/vmss_porter/runner.py b/resource_processor/vmss_porter/runner.py\n--- a/resource_processor/vmss_porter/runner.py\n+++ b/resource_processor/vmss_porter/runner.py\n@@ -99,7 +99,7 @@\n \n porter_keys = await filter_parameters_not_needed_by_porter(msg_body, env_vars)\n for parameter in porter_keys:\n- porter_parameters = porter_parameters + f\" --param {parameter}={msg_body['parameters'][parameter]}\"\n+ porter_parameters = porter_parameters + f\" --param {parameter}=\\\"{msg_body['parameters'][parameter]}\\\"\"\n \n installation_id = msg_body['parameters']['tre_id'] + \"-\" + msg_body['parameters']['workspace_id']\n \n@@ -109,7 +109,7 @@\n porter_parameters = porter_parameters + f\" --param arm_use_msi={env_vars['arm_use_msi']}\"\n \n command_line = [f\"{azure_login_command(env_vars)} && az acr login --name {env_vars['registry_server'].replace('.azurecr.io','')} && porter \"\n- f\"{msg_body['action']} {installation_id} \"\n+ f\"{msg_body['action']} \\\"{installation_id}\\\" \"\n f\" --reference {env_vars['registry_server']}/{msg_body['name']}:v{msg_body['version']}\"\n f\" {porter_parameters} --cred ./vmss_porter/azure.json --allow-docker-host-access\"\n f\" && porter show {installation_id}\"]\n@@ -174,7 +174,7 @@\n porter_command = await build_porter_command(msg_body, env_vars)\n returncode, _, err = await run_porter(porter_command, env_vars)\n if returncode != 0:\n- error_message = \"Error context message = \" + \" \".join(err.split('\\n'))\n+ error_message = \"Error context message = \" + \" \".join(err.split('\\n')) + \" ; Command executed: \".join(porter_command)\n resource_request_message = service_bus_message_generator(msg_body, strings.RESOURCE_STATUS_FAILED, error_message)\n await sb_sender.send_messages(ServiceBusMessage(body=resource_request_message, correlation_id=msg_body[\"id\"]))\n message_logger_adapter.info(f\"{installation_id}: Deployment job configuration failed error = {error_message}\")\n", "issue": "[BUG] Failed workspace deployment: only one positional argument may be specified, the installation name, but multiple were received\n**Describe the bug**\r\nWorkspace deployment failed with:\r\n\r\n```\r\nonly one positional argument may be specified, the installation name, but multiple were received [mrtre17-9c1d X secure project]\r\n```\r\n\r\n**Steps to reproduce**\r\n\r\nDeployed a workspace with following values:\r\n\r\n```\r\n\"azure_location\":\"westeurope\",\r\n\"workspace_id\":\"9c1d\",\r\n\"tre_id\":\"mrtre17\",\r\n\"address_space\":\"192.168.10.0/24\",\r\n\"display_name\":\"Project X\",\r\n\"description\":\"Very secure project\"\r\n```\n", "before_files": [{"content": "import os\nimport sys\nimport json\nimport socket\nimport asyncio\nimport logging\n\nfrom shared.logging import disable_unwanted_loggers, initialize_logging, get_message_id_logger # pylint: disable=import-error # noqa\nfrom resources import strings # pylint: disable=import-error # noqa\nfrom contextlib import asynccontextmanager\nfrom azure.servicebus import ServiceBusMessage\nfrom azure.servicebus.aio import ServiceBusClient, AutoLockRenewer\nfrom azure.identity.aio import DefaultAzureCredential\n\nlogger_adapter = initialize_logging(logging.INFO, socket.gethostname())\ndisable_unwanted_loggers()\n\n\n@asynccontextmanager\nasync def default_credentials(msi_id):\n \"\"\"\n Context manager which yields the default credentials.\n \"\"\"\n credential = DefaultAzureCredential(managed_identity_client_id=msi_id) if msi_id else DefaultAzureCredential()\n 
yield credential\n await credential.close()\n\n\nasync def receive_message(env_vars, service_bus_client):\n \"\"\"\n This method is an async generator which receives messages from service bus\n and yields those messages. If the yielded function return True the message is\n marked complete.\n \"\"\"\n async with service_bus_client:\n q_name = env_vars[\"resource_request_queue\"]\n renewer = AutoLockRenewer(max_lock_renewal_duration=1800)\n receiver = service_bus_client.get_queue_receiver(queue_name=q_name, auto_lock_renewer=renewer)\n\n async with receiver:\n received_msgs = await receiver.receive_messages(max_message_count=10, max_wait_time=5)\n\n for msg in received_msgs:\n result = True\n message = \"\"\n\n try:\n message = json.loads(str(msg))\n result = (yield message)\n except (json.JSONDecodeError) as e:\n logging.error(f\"Received bad service bus resource request message: {e}\")\n if result:\n logging.info(f\"Resource request for {message} is complete\")\n else:\n logging.error('Message processing failed!')\n logger_adapter.info(f\"Message with id = {message['id']} processed as {result} and marked complete.\")\n await receiver.complete_message(msg)\n\n\ndef azure_login_command(env_vars):\n local_login = f\"az login --service-principal --username {env_vars['arm_client_id']} --password {env_vars['arm_client_secret']} --tenant {env_vars['arm_tenant_id']}\"\n vmss_login = f\"az login --identity -u {env_vars['vmss_msi_id']}\"\n command = vmss_login if env_vars['vmss_msi_id'] else local_login\n return command\n\n\nasync def filter_parameters_not_needed_by_porter(msg_body, env_vars):\n parameters = msg_body[\"parameters\"]\n command = [f\"{azure_login_command(env_vars)} >/dev/null && \\\n az acr login --name {env_vars['registry_server'].replace('.azurecr.io','')} >/dev/null && \\\n porter explain --reference {env_vars['registry_server']}/{msg_body['name']}:v{msg_body['version']} -ojson\"]\n proc = await asyncio.create_subprocess_shell(\n ''.join(command),\n stdout=asyncio.subprocess.PIPE,\n stderr=asyncio.subprocess.PIPE,\n env=porter_envs(env_vars))\n\n stdout, stderr = await proc.communicate()\n logging.info(f'[{command!r} exited with {proc.returncode}]')\n result_stdout = None\n result_stderr = None\n if stdout:\n result_stdout = stdout.decode()\n porter_explain_parameters = json.loads(result_stdout)[\"parameters\"]\n items = [item[\"name\"] for item in porter_explain_parameters]\n porter_keys = set(items).intersection(set(parameters.keys()))\n return porter_keys\n if stderr:\n result_stderr = stderr.decode()\n logger_adapter.info('[stderr]')\n for string in result_stderr.split('\\n'):\n logger_adapter.info(str(string))\n\n return parameters.keys()\n\n\nasync def build_porter_command(msg_body, env_vars):\n porter_parameters = \"\"\n\n porter_keys = await filter_parameters_not_needed_by_porter(msg_body, env_vars)\n for parameter in porter_keys:\n porter_parameters = porter_parameters + f\" --param {parameter}={msg_body['parameters'][parameter]}\"\n\n installation_id = msg_body['parameters']['tre_id'] + \"-\" + msg_body['parameters']['workspace_id']\n\n porter_parameters = porter_parameters + f\" --param tfstate_container_name={env_vars['tfstate_container_name']}\"\n porter_parameters = porter_parameters + f\" --param tfstate_resource_group_name={env_vars['tfstate_resource_group_name']}\"\n porter_parameters = porter_parameters + f\" --param tfstate_storage_account_name={env_vars['tfstate_storage_account_name']}\"\n porter_parameters = porter_parameters + f\" --param 
arm_use_msi={env_vars['arm_use_msi']}\"\n\n command_line = [f\"{azure_login_command(env_vars)} && az acr login --name {env_vars['registry_server'].replace('.azurecr.io','')} && porter \"\n f\"{msg_body['action']} {installation_id} \"\n f\" --reference {env_vars['registry_server']}/{msg_body['name']}:v{msg_body['version']}\"\n f\" {porter_parameters} --cred ./vmss_porter/azure.json --allow-docker-host-access\"\n f\" && porter show {installation_id}\"]\n return command_line\n\n\ndef porter_envs(env_var):\n porter_env_vars = {}\n porter_env_vars[\"HOME\"] = os.environ['HOME']\n porter_env_vars[\"PATH\"] = os.environ['PATH']\n porter_env_vars[\"ARM_CLIENT_ID\"] = env_var[\"arm_client_id\"]\n porter_env_vars[\"ARM_CLIENT_SECRET\"] = env_var[\"arm_client_secret\"]\n porter_env_vars[\"ARM_SUBSCRIPTION_ID\"] = env_var[\"arm_subscription_id\"]\n porter_env_vars[\"ARM_TENANT_ID\"] = env_var[\"arm_tenant_id\"]\n\n return porter_env_vars\n\n\nasync def run_porter(command, env_vars):\n proc = await asyncio.create_subprocess_shell(\n ''.join(command),\n stdout=asyncio.subprocess.PIPE,\n stderr=asyncio.subprocess.PIPE,\n env=porter_envs(env_vars))\n\n stdout, stderr = await proc.communicate()\n logging.info(f'[{command!r} exited with {proc.returncode}]')\n result_stdout = None\n result_stderr = None\n if stdout:\n result_stdout = stdout.decode()\n logger_adapter.info('[stdout]')\n for string in result_stdout.split('\\n'):\n if len(string) != 0:\n logger_adapter.info(str(string))\n if stderr:\n result_stderr = stderr.decode()\n logger_adapter.info('[stderr]')\n for string in result_stderr.split('\\n'):\n if len(string) != 0:\n logger_adapter.info(str(string))\n\n return (proc.returncode, result_stdout, result_stderr)\n\n\ndef service_bus_message_generator(sb_message, status, deployment_message):\n installation_id = sb_message['parameters']['tre_id'] + \"-\" + sb_message['parameters']['workspace_id']\n resource_request_message = json.dumps({\n \"id\": sb_message[\"id\"],\n \"status\": status,\n \"message\": f\"{installation_id}: {deployment_message}\"\n })\n return resource_request_message\n\n\nasync def deploy_porter_bundle(msg_body, sb_client, env_vars, message_logger_adapter):\n installation_id = msg_body['parameters']['tre_id'] + \"-\" + msg_body['parameters']['workspace_id']\n message_logger_adapter.info(f\"{installation_id}: Deployment job configuration starting\")\n sb_sender = sb_client.get_queue_sender(queue_name=env_vars[\"deployment_status_queue\"])\n resource_request_message = service_bus_message_generator(msg_body, strings.RESOURCE_STATUS_DEPLOYING, \"Deployment job starting\")\n await sb_sender.send_messages(ServiceBusMessage(body=resource_request_message, correlation_id=msg_body[\"id\"]))\n porter_command = await build_porter_command(msg_body, env_vars)\n returncode, _, err = await run_porter(porter_command, env_vars)\n if returncode != 0:\n error_message = \"Error context message = \" + \" \".join(err.split('\\n'))\n resource_request_message = service_bus_message_generator(msg_body, strings.RESOURCE_STATUS_FAILED, error_message)\n await sb_sender.send_messages(ServiceBusMessage(body=resource_request_message, correlation_id=msg_body[\"id\"]))\n message_logger_adapter.info(f\"{installation_id}: Deployment job configuration failed error = {error_message}\")\n return False\n else:\n success_message = \"Workspace was deployed successfully...\"\n resource_request_message = service_bus_message_generator(msg_body, strings.RESOURCE_STATUS_DEPLOYED, success_message)\n await 
sb_sender.send_messages(ServiceBusMessage(body=resource_request_message, correlation_id=msg_body[\"id\"]))\n message_logger_adapter.info(f\"{installation_id}: {success_message}\")\n return True\n\n\nasync def runner(env_vars):\n msi_id = env_vars[\"vmss_msi_id\"]\n service_bus_namespace = env_vars[\"service_bus_namespace\"]\n async with default_credentials(msi_id) as credential:\n service_bus_client = ServiceBusClient(service_bus_namespace, credential)\n logger_adapter.info(\"Starting message receiving loop...\")\n while True:\n logger_adapter.info(\"Checking for new messages...\")\n receive_message_gen = receive_message(env_vars, service_bus_client)\n try:\n async for message in receive_message_gen:\n logger_adapter.info(f\"Message received for id={message['id']}\")\n message_logger_adapter = get_message_id_logger(message['id']) # logger includes message id in every entry.\n result = await deploy_porter_bundle(message, service_bus_client, env_vars, message_logger_adapter)\n await receive_message_gen.asend(result)\n except StopAsyncIteration: # the async generator when finished signals end with this exception.\n pass\n logger_adapter.info(\"All messages done sleeping...\")\n await asyncio.sleep(60)\n\n\ndef read_env_vars():\n env_vars = {\n # Needed for local dev\n \"app_id\": os.environ.get(\"AZURE_CLIENT_ID\", None),\n \"app_password\": os.environ.get(\"AZURE_CLIENT_SECRET\", None),\n\n \"registry_server\": os.environ[\"REGISTRY_SERVER\"],\n \"tfstate_container_name\": os.environ['TERRAFORM_STATE_CONTAINER_NAME'],\n \"tfstate_resource_group_name\": os.environ['MGMT_RESOURCE_GROUP_NAME'],\n \"tfstate_storage_account_name\": os.environ['MGMT_STORAGE_ACCOUNT_NAME'],\n \"deployment_status_queue\": os.environ['SERVICE_BUS_DEPLOYMENT_STATUS_UPDATE_QUEUE'],\n \"resource_request_queue\": os.environ['SERVICE_BUS_RESOURCE_REQUEST_QUEUE'],\n \"service_bus_namespace\": os.environ['SERVICE_BUS_FULLY_QUALIFIED_NAMESPACE'],\n \"vmss_msi_id\": os.environ.get('VMSS_MSI_ID', None),\n\n # Needed for running porter\n \"arm_use_msi\": os.environ[\"ARM_USE_MSI\"],\n \"arm_subscription_id\": os.environ['ARM_SUBSCRIPTION_ID'],\n \"arm_client_id\": os.environ[\"ARM_CLIENT_ID\"],\n \"arm_tenant_id\": os.environ[\"ARM_TENANT_ID\"]\n }\n\n env_vars[\"arm_client_secret\"] = os.environ[\"ARM_CLIENT_SECRET\"] if env_vars[\"arm_use_msi\"] == \"false\" else \"\"\n\n return env_vars\n\n\nif __name__ == \"__main__\":\n try:\n env_vars = read_env_vars()\n except KeyError as e:\n logger_adapter.error(f\"Environment variable {e} is not set correctly...Exiting\")\n sys.exit(1)\n logger_adapter.info(\"Started processor\")\n asyncio.run(runner(env_vars))\n", "path": "resource_processor/vmss_porter/runner.py"}]}
3,770
501
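The failure in this record is classic shell word-splitting: the runner assembles one long shell string, so an unquoted value such as `display_name=Project X` or `description=Very secure project` breaks into extra tokens that porter then reports as surplus positional arguments (`[mrtre17-9c1d X secure project]`). The sketch below is a standalone illustration using the values from the issue — `install` stands in for `msg_body['action']` and nothing is actually executed — showing why the quoting added in the golden diff matters.

```python
# Standalone illustration of the quoting problem; values come from the issue,
# "install" is a stand-in for msg_body['action'], and nothing is executed here.
parameters = {"display_name": "Project X", "description": "Very secure project"}
installation_id = "mrtre17-9c1d"

unquoted = " ".join(f"--param {k}={v}" for k, v in parameters.items())
print(f"porter install {installation_id} {unquoted}")
# porter install mrtre17-9c1d --param display_name=Project X --param description=Very secure project
# Run through a shell, "X", "secure" and "project" become extra positional
# arguments -> "only one positional argument may be specified ...".

quoted = " ".join(f'--param {k}="{v}"' for k, v in parameters.items())
print(f'porter install "{installation_id}" {quoted}')
# porter install "mrtre17-9c1d" --param display_name="Project X" --param description="Very secure project"
```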
gh_patches_debug_23525
rasdani/github-patches
git_diff
pretalx__pretalx-626
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> email sender address fallback is incorrect ## Current Behavior ![image](https://user-images.githubusercontent.com/2158203/54060323-98a49e00-41fc-11e9-8eab-234aca8fc4d0.png) the UI says emails will be sent from "[email protected]", but they are sent from "admin@localhost" (the value in the global `MAIL_FROM`). ## Expected Behavior the sender should be "[email protected]" ## Steps to Reproduce 1. create event 2. set an orga email address `[email protected]` 3. do _not_ set a "Sender address" in mail settings, but observe the text below 4. send email 5. email is has `From: event <admin@localhost>`, not `From: event <[email protected]>` ## Context <!--- How has this issue affected you? What are you trying to accomplish? --> <!--- Providing context helps us come up with a solution that is most useful in the real world --> ## Your Environment <!--- Include as many relevant details about the environment you experienced the bug in --> * Version used: 5a5ab5482dd9a7a3b19b91563946c535fe5abe1e * Environment name and version (e.g. Chrome 39, python 3.5): * Operating System and version (desktop or mobile): * Link to your instance, if in production: </issue> <code> [start of src/pretalx/common/mail.py] 1 import logging 2 from email.utils import formataddr 3 from smtplib import SMTPResponseException, SMTPSenderRefused 4 from typing import Any, Dict, Union 5 6 from django.conf import settings 7 from django.core.mail import EmailMultiAlternatives, get_connection 8 from django.core.mail.backends.smtp import EmailBackend 9 from django.utils.translation import override 10 from i18nfield.strings import LazyI18nString 11 from inlinestyler.utils import inline_css 12 13 from pretalx.celery_app import app 14 from pretalx.event.models import Event 15 from pretalx.person.models import User 16 17 logger = logging.getLogger(__name__) 18 19 20 class CustomSMTPBackend(EmailBackend): 21 def test(self, from_addr): 22 try: 23 self.open() 24 self.connection.ehlo_or_helo_if_needed() 25 (code, resp) = self.connection.mail(from_addr, []) 26 if code != 250: 27 logger.warning( 28 f'Error testing mail settings, code {code}, resp: {resp}' 29 ) 30 raise SMTPSenderRefused(code, resp) 31 (code, resp) = self.connection.rcpt('[email protected]') 32 if code not in (250, 251): 33 logger.warning( 34 f'Error testing mail settings, code {code}, resp: {resp}' 35 ) 36 raise SMTPSenderRefused(code, resp) 37 finally: 38 self.close() 39 40 41 class TolerantDict(dict): 42 def __missing__(self, key): 43 """Don't fail when formatting strings with a dict with missing keys.""" 44 return key 45 46 47 class SendMailException(Exception): 48 pass 49 50 51 def mail( 52 user: User, 53 subject: str, 54 template: Union[str, LazyI18nString], 55 context: Dict[str, Any] = None, 56 event: Event = None, 57 locale: str = None, 58 headers: dict = None, 59 ): 60 from pretalx.mail.models import QueuedMail 61 62 headers = headers or {} 63 64 with override(locale): 65 body = str(template) 66 if context: 67 body = body.format_map(TolerantDict(context)) 68 reply_to = headers.get('reply-to') 69 if reply_to and isinstance(reply_to, list): 70 reply_to = ','.join(reply_to) 71 QueuedMail( 72 event=event, 73 to=user.email, 74 subject=str(subject), 75 text=body, 76 reply_to=reply_to, 77 bcc=headers.get('bcc'), 78 ).send() 79 80 81 @app.task(bind=True) 82 def mail_send_task( 83 self, 84 to: str, 85 subject: str, 86 body: str, 87 html: str, 88 reply_to: list = None, 89 
event: int = None, 90 cc: list = None, 91 bcc: list = None, 92 headers: dict = None, 93 ): 94 headers = headers or dict() 95 if event: 96 event = Event.objects.filter(id=event).first() 97 if event: 98 sender = event.settings.get('mail_from') 99 if sender == '[email protected]' or not sender: 100 sender = settings.MAIL_FROM 101 if reply_to: 102 headers['reply-to'] = reply_to.split(',') if isinstance(reply_to, str) else reply_to 103 backend = event.get_mail_backend() 104 sender = formataddr((str(event.name), sender)) 105 else: 106 sender = formataddr(('pretalx', settings.MAIL_FROM)) 107 backend = get_connection(fail_silently=False) 108 109 email = EmailMultiAlternatives( 110 subject, body, sender, to=to, cc=cc, bcc=bcc, headers=headers 111 ) 112 113 if html is not None: 114 email.attach_alternative(inline_css(html), 'text/html') 115 116 try: 117 backend.send_messages([email]) 118 except SMTPResponseException as exception: 119 # Retry on external problems: Connection issues (101, 111), timeouts (421), filled-up mailboxes (422), 120 # out of memory (431), network issues (442), another timeout (447), or too many mails sent (452) 121 if exception.smtp_code in (101, 111, 421, 422, 431, 442, 447, 452): 122 self.retry(max_retries=5, countdown=2 ** (self.request.retries * 2)) 123 logger.exception('Error sending email') 124 raise SendMailException('Failed to send an email to {}.'.format(to)) 125 except Exception: 126 logger.exception('Error sending email') 127 raise SendMailException('Failed to send an email to {}.'.format(to)) 128 [end of src/pretalx/common/mail.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/src/pretalx/common/mail.py b/src/pretalx/common/mail.py --- a/src/pretalx/common/mail.py +++ b/src/pretalx/common/mail.py @@ -92,14 +92,14 @@ headers: dict = None, ): headers = headers or dict() + if reply_to and isinstance(reply_to, str): + reply_to = reply_to.split(',') if event: event = Event.objects.filter(id=event).first() if event: sender = event.settings.get('mail_from') if sender == '[email protected]' or not sender: - sender = settings.MAIL_FROM - if reply_to: - headers['reply-to'] = reply_to.split(',') if isinstance(reply_to, str) else reply_to + sender = event.email backend = event.get_mail_backend() sender = formataddr((str(event.name), sender)) else: @@ -107,7 +107,7 @@ backend = get_connection(fail_silently=False) email = EmailMultiAlternatives( - subject, body, sender, to=to, cc=cc, bcc=bcc, headers=headers + subject, body, sender, to=to, cc=cc, bcc=bcc, headers=headers, reply_to=reply_to ) if html is not None:
{"golden_diff": "diff --git a/src/pretalx/common/mail.py b/src/pretalx/common/mail.py\n--- a/src/pretalx/common/mail.py\n+++ b/src/pretalx/common/mail.py\n@@ -92,14 +92,14 @@\n headers: dict = None,\n ):\n headers = headers or dict()\n+ if reply_to and isinstance(reply_to, str):\n+ reply_to = reply_to.split(',')\n if event:\n event = Event.objects.filter(id=event).first()\n if event:\n sender = event.settings.get('mail_from')\n if sender == '[email protected]' or not sender:\n- sender = settings.MAIL_FROM\n- if reply_to:\n- headers['reply-to'] = reply_to.split(',') if isinstance(reply_to, str) else reply_to\n+ sender = event.email\n backend = event.get_mail_backend()\n sender = formataddr((str(event.name), sender))\n else:\n@@ -107,7 +107,7 @@\n backend = get_connection(fail_silently=False)\n \n email = EmailMultiAlternatives(\n- subject, body, sender, to=to, cc=cc, bcc=bcc, headers=headers\n+ subject, body, sender, to=to, cc=cc, bcc=bcc, headers=headers, reply_to=reply_to\n )\n \n if html is not None:\n", "issue": "email sender address fallback is incorrect\n## Current Behavior\r\n\r\n![image](https://user-images.githubusercontent.com/2158203/54060323-98a49e00-41fc-11e9-8eab-234aca8fc4d0.png)\r\n\r\nthe UI says emails will be sent from \"[email protected]\", but they are sent from \"admin@localhost\" (the value in the global `MAIL_FROM`).\r\n\r\n## Expected Behavior\r\n\r\nthe sender should be \"[email protected]\"\r\n\r\n## Steps to Reproduce\r\n\r\n1. create event\r\n2. set an orga email address `[email protected]`\r\n3. do _not_ set a \"Sender address\" in mail settings, but observe the text below\r\n4. send email\r\n5. email is has `From: event <admin@localhost>`, not `From: event <[email protected]>`\r\n\r\n## Context\r\n\r\n<!--- How has this issue affected you? What are you trying to accomplish? -->\r\n<!--- Providing context helps us come up with a solution that is most useful in the real world -->\r\n\r\n## Your Environment\r\n\r\n<!--- Include as many relevant details about the environment you experienced the bug in -->\r\n* Version used: 5a5ab5482dd9a7a3b19b91563946c535fe5abe1e\r\n* Environment name and version (e.g. 
Chrome 39, python 3.5):\r\n* Operating System and version (desktop or mobile):\r\n* Link to your instance, if in production:\r\n\n", "before_files": [{"content": "import logging\nfrom email.utils import formataddr\nfrom smtplib import SMTPResponseException, SMTPSenderRefused\nfrom typing import Any, Dict, Union\n\nfrom django.conf import settings\nfrom django.core.mail import EmailMultiAlternatives, get_connection\nfrom django.core.mail.backends.smtp import EmailBackend\nfrom django.utils.translation import override\nfrom i18nfield.strings import LazyI18nString\nfrom inlinestyler.utils import inline_css\n\nfrom pretalx.celery_app import app\nfrom pretalx.event.models import Event\nfrom pretalx.person.models import User\n\nlogger = logging.getLogger(__name__)\n\n\nclass CustomSMTPBackend(EmailBackend):\n def test(self, from_addr):\n try:\n self.open()\n self.connection.ehlo_or_helo_if_needed()\n (code, resp) = self.connection.mail(from_addr, [])\n if code != 250:\n logger.warning(\n f'Error testing mail settings, code {code}, resp: {resp}'\n )\n raise SMTPSenderRefused(code, resp)\n (code, resp) = self.connection.rcpt('[email protected]')\n if code not in (250, 251):\n logger.warning(\n f'Error testing mail settings, code {code}, resp: {resp}'\n )\n raise SMTPSenderRefused(code, resp)\n finally:\n self.close()\n\n\nclass TolerantDict(dict):\n def __missing__(self, key):\n \"\"\"Don't fail when formatting strings with a dict with missing keys.\"\"\"\n return key\n\n\nclass SendMailException(Exception):\n pass\n\n\ndef mail(\n user: User,\n subject: str,\n template: Union[str, LazyI18nString],\n context: Dict[str, Any] = None,\n event: Event = None,\n locale: str = None,\n headers: dict = None,\n):\n from pretalx.mail.models import QueuedMail\n\n headers = headers or {}\n\n with override(locale):\n body = str(template)\n if context:\n body = body.format_map(TolerantDict(context))\n reply_to = headers.get('reply-to')\n if reply_to and isinstance(reply_to, list):\n reply_to = ','.join(reply_to)\n QueuedMail(\n event=event,\n to=user.email,\n subject=str(subject),\n text=body,\n reply_to=reply_to,\n bcc=headers.get('bcc'),\n ).send()\n\n\[email protected](bind=True)\ndef mail_send_task(\n self,\n to: str,\n subject: str,\n body: str,\n html: str,\n reply_to: list = None,\n event: int = None,\n cc: list = None,\n bcc: list = None,\n headers: dict = None,\n):\n headers = headers or dict()\n if event:\n event = Event.objects.filter(id=event).first()\n if event:\n sender = event.settings.get('mail_from')\n if sender == '[email protected]' or not sender:\n sender = settings.MAIL_FROM\n if reply_to:\n headers['reply-to'] = reply_to.split(',') if isinstance(reply_to, str) else reply_to\n backend = event.get_mail_backend()\n sender = formataddr((str(event.name), sender))\n else:\n sender = formataddr(('pretalx', settings.MAIL_FROM))\n backend = get_connection(fail_silently=False)\n\n email = EmailMultiAlternatives(\n subject, body, sender, to=to, cc=cc, bcc=bcc, headers=headers\n )\n\n if html is not None:\n email.attach_alternative(inline_css(html), 'text/html')\n\n try:\n backend.send_messages([email])\n except SMTPResponseException as exception:\n # Retry on external problems: Connection issues (101, 111), timeouts (421), filled-up mailboxes (422),\n # out of memory (431), network issues (442), another timeout (447), or too many mails sent (452)\n if exception.smtp_code in (101, 111, 421, 422, 431, 442, 447, 452):\n self.retry(max_retries=5, countdown=2 ** (self.request.retries * 2))\n 
logger.exception('Error sending email')\n raise SendMailException('Failed to send an email to {}.'.format(to))\n except Exception:\n logger.exception('Error sending email')\n raise SendMailException('Failed to send an email to {}.'.format(to))\n", "path": "src/pretalx/common/mail.py"}]}
num_tokens_prompt: 2,162
num_tokens_diff: 304
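As a companion to the pretalx record above: the golden diff normalizes a comma-separated reply-to string up front, hands it to Django's `EmailMultiAlternatives(..., reply_to=...)` keyword, and falls back to the event's own address rather than the global `MAIL_FROM`. The framework-free sketch below re-implements only that selection logic; `EventStub`, `GLOBAL_MAIL_FROM` and the sample addresses are illustrative assumptions, not pretalx APIs — only `email.utils.formataddr` is standard library.

```python
from email.utils import formataddr

GLOBAL_MAIL_FROM = "admin@localhost"  # stand-in for settings.MAIL_FROM


class EventStub:
    """Illustrative stand-in for an event with an organiser address."""

    def __init__(self, name, mail_from, email):
        self.name = name            # event name used in the From: display part
        self.mail_from = mail_from  # per-event 'mail_from' setting, may be unset
        self.email = email          # organiser address, e.g. "event@example.com"


def build_sender_and_reply_to(event, reply_to):
    # Normalize a comma-separated string into a list, as the patch does up front.
    if reply_to and isinstance(reply_to, str):
        reply_to = reply_to.split(",")
    if event is not None:
        sender = event.mail_from
        # Fall back to the event's own address instead of the global MAIL_FROM;
        # this is the behavioural change the issue asks for (the real code also
        # special-cases a placeholder default address here).
        if not sender:
            sender = event.email
        sender = formataddr((str(event.name), sender))
    else:
        sender = formataddr(("pretalx", GLOBAL_MAIL_FROM))
    return sender, reply_to


if __name__ == "__main__":
    event = EventStub("event", mail_from=None, email="event@example.com")
    print(build_sender_and_reply_to(event, "orga@example.com,help@example.com"))
    # -> ('event <event@example.com>', ['orga@example.com', 'help@example.com'])
```

Passing `reply_to` as a keyword, rather than smuggling it through `headers['reply-to']`, lets the mail backend render the header itself, which is the design choice the diff makes.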
problem_id: gh_patches_debug_25799
source: rasdani/github-patches
task_type: git_diff
in_source_id: mlflow__mlflow-5121
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> [BUG] Service metrics endpoint excludes many important routes ### Willingness to contribute The MLflow Community encourages bug fix contributions. Would you or another member of your organization be willing to contribute a fix for this bug to the MLflow code base? - [x] Yes. I can contribute a fix for this bug independently. - [ ] Yes. I would be willing to contribute a fix for this bug with guidance from the MLflow community. - [ ] No. I cannot contribute a bug fix at this time. ### System information - **Have I written custom code (as opposed to using a stock example script provided in MLflow)**: no - **OS Platform and Distribution (e.g., Linux Ubuntu 16.04)**: Debian GNU/Linux 11 - **MLflow installed from (source or binary)**: binary - **MLflow version (run ``mlflow --version``)**: 1.21.0 - **Python version**: 3.8 - **npm version, if running the dev UI**: - **Exact command to reproduce**: see section below ### Describe the problem The mlflow server option to expose a Prometheus metrics endpoint is a great observability feature for MLflow. Unfortunately, the current implementation leaves an incomplete view of the server health/performance. Currently, mlflow only logs metrics to a [subset of endpoints](https://github.com/mlflow/mlflow/blob/master/mlflow/server/prometheus_exporter.py#L18). As of MLflow version 1.21, the following routes are not being included in the service metrics: ``` ['static', '_get_experiment_by_name', '_create_experiment', '_list_experiments', '_get_experiment', '_delete_experiment', '_restore_experiment', '_update_experiment', '_update_run', '_delete_run', '_restore_run', '_set_experiment_tag', '_delete_tag', '_get_run', '_list_artifacts', '_get_metric_history', '_log_batch', '_log_model', '_create_registered_model', '_rename_registered_model', '_update_registered_model', '_delete_registered_model', '_get_registered_model', '_search_registered_models', '_list_registered_models', '_get_latest_versions', '_create_model_version', '_update_model_version', '_transition_stage', '_delete_model_version', '_get_model_version', '_search_model_versions', '_get_model_version_download_uri', '_set_registered_model_tag', '_set_model_version_tag', '_delete_registered_model_tag', '_delete_model_version_tag', 'health', 'serve_artifacts', 'serve_model_version_artifact', 'serve_static_file', 'serve'] ``` (see full list of endpoints) ``` from mlflow.server import app app.view_functions.keys() ``` Filtering the set of routes to be included in the metrics endpoint seems like a potentially fragile approach as new routes are added in later versions of mlflow. It's especially problematic that the list of filtered routes cannot be configured. We currently have no way to monitor the health of the overall service given that many key routes (e.g. `log_batch`) are not included in the service metrics. ### Code to reproduce issue Dockerfile for mlflow server ``` FROM python:3.8 RUN pip install mlflow==1.21.0 ENTRYPOINT mlflow server \ --backend-store-uri sqlite:///mlflow.sqlite \ --default-artifact-root file:///artifacts \ --host 0.0.0.0 \ --port 5000 \ --expose-prometheus /prometheus ``` Build and run the Docker container ``` docker build -t mlflow_example -f Dockerfile . 
docker run -p 5000:5000 mlflow_example ``` Script with incomplete representation in metrics endpoint ``` import mlflow import random mlflow.set_tracking_uri("http://127.0.0.1:5000") mlflow.set_experiment("service_metrics") with mlflow.start_run(run_name="test"): for _ in range(100): mlflow.log_metrics({ 'loss_a': random.random(), 'loss_b': random.random(), 'loss_c': random.random(), }) mlflow.log_params({'a': 1, 'b': 2, 'c': 3}) ``` See how metrics for these endpoints **_do not_** appear at http://127.0.0.1:5000/metrics --- Script with expected representation in metrics endpoint ``` import mlflow import random mlflow.set_tracking_uri("http://127.0.0.1:5000") mlflow.set_experiment("service_metrics") with mlflow.start_run(run_name="test"): for _ in range(100): mlflow.log_metric('loss', random.random()) mlflow.log_param('param', 'test') ``` See how metrics for these endpoints appear at http://127.0.0.1:5000/metrics ### Other info / logs Include any logs or source code that would be helpful to diagnose the problem. If including tracebacks, please include the full traceback. Large logs and files should be attached. ### What component(s), interfaces, languages, and integrations does this bug affect? Components - [ ] `area/artifacts`: Artifact stores and artifact logging - [ ] `area/build`: Build and test infrastructure for MLflow - [ ] `area/docs`: MLflow documentation pages - [ ] `area/examples`: Example code - [ ] `area/model-registry`: Model Registry service, APIs, and the fluent client calls for Model Registry - [ ] `area/models`: MLmodel format, model serialization/deserialization, flavors - [ ] `area/projects`: MLproject format, project running backends - [ ] `area/scoring`: MLflow Model server, model deployment tools, Spark UDFs - [x] `area/server-infra`: MLflow Tracking server backend - [ ] `area/tracking`: Tracking Service, tracking client APIs, autologging Interface - [ ] `area/uiux`: Front-end, user experience, plotting, JavaScript, JavaScript dev server - [ ] `area/docker`: Docker use across MLflow's components, such as MLflow Projects and MLflow Models - [ ] `area/sqlalchemy`: Use of SQLAlchemy in the Tracking Service or Model Registry - [ ] `area/windows`: Windows support Language - [ ] `language/r`: R APIs and clients - [ ] `language/java`: Java APIs and clients - [ ] `language/new`: Proposals for new client languages Integrations - [ ] `integrations/azure`: Azure and Azure ML integrations - [ ] `integrations/sagemaker`: SageMaker integrations - [ ] `integrations/databricks`: Databricks integrations </issue> <code> [start of mlflow/server/prometheus_exporter.py] 1 from prometheus_flask_exporter.multiprocess import GunicornInternalPrometheusMetrics 2 from flask import request 3 4 5 def activate_prometheus_exporter(app): 6 metrics = GunicornInternalPrometheusMetrics(app, export_defaults=False) 7 8 endpoint = app.view_functions 9 histogram = metrics.histogram( 10 "mlflow_requests_by_status_and_path", 11 "Request latencies and count by status and path", 12 labels={ 13 "status": lambda r: r.status_code, 14 "path": lambda: change_path_for_metric(request.path), 15 }, 16 ) 17 for func_name, func in endpoint.items(): 18 if func_name in ["_search_runs", "_log_metric", "_log_param", "_set_tag", "_create_run"]: 19 app.view_functions[func_name] = histogram(func) 20 21 return app 22 23 24 def change_path_for_metric(path): 25 """ 26 Replace the '/' in the metric path by '_' so grafana can correctly use it. 
27 :param path: path of the metric (example: runs/search) 28 :return: path with '_' instead of '/' 29 """ 30 if "mlflow/" in path: 31 path = path.split("mlflow/")[-1] 32 return path.replace("/", "_") 33 [end of mlflow/server/prometheus_exporter.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/mlflow/server/prometheus_exporter.py b/mlflow/server/prometheus_exporter.py
--- a/mlflow/server/prometheus_exporter.py
+++ b/mlflow/server/prometheus_exporter.py
@@ -1,32 +1,19 @@
 from prometheus_flask_exporter.multiprocess import GunicornInternalPrometheusMetrics
 from flask import request
 
+from mlflow.version import VERSION
+
 
 def activate_prometheus_exporter(app):
-    metrics = GunicornInternalPrometheusMetrics(app, export_defaults=False)
+    def mlflow_version(_: request):
+        return VERSION
 
-    endpoint = app.view_functions
-    histogram = metrics.histogram(
-        "mlflow_requests_by_status_and_path",
-        "Request latencies and count by status and path",
-        labels={
-            "status": lambda r: r.status_code,
-            "path": lambda: change_path_for_metric(request.path),
-        },
+    metrics = GunicornInternalPrometheusMetrics(
+        app,
+        export_defaults=True,
+        defaults_prefix="mlflow",
+        excluded_paths=["/health"],
+        group_by=mlflow_version,
     )
-    for func_name, func in endpoint.items():
-        if func_name in ["_search_runs", "_log_metric", "_log_param", "_set_tag", "_create_run"]:
-            app.view_functions[func_name] = histogram(func)
-
-    return app
-
 
-def change_path_for_metric(path):
-    """
-    Replace the '/' in the metric path by '_' so grafana can correctly use it.
-    :param path: path of the metric (example: runs/search)
-    :return: path with '_' instead of '/'
-    """
-    if "mlflow/" in path:
-        path = path.split("mlflow/")[-1]
-    return path.replace("/", "_")
+    return metrics
{"golden_diff": "diff --git a/mlflow/server/prometheus_exporter.py b/mlflow/server/prometheus_exporter.py\n--- a/mlflow/server/prometheus_exporter.py\n+++ b/mlflow/server/prometheus_exporter.py\n@@ -1,32 +1,19 @@\n from prometheus_flask_exporter.multiprocess import GunicornInternalPrometheusMetrics\r\n from flask import request\r\n \r\n+from mlflow.version import VERSION\r\n+\r\n \r\n def activate_prometheus_exporter(app):\r\n- metrics = GunicornInternalPrometheusMetrics(app, export_defaults=False)\r\n+ def mlflow_version(_: request):\r\n+ return VERSION\r\n \r\n- endpoint = app.view_functions\r\n- histogram = metrics.histogram(\r\n- \"mlflow_requests_by_status_and_path\",\r\n- \"Request latencies and count by status and path\",\r\n- labels={\r\n- \"status\": lambda r: r.status_code,\r\n- \"path\": lambda: change_path_for_metric(request.path),\r\n- },\r\n+ metrics = GunicornInternalPrometheusMetrics(\r\n+ app,\r\n+ export_defaults=True,\r\n+ defaults_prefix=\"mlflow\",\r\n+ excluded_paths=[\"/health\"],\r\n+ group_by=mlflow_version,\r\n )\r\n- for func_name, func in endpoint.items():\r\n- if func_name in [\"_search_runs\", \"_log_metric\", \"_log_param\", \"_set_tag\", \"_create_run\"]:\r\n- app.view_functions[func_name] = histogram(func)\r\n-\r\n- return app\r\n-\r\n \r\n-def change_path_for_metric(path):\r\n- \"\"\"\r\n- Replace the '/' in the metric path by '_' so grafana can correctly use it.\r\n- :param path: path of the metric (example: runs/search)\r\n- :return: path with '_' instead of '/'\r\n- \"\"\"\r\n- if \"mlflow/\" in path:\r\n- path = path.split(\"mlflow/\")[-1]\r\n- return path.replace(\"/\", \"_\")\r\n+ return metrics\n", "issue": "[BUG] Service metrics endpoint excludes many important routes\n### Willingness to contribute\r\nThe MLflow Community encourages bug fix contributions. Would you or another member of your organization be willing to contribute a fix for this bug to the MLflow code base?\r\n\r\n- [x] Yes. I can contribute a fix for this bug independently.\r\n- [ ] Yes. I would be willing to contribute a fix for this bug with guidance from the MLflow community.\r\n- [ ] No. I cannot contribute a bug fix at this time.\r\n\r\n### System information\r\n- **Have I written custom code (as opposed to using a stock example script provided in MLflow)**: no\r\n- **OS Platform and Distribution (e.g., Linux Ubuntu 16.04)**: Debian GNU/Linux 11\r\n- **MLflow installed from (source or binary)**: binary\r\n- **MLflow version (run ``mlflow --version``)**: 1.21.0\r\n- **Python version**: 3.8\r\n- **npm version, if running the dev UI**:\r\n- **Exact command to reproduce**: see section below\r\n\r\n### Describe the problem\r\n\r\nThe mlflow server option to expose a Prometheus metrics endpoint is a great observability feature for MLflow. Unfortunately, the current implementation leaves an incomplete view of the server health/performance. 
Currently, mlflow only logs metrics to a [subset of endpoints](https://github.com/mlflow/mlflow/blob/master/mlflow/server/prometheus_exporter.py#L18).\r\n\r\nAs of MLflow version 1.21, the following routes are not being included in the service metrics:\r\n\r\n```\r\n['static', '_get_experiment_by_name', '_create_experiment', '_list_experiments', '_get_experiment',\r\n'_delete_experiment', '_restore_experiment', '_update_experiment', '_update_run', '_delete_run', \r\n'_restore_run', '_set_experiment_tag', '_delete_tag', '_get_run', '_list_artifacts', '_get_metric_history',\r\n'_log_batch', '_log_model', '_create_registered_model', '_rename_registered_model', \r\n'_update_registered_model', '_delete_registered_model', '_get_registered_model', '_search_registered_models', \r\n'_list_registered_models', '_get_latest_versions', '_create_model_version', '_update_model_version', \r\n'_transition_stage', '_delete_model_version', '_get_model_version', '_search_model_versions', \r\n'_get_model_version_download_uri', '_set_registered_model_tag', '_set_model_version_tag', \r\n'_delete_registered_model_tag', '_delete_model_version_tag', 'health', 'serve_artifacts', \r\n'serve_model_version_artifact', 'serve_static_file', 'serve']\r\n```\r\n\r\n(see full list of endpoints)\r\n```\r\nfrom mlflow.server import app\r\n\r\napp.view_functions.keys()\r\n```\r\n\r\nFiltering the set of routes to be included in the metrics endpoint seems like a potentially fragile approach as new routes are added in later versions of mlflow. It's especially problematic that the list of filtered routes cannot be configured. We currently have no way to monitor the health of the overall service given that many key routes (e.g. `log_batch`) are not included in the service metrics.\r\n\r\n### Code to reproduce issue\r\n\r\nDockerfile for mlflow server\r\n```\r\nFROM python:3.8\r\nRUN pip install mlflow==1.21.0\r\n\r\nENTRYPOINT mlflow server \\\r\n --backend-store-uri sqlite:///mlflow.sqlite \\\r\n --default-artifact-root file:///artifacts \\\r\n --host 0.0.0.0 \\\r\n --port 5000 \\\r\n --expose-prometheus /prometheus\r\n```\r\n\r\nBuild and run the Docker container\r\n```\r\ndocker build -t mlflow_example -f Dockerfile .\r\ndocker run -p 5000:5000 mlflow_example\r\n```\r\n\r\n\r\nScript with incomplete representation in metrics endpoint\r\n```\r\nimport mlflow\r\nimport random\r\n\r\nmlflow.set_tracking_uri(\"http://127.0.0.1:5000\")\r\nmlflow.set_experiment(\"service_metrics\")\r\n\r\nwith mlflow.start_run(run_name=\"test\"):\r\n\r\n for _ in range(100):\r\n mlflow.log_metrics({\r\n 'loss_a': random.random(),\r\n 'loss_b': random.random(),\r\n 'loss_c': random.random(),\r\n })\r\n\r\n mlflow.log_params({'a': 1, 'b': 2, 'c': 3})\r\n```\r\nSee how metrics for these endpoints **_do not_** appear at http://127.0.0.1:5000/metrics\r\n\r\n---\r\n\r\nScript with expected representation in metrics endpoint\r\n```\r\nimport mlflow\r\nimport random\r\n\r\nmlflow.set_tracking_uri(\"http://127.0.0.1:5000\")\r\nmlflow.set_experiment(\"service_metrics\")\r\n\r\nwith mlflow.start_run(run_name=\"test\"):\r\n for _ in range(100):\r\n mlflow.log_metric('loss', random.random())\r\n\r\n mlflow.log_param('param', 'test')\r\n```\r\nSee how metrics for these endpoints appear at http://127.0.0.1:5000/metrics\r\n\r\n### Other info / logs\r\nInclude any logs or source code that would be helpful to diagnose the problem. If including tracebacks, please include the full traceback. 
Large logs and files should be attached.\r\n\r\n\r\n### What component(s), interfaces, languages, and integrations does this bug affect?\r\nComponents \r\n- [ ] `area/artifacts`: Artifact stores and artifact logging\r\n- [ ] `area/build`: Build and test infrastructure for MLflow\r\n- [ ] `area/docs`: MLflow documentation pages\r\n- [ ] `area/examples`: Example code\r\n- [ ] `area/model-registry`: Model Registry service, APIs, and the fluent client calls for Model Registry\r\n- [ ] `area/models`: MLmodel format, model serialization/deserialization, flavors\r\n- [ ] `area/projects`: MLproject format, project running backends\r\n- [ ] `area/scoring`: MLflow Model server, model deployment tools, Spark UDFs\r\n- [x] `area/server-infra`: MLflow Tracking server backend\r\n- [ ] `area/tracking`: Tracking Service, tracking client APIs, autologging\r\n\r\nInterface \r\n- [ ] `area/uiux`: Front-end, user experience, plotting, JavaScript, JavaScript dev server\r\n- [ ] `area/docker`: Docker use across MLflow's components, such as MLflow Projects and MLflow Models\r\n- [ ] `area/sqlalchemy`: Use of SQLAlchemy in the Tracking Service or Model Registry\r\n- [ ] `area/windows`: Windows support\r\n\r\nLanguage \r\n- [ ] `language/r`: R APIs and clients\r\n- [ ] `language/java`: Java APIs and clients\r\n- [ ] `language/new`: Proposals for new client languages\r\n\r\nIntegrations\r\n- [ ] `integrations/azure`: Azure and Azure ML integrations\r\n- [ ] `integrations/sagemaker`: SageMaker integrations\r\n- [ ] `integrations/databricks`: Databricks integrations\r\n\n", "before_files": [{"content": "from prometheus_flask_exporter.multiprocess import GunicornInternalPrometheusMetrics\r\nfrom flask import request\r\n\r\n\r\ndef activate_prometheus_exporter(app):\r\n metrics = GunicornInternalPrometheusMetrics(app, export_defaults=False)\r\n\r\n endpoint = app.view_functions\r\n histogram = metrics.histogram(\r\n \"mlflow_requests_by_status_and_path\",\r\n \"Request latencies and count by status and path\",\r\n labels={\r\n \"status\": lambda r: r.status_code,\r\n \"path\": lambda: change_path_for_metric(request.path),\r\n },\r\n )\r\n for func_name, func in endpoint.items():\r\n if func_name in [\"_search_runs\", \"_log_metric\", \"_log_param\", \"_set_tag\", \"_create_run\"]:\r\n app.view_functions[func_name] = histogram(func)\r\n\r\n return app\r\n\r\n\r\ndef change_path_for_metric(path):\r\n \"\"\"\r\n Replace the '/' in the metric path by '_' so grafana can correctly use it.\r\n :param path: path of the metric (example: runs/search)\r\n :return: path with '_' instead of '/'\r\n \"\"\"\r\n if \"mlflow/\" in path:\r\n path = path.split(\"mlflow/\")[-1]\r\n return path.replace(\"/\", \"_\")\r\n", "path": "mlflow/server/prometheus_exporter.py"}]}
num_tokens_prompt: 2,322
num_tokens_diff: 407
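To make the shape of that fix concrete outside MLflow: with prometheus_flask_exporter, enabling the default metrics instruments every registered Flask route automatically, so coverage no longer depends on maintaining an allow-list of view functions. The sketch below is a standalone toy app, not MLflow code; it assumes the third-party `flask` and `prometheus_flask_exporter` packages are installed, and the route names are made up.

```python
from flask import Flask
from prometheus_flask_exporter import PrometheusMetrics  # pip install prometheus-flask-exporter

app = Flask(__name__)

# export_defaults=True adds request count/latency metrics for *all* routes,
# so endpoints added later are covered without touching the exporter setup.
metrics = PrometheusMetrics(
    app,
    export_defaults=True,
    defaults_prefix="demo",
    excluded_paths=["/health"],  # keep the noisy liveness probe out of the histograms
)


@app.route("/health")
def health():
    return "OK"


@app.route("/api/2.0/demo/log-batch", methods=["POST"])
def log_batch():
    # Under the old allow-list approach, a route like this stayed invisible
    # unless someone remembered to add its view function to the list.
    return "ok"


if __name__ == "__main__":
    app.run(port=5001)  # scrape the results at http://127.0.0.1:5001/metrics
```

The multiprocess `GunicornInternalPrometheusMetrics` variant used in the diff accepts the same keyword arguments; grouping by MLflow version is the only server-specific addition.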
problem_id: gh_patches_debug_13249
source: rasdani/github-patches
task_type: git_diff
in_source_id: streamlit__streamlit-7256
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> pydeck_chart: Error (not a valid JSON) when data contains NaN ### Checklist - [X] I have searched the [existing issues](https://github.com/streamlit/streamlit/issues) for similar issues. - [X] I added a very descriptive title to this issue. - [X] I have provided sufficient information below to help reproduce this issue. ### Summary If the data used in pydeck contains NaN (even if it is not used), the application fails with ![image](https://user-images.githubusercontent.com/25638902/205078988-992fdce1-a6b8-4183-9237-5cd4e358e2ff.png) ### Reproducible Code Example ```Python import math, pandas, pydeck, streamlit streamlit.set_page_config(layout="wide") data = pandas.DataFrame({"lng": [-109.037673], "lat": [36.994672], "weight": [math.nan]}) layer = pydeck.Layer("ScatterplotLayer", data=data, get_position=["lng", "lat"], radius_min_pixels=4) deck = pydeck.Deck(layers=[layer], map_style=pydeck.map_styles.CARTO_LIGHT, tooltip={"text": "weight: {weight}"}) deck.to_html("test.html") streamlit.pydeck_chart(deck, use_container_width=True) ``` ### Steps To Reproduce Run the code as usual. ### Expected Behavior No error, as in the file ```test.html``` generated ### Current Behavior SyntaxError: Unexpected token 'N', ...""weight": NaN "... is not valid JSON ### Is this a regression? - [ ] Yes, this used to work in a previous version. ### Debug info - Streamlit version: Streamlit v1.15.1 - Python version: 3.10.4 (tags/v3.10.4:9d38120, Mar 23 2022, 23:13:41) [MSC v.1929 64 bit (AMD64)] - Operating System: Windows 10.0.19045.2251 - Browser: Chome, Opera, Edge, Firefox - Virtual environment: poetry ### Additional Information _No response_ ### Are you willing to submit a PR? - [ ] Yes, I am willing to submit a PR! </issue> <code> [start of e2e/scripts/st_pydeck_chart.py] 1 # Copyright (c) Streamlit Inc. (2018-2022) Snowflake Inc. (2022) 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 from typing import Any, cast 16 17 import numpy as np 18 import pandas as pd 19 import pydeck as pdk 20 21 import streamlit as st 22 23 # Empty chart. 24 25 st.pydeck_chart() 26 27 # Basic chart. 
28 29 np.random.seed(12345) 30 31 df = pd.DataFrame( 32 cast(Any, np.random.randn(1000, 2) / [50, 50]) + [37.76, -122.4], 33 columns=["lat", "lon"], 34 ) 35 36 st.pydeck_chart( 37 pdk.Deck( 38 map_style="mapbox://styles/mapbox/light-v9", 39 initial_view_state=pdk.ViewState( 40 latitude=37.76, 41 longitude=-122.4, 42 zoom=11, 43 pitch=50, 44 ), 45 layers=[ 46 pdk.Layer( 47 "HexagonLayer", 48 data=df, 49 get_position="[lon, lat]", 50 radius=200, 51 elevation_scale=4, 52 elevation_range=[0, 1000], 53 pickable=True, 54 extruded=True, 55 ), 56 pdk.Layer( 57 "ScatterplotLayer", 58 data=df, 59 get_position="[lon, lat]", 60 get_color="[200, 30, 0, 160]", 61 get_radius=200, 62 ), 63 ], 64 ) 65 ) 66 [end of e2e/scripts/st_pydeck_chart.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/e2e/scripts/st_pydeck_chart.py b/e2e/scripts/st_pydeck_chart.py
--- a/e2e/scripts/st_pydeck_chart.py
+++ b/e2e/scripts/st_pydeck_chart.py
@@ -12,6 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+import math
 from typing import Any, cast
 
 import numpy as np
@@ -63,3 +64,15 @@
         ],
     )
 )
+
+# Chart w/ invalid JSON - issue #5799.
+data = pd.DataFrame({"lng": [-109.037673], "lat": [36.994672], "weight": [math.nan]})
+layer = pdk.Layer(
+    "ScatterplotLayer", data=data, get_position=["lng", "lat"], radius_min_pixels=4
+)
+deck = pdk.Deck(
+    layers=[layer],
+    map_style=pdk.map_styles.CARTO_LIGHT,
+    tooltip={"text": "weight: {weight}"},
+)
+st.pydeck_chart(deck, use_container_width=True)
{"golden_diff": "diff --git a/e2e/scripts/st_pydeck_chart.py b/e2e/scripts/st_pydeck_chart.py\n--- a/e2e/scripts/st_pydeck_chart.py\n+++ b/e2e/scripts/st_pydeck_chart.py\n@@ -12,6 +12,7 @@\n # See the License for the specific language governing permissions and\n # limitations under the License.\n \n+import math\n from typing import Any, cast\n \n import numpy as np\n@@ -63,3 +64,15 @@\n ],\n )\n )\n+\n+# Chart w/ invalid JSON - issue #5799.\n+data = pd.DataFrame({\"lng\": [-109.037673], \"lat\": [36.994672], \"weight\": [math.nan]})\n+layer = pdk.Layer(\n+ \"ScatterplotLayer\", data=data, get_position=[\"lng\", \"lat\"], radius_min_pixels=4\n+)\n+deck = pdk.Deck(\n+ layers=[layer],\n+ map_style=pdk.map_styles.CARTO_LIGHT,\n+ tooltip={\"text\": \"weight: {weight}\"},\n+)\n+st.pydeck_chart(deck, use_container_width=True)\n", "issue": "pydeck_chart: Error (not a valid JSON) when data contains NaN\n### Checklist\r\n\r\n- [X] I have searched the [existing issues](https://github.com/streamlit/streamlit/issues) for similar issues.\r\n- [X] I added a very descriptive title to this issue.\r\n- [X] I have provided sufficient information below to help reproduce this issue.\r\n\r\n### Summary\r\n\r\nIf the data used in pydeck contains NaN (even if it is not used), the application fails with \r\n![image](https://user-images.githubusercontent.com/25638902/205078988-992fdce1-a6b8-4183-9237-5cd4e358e2ff.png)\r\n\r\n\r\n### Reproducible Code Example\r\n\r\n```Python\r\nimport math, pandas, pydeck, streamlit\r\n\r\nstreamlit.set_page_config(layout=\"wide\")\r\ndata = pandas.DataFrame({\"lng\": [-109.037673], \"lat\": [36.994672], \"weight\": [math.nan]})\r\nlayer = pydeck.Layer(\"ScatterplotLayer\", data=data, get_position=[\"lng\", \"lat\"], radius_min_pixels=4)\r\ndeck = pydeck.Deck(layers=[layer], map_style=pydeck.map_styles.CARTO_LIGHT, tooltip={\"text\": \"weight: {weight}\"})\r\ndeck.to_html(\"test.html\")\r\nstreamlit.pydeck_chart(deck, use_container_width=True)\r\n```\r\n\r\n\r\n### Steps To Reproduce\r\n\r\nRun the code as usual.\r\n\r\n### Expected Behavior\r\n\r\nNo error, as in the file ```test.html``` generated\r\n\r\n### Current Behavior\r\n\r\nSyntaxError:\r\nUnexpected token 'N', ...\"\"weight\": NaN \"... is not valid JSON\r\n\r\n\r\n### Is this a regression?\r\n\r\n- [ ] Yes, this used to work in a previous version.\r\n\r\n### Debug info\r\n\r\n- Streamlit version: Streamlit v1.15.1\r\n- Python version: 3.10.4 (tags/v3.10.4:9d38120, Mar 23 2022, 23:13:41) [MSC v.1929 64 bit (AMD64)]\r\n- Operating System: Windows 10.0.19045.2251\r\n- Browser: Chome, Opera, Edge, Firefox\r\n- Virtual environment: poetry\r\n\r\n\r\n### Additional Information\r\n\r\n_No response_\r\n\r\n### Are you willing to submit a PR?\r\n\r\n- [ ] Yes, I am willing to submit a PR!\n", "before_files": [{"content": "# Copyright (c) Streamlit Inc. (2018-2022) Snowflake Inc. 
(2022)\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom typing import Any, cast\n\nimport numpy as np\nimport pandas as pd\nimport pydeck as pdk\n\nimport streamlit as st\n\n# Empty chart.\n\nst.pydeck_chart()\n\n# Basic chart.\n\nnp.random.seed(12345)\n\ndf = pd.DataFrame(\n cast(Any, np.random.randn(1000, 2) / [50, 50]) + [37.76, -122.4],\n columns=[\"lat\", \"lon\"],\n)\n\nst.pydeck_chart(\n pdk.Deck(\n map_style=\"mapbox://styles/mapbox/light-v9\",\n initial_view_state=pdk.ViewState(\n latitude=37.76,\n longitude=-122.4,\n zoom=11,\n pitch=50,\n ),\n layers=[\n pdk.Layer(\n \"HexagonLayer\",\n data=df,\n get_position=\"[lon, lat]\",\n radius=200,\n elevation_scale=4,\n elevation_range=[0, 1000],\n pickable=True,\n extruded=True,\n ),\n pdk.Layer(\n \"ScatterplotLayer\",\n data=df,\n get_position=\"[lon, lat]\",\n get_color=\"[200, 30, 0, 160]\",\n get_radius=200,\n ),\n ],\n )\n)\n", "path": "e2e/scripts/st_pydeck_chart.py"}]}
num_tokens_prompt: 1,666
num_tokens_diff: 257
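The failure in this record is ultimately a serialization detail worth seeing in isolation: Python's `json` module emits the bare token `NaN` by default, which is not valid JSON and is exactly what the browser-side parser rejects. The sketch below uses only the standard library; the column names echo the reproduction script but nothing here is Streamlit or pydeck code, and replacing NaN with `null` is just one possible mitigation.

```python
import json
import math

record = {"lng": -109.037673, "lat": 36.994672, "weight": math.nan}

# Default behaviour: json.dumps writes the non-standard token `NaN`,
# which JavaScript's JSON.parse (and hence the frontend) refuses to parse.
print(json.dumps(record))
# {"lng": -109.037673, "lat": 36.994672, "weight": NaN}

# With allow_nan=False the problem surfaces on the Python side instead:
try:
    json.dumps(record, allow_nan=False)
except ValueError as exc:
    print("strict mode:", exc)


def sanitize(obj):
    """Recursively replace NaN floats with None so they serialise as null."""
    if isinstance(obj, float) and math.isnan(obj):
        return None
    if isinstance(obj, dict):
        return {key: sanitize(value) for key, value in obj.items()}
    if isinstance(obj, list):
        return [sanitize(value) for value in obj]
    return obj


print(json.dumps(sanitize(record)))
# {"lng": -109.037673, "lat": 36.994672, "weight": null}
```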
problem_id: gh_patches_debug_50331
source: rasdani/github-patches
task_type: git_diff
in_source_id: pypi__warehouse-13706
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Wrong key name used for PEP 658 metadata files in the JSON index **Describe the bug** [PEP 691](https://peps.python.org/pep-0691/#project-detail) states that the key name for metadata files in the JSON index should be `dist-info-metadata`: > `dist-info-metadata`: An optional key that indicates that metadata for this file is available, via the same location as specified in [PEP 658](https://peps.python.org/pep-0658) (`{file_url}.metadata`). However, warehouse is providing it under the `data-dist-info-metadata` key instead: ``` $ curl -H 'Accept: application/vnd.pypi.simple.v1+json' https://pypi.org/simple/fluffy-server/ | jq .files [...] { "data-dist-info-metadata": { "sha256": "4db99543165cbdeef42ccb6257545911ccd7865d65e304e3e056f383a25f309c" }, "filename": "fluffy_server-1.39.2-py3-none-any.whl", [...] ``` This is causing pip to not use the metadata files as it is looking for the `dist-info-metadata` key only: https://github.com/pypa/pip/blob/f25f8fffbbd16fdb13a4f8977946afe9a3248453/src/pip/_internal/models/link.py#L265 **Additional context** There are two bugs discovered recently in pip which may make this tricky to fix: * https://github.com/pypa/pip/issues/12042 * https://github.com/pypa/pip/issues/12038 I believe if we simply fix the key name in pypi.org, it will break existing pip versions as it will cause users to encounter these bugs. It may be necessary to coordinate this fix with fixes to the above bugs in pip to avoid disruption? </issue> <code> [start of warehouse/packaging/utils.py] 1 # Licensed under the Apache License, Version 2.0 (the "License"); 2 # you may not use this file except in compliance with the License. 3 # You may obtain a copy of the License at 4 # 5 # http://www.apache.org/licenses/LICENSE-2.0 6 # 7 # Unless required by applicable law or agreed to in writing, software 8 # distributed under the License is distributed on an "AS IS" BASIS, 9 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 10 # See the License for the specific language governing permissions and 11 # limitations under the License. 12 13 import hashlib 14 import os.path 15 import tempfile 16 17 import packaging_legacy.version 18 19 from pyramid_jinja2 import IJinja2Environment 20 from sqlalchemy.orm import joinedload 21 22 from warehouse.packaging.interfaces import ISimpleStorage 23 from warehouse.packaging.models import File, Project, Release 24 25 API_VERSION = "1.1" 26 27 28 def _simple_index(request, serial): 29 # Fetch the name and normalized name for all of our projects 30 projects = ( 31 request.db.query(Project.name, Project.normalized_name, Project.last_serial) 32 .order_by(Project.normalized_name) 33 .all() 34 ) 35 36 return { 37 "meta": {"api-version": API_VERSION, "_last-serial": serial}, 38 "projects": [{"name": p.name, "_last-serial": p.last_serial} for p in projects], 39 } 40 41 42 def _simple_detail(project, request): 43 # Get all of the files for this project. 
44 files = sorted( 45 request.db.query(File) 46 .options(joinedload(File.release)) 47 .join(Release) 48 .filter(Release.project == project) 49 .all(), 50 key=lambda f: (packaging_legacy.version.parse(f.release.version), f.filename), 51 ) 52 versions = sorted( 53 {f.release.version for f in files}, key=packaging_legacy.version.parse 54 ) 55 56 return { 57 "meta": {"api-version": API_VERSION, "_last-serial": project.last_serial}, 58 "name": project.normalized_name, 59 "versions": versions, 60 "files": [ 61 { 62 "filename": file.filename, 63 "url": request.route_url("packaging.file", path=file.path), 64 "hashes": { 65 "sha256": file.sha256_digest, 66 }, 67 "requires-python": file.release.requires_python, 68 "size": file.size, 69 "upload-time": file.upload_time.isoformat() + "Z", 70 "yanked": file.release.yanked_reason 71 if file.release.yanked and file.release.yanked_reason 72 else file.release.yanked, 73 "data-dist-info-metadata": {"sha256": file.metadata_file_sha256_digest} 74 if file.metadata_file_sha256_digest 75 else False, 76 } 77 for file in files 78 ], 79 } 80 81 82 def render_simple_detail(project, request, store=False): 83 context = _simple_detail(project, request) 84 85 env = request.registry.queryUtility(IJinja2Environment, name=".jinja2") 86 template = env.get_template("templates/api/simple/detail.html") 87 content = template.render(**context, request=request) 88 89 content_hasher = hashlib.blake2b(digest_size=256 // 8) 90 content_hasher.update(content.encode("utf-8")) 91 content_hash = content_hasher.hexdigest().lower() 92 93 simple_detail_path = ( 94 f"{project.normalized_name}/{content_hash}.{project.normalized_name}.html" 95 ) 96 97 if store: 98 storage = request.find_service(ISimpleStorage) 99 with tempfile.NamedTemporaryFile() as f: 100 f.write(content.encode("utf-8")) 101 f.flush() 102 103 storage.store( 104 simple_detail_path, 105 f.name, 106 meta={ 107 "project": project.normalized_name, 108 "pypi-last-serial": project.last_serial, 109 "hash": content_hash, 110 }, 111 ) 112 storage.store( 113 os.path.join(project.normalized_name, "index.html"), 114 f.name, 115 meta={ 116 "project": project.normalized_name, 117 "pypi-last-serial": project.last_serial, 118 "hash": content_hash, 119 }, 120 ) 121 122 return (content_hash, simple_detail_path) 123 [end of warehouse/packaging/utils.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/warehouse/packaging/utils.py b/warehouse/packaging/utils.py
--- a/warehouse/packaging/utils.py
+++ b/warehouse/packaging/utils.py
@@ -73,6 +73,9 @@
                 "data-dist-info-metadata": {"sha256": file.metadata_file_sha256_digest}
                 if file.metadata_file_sha256_digest
                 else False,
+                "core-metadata": {"sha256": file.metadata_file_sha256_digest}
+                if file.metadata_file_sha256_digest
+                else False,
             }
             for file in files
         ],
{"golden_diff": "diff --git a/warehouse/packaging/utils.py b/warehouse/packaging/utils.py\n--- a/warehouse/packaging/utils.py\n+++ b/warehouse/packaging/utils.py\n@@ -73,6 +73,9 @@\n \"data-dist-info-metadata\": {\"sha256\": file.metadata_file_sha256_digest}\n if file.metadata_file_sha256_digest\n else False,\n+ \"core-metadata\": {\"sha256\": file.metadata_file_sha256_digest}\n+ if file.metadata_file_sha256_digest\n+ else False,\n }\n for file in files\n ],\n", "issue": "Wrong key name used for PEP 658 metadata files in the JSON index\n**Describe the bug**\r\n\r\n[PEP 691](https://peps.python.org/pep-0691/#project-detail) states that the key name for metadata files in the JSON index should be `dist-info-metadata`:\r\n\r\n> `dist-info-metadata`: An optional key that indicates that metadata for this file is available, via the same location as specified in [PEP 658](https://peps.python.org/pep-0658) (`{file_url}.metadata`).\r\n\r\nHowever, warehouse is providing it under the `data-dist-info-metadata` key instead:\r\n\r\n```\r\n$ curl -H 'Accept: application/vnd.pypi.simple.v1+json' https://pypi.org/simple/fluffy-server/ | jq .files\r\n[...]\r\n {\r\n \"data-dist-info-metadata\": {\r\n \"sha256\": \"4db99543165cbdeef42ccb6257545911ccd7865d65e304e3e056f383a25f309c\"\r\n },\r\n \"filename\": \"fluffy_server-1.39.2-py3-none-any.whl\",\r\n [...]\r\n```\r\n\r\nThis is causing pip to not use the metadata files as it is looking for the `dist-info-metadata` key only:\r\nhttps://github.com/pypa/pip/blob/f25f8fffbbd16fdb13a4f8977946afe9a3248453/src/pip/_internal/models/link.py#L265\r\n\r\n\r\n**Additional context**\r\n\r\nThere are two bugs discovered recently in pip which may make this tricky to fix:\r\n\r\n* https://github.com/pypa/pip/issues/12042\r\n* https://github.com/pypa/pip/issues/12038\r\n\r\nI believe if we simply fix the key name in pypi.org, it will break existing pip versions as it will cause users to encounter these bugs. 
It may be necessary to coordinate this fix with fixes to the above bugs in pip to avoid disruption?\n", "before_files": [{"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport hashlib\nimport os.path\nimport tempfile\n\nimport packaging_legacy.version\n\nfrom pyramid_jinja2 import IJinja2Environment\nfrom sqlalchemy.orm import joinedload\n\nfrom warehouse.packaging.interfaces import ISimpleStorage\nfrom warehouse.packaging.models import File, Project, Release\n\nAPI_VERSION = \"1.1\"\n\n\ndef _simple_index(request, serial):\n # Fetch the name and normalized name for all of our projects\n projects = (\n request.db.query(Project.name, Project.normalized_name, Project.last_serial)\n .order_by(Project.normalized_name)\n .all()\n )\n\n return {\n \"meta\": {\"api-version\": API_VERSION, \"_last-serial\": serial},\n \"projects\": [{\"name\": p.name, \"_last-serial\": p.last_serial} for p in projects],\n }\n\n\ndef _simple_detail(project, request):\n # Get all of the files for this project.\n files = sorted(\n request.db.query(File)\n .options(joinedload(File.release))\n .join(Release)\n .filter(Release.project == project)\n .all(),\n key=lambda f: (packaging_legacy.version.parse(f.release.version), f.filename),\n )\n versions = sorted(\n {f.release.version for f in files}, key=packaging_legacy.version.parse\n )\n\n return {\n \"meta\": {\"api-version\": API_VERSION, \"_last-serial\": project.last_serial},\n \"name\": project.normalized_name,\n \"versions\": versions,\n \"files\": [\n {\n \"filename\": file.filename,\n \"url\": request.route_url(\"packaging.file\", path=file.path),\n \"hashes\": {\n \"sha256\": file.sha256_digest,\n },\n \"requires-python\": file.release.requires_python,\n \"size\": file.size,\n \"upload-time\": file.upload_time.isoformat() + \"Z\",\n \"yanked\": file.release.yanked_reason\n if file.release.yanked and file.release.yanked_reason\n else file.release.yanked,\n \"data-dist-info-metadata\": {\"sha256\": file.metadata_file_sha256_digest}\n if file.metadata_file_sha256_digest\n else False,\n }\n for file in files\n ],\n }\n\n\ndef render_simple_detail(project, request, store=False):\n context = _simple_detail(project, request)\n\n env = request.registry.queryUtility(IJinja2Environment, name=\".jinja2\")\n template = env.get_template(\"templates/api/simple/detail.html\")\n content = template.render(**context, request=request)\n\n content_hasher = hashlib.blake2b(digest_size=256 // 8)\n content_hasher.update(content.encode(\"utf-8\"))\n content_hash = content_hasher.hexdigest().lower()\n\n simple_detail_path = (\n f\"{project.normalized_name}/{content_hash}.{project.normalized_name}.html\"\n )\n\n if store:\n storage = request.find_service(ISimpleStorage)\n with tempfile.NamedTemporaryFile() as f:\n f.write(content.encode(\"utf-8\"))\n f.flush()\n\n storage.store(\n simple_detail_path,\n f.name,\n meta={\n \"project\": project.normalized_name,\n \"pypi-last-serial\": project.last_serial,\n \"hash\": content_hash,\n },\n )\n storage.store(\n 
os.path.join(project.normalized_name, \"index.html\"),\n f.name,\n meta={\n \"project\": project.normalized_name,\n \"pypi-last-serial\": project.last_serial,\n \"hash\": content_hash,\n },\n )\n\n return (content_hash, simple_detail_path)\n", "path": "warehouse/packaging/utils.py"}]}
num_tokens_prompt: 2,161
num_tokens_diff: 137
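A note on the shape of that fix: rather than switching to the `dist-info-metadata` spelling the issue quotes from PEP 691, the diff adds `core-metadata` alongside the legacy `data-dist-info-metadata` key, which matches the later rename (PEP 714) that sidestepped the pip bugs referenced in the issue. The helper below is an illustrative reconstruction, not Warehouse code; the filename comes from the issue and the URL and digests are placeholders.

```python
import json


def file_entry(filename, url, sha256, metadata_sha256=None):
    """Build a PEP 691-style file record that emits both metadata key spellings.

    The value is a hash mapping when a metadata file exists, or False when it
    does not; serving both keys keeps older and newer installers working.
    """
    metadata = {"sha256": metadata_sha256} if metadata_sha256 else False
    return {
        "filename": filename,
        "url": url,
        "hashes": {"sha256": sha256},
        "data-dist-info-metadata": metadata,  # legacy spelling, kept for old clients
        "core-metadata": metadata,            # renamed spelling newer clients look for
    }


if __name__ == "__main__":
    entry = file_entry(
        "fluffy_server-1.39.2-py3-none-any.whl",
        "https://files.example.invalid/fluffy_server-1.39.2-py3-none-any.whl",  # placeholder URL
        sha256="0" * 64,                # placeholder digests, not real hashes
        metadata_sha256="1" * 64,
    )
    print(json.dumps(entry, indent=2))
```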
problem_id: gh_patches_debug_14288
source: rasdani/github-patches
task_type: git_diff
in_source_id: easybuilders__easybuild-easyblocks-2981
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> MATLAB changes `$LD_LIBRARY_PATH`, breaks Qt apps I've been debugging an issue with an app that depends on the Qt5 module. The app won't start because it can't find Qt's platform plugins: ``` qt.qpa.plugin: Could not find the Qt platform plugin "xcb" in "" ``` I wrote a minimal Qt app to find the cause of this and it turns out to be MATLAB. More specifically, the fact that the MATLAB module prepends `LD_LIBRARY_PATH` with `$EBROOTMATLAB/bin/glnxa64`, a directory that contains tons of bundled MATLAB libraries, including their own versions of essential Qt5 libraries and many other unrelated libs which could potentially interfere with other modules too. This change was introduced in #2008, presumably to make it easier for MCR compiled apps to load. For MATLAB itself this does not seem to be necessary because the `matlab` startup script sets `LD_LIBRARY_PATH` and other required environment variables on invocation. The MATLAB [docs](https://www.mathworks.com/help/compiler/mcr-path-settings-for-run-time-deployment.html) cautions (bottom of the page) against setting LD_LIBRARY_PATH permanently on Linux due to this interference risk with other software. They suggest to "run MATLAB Compiler applications using the generated shell script" instead. I would propose to revert #2008 but would obviously like to hear @smoors and maybe other's opinion on this first. </issue> <code> [start of easybuild/easyblocks/m/matlab.py] 1 ## 2 # Copyright 2009-2023 Ghent University 3 # 4 # This file is part of EasyBuild, 5 # originally created by the HPC team of Ghent University (http://ugent.be/hpc/en), 6 # with support of Ghent University (http://ugent.be/hpc), 7 # the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be), 8 # Flemish Research Foundation (FWO) (http://www.fwo.be/en) 9 # and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en). 10 # 11 # https://github.com/easybuilders/easybuild 12 # 13 # EasyBuild is free software: you can redistribute it and/or modify 14 # it under the terms of the GNU General Public License as published by 15 # the Free Software Foundation v2. 16 # 17 # EasyBuild is distributed in the hope that it will be useful, 18 # but WITHOUT ANY WARRANTY; without even the implied warranty of 19 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 20 # GNU General Public License for more details. 21 # 22 # You should have received a copy of the GNU General Public License 23 # along with EasyBuild. If not, see <http://www.gnu.org/licenses/>. 
24 ## 25 """ 26 EasyBuild support for installing MATLAB, implemented as an easyblock 27 28 @author: Stijn De Weirdt (Ghent University) 29 @author: Dries Verdegem (Ghent University) 30 @author: Kenneth Hoste (Ghent University) 31 @author: Pieter De Baets (Ghent University) 32 @author: Jens Timmerman (Ghent University) 33 @author: Fotis Georgatos (Uni.Lu, NTUA) 34 """ 35 import re 36 import os 37 import stat 38 import tempfile 39 40 from distutils.version import LooseVersion 41 42 from easybuild.easyblocks.generic.packedbinary import PackedBinary 43 from easybuild.framework.easyconfig import CUSTOM 44 from easybuild.tools.build_log import EasyBuildError 45 from easybuild.tools.filetools import adjust_permissions, change_dir, copy_file, read_file, write_file 46 from easybuild.tools.py2vs3 import string_type 47 from easybuild.tools.run import run_cmd 48 49 50 class EB_MATLAB(PackedBinary): 51 """Support for installing MATLAB.""" 52 53 def __init__(self, *args, **kwargs): 54 """Add extra config options specific to MATLAB.""" 55 super(EB_MATLAB, self).__init__(*args, **kwargs) 56 self.comp_fam = None 57 self.configfile = os.path.join(self.builddir, 'my_installer_input.txt') 58 self.outputfile = os.path.join(self.builddir, 'my_installer_output.txt') 59 60 @staticmethod 61 def extra_options(): 62 extra_vars = { 63 'java_options': ['-Xmx256m', "$_JAVA_OPTIONS value set for install and in module file.", CUSTOM], 64 'key': [None, "Installation key(s), make one install for each key. Single key or a list of keys", CUSTOM], 65 } 66 return PackedBinary.extra_options(extra_vars) 67 68 def configure_step(self): 69 """Configure MATLAB installation: create license file.""" 70 71 licfile = self.cfg['license_file'] 72 if licfile is None: 73 licserv = self.cfg['license_server'] 74 if licserv is None: 75 licserv = os.getenv('EB_MATLAB_LICENSE_SERVER', 'license.example.com') 76 licport = self.cfg['license_server_port'] 77 if licport is None: 78 licport = os.getenv('EB_MATLAB_LICENSE_SERVER_PORT', '00000') 79 # create license file 80 lictxt = '\n'.join([ 81 "SERVER %s 000000000000 %s" % (licserv, licport), 82 "USE_SERVER", 83 ]) 84 85 licfile = os.path.join(self.builddir, 'matlab.lic') 86 write_file(licfile, lictxt) 87 88 try: 89 copy_file(os.path.join(self.cfg['start_dir'], 'installer_input.txt'), self.configfile) 90 adjust_permissions(self.configfile, stat.S_IWUSR) 91 92 # read file in binary mode to avoid UTF-8 encoding issues when using Python 3, 93 # due to non-UTF-8 characters... 
94 config = read_file(self.configfile, mode='rb') 95 96 # use raw byte strings (must be 'br', not 'rb'), 97 # required when using Python 3 because file was read in binary mode 98 regdest = re.compile(br"^# destinationFolder=.*", re.M) 99 regagree = re.compile(br"^# agreeToLicense=.*", re.M) 100 regmode = re.compile(br"^# mode=.*", re.M) 101 reglicpath = re.compile(br"^# licensePath=.*", re.M) 102 regoutfile = re.compile(br"^# outputFile=.*", re.M) 103 104 # must use byte-strings here when using Python 3, see above 105 config = regdest.sub(b"destinationFolder=%s" % self.installdir.encode('utf-8'), config) 106 config = regagree.sub(b"agreeToLicense=Yes", config) 107 config = regmode.sub(b"mode=silent", config) 108 config = reglicpath.sub(b"licensePath=%s" % licfile.encode('utf-8'), config) 109 config = regoutfile.sub(b"outputFile=%s" % self.outputfile.encode('utf-8'), config) 110 111 write_file(self.configfile, config) 112 113 except IOError as err: 114 raise EasyBuildError("Failed to create installation config file %s: %s", self.configfile, err) 115 116 self.log.debug('configuration file written to %s:\n %s', self.configfile, config) 117 118 def install_step(self): 119 """MATLAB install procedure using 'install' command.""" 120 121 src = os.path.join(self.cfg['start_dir'], 'install') 122 123 # make sure install script is executable 124 adjust_permissions(src, stat.S_IXUSR) 125 126 if LooseVersion(self.version) >= LooseVersion('2016b'): 127 perm_dirs = [os.path.join(self.cfg['start_dir'], 'bin', 'glnxa64')] 128 if LooseVersion(self.version) < LooseVersion('2021b'): 129 jdir = os.path.join(self.cfg['start_dir'], 'sys', 'java', 'jre', 'glnxa64', 'jre', 'bin') 130 perm_dirs.append(jdir) 131 for perm_dir in perm_dirs: 132 adjust_permissions(perm_dir, stat.S_IXUSR) 133 134 # make sure $DISPLAY is not defined, which may lead to (hard to trace) problems 135 # this is a workaround for not being able to specify --nodisplay to the install scripts 136 if 'DISPLAY' in os.environ: 137 os.environ.pop('DISPLAY') 138 139 if '_JAVA_OPTIONS' not in self.cfg['preinstallopts']: 140 java_opts = 'export _JAVA_OPTIONS="%s" && ' % self.cfg['java_options'] 141 self.cfg['preinstallopts'] = java_opts + self.cfg['preinstallopts'] 142 if LooseVersion(self.version) >= LooseVersion('2016b'): 143 change_dir(self.builddir) 144 145 # Build the cmd string 146 cmdlist = [ 147 self.cfg['preinstallopts'], 148 src, 149 '-inputFile', 150 self.configfile, 151 ] 152 if LooseVersion(self.version) < LooseVersion('2020a'): 153 # MATLAB installers < 2020a ignore $TMPDIR (always use /tmp) and might need a large tmpdir 154 tmpdir = tempfile.mkdtemp() 155 cmdlist.extend([ 156 '-v', 157 '-tmpdir', 158 tmpdir, 159 ]) 160 cmdlist.append(self.cfg['installopts']) 161 cmd = ' '.join(cmdlist) 162 163 keys = self.cfg['key'] 164 if keys is None: 165 try: 166 keys = os.environ['EB_MATLAB_KEY'] 167 except KeyError: 168 raise EasyBuildError("The MATLAB install key is not set. 
This can be set either with the environment " 169 "variable EB_MATLAB_KEY or by the easyconfig variable 'key'.") 170 if isinstance(keys, string_type): 171 keys = keys.split(',') 172 173 # Compile the installation key regex outside of the loop 174 regkey = re.compile(br"^(# )?fileInstallationKey=.*", re.M) 175 176 # Run an install for each key 177 for i, key in enumerate(keys): 178 179 self.log.info('Installing MATLAB with key %s of %s', i + 1, len(keys)) 180 181 try: 182 config = read_file(self.configfile, mode='rb') 183 config = regkey.sub(b"fileInstallationKey=%s" % key.encode('utf-8'), config) 184 write_file(self.configfile, config) 185 186 except IOError as err: 187 raise EasyBuildError("Failed to update config file %s: %s", self.configfile, err) 188 189 (out, _) = run_cmd(cmd, log_all=True, simple=False) 190 191 # check installer output for known signs of trouble 192 patterns = [ 193 "Error: You have entered an invalid File Installation Key", 194 "Not a valid key", 195 "All selected products are already installed", 196 "The application encountered an unexpected error and needs to close", 197 "Error: Unable to write to", 198 "Exiting with status -\\d", 199 "End - Unsuccessful", 200 ] 201 202 for pattern in patterns: 203 regex = re.compile(pattern, re.I) 204 if regex.search(out): 205 raise EasyBuildError("Found error pattern '%s' in output of installation command '%s': %s", 206 regex.pattern, cmd, out) 207 with open(self.outputfile) as f: 208 if regex.search(f.read()): 209 raise EasyBuildError("Found error pattern '%s' in output file of installer", 210 regex.pattern) 211 212 def sanity_check_step(self): 213 """Custom sanity check for MATLAB.""" 214 custom_paths = { 215 'files': ["bin/matlab", "bin/glnxa64/MATLAB", "toolbox/local/classpath.txt"], 216 'dirs': ["java/jar"], 217 } 218 super(EB_MATLAB, self).sanity_check_step(custom_paths=custom_paths) 219 220 def make_module_extra(self): 221 """Extend PATH and set proper _JAVA_OPTIONS (e.g., -Xmx).""" 222 txt = super(EB_MATLAB, self).make_module_extra() 223 224 # make MATLAB runtime available 225 if LooseVersion(self.version) >= LooseVersion('2017a'): 226 for ldlibdir in ['runtime', 'bin', os.path.join('sys', 'os')]: 227 libdir = os.path.join(ldlibdir, 'glnxa64') 228 txt += self.module_generator.prepend_paths('LD_LIBRARY_PATH', libdir) 229 if self.cfg['java_options']: 230 txt += self.module_generator.set_environment('_JAVA_OPTIONS', self.cfg['java_options']) 231 return txt 232 [end of easybuild/easyblocks/m/matlab.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/easybuild/easyblocks/m/matlab.py b/easybuild/easyblocks/m/matlab.py --- a/easybuild/easyblocks/m/matlab.py +++ b/easybuild/easyblocks/m/matlab.py @@ -221,11 +221,6 @@ """Extend PATH and set proper _JAVA_OPTIONS (e.g., -Xmx).""" txt = super(EB_MATLAB, self).make_module_extra() - # make MATLAB runtime available - if LooseVersion(self.version) >= LooseVersion('2017a'): - for ldlibdir in ['runtime', 'bin', os.path.join('sys', 'os')]: - libdir = os.path.join(ldlibdir, 'glnxa64') - txt += self.module_generator.prepend_paths('LD_LIBRARY_PATH', libdir) if self.cfg['java_options']: txt += self.module_generator.set_environment('_JAVA_OPTIONS', self.cfg['java_options']) return txt
{"golden_diff": "diff --git a/easybuild/easyblocks/m/matlab.py b/easybuild/easyblocks/m/matlab.py\n--- a/easybuild/easyblocks/m/matlab.py\n+++ b/easybuild/easyblocks/m/matlab.py\n@@ -221,11 +221,6 @@\n \"\"\"Extend PATH and set proper _JAVA_OPTIONS (e.g., -Xmx).\"\"\"\n txt = super(EB_MATLAB, self).make_module_extra()\n \n- # make MATLAB runtime available\n- if LooseVersion(self.version) >= LooseVersion('2017a'):\n- for ldlibdir in ['runtime', 'bin', os.path.join('sys', 'os')]:\n- libdir = os.path.join(ldlibdir, 'glnxa64')\n- txt += self.module_generator.prepend_paths('LD_LIBRARY_PATH', libdir)\n if self.cfg['java_options']:\n txt += self.module_generator.set_environment('_JAVA_OPTIONS', self.cfg['java_options'])\n return txt\n", "issue": "MATLAB changes `$LD_LIBRARY_PATH`, breaks Qt apps\nI've been debugging an issue with an app that depends on the Qt5 module. The app won't start because it can't find Qt's platform plugins:\r\n\r\n```\r\nqt.qpa.plugin: Could not find the Qt platform plugin \"xcb\" in \"\"\r\n```\r\n\r\nI wrote a minimal Qt app to find the cause of this and it turns out to be MATLAB. More specifically, the fact that the MATLAB module prepends `LD_LIBRARY_PATH` with `$EBROOTMATLAB/bin/glnxa64`, a directory that contains tons of bundled MATLAB libraries, including their own versions of essential Qt5 libraries and many other unrelated libs which could potentially interfere with other modules too. \r\n\r\nThis change was introduced in #2008, presumably to make it easier for MCR compiled apps to load. For MATLAB itself this does not seem to be necessary because the `matlab` startup script sets `LD_LIBRARY_PATH` and other required environment variables on invocation. \r\n\r\nThe MATLAB [docs](https://www.mathworks.com/help/compiler/mcr-path-settings-for-run-time-deployment.html) cautions (bottom of the page) against setting LD_LIBRARY_PATH permanently on Linux due to this interference risk with other software. They suggest to \"run MATLAB Compiler applications using the generated shell script\" instead.\r\n\r\nI would propose to revert #2008 but would obviously like to hear @smoors and maybe other's opinion on this first.\r\n\r\n\r\n\n", "before_files": [{"content": "##\n# Copyright 2009-2023 Ghent University\n#\n# This file is part of EasyBuild,\n# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),\n# with support of Ghent University (http://ugent.be/hpc),\n# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),\n# Flemish Research Foundation (FWO) (http://www.fwo.be/en)\n# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).\n#\n# https://github.com/easybuilders/easybuild\n#\n# EasyBuild is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation v2.\n#\n# EasyBuild is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with EasyBuild. 
If not, see <http://www.gnu.org/licenses/>.\n##\n\"\"\"\nEasyBuild support for installing MATLAB, implemented as an easyblock\n\n@author: Stijn De Weirdt (Ghent University)\n@author: Dries Verdegem (Ghent University)\n@author: Kenneth Hoste (Ghent University)\n@author: Pieter De Baets (Ghent University)\n@author: Jens Timmerman (Ghent University)\n@author: Fotis Georgatos (Uni.Lu, NTUA)\n\"\"\"\nimport re\nimport os\nimport stat\nimport tempfile\n\nfrom distutils.version import LooseVersion\n\nfrom easybuild.easyblocks.generic.packedbinary import PackedBinary\nfrom easybuild.framework.easyconfig import CUSTOM\nfrom easybuild.tools.build_log import EasyBuildError\nfrom easybuild.tools.filetools import adjust_permissions, change_dir, copy_file, read_file, write_file\nfrom easybuild.tools.py2vs3 import string_type\nfrom easybuild.tools.run import run_cmd\n\n\nclass EB_MATLAB(PackedBinary):\n \"\"\"Support for installing MATLAB.\"\"\"\n\n def __init__(self, *args, **kwargs):\n \"\"\"Add extra config options specific to MATLAB.\"\"\"\n super(EB_MATLAB, self).__init__(*args, **kwargs)\n self.comp_fam = None\n self.configfile = os.path.join(self.builddir, 'my_installer_input.txt')\n self.outputfile = os.path.join(self.builddir, 'my_installer_output.txt')\n\n @staticmethod\n def extra_options():\n extra_vars = {\n 'java_options': ['-Xmx256m', \"$_JAVA_OPTIONS value set for install and in module file.\", CUSTOM],\n 'key': [None, \"Installation key(s), make one install for each key. Single key or a list of keys\", CUSTOM],\n }\n return PackedBinary.extra_options(extra_vars)\n\n def configure_step(self):\n \"\"\"Configure MATLAB installation: create license file.\"\"\"\n\n licfile = self.cfg['license_file']\n if licfile is None:\n licserv = self.cfg['license_server']\n if licserv is None:\n licserv = os.getenv('EB_MATLAB_LICENSE_SERVER', 'license.example.com')\n licport = self.cfg['license_server_port']\n if licport is None:\n licport = os.getenv('EB_MATLAB_LICENSE_SERVER_PORT', '00000')\n # create license file\n lictxt = '\\n'.join([\n \"SERVER %s 000000000000 %s\" % (licserv, licport),\n \"USE_SERVER\",\n ])\n\n licfile = os.path.join(self.builddir, 'matlab.lic')\n write_file(licfile, lictxt)\n\n try:\n copy_file(os.path.join(self.cfg['start_dir'], 'installer_input.txt'), self.configfile)\n adjust_permissions(self.configfile, stat.S_IWUSR)\n\n # read file in binary mode to avoid UTF-8 encoding issues when using Python 3,\n # due to non-UTF-8 characters...\n config = read_file(self.configfile, mode='rb')\n\n # use raw byte strings (must be 'br', not 'rb'),\n # required when using Python 3 because file was read in binary mode\n regdest = re.compile(br\"^# destinationFolder=.*\", re.M)\n regagree = re.compile(br\"^# agreeToLicense=.*\", re.M)\n regmode = re.compile(br\"^# mode=.*\", re.M)\n reglicpath = re.compile(br\"^# licensePath=.*\", re.M)\n regoutfile = re.compile(br\"^# outputFile=.*\", re.M)\n\n # must use byte-strings here when using Python 3, see above\n config = regdest.sub(b\"destinationFolder=%s\" % self.installdir.encode('utf-8'), config)\n config = regagree.sub(b\"agreeToLicense=Yes\", config)\n config = regmode.sub(b\"mode=silent\", config)\n config = reglicpath.sub(b\"licensePath=%s\" % licfile.encode('utf-8'), config)\n config = regoutfile.sub(b\"outputFile=%s\" % self.outputfile.encode('utf-8'), config)\n\n write_file(self.configfile, config)\n\n except IOError as err:\n raise EasyBuildError(\"Failed to create installation config file %s: %s\", self.configfile, err)\n\n 
self.log.debug('configuration file written to %s:\\n %s', self.configfile, config)\n\n def install_step(self):\n \"\"\"MATLAB install procedure using 'install' command.\"\"\"\n\n src = os.path.join(self.cfg['start_dir'], 'install')\n\n # make sure install script is executable\n adjust_permissions(src, stat.S_IXUSR)\n\n if LooseVersion(self.version) >= LooseVersion('2016b'):\n perm_dirs = [os.path.join(self.cfg['start_dir'], 'bin', 'glnxa64')]\n if LooseVersion(self.version) < LooseVersion('2021b'):\n jdir = os.path.join(self.cfg['start_dir'], 'sys', 'java', 'jre', 'glnxa64', 'jre', 'bin')\n perm_dirs.append(jdir)\n for perm_dir in perm_dirs:\n adjust_permissions(perm_dir, stat.S_IXUSR)\n\n # make sure $DISPLAY is not defined, which may lead to (hard to trace) problems\n # this is a workaround for not being able to specify --nodisplay to the install scripts\n if 'DISPLAY' in os.environ:\n os.environ.pop('DISPLAY')\n\n if '_JAVA_OPTIONS' not in self.cfg['preinstallopts']:\n java_opts = 'export _JAVA_OPTIONS=\"%s\" && ' % self.cfg['java_options']\n self.cfg['preinstallopts'] = java_opts + self.cfg['preinstallopts']\n if LooseVersion(self.version) >= LooseVersion('2016b'):\n change_dir(self.builddir)\n\n # Build the cmd string\n cmdlist = [\n self.cfg['preinstallopts'],\n src,\n '-inputFile',\n self.configfile,\n ]\n if LooseVersion(self.version) < LooseVersion('2020a'):\n # MATLAB installers < 2020a ignore $TMPDIR (always use /tmp) and might need a large tmpdir\n tmpdir = tempfile.mkdtemp()\n cmdlist.extend([\n '-v',\n '-tmpdir',\n tmpdir,\n ])\n cmdlist.append(self.cfg['installopts'])\n cmd = ' '.join(cmdlist)\n\n keys = self.cfg['key']\n if keys is None:\n try:\n keys = os.environ['EB_MATLAB_KEY']\n except KeyError:\n raise EasyBuildError(\"The MATLAB install key is not set. 
This can be set either with the environment \"\n \"variable EB_MATLAB_KEY or by the easyconfig variable 'key'.\")\n if isinstance(keys, string_type):\n keys = keys.split(',')\n\n # Compile the installation key regex outside of the loop\n regkey = re.compile(br\"^(# )?fileInstallationKey=.*\", re.M)\n\n # Run an install for each key\n for i, key in enumerate(keys):\n\n self.log.info('Installing MATLAB with key %s of %s', i + 1, len(keys))\n\n try:\n config = read_file(self.configfile, mode='rb')\n config = regkey.sub(b\"fileInstallationKey=%s\" % key.encode('utf-8'), config)\n write_file(self.configfile, config)\n\n except IOError as err:\n raise EasyBuildError(\"Failed to update config file %s: %s\", self.configfile, err)\n\n (out, _) = run_cmd(cmd, log_all=True, simple=False)\n\n # check installer output for known signs of trouble\n patterns = [\n \"Error: You have entered an invalid File Installation Key\",\n \"Not a valid key\",\n \"All selected products are already installed\",\n \"The application encountered an unexpected error and needs to close\",\n \"Error: Unable to write to\",\n \"Exiting with status -\\\\d\",\n \"End - Unsuccessful\",\n ]\n\n for pattern in patterns:\n regex = re.compile(pattern, re.I)\n if regex.search(out):\n raise EasyBuildError(\"Found error pattern '%s' in output of installation command '%s': %s\",\n regex.pattern, cmd, out)\n with open(self.outputfile) as f:\n if regex.search(f.read()):\n raise EasyBuildError(\"Found error pattern '%s' in output file of installer\",\n regex.pattern)\n\n def sanity_check_step(self):\n \"\"\"Custom sanity check for MATLAB.\"\"\"\n custom_paths = {\n 'files': [\"bin/matlab\", \"bin/glnxa64/MATLAB\", \"toolbox/local/classpath.txt\"],\n 'dirs': [\"java/jar\"],\n }\n super(EB_MATLAB, self).sanity_check_step(custom_paths=custom_paths)\n\n def make_module_extra(self):\n \"\"\"Extend PATH and set proper _JAVA_OPTIONS (e.g., -Xmx).\"\"\"\n txt = super(EB_MATLAB, self).make_module_extra()\n\n # make MATLAB runtime available\n if LooseVersion(self.version) >= LooseVersion('2017a'):\n for ldlibdir in ['runtime', 'bin', os.path.join('sys', 'os')]:\n libdir = os.path.join(ldlibdir, 'glnxa64')\n txt += self.module_generator.prepend_paths('LD_LIBRARY_PATH', libdir)\n if self.cfg['java_options']:\n txt += self.module_generator.set_environment('_JAVA_OPTIONS', self.cfg['java_options'])\n return txt\n", "path": "easybuild/easyblocks/m/matlab.py"}]}
3,800
217
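An aside on the mechanism behind the record above: the dynamic loader walks `LD_LIBRARY_PATH` left to right, so once a module file prepends MATLAB's bundled `glnxa64` directory, any Qt application started in that environment resolves `libQt5Core.so.5` (and, through it, the "xcb" platform plugin) from MATLAB's copies rather than the Qt5 module's. Below is a minimal standard-library sketch of that ordering; the directory paths are illustrative placeholders, not taken from a real install.

```python
import os

def first_provider(libname, search_path):
    """Return the first search-path entry that contains libname, mimicking the
    loader's left-to-right scan of LD_LIBRARY_PATH."""
    for entry in search_path.split(os.pathsep):
        if entry and os.path.isfile(os.path.join(entry, libname)):
            return entry
    return None

# Placeholder paths for illustration only.
qt_libdir = "/apps/Qt5/5.14.1/lib"
matlab_libdir = "/apps/MATLAB/2020a/bin/glnxa64"

clean_path = qt_libdir
shadowed_path = os.pathsep.join([matlab_libdir, qt_libdir])

print("without MATLAB prepended:", first_provider("libQt5Core.so.5", clean_path))
print("with MATLAB prepended:   ", first_provider("libQt5Core.so.5", shadowed_path))
```

This is also why the MathWorks guidance quoted in the issue recommends launching deployed MCR applications through their generated wrapper scripts, which set the variable for a single process instead of baking it into the module environment.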
gh_patches_debug_23707
rasdani/github-patches
git_diff
dbt-labs__dbt-core-1656
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> dbt sometimes writes a `.user.yml` and ignores configs ## Issue If you pass dbt a relative path to `--profiles-dir` or the `DBT_PROFILES_DIR` environment variable and execute from a subdirectory, dbt will look for the user cookie and profiles.yml in different places (the latter correct, the former incorrect). ### Results What happened? What did you expect to happen? I got a `./profiles/.user.yml` and my settings were ignored. I expected my settings to be respected or to receive an error, and to not have a new file. Suggested fixes: My preference: don't allow users to run dbt from subdirectories. Other option: defer loading of the user config until after we've found `dbt_project.yml`. ### System information The output of `dbt --version`: ``` 0.14.0 ``` The operating system you're running on: Any The python version you're using (probably the output of `python --version`) Any ### Steps to reproduce In as much detail as possible, please provide steps to reproduce the issue. Sample data that triggers the issue, example models, etc are all very helpful here. 1) Make a dbt project in `${project_root}` 2) put your profiles.yml in `${project_root}/profiles` 3) Make a subdirectory (`${project_root}/models`) and cd into it 4) Run dbt with the relative path to the profiles directory as an argument to `--profiles-dir`: `dbt run --profiles-dir profiles` 5) You will now have a `${project_root}/models/profiles/.user.yml` with cookie information. Also, any settings in your `profiles.yml` won't be respected. </issue> <code> [start of core/dbt/tracking.py] 1 from dbt.logger import GLOBAL_LOGGER as logger 2 from dbt import version as dbt_version 3 from snowplow_tracker import Subject, Tracker, Emitter, logger as sp_logger 4 from snowplow_tracker import SelfDescribingJson 5 from datetime import datetime 6 7 from dbt.adapters.factory import get_adapter 8 9 import pytz 10 import platform 11 import uuid 12 import requests 13 import yaml 14 import os 15 16 import dbt.clients.system 17 18 sp_logger.setLevel(100) 19 20 COLLECTOR_URL = "fishtownanalytics.sinter-collect.com" 21 COLLECTOR_PROTOCOL = "https" 22 23 INVOCATION_SPEC = 'iglu:com.dbt/invocation/jsonschema/1-0-1' 24 PLATFORM_SPEC = 'iglu:com.dbt/platform/jsonschema/1-0-0' 25 RUN_MODEL_SPEC = 'iglu:com.dbt/run_model/jsonschema/1-0-1' 26 INVOCATION_ENV_SPEC = 'iglu:com.dbt/invocation_env/jsonschema/1-0-0' 27 PACKAGE_INSTALL_SPEC = 'iglu:com.dbt/package_install/jsonschema/1-0-0' 28 RPC_REQUEST_SPEC = 'iglu:com.dbt/rpc_request/jsonschema/1-0-1' 29 30 DBT_INVOCATION_ENV = 'DBT_INVOCATION_ENV' 31 32 33 class TimeoutEmitter(Emitter): 34 def __init__(self): 35 super(TimeoutEmitter, self).__init__(COLLECTOR_URL, 36 protocol=COLLECTOR_PROTOCOL, 37 buffer_size=1, 38 on_failure=self.handle_failure) 39 40 @staticmethod 41 def handle_failure(num_ok, unsent): 42 # num_ok will always be 0, unsent will always be 1 entry long, because 43 # the buffer is length 1, so not much to talk about 44 logger.warning('Error sending message, disabling tracking') 45 do_not_track() 46 47 def http_get(self, payload): 48 sp_logger.info("Sending GET request to %s..." 
% self.endpoint) 49 sp_logger.debug("Payload: %s" % payload) 50 r = requests.get(self.endpoint, params=payload, timeout=5.0) 51 52 msg = "GET request finished with status code: " + str(r.status_code) 53 if self.is_good_status_code(r.status_code): 54 sp_logger.info(msg) 55 else: 56 sp_logger.warn(msg) 57 return r 58 59 60 emitter = TimeoutEmitter() 61 tracker = Tracker(emitter, namespace="cf", app_id="dbt") 62 63 active_user = None 64 65 66 class User(object): 67 68 def __init__(self, cookie_dir): 69 self.do_not_track = True 70 self.cookie_dir = cookie_dir 71 72 self.id = None 73 self.invocation_id = str(uuid.uuid4()) 74 self.run_started_at = datetime.now(tz=pytz.utc) 75 76 def state(self): 77 return "do not track" if self.do_not_track else "tracking" 78 79 @property 80 def cookie_path(self): 81 return os.path.join(self.cookie_dir, '.user.yml') 82 83 def initialize(self): 84 self.do_not_track = False 85 86 cookie = self.get_cookie() 87 self.id = cookie.get('id') 88 89 subject = Subject() 90 subject.set_user_id(self.id) 91 tracker.set_subject(subject) 92 93 def set_cookie(self): 94 user = {"id": str(uuid.uuid4())} 95 96 dbt.clients.system.make_directory(self.cookie_dir) 97 98 with open(self.cookie_path, "w") as fh: 99 yaml.dump(user, fh) 100 101 return user 102 103 def get_cookie(self): 104 if not os.path.isfile(self.cookie_path): 105 user = self.set_cookie() 106 else: 107 with open(self.cookie_path, "r") as fh: 108 try: 109 user = yaml.safe_load(fh) 110 if user is None: 111 user = self.set_cookie() 112 except yaml.reader.ReaderError: 113 user = self.set_cookie() 114 return user 115 116 117 def get_run_type(args): 118 return 'regular' 119 120 121 def get_invocation_context(user, config, args): 122 try: 123 adapter_type = get_adapter(config).type() 124 except Exception: 125 adapter_type = None 126 127 return { 128 "project_id": None if config is None else config.hashed_name(), 129 "user_id": user.id, 130 "invocation_id": user.invocation_id, 131 132 "command": args.which, 133 "options": None, 134 "version": str(dbt_version.installed), 135 136 "run_type": get_run_type(args), 137 "adapter_type": adapter_type, 138 } 139 140 141 def get_invocation_start_context(user, config, args): 142 data = get_invocation_context(user, config, args) 143 144 start_data = { 145 "progress": "start", 146 "result_type": None, 147 "result": None 148 } 149 150 data.update(start_data) 151 return SelfDescribingJson(INVOCATION_SPEC, data) 152 153 154 def get_invocation_end_context(user, config, args, result_type): 155 data = get_invocation_context(user, config, args) 156 157 start_data = { 158 "progress": "end", 159 "result_type": result_type, 160 "result": None 161 } 162 163 data.update(start_data) 164 return SelfDescribingJson(INVOCATION_SPEC, data) 165 166 167 def get_invocation_invalid_context(user, config, args, result_type): 168 data = get_invocation_context(user, config, args) 169 170 start_data = { 171 "progress": "invalid", 172 "result_type": result_type, 173 "result": None 174 } 175 176 data.update(start_data) 177 return SelfDescribingJson(INVOCATION_SPEC, data) 178 179 180 def get_platform_context(): 181 data = { 182 "platform": platform.platform(), 183 "python": platform.python_version(), 184 "python_version": platform.python_implementation(), 185 } 186 187 return SelfDescribingJson(PLATFORM_SPEC, data) 188 189 190 def get_dbt_env_context(): 191 default = 'manual' 192 193 dbt_invocation_env = os.getenv(DBT_INVOCATION_ENV, default) 194 if dbt_invocation_env == '': 195 dbt_invocation_env = default 196 197 data = 
{ 198 "environment": dbt_invocation_env, 199 } 200 201 return SelfDescribingJson(INVOCATION_ENV_SPEC, data) 202 203 204 def track(user, *args, **kwargs): 205 if user.do_not_track: 206 return 207 else: 208 logger.debug("Sending event: {}".format(kwargs)) 209 try: 210 tracker.track_struct_event(*args, **kwargs) 211 except Exception: 212 logger.debug( 213 "An error was encountered while trying to send an event" 214 ) 215 216 217 def track_invocation_start(config=None, args=None): 218 context = [ 219 get_invocation_start_context(active_user, config, args), 220 get_platform_context(), 221 get_dbt_env_context() 222 ] 223 224 track( 225 active_user, 226 category="dbt", 227 action='invocation', 228 label='start', 229 context=context 230 ) 231 232 233 def track_model_run(options): 234 context = [SelfDescribingJson(RUN_MODEL_SPEC, options)] 235 236 track( 237 active_user, 238 category="dbt", 239 action='run_model', 240 label=active_user.invocation_id, 241 context=context 242 ) 243 244 245 def track_rpc_request(options): 246 context = [SelfDescribingJson(RPC_REQUEST_SPEC, options)] 247 248 track( 249 active_user, 250 category="dbt", 251 action='rpc_request', 252 label=active_user.invocation_id, 253 context=context 254 ) 255 256 257 def track_package_install(options): 258 context = [SelfDescribingJson(PACKAGE_INSTALL_SPEC, options)] 259 track( 260 active_user, 261 category="dbt", 262 action='package', 263 label=active_user.invocation_id, 264 property_='install', 265 context=context 266 ) 267 268 269 def track_invocation_end( 270 config=None, args=None, result_type=None 271 ): 272 user = active_user 273 context = [ 274 get_invocation_end_context(user, config, args, result_type), 275 get_platform_context(), 276 get_dbt_env_context() 277 ] 278 track( 279 active_user, 280 category="dbt", 281 action='invocation', 282 label='end', 283 context=context 284 ) 285 286 287 def track_invalid_invocation( 288 config=None, args=None, result_type=None 289 ): 290 291 user = active_user 292 invocation_context = get_invocation_invalid_context( 293 user, 294 config, 295 args, 296 result_type 297 ) 298 299 context = [ 300 invocation_context, 301 get_platform_context(), 302 get_dbt_env_context() 303 ] 304 305 track( 306 active_user, 307 category="dbt", 308 action='invocation', 309 label='invalid', 310 context=context 311 ) 312 313 314 def flush(): 315 logger.debug("Flushing usage events") 316 tracker.flush() 317 318 319 def do_not_track(): 320 global active_user 321 active_user = User(None) 322 323 324 def initialize_tracking(cookie_dir): 325 global active_user 326 active_user = User(cookie_dir) 327 try: 328 active_user.initialize() 329 except Exception: 330 logger.debug('Got an exception trying to initialize tracking', 331 exc_info=True) 332 active_user = User(None) 333 [end of core/dbt/tracking.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/core/dbt/tracking.py b/core/dbt/tracking.py --- a/core/dbt/tracking.py +++ b/core/dbt/tracking.py @@ -13,8 +13,6 @@ import yaml import os -import dbt.clients.system - sp_logger.setLevel(100) COLLECTOR_URL = "fishtownanalytics.sinter-collect.com" @@ -91,12 +89,22 @@ tracker.set_subject(subject) def set_cookie(self): - user = {"id": str(uuid.uuid4())} + # If the user points dbt to a profile directory which exists AND + # contains a profiles.yml file, then we can set a cookie. If the + # specified folder does not exist, or if there is not a profiles.yml + # file in this folder, then an inconsistent cookie can be used. This + # will change in every dbt invocation until the user points to a + # profile dir file which contains a valid profiles.yml file. + # + # See: https://github.com/fishtown-analytics/dbt/issues/1645 - dbt.clients.system.make_directory(self.cookie_dir) + user = {"id": str(uuid.uuid4())} - with open(self.cookie_path, "w") as fh: - yaml.dump(user, fh) + cookie_path = os.path.abspath(self.cookie_dir) + profiles_file = os.path.join(cookie_path, 'profiles.yml') + if os.path.exists(cookie_path) and os.path.exists(profiles_file): + with open(self.cookie_path, "w") as fh: + yaml.dump(user, fh) return user
{"golden_diff": "diff --git a/core/dbt/tracking.py b/core/dbt/tracking.py\n--- a/core/dbt/tracking.py\n+++ b/core/dbt/tracking.py\n@@ -13,8 +13,6 @@\n import yaml\n import os\n \n-import dbt.clients.system\n-\n sp_logger.setLevel(100)\n \n COLLECTOR_URL = \"fishtownanalytics.sinter-collect.com\"\n@@ -91,12 +89,22 @@\n tracker.set_subject(subject)\n \n def set_cookie(self):\n- user = {\"id\": str(uuid.uuid4())}\n+ # If the user points dbt to a profile directory which exists AND\n+ # contains a profiles.yml file, then we can set a cookie. If the\n+ # specified folder does not exist, or if there is not a profiles.yml\n+ # file in this folder, then an inconsistent cookie can be used. This\n+ # will change in every dbt invocation until the user points to a\n+ # profile dir file which contains a valid profiles.yml file.\n+ #\n+ # See: https://github.com/fishtown-analytics/dbt/issues/1645\n \n- dbt.clients.system.make_directory(self.cookie_dir)\n+ user = {\"id\": str(uuid.uuid4())}\n \n- with open(self.cookie_path, \"w\") as fh:\n- yaml.dump(user, fh)\n+ cookie_path = os.path.abspath(self.cookie_dir)\n+ profiles_file = os.path.join(cookie_path, 'profiles.yml')\n+ if os.path.exists(cookie_path) and os.path.exists(profiles_file):\n+ with open(self.cookie_path, \"w\") as fh:\n+ yaml.dump(user, fh)\n \n return user\n", "issue": "dbt sometimes writes a `.user.yml` and ignores configs\n## Issue\r\nIf you pass dbt a relative path to `--profiles-dir` or the `DBT_PROFILES_DIR` environment variable and execute from a subdirectory, dbt will look for the user cookie and profiles.yml in different places (the latter correct, the former incorrect).\r\n\r\n### Results\r\nWhat happened? What did you expect to happen?\r\nI got a `./profiles/.user.yml` and my settings were ignored. I expected my settings to be respected or to receive an error, and to not have a new file.\r\n\r\nSuggested fixes:\r\nMy preference: don't allow users to run dbt from subdirectories.\r\nOther option: defer loading of the user config until after we've found `dbt_project.yml`.\r\n\r\n### System information\r\nThe output of `dbt --version`:\r\n```\r\n0.14.0\r\n```\r\n\r\nThe operating system you're running on:\r\nAny\r\n\r\nThe python version you're using (probably the output of `python --version`)\r\nAny\r\n\r\n### Steps to reproduce\r\nIn as much detail as possible, please provide steps to reproduce the issue. Sample data that triggers the issue, example models, etc are all very helpful here.\r\n\r\n1) Make a dbt project in `${project_root}`\r\n2) put your profiles.yml in `${project_root}/profiles`\r\n3) Make a subdirectory (`${project_root}/models`) and cd into it\r\n4) Run dbt with the relative path to the profiles directory as an argument to `--profiles-dir`: `dbt run --profiles-dir profiles`\r\n5) You will now have a `${project_root}/models/profiles/.user.yml` with cookie information. 
Also, any settings in your `profiles.yml` won't be respected.\r\n\n", "before_files": [{"content": "from dbt.logger import GLOBAL_LOGGER as logger\nfrom dbt import version as dbt_version\nfrom snowplow_tracker import Subject, Tracker, Emitter, logger as sp_logger\nfrom snowplow_tracker import SelfDescribingJson\nfrom datetime import datetime\n\nfrom dbt.adapters.factory import get_adapter\n\nimport pytz\nimport platform\nimport uuid\nimport requests\nimport yaml\nimport os\n\nimport dbt.clients.system\n\nsp_logger.setLevel(100)\n\nCOLLECTOR_URL = \"fishtownanalytics.sinter-collect.com\"\nCOLLECTOR_PROTOCOL = \"https\"\n\nINVOCATION_SPEC = 'iglu:com.dbt/invocation/jsonschema/1-0-1'\nPLATFORM_SPEC = 'iglu:com.dbt/platform/jsonschema/1-0-0'\nRUN_MODEL_SPEC = 'iglu:com.dbt/run_model/jsonschema/1-0-1'\nINVOCATION_ENV_SPEC = 'iglu:com.dbt/invocation_env/jsonschema/1-0-0'\nPACKAGE_INSTALL_SPEC = 'iglu:com.dbt/package_install/jsonschema/1-0-0'\nRPC_REQUEST_SPEC = 'iglu:com.dbt/rpc_request/jsonschema/1-0-1'\n\nDBT_INVOCATION_ENV = 'DBT_INVOCATION_ENV'\n\n\nclass TimeoutEmitter(Emitter):\n def __init__(self):\n super(TimeoutEmitter, self).__init__(COLLECTOR_URL,\n protocol=COLLECTOR_PROTOCOL,\n buffer_size=1,\n on_failure=self.handle_failure)\n\n @staticmethod\n def handle_failure(num_ok, unsent):\n # num_ok will always be 0, unsent will always be 1 entry long, because\n # the buffer is length 1, so not much to talk about\n logger.warning('Error sending message, disabling tracking')\n do_not_track()\n\n def http_get(self, payload):\n sp_logger.info(\"Sending GET request to %s...\" % self.endpoint)\n sp_logger.debug(\"Payload: %s\" % payload)\n r = requests.get(self.endpoint, params=payload, timeout=5.0)\n\n msg = \"GET request finished with status code: \" + str(r.status_code)\n if self.is_good_status_code(r.status_code):\n sp_logger.info(msg)\n else:\n sp_logger.warn(msg)\n return r\n\n\nemitter = TimeoutEmitter()\ntracker = Tracker(emitter, namespace=\"cf\", app_id=\"dbt\")\n\nactive_user = None\n\n\nclass User(object):\n\n def __init__(self, cookie_dir):\n self.do_not_track = True\n self.cookie_dir = cookie_dir\n\n self.id = None\n self.invocation_id = str(uuid.uuid4())\n self.run_started_at = datetime.now(tz=pytz.utc)\n\n def state(self):\n return \"do not track\" if self.do_not_track else \"tracking\"\n\n @property\n def cookie_path(self):\n return os.path.join(self.cookie_dir, '.user.yml')\n\n def initialize(self):\n self.do_not_track = False\n\n cookie = self.get_cookie()\n self.id = cookie.get('id')\n\n subject = Subject()\n subject.set_user_id(self.id)\n tracker.set_subject(subject)\n\n def set_cookie(self):\n user = {\"id\": str(uuid.uuid4())}\n\n dbt.clients.system.make_directory(self.cookie_dir)\n\n with open(self.cookie_path, \"w\") as fh:\n yaml.dump(user, fh)\n\n return user\n\n def get_cookie(self):\n if not os.path.isfile(self.cookie_path):\n user = self.set_cookie()\n else:\n with open(self.cookie_path, \"r\") as fh:\n try:\n user = yaml.safe_load(fh)\n if user is None:\n user = self.set_cookie()\n except yaml.reader.ReaderError:\n user = self.set_cookie()\n return user\n\n\ndef get_run_type(args):\n return 'regular'\n\n\ndef get_invocation_context(user, config, args):\n try:\n adapter_type = get_adapter(config).type()\n except Exception:\n adapter_type = None\n\n return {\n \"project_id\": None if config is None else config.hashed_name(),\n \"user_id\": user.id,\n \"invocation_id\": user.invocation_id,\n\n \"command\": args.which,\n \"options\": None,\n \"version\": 
str(dbt_version.installed),\n\n \"run_type\": get_run_type(args),\n \"adapter_type\": adapter_type,\n }\n\n\ndef get_invocation_start_context(user, config, args):\n data = get_invocation_context(user, config, args)\n\n start_data = {\n \"progress\": \"start\",\n \"result_type\": None,\n \"result\": None\n }\n\n data.update(start_data)\n return SelfDescribingJson(INVOCATION_SPEC, data)\n\n\ndef get_invocation_end_context(user, config, args, result_type):\n data = get_invocation_context(user, config, args)\n\n start_data = {\n \"progress\": \"end\",\n \"result_type\": result_type,\n \"result\": None\n }\n\n data.update(start_data)\n return SelfDescribingJson(INVOCATION_SPEC, data)\n\n\ndef get_invocation_invalid_context(user, config, args, result_type):\n data = get_invocation_context(user, config, args)\n\n start_data = {\n \"progress\": \"invalid\",\n \"result_type\": result_type,\n \"result\": None\n }\n\n data.update(start_data)\n return SelfDescribingJson(INVOCATION_SPEC, data)\n\n\ndef get_platform_context():\n data = {\n \"platform\": platform.platform(),\n \"python\": platform.python_version(),\n \"python_version\": platform.python_implementation(),\n }\n\n return SelfDescribingJson(PLATFORM_SPEC, data)\n\n\ndef get_dbt_env_context():\n default = 'manual'\n\n dbt_invocation_env = os.getenv(DBT_INVOCATION_ENV, default)\n if dbt_invocation_env == '':\n dbt_invocation_env = default\n\n data = {\n \"environment\": dbt_invocation_env,\n }\n\n return SelfDescribingJson(INVOCATION_ENV_SPEC, data)\n\n\ndef track(user, *args, **kwargs):\n if user.do_not_track:\n return\n else:\n logger.debug(\"Sending event: {}\".format(kwargs))\n try:\n tracker.track_struct_event(*args, **kwargs)\n except Exception:\n logger.debug(\n \"An error was encountered while trying to send an event\"\n )\n\n\ndef track_invocation_start(config=None, args=None):\n context = [\n get_invocation_start_context(active_user, config, args),\n get_platform_context(),\n get_dbt_env_context()\n ]\n\n track(\n active_user,\n category=\"dbt\",\n action='invocation',\n label='start',\n context=context\n )\n\n\ndef track_model_run(options):\n context = [SelfDescribingJson(RUN_MODEL_SPEC, options)]\n\n track(\n active_user,\n category=\"dbt\",\n action='run_model',\n label=active_user.invocation_id,\n context=context\n )\n\n\ndef track_rpc_request(options):\n context = [SelfDescribingJson(RPC_REQUEST_SPEC, options)]\n\n track(\n active_user,\n category=\"dbt\",\n action='rpc_request',\n label=active_user.invocation_id,\n context=context\n )\n\n\ndef track_package_install(options):\n context = [SelfDescribingJson(PACKAGE_INSTALL_SPEC, options)]\n track(\n active_user,\n category=\"dbt\",\n action='package',\n label=active_user.invocation_id,\n property_='install',\n context=context\n )\n\n\ndef track_invocation_end(\n config=None, args=None, result_type=None\n):\n user = active_user\n context = [\n get_invocation_end_context(user, config, args, result_type),\n get_platform_context(),\n get_dbt_env_context()\n ]\n track(\n active_user,\n category=\"dbt\",\n action='invocation',\n label='end',\n context=context\n )\n\n\ndef track_invalid_invocation(\n config=None, args=None, result_type=None\n):\n\n user = active_user\n invocation_context = get_invocation_invalid_context(\n user,\n config,\n args,\n result_type\n )\n\n context = [\n invocation_context,\n get_platform_context(),\n get_dbt_env_context()\n ]\n\n track(\n active_user,\n category=\"dbt\",\n action='invocation',\n label='invalid',\n context=context\n )\n\n\ndef flush():\n 
logger.debug(\"Flushing usage events\")\n tracker.flush()\n\n\ndef do_not_track():\n global active_user\n active_user = User(None)\n\n\ndef initialize_tracking(cookie_dir):\n global active_user\n active_user = User(cookie_dir)\n try:\n active_user.initialize()\n except Exception:\n logger.debug('Got an exception trying to initialize tracking',\n exc_info=True)\n active_user = User(None)\n", "path": "core/dbt/tracking.py"}]}
3,753
375
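The root cause in the record above is that a relative `--profiles-dir` (or `DBT_PROFILES_DIR`) gets resolved against whatever directory dbt happens to be run from, after which the tracking code creates that directory just to store its `.user.yml` cookie. The golden diff instead writes the cookie only when the target already looks like a genuine profiles directory; a stripped-down sketch of that guard follows (the function name is illustrative, not dbt's internal API).

```python
import os
import uuid

import yaml

def maybe_write_cookie(profiles_dir):
    """Create the anonymous-usage cookie only when profiles_dir already exists
    and already contains a profiles.yml; otherwise return an unpersisted id."""
    user = {"id": str(uuid.uuid4())}
    cookie_dir = os.path.abspath(profiles_dir)
    profiles_file = os.path.join(cookie_dir, "profiles.yml")
    if os.path.isdir(cookie_dir) and os.path.isfile(profiles_file):
        with open(os.path.join(cookie_dir, ".user.yml"), "w") as fh:
            yaml.dump(user, fh)
    return user
```

The trade-off is spelled out in the patch's own comment: until the user points at a directory that really holds a profiles.yml, the id is regenerated on every invocation rather than being written somewhere misleading.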
gh_patches_debug_17218
rasdani/github-patches
git_diff
graspologic-org__graspologic-336
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> improve nonpar by calling mgcpy's two-sample test the current nonpar implementation seems "correct", meaning it seems to do what they did in the nonpar paper. however, there is a much better way to implement it, and we'd like to write a short paper about that. specifically, in the current implementation, it uses the biased MMD, which is equivalent to the biased Dcorr. better would be to use the unbiased MMD/Dcorr, and even better would be to use the unbiased MGC. possibly further better would be to use RF, though we don't have that fully implemented just yet. </issue> <code> [start of setup.py] 1 import os 2 import sys 3 from setuptools import setup, find_packages 4 from sys import platform 5 6 PACKAGE_NAME = "graspy" 7 DESCRIPTION = "A set of python modules for graph statistics" 8 with open("README.md", "r") as f: 9 LONG_DESCRIPTION = f.read() 10 AUTHOR = ("Eric Bridgeford, Jaewon Chung, Benjamin Pedigo, Bijan Varjavand",) 11 AUTHOR_EMAIL = "[email protected]" 12 URL = "https://github.com/neurodata/graspy" 13 MINIMUM_PYTHON_VERSION = 3, 5 # Minimum of Python 3.5 14 REQUIRED_PACKAGES = [ 15 "networkx>=2.1", 16 "numpy>=1.8.1", 17 "scikit-learn>=0.19.1", 18 "scipy>=1.1.0", 19 "seaborn>=0.9.0", 20 "matplotlib>=3.0.0", 21 ] 22 23 # Find GraSPy version. 24 PROJECT_PATH = os.path.dirname(os.path.abspath(__file__)) 25 for line in open(os.path.join(PROJECT_PATH, "graspy", "__init__.py")): 26 if line.startswith("__version__ = "): 27 VERSION = line.strip().split()[2][1:-1] 28 29 30 def check_python_version(): 31 """Exit when the Python version is too low.""" 32 if sys.version_info < MINIMUM_PYTHON_VERSION: 33 sys.exit("Python {}.{}+ is required.".format(*MINIMUM_PYTHON_VERSION)) 34 35 36 check_python_version() 37 38 setup( 39 name=PACKAGE_NAME, 40 version=VERSION, 41 description=DESCRIPTION, 42 long_description=LONG_DESCRIPTION, 43 long_description_content_type="text/markdown", 44 author=AUTHOR, 45 author_email=AUTHOR_EMAIL, 46 install_requires=REQUIRED_PACKAGES, 47 url=URL, 48 license="Apache License 2.0", 49 classifiers=[ 50 "Development Status :: 3 - Alpha", 51 "Intended Audience :: Science/Research", 52 "Topic :: Scientific/Engineering :: Mathematics", 53 "License :: OSI Approved :: Apache Software License", 54 "Programming Language :: Python :: 3", 55 "Programming Language :: Python :: 3.5", 56 "Programming Language :: Python :: 3.6", 57 "Programming Language :: Python :: 3.7", 58 ], 59 packages=find_packages(), 60 include_package_data=True, 61 ) 62 [end of setup.py] [start of docs/conf.py] 1 # -*- coding: utf-8 -*- 2 # 3 # Configuration file for the Sphinx documentation builder. 4 # 5 # This file does only contain a selection of the most common options. For a 6 # full list see the documentation: 7 # http://www.sphinx-doc.org/en/master/config 8 9 # -- Path setup -------------------------------------------------------------- 10 11 # If extensions (or modules to document with autodoc) are in another directory, 12 # add these directories to sys.path here. If the directory is relative to the 13 # documentation root, use os.path.abspath to make it absolute, like shown here. 14 # 15 import os 16 import sys 17 18 sys.path.insert(0, os.path.abspath("..")) 19 20 # -- Project information ----------------------------------------------------- 21 22 project = "GraSPy" 23 copyright = "2018" 24 authors = u"NeuroData" 25 26 # The short X.Y version 27 # Find GraSPy version. 
28 PROJECT_PATH = os.path.dirname(os.path.abspath(__file__)) 29 for line in open(os.path.join(PROJECT_PATH, "..", "graspy", "__init__.py")): 30 if line.startswith("__version__ = "): 31 version = line.strip().split()[2][1:-1] 32 33 # The full version, including alpha/beta/rc tags 34 release = "alpha" 35 36 # -- Extension configuration ------------------------------------------------- 37 extensions = [ 38 "sphinx.ext.autodoc", 39 "sphinx.ext.autosummary", 40 "sphinx.ext.todo", 41 "sphinx.ext.viewcode", 42 "sphinx.ext.mathjax", 43 "numpydoc", 44 "sphinx.ext.ifconfig", 45 "sphinx.ext.githubpages", 46 "sphinxcontrib.rawfiles", 47 "nbsphinx", 48 "sphinx.ext.intersphinx", 49 ] 50 51 # -- sphinxcontrib.rawfiles 52 rawfiles = ["CNAME"] 53 54 # -- numpydoc 55 # Below is needed to prevent errors 56 numpydoc_show_class_members = False 57 numpydoc_attributes_as_param_list = True 58 numpydoc_use_blockquotes = True 59 60 # -- sphinx.ext.autosummary 61 autosummary_generate = True 62 63 # -- sphinx.ext.autodoc 64 autoclass_content = "both" 65 autodoc_default_flags = ["members", "inherited-members"] 66 autodoc_member_order = "bysource" # default is alphabetical 67 68 # -- sphinx.ext.intersphinx 69 intersphinx_mapping = { 70 "numpy": ("https://docs.scipy.org/doc/numpy", None), 71 "python": ("https://docs.python.org/3", None), 72 "scipy": ("https://docs.scipy.org/doc/scipy/reference", None), 73 "sklearn": ("http://scikit-learn.org/dev", None), 74 } 75 76 # -- sphinx options ---------------------------------------------------------- 77 source_suffix = ".rst" 78 exclude_patterns = ["_build", "Thumbs.db", ".DS_Store", "**.ipynb_checkpoints"] 79 master_doc = "index" 80 source_encoding = "utf-8" 81 82 # -- Options for HTML output ------------------------------------------------- 83 # Add any paths that contain templates here, relative to this directory. 84 templates_path = ["_templates"] 85 html_static_path = ["_static"] 86 modindex_common_prefix = ["graspy."] 87 88 pygments_style = "sphinx" 89 smartquotes = False 90 91 # Use RTD Theme 92 import sphinx_rtd_theme 93 94 html_theme = "sphinx_rtd_theme" 95 html_theme_path = [sphinx_rtd_theme.get_html_theme_path()] 96 html_theme_options = { 97 #'includehidden': False, 98 "navigation_depth": 2, 99 "collapse_navigation": False, 100 "navigation_depth": 3, 101 } 102 103 # Custom sidebar templates, must be a dictionary that maps document names 104 # to template names. 105 # 106 # The default sidebars (for documents that don't match any pattern) are 107 # defined by theme itself. Builtin themes are using these templates by 108 # default: ``['localtoc.html', 'relations.html', 'sourcelink.html', 109 # 'searchbox.html']``. 110 # 111 # html_sidebars = {} 112 113 # -- Options for HTMLHelp output --------------------------------------------- 114 115 # Output file base name for HTML help builder. 116 htmlhelp_basename = "graspydoc" 117 118 # -- Options for LaTeX output ------------------------------------------------ 119 120 latex_elements = { 121 # The paper size ('letterpaper' or 'a4paper'). 122 # 123 # 'papersize': 'letterpaper', 124 # The font size ('10pt', '11pt' or '12pt'). 125 # 126 # 'pointsize': '10pt', 127 # Additional stuff for the LaTeX preamble. 128 # 129 # 'preamble': '', 130 # Latex figure (float) alignment 131 # 132 # 'figure_align': 'htbp', 133 } 134 135 # Grouping the document tree into LaTeX files. List of tuples 136 # (source start file, target name, title, 137 # author, documentclass [howto, manual, or own class]). 
138 latex_documents = [ 139 (master_doc, "graspy.tex", "GraSPy Documentation", authors, "manual") 140 ] 141 142 # -- Options for manual page output ------------------------------------------ 143 144 # One entry per manual page. List of tuples 145 # (source start file, name, description, authors, manual section). 146 man_pages = [(master_doc, "graspy", "graspy Documentation", [authors], 1)] 147 148 # -- Options for Texinfo output ---------------------------------------------- 149 150 # Grouping the document tree into Texinfo files. List of tuples 151 # (source start file, target name, title, author, 152 # dir menu entry, description, category) 153 texinfo_documents = [ 154 ( 155 master_doc, 156 "graspy", 157 "graspy Documentation", 158 authors, 159 "graspy", 160 "One line description of project.", 161 "Miscellaneous", 162 ) 163 ] 164 165 # -- Options for Epub output ------------------------------------------------- 166 167 # Bibliographic Dublin Core info. 168 epub_title = project 169 170 # The unique identifier of the text. This can be a ISBN number 171 # or the project homepage. 172 # 173 # epub_identifier = '' 174 175 # A unique identification for the text. 176 # 177 # epub_uid = '' 178 179 # A list of files that should not be packed into the epub file. 180 epub_exclude_files = ["search.html"] 181 [end of docs/conf.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/docs/conf.py b/docs/conf.py --- a/docs/conf.py +++ b/docs/conf.py @@ -71,6 +71,7 @@ "python": ("https://docs.python.org/3", None), "scipy": ("https://docs.scipy.org/doc/scipy/reference", None), "sklearn": ("http://scikit-learn.org/dev", None), + "hyppo": ("https://hyppo.neurodata.io", None), } # -- sphinx options ---------------------------------------------------------- diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -18,8 +18,10 @@ "scipy>=1.1.0", "seaborn>=0.9.0", "matplotlib>=3.0.0", + "hyppo>=0.1.2", ] + # Find GraSPy version. PROJECT_PATH = os.path.dirname(os.path.abspath(__file__)) for line in open(os.path.join(PROJECT_PATH, "graspy", "__init__.py")):
{"golden_diff": "diff --git a/docs/conf.py b/docs/conf.py\n--- a/docs/conf.py\n+++ b/docs/conf.py\n@@ -71,6 +71,7 @@\n \"python\": (\"https://docs.python.org/3\", None),\n \"scipy\": (\"https://docs.scipy.org/doc/scipy/reference\", None),\n \"sklearn\": (\"http://scikit-learn.org/dev\", None),\n+ \"hyppo\": (\"https://hyppo.neurodata.io\", None),\n }\n \n # -- sphinx options ----------------------------------------------------------\ndiff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -18,8 +18,10 @@\n \"scipy>=1.1.0\",\n \"seaborn>=0.9.0\",\n \"matplotlib>=3.0.0\",\n+ \"hyppo>=0.1.2\",\n ]\n \n+\n # Find GraSPy version.\n PROJECT_PATH = os.path.dirname(os.path.abspath(__file__))\n for line in open(os.path.join(PROJECT_PATH, \"graspy\", \"__init__.py\")):\n", "issue": "improve nonpar by calling mgcpy's two-sample test\nthe current nonpar implementation seems \"correct\",\r\nmeaning it seems to do what they did in the nonpar paper.\r\nhowever, there is a much better way to implement it,\r\nand we'd like to write a short paper about that.\r\nspecifically, in the current implementation, it uses the biased MMD, \r\nwhich is equivalent to the biased Dcorr.\r\nbetter would be to use the unbiased MMD/Dcorr,\r\nand even better would be to use the unbiased MGC.\r\npossibly further better would be to use RF, though we don't have that fully implemented just yet.\r\n\r\n\n", "before_files": [{"content": "import os\nimport sys\nfrom setuptools import setup, find_packages\nfrom sys import platform\n\nPACKAGE_NAME = \"graspy\"\nDESCRIPTION = \"A set of python modules for graph statistics\"\nwith open(\"README.md\", \"r\") as f:\n LONG_DESCRIPTION = f.read()\nAUTHOR = (\"Eric Bridgeford, Jaewon Chung, Benjamin Pedigo, Bijan Varjavand\",)\nAUTHOR_EMAIL = \"[email protected]\"\nURL = \"https://github.com/neurodata/graspy\"\nMINIMUM_PYTHON_VERSION = 3, 5 # Minimum of Python 3.5\nREQUIRED_PACKAGES = [\n \"networkx>=2.1\",\n \"numpy>=1.8.1\",\n \"scikit-learn>=0.19.1\",\n \"scipy>=1.1.0\",\n \"seaborn>=0.9.0\",\n \"matplotlib>=3.0.0\",\n]\n\n# Find GraSPy version.\nPROJECT_PATH = os.path.dirname(os.path.abspath(__file__))\nfor line in open(os.path.join(PROJECT_PATH, \"graspy\", \"__init__.py\")):\n if line.startswith(\"__version__ = \"):\n VERSION = line.strip().split()[2][1:-1]\n\n\ndef check_python_version():\n \"\"\"Exit when the Python version is too low.\"\"\"\n if sys.version_info < MINIMUM_PYTHON_VERSION:\n sys.exit(\"Python {}.{}+ is required.\".format(*MINIMUM_PYTHON_VERSION))\n\n\ncheck_python_version()\n\nsetup(\n name=PACKAGE_NAME,\n version=VERSION,\n description=DESCRIPTION,\n long_description=LONG_DESCRIPTION,\n long_description_content_type=\"text/markdown\",\n author=AUTHOR,\n author_email=AUTHOR_EMAIL,\n install_requires=REQUIRED_PACKAGES,\n url=URL,\n license=\"Apache License 2.0\",\n classifiers=[\n \"Development Status :: 3 - Alpha\",\n \"Intended Audience :: Science/Research\",\n \"Topic :: Scientific/Engineering :: Mathematics\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n ],\n packages=find_packages(),\n include_package_data=True,\n)\n", "path": "setup.py"}, {"content": "# -*- coding: utf-8 -*-\n#\n# Configuration file for the Sphinx documentation builder.\n#\n# This file does only contain a selection of the most common options. 
For a\n# full list see the documentation:\n# http://www.sphinx-doc.org/en/master/config\n\n# -- Path setup --------------------------------------------------------------\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#\nimport os\nimport sys\n\nsys.path.insert(0, os.path.abspath(\"..\"))\n\n# -- Project information -----------------------------------------------------\n\nproject = \"GraSPy\"\ncopyright = \"2018\"\nauthors = u\"NeuroData\"\n\n# The short X.Y version\n# Find GraSPy version.\nPROJECT_PATH = os.path.dirname(os.path.abspath(__file__))\nfor line in open(os.path.join(PROJECT_PATH, \"..\", \"graspy\", \"__init__.py\")):\n if line.startswith(\"__version__ = \"):\n version = line.strip().split()[2][1:-1]\n\n# The full version, including alpha/beta/rc tags\nrelease = \"alpha\"\n\n# -- Extension configuration -------------------------------------------------\nextensions = [\n \"sphinx.ext.autodoc\",\n \"sphinx.ext.autosummary\",\n \"sphinx.ext.todo\",\n \"sphinx.ext.viewcode\",\n \"sphinx.ext.mathjax\",\n \"numpydoc\",\n \"sphinx.ext.ifconfig\",\n \"sphinx.ext.githubpages\",\n \"sphinxcontrib.rawfiles\",\n \"nbsphinx\",\n \"sphinx.ext.intersphinx\",\n]\n\n# -- sphinxcontrib.rawfiles\nrawfiles = [\"CNAME\"]\n\n# -- numpydoc\n# Below is needed to prevent errors\nnumpydoc_show_class_members = False\nnumpydoc_attributes_as_param_list = True\nnumpydoc_use_blockquotes = True\n\n# -- sphinx.ext.autosummary\nautosummary_generate = True\n\n# -- sphinx.ext.autodoc\nautoclass_content = \"both\"\nautodoc_default_flags = [\"members\", \"inherited-members\"]\nautodoc_member_order = \"bysource\" # default is alphabetical\n\n# -- sphinx.ext.intersphinx\nintersphinx_mapping = {\n \"numpy\": (\"https://docs.scipy.org/doc/numpy\", None),\n \"python\": (\"https://docs.python.org/3\", None),\n \"scipy\": (\"https://docs.scipy.org/doc/scipy/reference\", None),\n \"sklearn\": (\"http://scikit-learn.org/dev\", None),\n}\n\n# -- sphinx options ----------------------------------------------------------\nsource_suffix = \".rst\"\nexclude_patterns = [\"_build\", \"Thumbs.db\", \".DS_Store\", \"**.ipynb_checkpoints\"]\nmaster_doc = \"index\"\nsource_encoding = \"utf-8\"\n\n# -- Options for HTML output -------------------------------------------------\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = [\"_templates\"]\nhtml_static_path = [\"_static\"]\nmodindex_common_prefix = [\"graspy.\"]\n\npygments_style = \"sphinx\"\nsmartquotes = False\n\n# Use RTD Theme\nimport sphinx_rtd_theme\n\nhtml_theme = \"sphinx_rtd_theme\"\nhtml_theme_path = [sphinx_rtd_theme.get_html_theme_path()]\nhtml_theme_options = {\n #'includehidden': False,\n \"navigation_depth\": 2,\n \"collapse_navigation\": False,\n \"navigation_depth\": 3,\n}\n\n# Custom sidebar templates, must be a dictionary that maps document names\n# to template names.\n#\n# The default sidebars (for documents that don't match any pattern) are\n# defined by theme itself. 
Builtin themes are using these templates by\n# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',\n# 'searchbox.html']``.\n#\n# html_sidebars = {}\n\n# -- Options for HTMLHelp output ---------------------------------------------\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = \"graspydoc\"\n\n# -- Options for LaTeX output ------------------------------------------------\n\nlatex_elements = {\n # The paper size ('letterpaper' or 'a4paper').\n #\n # 'papersize': 'letterpaper',\n # The font size ('10pt', '11pt' or '12pt').\n #\n # 'pointsize': '10pt',\n # Additional stuff for the LaTeX preamble.\n #\n # 'preamble': '',\n # Latex figure (float) alignment\n #\n # 'figure_align': 'htbp',\n}\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title,\n# author, documentclass [howto, manual, or own class]).\nlatex_documents = [\n (master_doc, \"graspy.tex\", \"GraSPy Documentation\", authors, \"manual\")\n]\n\n# -- Options for manual page output ------------------------------------------\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [(master_doc, \"graspy\", \"graspy Documentation\", [authors], 1)]\n\n# -- Options for Texinfo output ----------------------------------------------\n\n# Grouping the document tree into Texinfo files. List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [\n (\n master_doc,\n \"graspy\",\n \"graspy Documentation\",\n authors,\n \"graspy\",\n \"One line description of project.\",\n \"Miscellaneous\",\n )\n]\n\n# -- Options for Epub output -------------------------------------------------\n\n# Bibliographic Dublin Core info.\nepub_title = project\n\n# The unique identifier of the text. This can be a ISBN number\n# or the project homepage.\n#\n# epub_identifier = ''\n\n# A unique identification for the text.\n#\n# epub_uid = ''\n\n# A list of files that should not be packed into the epub file.\nepub_exclude_files = [\"search.html\"]\n", "path": "docs/conf.py"}]}
3,028
236
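The statistical point in the record above is estimator bias: the original nonpar implementation uses the biased MMD, which matches the plain double-centered distance covariance, while the issue asks for the unbiased (U-statistic) version that `hyppo`'s Dcorr and MGC tests build on — the accompanying diff only wires in that dependency. For reference, here is a self-contained NumPy/SciPy sketch of the unbiased squared distance covariance via U-centering; it illustrates the estimator itself and is not graspologic's or hyppo's code.

```python
import numpy as np
from scipy.spatial.distance import cdist

def u_center(D):
    """U-center a pairwise distance matrix (Szekely-Rizzo), zeroing the diagonal."""
    n = D.shape[0]
    row = D.sum(axis=1, keepdims=True) / (n - 2)
    col = D.sum(axis=0, keepdims=True) / (n - 2)
    grand = D.sum() / ((n - 1) * (n - 2))
    U = D - row - col + grand
    np.fill_diagonal(U, 0.0)
    return U

def unbiased_dcov_sq(x, y):
    """Unbiased (U-statistic) estimate of the squared distance covariance.

    Its expectation is zero under independence, the property the biased,
    double-centered estimator lacks.
    """
    n = x.shape[0]
    if n <= 3:
        raise ValueError("the U-statistic needs at least 4 samples")
    A = u_center(cdist(x, x))
    B = u_center(cdist(y, y))
    return (A * B).sum() / (n * (n - 3))

rng = np.random.default_rng(0)
x = rng.normal(size=(50, 2))
y = rng.normal(size=(50, 3))  # independent of x, so the statistic hovers near 0
print(unbiased_dcov_sq(x, y))
```

Normalising by the same quantity on (x, x) and (y, y) yields the unbiased Dcorr the issue asks for, and running MGC on the same U-centered matrices is the further step it suggests — the role the new `hyppo>=0.1.2` requirement is meant to fill.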
gh_patches_debug_13388
rasdani/github-patches
git_diff
jazzband__pip-tools-1802
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> pip-sync errors when merging multiple requirements.txt files that point at the same editable install pip-sync raises an `AttributeError: 'NoneType' object has no attribute 'specifier'` error when you try and run it with multiple requirements files that each include an editable install for the same package. #### Environment Versions 1. OS Type: Linux 1. Python version: Python 3.9.11 1. pip version: pip 22.3 1. pip-tools version: pip-compile, version 6.9.0 #### Steps to replicate 1. Create a setup.py, dev_requirements.in, requirements.in file ``` # setup.py from setuptools import find_packages, setup setup(name="a", version="0.0.1", packages=find_packages()) # dev_requirements.in -e file:. # requirements.in -e file:. ``` 2. Run pip-compile ``` pip-compile requirements.in pip-compile dev_requirements.in ``` 3. Run pip-sync ``` pip-sync requirements.in dev_requirements.in ``` #### Expected result I expected the editable package to install once. Similar to running `pip install -r requirements.txt -r dev_requirements.txt` #### Actual result `pip-sync` raised an error Full stack trace ``` ➜ scratch pip-sync requirements.txt dev_requirements.txt Traceback (most recent call last): File "/home/vivek/.pyenv/versions/3.9.11/bin/pip-sync", line 8, in <module> sys.exit(cli()) File "/home/vivek/.pyenv/versions/3.9.11/lib/python3.9/site-packages/click/core.py", line 1130, in __call__ return self.main(*args, **kwargs) File "/home/vivek/.pyenv/versions/3.9.11/lib/python3.9/site-packages/click/core.py", line 1055, in main rv = self.invoke(ctx) File "/home/vivek/.pyenv/versions/3.9.11/lib/python3.9/site-packages/click/core.py", line 1404, in invoke return ctx.invoke(self.callback, **ctx.params) File "/home/vivek/.pyenv/versions/3.9.11/lib/python3.9/site-packages/click/core.py", line 760, in invoke return __callback(*args, **kwargs) File "/home/vivek/.pyenv/versions/3.9.11/lib/python3.9/site-packages/piptools/scripts/sync.py", line 146, in cli merged_requirements = sync.merge(requirements, ignore_conflicts=force) File "/home/vivek/.pyenv/versions/3.9.11/lib/python3.9/site-packages/piptools/sync.py", line 115, in merge if ireq.specifier != existing_ireq.specifier: File "/home/vivek/.pyenv/versions/3.9.11/lib/python3.9/site-packages/pip/_internal/req/req_install.py", line 245, in specifier return self.req.specifier AttributeError: 'NoneType' object has no attribute 'specifier' ``` ... 
</issue> <code> [start of piptools/sync.py] 1 from __future__ import annotations 2 3 import collections 4 import os 5 import sys 6 import tempfile 7 from subprocess import run # nosec 8 from typing import Deque, Iterable, Mapping, ValuesView 9 10 import click 11 from pip._internal.commands.freeze import DEV_PKGS 12 from pip._internal.req import InstallRequirement 13 from pip._internal.utils.compat import stdlib_pkgs 14 15 from ._compat.pip_compat import Distribution, dist_requires 16 from .exceptions import IncompatibleRequirements 17 from .logging import log 18 from .utils import ( 19 flat_map, 20 format_requirement, 21 get_hashes_from_ireq, 22 is_url_requirement, 23 key_from_ireq, 24 key_from_req, 25 ) 26 27 PACKAGES_TO_IGNORE = [ 28 "-markerlib", 29 "pip", 30 "pip-tools", 31 "pip-review", 32 "pkg-resources", 33 *stdlib_pkgs, 34 *DEV_PKGS, 35 ] 36 37 38 def dependency_tree( 39 installed_keys: Mapping[str, Distribution], root_key: str 40 ) -> set[str]: 41 """ 42 Calculate the dependency tree for the package `root_key` and return 43 a collection of all its dependencies. Uses a DFS traversal algorithm. 44 45 `installed_keys` should be a {key: requirement} mapping, e.g. 46 {'django': from_line('django==1.8')} 47 `root_key` should be the key to return the dependency tree for. 48 """ 49 dependencies = set() 50 queue: Deque[Distribution] = collections.deque() 51 52 if root_key in installed_keys: 53 dep = installed_keys[root_key] 54 queue.append(dep) 55 56 while queue: 57 v = queue.popleft() 58 key = key_from_req(v) 59 if key in dependencies: 60 continue 61 62 dependencies.add(key) 63 64 for dep_specifier in dist_requires(v): 65 dep_name = key_from_req(dep_specifier) 66 if dep_name in installed_keys: 67 dep = installed_keys[dep_name] 68 69 if dep_specifier.specifier.contains(dep.version): 70 queue.append(dep) 71 72 return dependencies 73 74 75 def get_dists_to_ignore(installed: Iterable[Distribution]) -> list[str]: 76 """ 77 Returns a collection of package names to ignore when performing pip-sync, 78 based on the currently installed environment. For example, when pip-tools 79 is installed in the local environment, it should be ignored, including all 80 of its dependencies (e.g. click). When pip-tools is not installed 81 locally, click should also be installed/uninstalled depending on the given 82 requirements. 83 """ 84 installed_keys = {key_from_req(r): r for r in installed} 85 return list( 86 flat_map(lambda req: dependency_tree(installed_keys, req), PACKAGES_TO_IGNORE) 87 ) 88 89 90 def merge( 91 requirements: Iterable[InstallRequirement], ignore_conflicts: bool 92 ) -> ValuesView[InstallRequirement]: 93 by_key: dict[str, InstallRequirement] = {} 94 95 for ireq in requirements: 96 # Limitation: URL requirements are merged by precise string match, so 97 # "file:///example.zip#egg=example", "file:///example.zip", and 98 # "example==1.0" will not merge with each other 99 if ireq.match_markers(): 100 key = key_from_ireq(ireq) 101 102 if not ignore_conflicts: 103 existing_ireq = by_key.get(key) 104 if existing_ireq: 105 # NOTE: We check equality here since we can assume that the 106 # requirements are all pinned 107 if ireq.specifier != existing_ireq.specifier: 108 raise IncompatibleRequirements(ireq, existing_ireq) 109 110 # TODO: Always pick the largest specifier in case of a conflict 111 by_key[key] = ireq 112 return by_key.values() 113 114 115 def diff_key_from_ireq(ireq: InstallRequirement) -> str: 116 """ 117 Calculate a key for comparing a compiled requirement with installed modules. 
118 For URL requirements, only provide a useful key if the url includes 119 #egg=name==version, which will set ireq.req.name and ireq.specifier. 120 Otherwise return ireq.link so the key will not match and the package will 121 reinstall. Reinstall is necessary to ensure that packages will reinstall 122 if the URL is changed but the version is not. 123 """ 124 if is_url_requirement(ireq): 125 if ( 126 ireq.req 127 and (getattr(ireq.req, "key", None) or getattr(ireq.req, "name", None)) 128 and ireq.specifier 129 ): 130 return key_from_ireq(ireq) 131 return str(ireq.link) 132 return key_from_ireq(ireq) 133 134 135 def diff( 136 compiled_requirements: Iterable[InstallRequirement], 137 installed_dists: Iterable[Distribution], 138 ) -> tuple[set[InstallRequirement], set[str]]: 139 """ 140 Calculate which packages should be installed or uninstalled, given a set 141 of compiled requirements and a list of currently installed modules. 142 """ 143 requirements_lut = {diff_key_from_ireq(r): r for r in compiled_requirements} 144 145 satisfied = set() # holds keys 146 to_install = set() # holds InstallRequirement objects 147 to_uninstall = set() # holds keys 148 149 pkgs_to_ignore = get_dists_to_ignore(installed_dists) 150 for dist in installed_dists: 151 key = key_from_req(dist) 152 if key not in requirements_lut or not requirements_lut[key].match_markers(): 153 to_uninstall.add(key) 154 elif requirements_lut[key].specifier.contains(dist.version): 155 satisfied.add(key) 156 157 for key, requirement in requirements_lut.items(): 158 if key not in satisfied and requirement.match_markers(): 159 to_install.add(requirement) 160 161 # Make sure to not uninstall any packages that should be ignored 162 to_uninstall -= set(pkgs_to_ignore) 163 164 return (to_install, to_uninstall) 165 166 167 def sync( 168 to_install: Iterable[InstallRequirement], 169 to_uninstall: Iterable[InstallRequirement], 170 dry_run: bool = False, 171 install_flags: list[str] | None = None, 172 ask: bool = False, 173 python_executable: str | None = None, 174 ) -> int: 175 """ 176 Install and uninstalls the given sets of modules. 
177 """ 178 exit_code = 0 179 180 python_executable = python_executable or sys.executable 181 182 if not to_uninstall and not to_install: 183 log.info("Everything up-to-date", err=False) 184 return exit_code 185 186 pip_flags = [] 187 if log.verbosity < 0: 188 pip_flags += ["-q"] 189 190 if ask: 191 dry_run = True 192 193 if dry_run: 194 if to_uninstall: 195 click.echo("Would uninstall:") 196 for pkg in sorted(to_uninstall): 197 click.echo(f" {pkg}") 198 199 if to_install: 200 click.echo("Would install:") 201 for ireq in sorted(to_install, key=key_from_ireq): 202 click.echo(f" {format_requirement(ireq)}") 203 204 exit_code = 1 205 206 if ask and click.confirm("Would you like to proceed with these changes?"): 207 dry_run = False 208 exit_code = 0 209 210 if not dry_run: 211 if to_uninstall: 212 run( # nosec 213 [ 214 python_executable, 215 "-m", 216 "pip", 217 "uninstall", 218 "-y", 219 *pip_flags, 220 *sorted(to_uninstall), 221 ], 222 check=True, 223 ) 224 225 if to_install: 226 if install_flags is None: 227 install_flags = [] 228 # prepare requirement lines 229 req_lines = [] 230 for ireq in sorted(to_install, key=key_from_ireq): 231 ireq_hashes = get_hashes_from_ireq(ireq) 232 req_lines.append(format_requirement(ireq, hashes=ireq_hashes)) 233 234 # save requirement lines to a temporary file 235 tmp_req_file = tempfile.NamedTemporaryFile(mode="wt", delete=False) 236 tmp_req_file.write("\n".join(req_lines)) 237 tmp_req_file.close() 238 239 try: 240 run( # nosec 241 [ 242 python_executable, 243 "-m", 244 "pip", 245 "install", 246 "-r", 247 tmp_req_file.name, 248 *pip_flags, 249 *install_flags, 250 ], 251 check=True, 252 ) 253 finally: 254 os.unlink(tmp_req_file.name) 255 256 return exit_code 257 [end of piptools/sync.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/piptools/sync.py b/piptools/sync.py --- a/piptools/sync.py +++ b/piptools/sync.py @@ -104,7 +104,11 @@ if existing_ireq: # NOTE: We check equality here since we can assume that the # requirements are all pinned - if ireq.specifier != existing_ireq.specifier: + if ( + ireq.req + and existing_ireq.req + and ireq.specifier != existing_ireq.specifier + ): raise IncompatibleRequirements(ireq, existing_ireq) # TODO: Always pick the largest specifier in case of a conflict
{"golden_diff": "diff --git a/piptools/sync.py b/piptools/sync.py\n--- a/piptools/sync.py\n+++ b/piptools/sync.py\n@@ -104,7 +104,11 @@\n if existing_ireq:\n # NOTE: We check equality here since we can assume that the\n # requirements are all pinned\n- if ireq.specifier != existing_ireq.specifier:\n+ if (\n+ ireq.req\n+ and existing_ireq.req\n+ and ireq.specifier != existing_ireq.specifier\n+ ):\n raise IncompatibleRequirements(ireq, existing_ireq)\n \n # TODO: Always pick the largest specifier in case of a conflict\n", "issue": "pip-sync errors when merging multiple requirements.txt files that point at the same editable install\npip-sync raises an `AttributeError: 'NoneType' object has no attribute 'specifier'` error when you try and run it with multiple requirements files that each include an editable install for the same package.\r\n\r\n#### Environment Versions\r\n\r\n1. OS Type: Linux\r\n1. Python version: Python 3.9.11\r\n1. pip version: pip 22.3\r\n1. pip-tools version: pip-compile, version 6.9.0\r\n\r\n#### Steps to replicate\r\n1. Create a setup.py, dev_requirements.in, requirements.in file\r\n```\r\n# setup.py\r\nfrom setuptools import find_packages, setup\r\nsetup(name=\"a\", version=\"0.0.1\", packages=find_packages())\r\n\r\n# dev_requirements.in\r\n-e file:.\r\n\r\n# requirements.in\r\n-e file:.\r\n```\r\n2. Run pip-compile\r\n```\r\npip-compile requirements.in\r\npip-compile dev_requirements.in\r\n```\r\n3. Run pip-sync\r\n```\r\npip-sync requirements.in dev_requirements.in\r\n```\r\n\r\n#### Expected result\r\nI expected the editable package to install once. Similar to running `pip install -r requirements.txt -r dev_requirements.txt`\r\n\r\n\r\n#### Actual result\r\n`pip-sync` raised an error\r\nFull stack trace\r\n```\r\n\u279c scratch pip-sync requirements.txt dev_requirements.txt \r\nTraceback (most recent call last):\r\n File \"/home/vivek/.pyenv/versions/3.9.11/bin/pip-sync\", line 8, in <module>\r\n sys.exit(cli())\r\n File \"/home/vivek/.pyenv/versions/3.9.11/lib/python3.9/site-packages/click/core.py\", line 1130, in __call__\r\n return self.main(*args, **kwargs)\r\n File \"/home/vivek/.pyenv/versions/3.9.11/lib/python3.9/site-packages/click/core.py\", line 1055, in main\r\n rv = self.invoke(ctx)\r\n File \"/home/vivek/.pyenv/versions/3.9.11/lib/python3.9/site-packages/click/core.py\", line 1404, in invoke\r\n return ctx.invoke(self.callback, **ctx.params)\r\n File \"/home/vivek/.pyenv/versions/3.9.11/lib/python3.9/site-packages/click/core.py\", line 760, in invoke\r\n return __callback(*args, **kwargs)\r\n File \"/home/vivek/.pyenv/versions/3.9.11/lib/python3.9/site-packages/piptools/scripts/sync.py\", line 146, in cli\r\n merged_requirements = sync.merge(requirements, ignore_conflicts=force)\r\n File \"/home/vivek/.pyenv/versions/3.9.11/lib/python3.9/site-packages/piptools/sync.py\", line 115, in merge\r\n if ireq.specifier != existing_ireq.specifier:\r\n File \"/home/vivek/.pyenv/versions/3.9.11/lib/python3.9/site-packages/pip/_internal/req/req_install.py\", line 245, in specifier\r\n return self.req.specifier\r\nAttributeError: 'NoneType' object has no attribute 'specifier'\r\n```\r\n\r\n...\r\n\n", "before_files": [{"content": "from __future__ import annotations\n\nimport collections\nimport os\nimport sys\nimport tempfile\nfrom subprocess import run # nosec\nfrom typing import Deque, Iterable, Mapping, ValuesView\n\nimport click\nfrom pip._internal.commands.freeze import DEV_PKGS\nfrom pip._internal.req import InstallRequirement\nfrom pip._internal.utils.compat 
import stdlib_pkgs\n\nfrom ._compat.pip_compat import Distribution, dist_requires\nfrom .exceptions import IncompatibleRequirements\nfrom .logging import log\nfrom .utils import (\n flat_map,\n format_requirement,\n get_hashes_from_ireq,\n is_url_requirement,\n key_from_ireq,\n key_from_req,\n)\n\nPACKAGES_TO_IGNORE = [\n \"-markerlib\",\n \"pip\",\n \"pip-tools\",\n \"pip-review\",\n \"pkg-resources\",\n *stdlib_pkgs,\n *DEV_PKGS,\n]\n\n\ndef dependency_tree(\n installed_keys: Mapping[str, Distribution], root_key: str\n) -> set[str]:\n \"\"\"\n Calculate the dependency tree for the package `root_key` and return\n a collection of all its dependencies. Uses a DFS traversal algorithm.\n\n `installed_keys` should be a {key: requirement} mapping, e.g.\n {'django': from_line('django==1.8')}\n `root_key` should be the key to return the dependency tree for.\n \"\"\"\n dependencies = set()\n queue: Deque[Distribution] = collections.deque()\n\n if root_key in installed_keys:\n dep = installed_keys[root_key]\n queue.append(dep)\n\n while queue:\n v = queue.popleft()\n key = key_from_req(v)\n if key in dependencies:\n continue\n\n dependencies.add(key)\n\n for dep_specifier in dist_requires(v):\n dep_name = key_from_req(dep_specifier)\n if dep_name in installed_keys:\n dep = installed_keys[dep_name]\n\n if dep_specifier.specifier.contains(dep.version):\n queue.append(dep)\n\n return dependencies\n\n\ndef get_dists_to_ignore(installed: Iterable[Distribution]) -> list[str]:\n \"\"\"\n Returns a collection of package names to ignore when performing pip-sync,\n based on the currently installed environment. For example, when pip-tools\n is installed in the local environment, it should be ignored, including all\n of its dependencies (e.g. click). When pip-tools is not installed\n locally, click should also be installed/uninstalled depending on the given\n requirements.\n \"\"\"\n installed_keys = {key_from_req(r): r for r in installed}\n return list(\n flat_map(lambda req: dependency_tree(installed_keys, req), PACKAGES_TO_IGNORE)\n )\n\n\ndef merge(\n requirements: Iterable[InstallRequirement], ignore_conflicts: bool\n) -> ValuesView[InstallRequirement]:\n by_key: dict[str, InstallRequirement] = {}\n\n for ireq in requirements:\n # Limitation: URL requirements are merged by precise string match, so\n # \"file:///example.zip#egg=example\", \"file:///example.zip\", and\n # \"example==1.0\" will not merge with each other\n if ireq.match_markers():\n key = key_from_ireq(ireq)\n\n if not ignore_conflicts:\n existing_ireq = by_key.get(key)\n if existing_ireq:\n # NOTE: We check equality here since we can assume that the\n # requirements are all pinned\n if ireq.specifier != existing_ireq.specifier:\n raise IncompatibleRequirements(ireq, existing_ireq)\n\n # TODO: Always pick the largest specifier in case of a conflict\n by_key[key] = ireq\n return by_key.values()\n\n\ndef diff_key_from_ireq(ireq: InstallRequirement) -> str:\n \"\"\"\n Calculate a key for comparing a compiled requirement with installed modules.\n For URL requirements, only provide a useful key if the url includes\n #egg=name==version, which will set ireq.req.name and ireq.specifier.\n Otherwise return ireq.link so the key will not match and the package will\n reinstall. 
Reinstall is necessary to ensure that packages will reinstall\n if the URL is changed but the version is not.\n \"\"\"\n if is_url_requirement(ireq):\n if (\n ireq.req\n and (getattr(ireq.req, \"key\", None) or getattr(ireq.req, \"name\", None))\n and ireq.specifier\n ):\n return key_from_ireq(ireq)\n return str(ireq.link)\n return key_from_ireq(ireq)\n\n\ndef diff(\n compiled_requirements: Iterable[InstallRequirement],\n installed_dists: Iterable[Distribution],\n) -> tuple[set[InstallRequirement], set[str]]:\n \"\"\"\n Calculate which packages should be installed or uninstalled, given a set\n of compiled requirements and a list of currently installed modules.\n \"\"\"\n requirements_lut = {diff_key_from_ireq(r): r for r in compiled_requirements}\n\n satisfied = set() # holds keys\n to_install = set() # holds InstallRequirement objects\n to_uninstall = set() # holds keys\n\n pkgs_to_ignore = get_dists_to_ignore(installed_dists)\n for dist in installed_dists:\n key = key_from_req(dist)\n if key not in requirements_lut or not requirements_lut[key].match_markers():\n to_uninstall.add(key)\n elif requirements_lut[key].specifier.contains(dist.version):\n satisfied.add(key)\n\n for key, requirement in requirements_lut.items():\n if key not in satisfied and requirement.match_markers():\n to_install.add(requirement)\n\n # Make sure to not uninstall any packages that should be ignored\n to_uninstall -= set(pkgs_to_ignore)\n\n return (to_install, to_uninstall)\n\n\ndef sync(\n to_install: Iterable[InstallRequirement],\n to_uninstall: Iterable[InstallRequirement],\n dry_run: bool = False,\n install_flags: list[str] | None = None,\n ask: bool = False,\n python_executable: str | None = None,\n) -> int:\n \"\"\"\n Install and uninstalls the given sets of modules.\n \"\"\"\n exit_code = 0\n\n python_executable = python_executable or sys.executable\n\n if not to_uninstall and not to_install:\n log.info(\"Everything up-to-date\", err=False)\n return exit_code\n\n pip_flags = []\n if log.verbosity < 0:\n pip_flags += [\"-q\"]\n\n if ask:\n dry_run = True\n\n if dry_run:\n if to_uninstall:\n click.echo(\"Would uninstall:\")\n for pkg in sorted(to_uninstall):\n click.echo(f\" {pkg}\")\n\n if to_install:\n click.echo(\"Would install:\")\n for ireq in sorted(to_install, key=key_from_ireq):\n click.echo(f\" {format_requirement(ireq)}\")\n\n exit_code = 1\n\n if ask and click.confirm(\"Would you like to proceed with these changes?\"):\n dry_run = False\n exit_code = 0\n\n if not dry_run:\n if to_uninstall:\n run( # nosec\n [\n python_executable,\n \"-m\",\n \"pip\",\n \"uninstall\",\n \"-y\",\n *pip_flags,\n *sorted(to_uninstall),\n ],\n check=True,\n )\n\n if to_install:\n if install_flags is None:\n install_flags = []\n # prepare requirement lines\n req_lines = []\n for ireq in sorted(to_install, key=key_from_ireq):\n ireq_hashes = get_hashes_from_ireq(ireq)\n req_lines.append(format_requirement(ireq, hashes=ireq_hashes))\n\n # save requirement lines to a temporary file\n tmp_req_file = tempfile.NamedTemporaryFile(mode=\"wt\", delete=False)\n tmp_req_file.write(\"\\n\".join(req_lines))\n tmp_req_file.close()\n\n try:\n run( # nosec\n [\n python_executable,\n \"-m\",\n \"pip\",\n \"install\",\n \"-r\",\n tmp_req_file.name,\n *pip_flags,\n *install_flags,\n ],\n check=True,\n )\n finally:\n os.unlink(tmp_req_file.name)\n\n return exit_code\n", "path": "piptools/sync.py"}]}
3735
162
gh_patches_debug_24568
rasdani/github-patches
git_diff
mdn__kuma-6829
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> T - Fix post-sign-ins redirect so that user lands on correct page **Summary** You're not taken back to where you were when you sign in. At least via the modal and GitHub. ![Screen-Recording-2020-04-03-at-2 31 33-PM](https://user-images.githubusercontent.com/26739/78393569-167dd900-75b8-11ea-9d78-3bf0ca358bb2.gif) **Steps To Reproduce (STR)** 1. Go to some page other than the home page. Be not-signed in. 2. Click sign in. 3. Click GitHub and complete your GitHub auth stuff **Actual behavior** I ended up back on the home page `/en-US/`. :( **Expected behavior** To be taken to the page I was originally on. **Additional context** Pretty sure this is regression but don't know if it started when we switched to a modal. </issue> <code> [start of kuma/core/context_processors.py] 1 from urllib.parse import urlparse 2 3 from constance import config 4 from django.conf import settings 5 from django.utils import translation 6 7 from .i18n import get_language_mapping 8 9 10 def global_settings(request): 11 """Adds settings to the context.""" 12 13 def clean_safe_url(url): 14 if "://" not in url: 15 # E.g. 'elasticsearch:9200' 16 url = "http://" + url 17 parsed = urlparse(url) 18 if "@" in parsed.netloc: 19 parsed = parsed._replace( 20 netloc="username:secret@" + parsed.netloc.split("@")[-1] 21 ) 22 return parsed.geturl() 23 24 # TODO: Ideally, GOOGLE_ANALYTICS_ACCOUNT is only set in settings (from 25 # an environment variable) but for safe transition, we rely on 26 # constance if it hasn't been put into settings yet. 27 # Once we know with confidence, that GOOGLE_ANALYTICS_ACCOUNT is set 28 # and a valid value in the environment (for production!) then we 29 # can delete these lines of code. 30 # See https://bugzilla.mozilla.org/show_bug.cgi?id=1570076 31 google_analytics_account = getattr(settings, "GOOGLE_ANALYTICS_ACCOUNT", None) 32 if google_analytics_account is None: 33 if config.GOOGLE_ANALYTICS_ACCOUNT != "0": 34 settings.GOOGLE_ANALYTICS_ACCOUNT = config.GOOGLE_ANALYTICS_ACCOUNT 35 36 return { 37 "settings": settings, 38 # Because the 'settings.ES_URLS' might contain the username:password 39 # it's never appropriate to display in templates. So clean them up. 40 # But return it as a lambda so it only executes if really needed. 41 "safe_es_urls": lambda: [clean_safe_url(x) for x in settings.ES_URLS], 42 } 43 44 45 def i18n(request): 46 return { 47 "LANGUAGES": get_language_mapping(), 48 "LANG": ( 49 settings.LANGUAGE_URL_MAP.get(translation.get_language()) 50 or translation.get_language() 51 ), 52 "DIR": "rtl" if translation.get_language_bidi() else "ltr", 53 } 54 55 56 def next_url(request): 57 if ( 58 hasattr(request, "path") 59 and "login" not in request.path 60 and "register" not in request.path 61 ): 62 return {"next_url": request.get_full_path()} 63 return {} 64 [end of kuma/core/context_processors.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/kuma/core/context_processors.py b/kuma/core/context_processors.py --- a/kuma/core/context_processors.py +++ b/kuma/core/context_processors.py @@ -4,6 +4,8 @@ from django.conf import settings from django.utils import translation +from kuma.core.urlresolvers import reverse + from .i18n import get_language_mapping @@ -54,10 +56,30 @@ def next_url(request): - if ( - hasattr(request, "path") - and "login" not in request.path - and "register" not in request.path - ): - return {"next_url": request.get_full_path()} - return {} + """Return a function by the same name as the context processor. + That means, in the jinja templates, instead of doing + + {% set url = next_url %} + + you just have to do: + + {% set url = next_url() %} + + which means that the actual context processor function isn't executed + every single time any jinja template is rendered. Now, only if the + context processor is actually needed, it gets executed. + + See https://www.peterbe.com/plog/closure-django-context-processors + """ + + def inner(): + if hasattr(request, "path"): + if request.GET.get("next"): + if "://" not in request.GET["next"]: + return request.GET["next"] + elif reverse(settings.LOGIN_URL) != request.get_full_path(): + # The only exception is the sign-in landing page which you get to + # if you can't use the auth modal. + return request.get_full_path() + + return {"next_url": inner}
{"golden_diff": "diff --git a/kuma/core/context_processors.py b/kuma/core/context_processors.py\n--- a/kuma/core/context_processors.py\n+++ b/kuma/core/context_processors.py\n@@ -4,6 +4,8 @@\n from django.conf import settings\n from django.utils import translation\n \n+from kuma.core.urlresolvers import reverse\n+\n from .i18n import get_language_mapping\n \n \n@@ -54,10 +56,30 @@\n \n \n def next_url(request):\n- if (\n- hasattr(request, \"path\")\n- and \"login\" not in request.path\n- and \"register\" not in request.path\n- ):\n- return {\"next_url\": request.get_full_path()}\n- return {}\n+ \"\"\"Return a function by the same name as the context processor.\n+ That means, in the jinja templates, instead of doing\n+\n+ {% set url = next_url %}\n+\n+ you just have to do:\n+\n+ {% set url = next_url() %}\n+\n+ which means that the actual context processor function isn't executed\n+ every single time any jinja template is rendered. Now, only if the\n+ context processor is actually needed, it gets executed.\n+\n+ See https://www.peterbe.com/plog/closure-django-context-processors\n+ \"\"\"\n+\n+ def inner():\n+ if hasattr(request, \"path\"):\n+ if request.GET.get(\"next\"):\n+ if \"://\" not in request.GET[\"next\"]:\n+ return request.GET[\"next\"]\n+ elif reverse(settings.LOGIN_URL) != request.get_full_path():\n+ # The only exception is the sign-in landing page which you get to\n+ # if you can't use the auth modal.\n+ return request.get_full_path()\n+\n+ return {\"next_url\": inner}\n", "issue": "T - Fix post-sign-ins redirect so that user lands on correct page\n**Summary**\r\nYou're not taken back to where you were when you sign in. At least via the modal and GitHub.\r\n\r\n![Screen-Recording-2020-04-03-at-2 31 33-PM](https://user-images.githubusercontent.com/26739/78393569-167dd900-75b8-11ea-9d78-3bf0ca358bb2.gif)\r\n\r\n\r\n**Steps To Reproduce (STR)**\r\n\r\n\r\n1. Go to some page other than the home page. Be not-signed in.\r\n2. Click sign in. \r\n3. Click GitHub and complete your GitHub auth stuff\r\n\r\n\r\n**Actual behavior**\r\nI ended up back on the home page `/en-US/`. :(\r\n\r\n\r\n**Expected behavior**\r\nTo be taken to the page I was originally on. \r\n\r\n\r\n**Additional context**\r\nPretty sure this is regression but don't know if it started when we switched to a modal. \r\n\n", "before_files": [{"content": "from urllib.parse import urlparse\n\nfrom constance import config\nfrom django.conf import settings\nfrom django.utils import translation\n\nfrom .i18n import get_language_mapping\n\n\ndef global_settings(request):\n \"\"\"Adds settings to the context.\"\"\"\n\n def clean_safe_url(url):\n if \"://\" not in url:\n # E.g. 'elasticsearch:9200'\n url = \"http://\" + url\n parsed = urlparse(url)\n if \"@\" in parsed.netloc:\n parsed = parsed._replace(\n netloc=\"username:secret@\" + parsed.netloc.split(\"@\")[-1]\n )\n return parsed.geturl()\n\n # TODO: Ideally, GOOGLE_ANALYTICS_ACCOUNT is only set in settings (from\n # an environment variable) but for safe transition, we rely on\n # constance if it hasn't been put into settings yet.\n # Once we know with confidence, that GOOGLE_ANALYTICS_ACCOUNT is set\n # and a valid value in the environment (for production!) 
then we\n # can delete these lines of code.\n # See https://bugzilla.mozilla.org/show_bug.cgi?id=1570076\n google_analytics_account = getattr(settings, \"GOOGLE_ANALYTICS_ACCOUNT\", None)\n if google_analytics_account is None:\n if config.GOOGLE_ANALYTICS_ACCOUNT != \"0\":\n settings.GOOGLE_ANALYTICS_ACCOUNT = config.GOOGLE_ANALYTICS_ACCOUNT\n\n return {\n \"settings\": settings,\n # Because the 'settings.ES_URLS' might contain the username:password\n # it's never appropriate to display in templates. So clean them up.\n # But return it as a lambda so it only executes if really needed.\n \"safe_es_urls\": lambda: [clean_safe_url(x) for x in settings.ES_URLS],\n }\n\n\ndef i18n(request):\n return {\n \"LANGUAGES\": get_language_mapping(),\n \"LANG\": (\n settings.LANGUAGE_URL_MAP.get(translation.get_language())\n or translation.get_language()\n ),\n \"DIR\": \"rtl\" if translation.get_language_bidi() else \"ltr\",\n }\n\n\ndef next_url(request):\n if (\n hasattr(request, \"path\")\n and \"login\" not in request.path\n and \"register\" not in request.path\n ):\n return {\"next_url\": request.get_full_path()}\n return {}\n", "path": "kuma/core/context_processors.py"}]}
1398
389
gh_patches_debug_9354
rasdani/github-patches
git_diff
huggingface__text-generation-inference-579
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Tied weight optimization for checkpoints doesn't work with text-generation-inference. ### System Info Ubuntu 20.04 4 A10 NVIDIA GPU's I think checkpoints saved after this feature was merged don't work with text-generation-inference. https://github.com/huggingface/transformers/issues/23868 With falcon models getting "`lm_head` not found" I'll add more details once I find minimal steps to reproduce. ### Information - [X] Docker - [ ] The CLI directly ### Tasks - [X] An officially supported command - [ ] My own modifications ### Reproduction Save tiiuae/falcon-40b checkpoint using transformers==4.30.2 launch text-generation-inference server (using transformers==4.27.4 works without issue) ### Expected behavior Expect the text-generation-inference weight loader to be able to find the `lm_head` weight in the checkpoint. Note this may be a safetensor issue. </issue> <code> [start of server/text_generation_server/models/flash_rw.py] 1 import torch 2 import torch.distributed 3 4 from opentelemetry import trace 5 from transformers import AutoTokenizer 6 from typing import Optional 7 8 from text_generation_server.models import FlashCausalLM 9 from text_generation_server.models.custom_modeling.flash_rw_modeling import ( 10 RWConfig, 11 FlashRWForCausalLM, 12 ) 13 from text_generation_server.utils import ( 14 initialize_torch_distributed, 15 weight_files, 16 Weights, 17 ) 18 19 tracer = trace.get_tracer(__name__) 20 21 22 class FlashRWSharded(FlashCausalLM): 23 def __init__( 24 self, 25 model_id: str, 26 revision: Optional[str] = None, 27 quantize: Optional[str] = None, 28 dtype: Optional[torch.dtype] = None, 29 trust_remote_code: bool = False, 30 ): 31 self.process_group, rank, world_size = initialize_torch_distributed() 32 if torch.cuda.is_available(): 33 device = torch.device(f"cuda:{rank}") 34 dtype = torch.float16 if dtype is None else dtype 35 else: 36 raise NotImplementedError("FlashRW is only available on GPU") 37 38 tokenizer = AutoTokenizer.from_pretrained( 39 model_id, 40 revision=revision, 41 padding_side="left", 42 truncation_side="left", 43 trust_remote_code=trust_remote_code, 44 ) 45 46 config = RWConfig.from_pretrained( 47 model_id, revision=revision, trust_remote_code=trust_remote_code 48 ) 49 50 torch.distributed.barrier(group=self.process_group) 51 filenames = weight_files(model_id, revision=revision, extension=".safetensors") 52 weights = Weights(filenames, device, dtype, process_group=self.process_group) 53 54 config.quantize = quantize 55 56 model = FlashRWForCausalLM(config, weights) 57 58 torch.distributed.barrier(group=self.process_group) 59 super(FlashRWSharded, self).__init__( 60 model=model.to(device), 61 tokenizer=tokenizer, 62 num_layers=len(model.transformer.h), 63 num_kv_heads=model.transformer.cache_size, 64 head_size=model.transformer.head_size, 65 dtype=dtype, 66 device=device, 67 rank=rank, 68 world_size=world_size, 69 ) 70 [end of server/text_generation_server/models/flash_rw.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/server/text_generation_server/models/flash_rw.py b/server/text_generation_server/models/flash_rw.py --- a/server/text_generation_server/models/flash_rw.py +++ b/server/text_generation_server/models/flash_rw.py @@ -49,7 +49,13 @@ torch.distributed.barrier(group=self.process_group) filenames = weight_files(model_id, revision=revision, extension=".safetensors") - weights = Weights(filenames, device, dtype, process_group=self.process_group) + weights = Weights( + filenames, + device, + dtype, + process_group=self.process_group, + aliases={"transformer.word_embeddings.weight": ["lm_head.weight"]}, + ) config.quantize = quantize
{"golden_diff": "diff --git a/server/text_generation_server/models/flash_rw.py b/server/text_generation_server/models/flash_rw.py\n--- a/server/text_generation_server/models/flash_rw.py\n+++ b/server/text_generation_server/models/flash_rw.py\n@@ -49,7 +49,13 @@\n \n torch.distributed.barrier(group=self.process_group)\n filenames = weight_files(model_id, revision=revision, extension=\".safetensors\")\n- weights = Weights(filenames, device, dtype, process_group=self.process_group)\n+ weights = Weights(\n+ filenames,\n+ device,\n+ dtype,\n+ process_group=self.process_group,\n+ aliases={\"transformer.word_embeddings.weight\": [\"lm_head.weight\"]},\n+ )\n \n config.quantize = quantize\n", "issue": "Tied weight optimization for checkpoints doesn't work with text-generation-inference.\n### System Info\r\nUbuntu 20.04\r\n4 A10 NVIDIA GPU's\r\n\r\nI think checkpoints saved after this feature was merged don't work with text-generation-inference.\r\nhttps://github.com/huggingface/transformers/issues/23868\r\n\r\nWith falcon models getting \"`lm_head` not found\"\r\nI'll add more details once I find minimal steps to reproduce.\r\n\r\n### Information\r\n\r\n- [X] Docker\r\n- [ ] The CLI directly\r\n\r\n### Tasks\r\n\r\n- [X] An officially supported command\r\n- [ ] My own modifications\r\n\r\n### Reproduction\r\n\r\nSave tiiuae/falcon-40b checkpoint using transformers==4.30.2\r\nlaunch text-generation-inference server\r\n\r\n(using transformers==4.27.4 works without issue)\r\n\r\n### Expected behavior\r\n\r\nExpect the text-generation-inference weight loader to be able to find the `lm_head` weight in the checkpoint. Note this may be a safetensor issue.\n", "before_files": [{"content": "import torch\nimport torch.distributed\n\nfrom opentelemetry import trace\nfrom transformers import AutoTokenizer\nfrom typing import Optional\n\nfrom text_generation_server.models import FlashCausalLM\nfrom text_generation_server.models.custom_modeling.flash_rw_modeling import (\n RWConfig,\n FlashRWForCausalLM,\n)\nfrom text_generation_server.utils import (\n initialize_torch_distributed,\n weight_files,\n Weights,\n)\n\ntracer = trace.get_tracer(__name__)\n\n\nclass FlashRWSharded(FlashCausalLM):\n def __init__(\n self,\n model_id: str,\n revision: Optional[str] = None,\n quantize: Optional[str] = None,\n dtype: Optional[torch.dtype] = None,\n trust_remote_code: bool = False,\n ):\n self.process_group, rank, world_size = initialize_torch_distributed()\n if torch.cuda.is_available():\n device = torch.device(f\"cuda:{rank}\")\n dtype = torch.float16 if dtype is None else dtype\n else:\n raise NotImplementedError(\"FlashRW is only available on GPU\")\n\n tokenizer = AutoTokenizer.from_pretrained(\n model_id,\n revision=revision,\n padding_side=\"left\",\n truncation_side=\"left\",\n trust_remote_code=trust_remote_code,\n )\n\n config = RWConfig.from_pretrained(\n model_id, revision=revision, trust_remote_code=trust_remote_code\n )\n\n torch.distributed.barrier(group=self.process_group)\n filenames = weight_files(model_id, revision=revision, extension=\".safetensors\")\n weights = Weights(filenames, device, dtype, process_group=self.process_group)\n\n config.quantize = quantize\n\n model = FlashRWForCausalLM(config, weights)\n\n torch.distributed.barrier(group=self.process_group)\n super(FlashRWSharded, self).__init__(\n model=model.to(device),\n tokenizer=tokenizer,\n num_layers=len(model.transformer.h),\n num_kv_heads=model.transformer.cache_size,\n head_size=model.transformer.head_size,\n dtype=dtype,\n 
device=device,\n rank=rank,\n world_size=world_size,\n )\n", "path": "server/text_generation_server/models/flash_rw.py"}]}
1346
166
gh_patches_debug_37200
rasdani/github-patches
git_diff
conan-io__conan-center-index-14544
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> [package] m4/1.4.19: Fails test_package with latest Conan 2.0 ### Description Trying to build with the latest version of the CCI m4 recipe results in the following error when using Conan 2 > -------- Installing (downloading, building) binaries... -------- > cmake/3.24.2: Already installed! > m4/1.4.19: Already installed! > m4/1.4.19: Appending PATH environment variable: /home/marc/.conan2/p/4c23d7e6a6e4b9b7/p/bin > WARN: The use of 'env_info' is deprecated in Conan 2.0 and will be removed in Conan 2.X. Please, update your recipes unless you are maintaining compatibility with Conan 1.X > ERROR: m4/1.4.19: 'NoneType' object has no attribute 'append'. No default values are set for components. You are probably trying to manipulate a component attribute in the 'package_info' method without defining it previously > ERROR: Conan-2.0 test failed for 'm4' ### Package and Environment Details * Package Name/Version: **m4/1.4.19** * Operating System+version: **Debian 10** * Compiler+version: **GCC 8** * Docker image: **N/A** * Conan version: **conan 2.0.0-dev beta5+** * Python version: **Python 3.7.3** ### Conan profile Profile host: [settings] arch=x86_64 build_type=Release compiler=gcc compiler.libcxx=libstdc++11 compiler.version=8 os=Linux [options] cmake*:bootstrap=True [tool_requires] !openssl*: cmake/3.24.2 Profile build: [settings] arch=x86_64 build_type=Release compiler=gcc compiler.libcxx=libstdc++11 compiler.version=8 os=Linux [options] cmake*:bootstrap=True [tool_requires] !openssl*: cmake/3.24.2 ### Steps to reproduce conan-2.0 test -pr:b tools.jinja -pr:h tools.jinja test_package m4/1.4.19 ### Logs <details><summary>Click to expand log</summary> ``` conan-2.0 test -pr:b tools.jinja -pr:h tools.jinja test_package m4/1.4.19 -------- Input profiles -------- Profile host: [settings] arch=x86_64 build_type=Release compiler=gcc compiler.libcxx=libstdc++11 compiler.version=8 os=Linux [options] cmake*:bootstrap=True [tool_requires] !openssl*: cmake/3.24.2 Profile build: [settings] arch=x86_64 build_type=Release compiler=gcc compiler.libcxx=libstdc++11 compiler.version=8 os=Linux [options] cmake*:bootstrap=True [tool_requires] !openssl*: cmake/3.24.2 -------- test_package: Computing dependency graph -------- Graph root m4/1.4.19 (test package): /tmp/tmp.Hu84j8HryY/m4/test_package/conanfile.py Build requirements cmake/3.24.2#623afae5289cadc0b9f11a8f43eae83b - Cache m4/1.4.19#d5e4c4ec85145f2a2bfea4bceef5e56a - Cache openssl/1.1.1s#b304462aeda9923b735e6b37368c233e - Cache -------- test_package: Computing necessary packages -------- Build requirements cmake/3.24.2#623afae5289cadc0b9f11a8f43eae83b:ccbf287e38142241463d713141c76d0c18207a9d#5e10a6d92143344f25472aa9178b24fc - Cache m4/1.4.19#d5e4c4ec85145f2a2bfea4bceef5e56a:3593751651824fb813502c69c971267624ced41a#028e06b2ec8f151ba5ee47e640cc2c5c - Cache openssl/1.1.1s#b304462aeda9923b735e6b37368c233e:896855b1e3b5961bfcc08e699116d9ed588cac00#7fbe50a5f635dbba8a47e75cf8b42165 - Skip -------- test_package: Installing packages -------- -------- Installing (downloading, building) binaries... -------- cmake/3.24.2: Already installed! m4/1.4.19: Already installed! m4/1.4.19: Appending PATH environment variable: /home/marc/.conan2/p/4c23d7e6a6e4b9b7/p/bin WARN: The use of 'env_info' is deprecated in Conan 2.0 and will be removed in Conan 2.X. 
Please, update your recipes unless you are maintaining compatibility with Conan 1.X ERROR: m4/1.4.19: 'NoneType' object has no attribute 'append'. No default values are set for components. You are probably trying to manipulate a component attribute in the 'package_info' method without defining it previously ERROR: Conan-2.0 test failed for 'm4' ``` </details> </issue> <code> [start of recipes/m4/all/conanfile.py] 1 from conan import ConanFile 2 from conan.tools.env import VirtualBuildEnv 3 from conan.tools.files import apply_conandata_patches, copy, export_conandata_patches, get, rmdir, save 4 from conan.tools.gnu import Autotools, AutotoolsToolchain 5 from conan.tools.layout import basic_layout 6 from conan.tools.microsoft import is_msvc, unix_path 7 from conan.tools.scm import Version 8 import os 9 10 required_conan_version = ">=1.52.0" 11 12 13 class M4Conan(ConanFile): 14 name = "m4" 15 description = "GNU M4 is an implementation of the traditional Unix macro processor" 16 topics = ("macro", "preprocessor") 17 homepage = "https://www.gnu.org/software/m4/" 18 url = "https://github.com/conan-io/conan-center-index" 19 license = "GPL-3.0-only" 20 settings = "os", "arch", "compiler", "build_type" 21 22 @property 23 def _settings_build(self): 24 return getattr(self, "settings_build", self.settings) 25 26 def export_sources(self): 27 export_conandata_patches(self) 28 29 def layout(self): 30 basic_layout(self, src_folder="src") 31 32 def package_id(self): 33 del self.info.settings.compiler 34 35 def build_requirements(self): 36 if self._settings_build.os == "Windows": 37 if not self.conf.get("tools.microsoft.bash:path", default=False, check_type=bool): 38 self.tool_requires("msys2/cci.latest") 39 self.win_bash = True 40 41 def source(self): 42 get(self, **self.conan_data["sources"][self.version], 43 destination=self.source_folder, strip_root=True) 44 45 def generate(self): 46 env = VirtualBuildEnv(self) 47 env.generate() 48 49 tc = AutotoolsToolchain(self) 50 if is_msvc(self): 51 tc.extra_cflags.append("-FS") 52 # Avoid a `Assertion Failed Dialog Box` during configure with build_type=Debug 53 # Visual Studio does not support the %n format flag: 54 # https://docs.microsoft.com/en-us/cpp/c-runtime-library/format-specification-syntax-printf-and-wprintf-functions 55 # Because the %n format is inherently insecure, it is disabled by default. If %n is encountered in a format string, 56 # the invalid parameter handler is invoked, as described in Parameter Validation. To enable %n support, see _set_printf_count_output. 
57 tc.configure_args.extend([ 58 "gl_cv_func_printf_directive_n=no", 59 "gl_cv_func_snprintf_directive_n=no", 60 "gl_cv_func_snprintf_directive_n=no", 61 ]) 62 if self.settings.build_type in ("Debug", "RelWithDebInfo"): 63 tc.extra_ldflags.append("-PDB") 64 elif self.settings.compiler == "clang": 65 if Version(self.version) < "1.4.19": 66 tc.extra_cflags.extend([ 67 "-rtlib=compiler-rt", 68 "-Wno-unused-command-line-argument", 69 ]) 70 if self.settings.os == "Windows": 71 tc.configure_args.append("ac_cv_func__set_invalid_parameter_handler=yes") 72 env = tc.environment() 73 # help2man trick 74 env.prepend_path("PATH", self.source_folder) 75 # handle msvc 76 if is_msvc(self): 77 ar_wrapper = unix_path(self, os.path.join(self.source_folder, "build-aux", "ar-lib")) 78 env.define("CC", "cl -nologo") 79 env.define("CXX", "cl -nologo") 80 env.define("AR", f"{ar_wrapper} lib") 81 env.define("LD", "link") 82 env.define("NM", "dumpbin -symbols") 83 env.define("OBJDUMP", ":") 84 env.define("RANLIB", ":") 85 env.define("STRIP", ":") 86 tc.generate(env) 87 88 def _patch_sources(self): 89 apply_conandata_patches(self) 90 # dummy file for configure 91 help2man = os.path.join(self.source_folder, "help2man") 92 save(self, help2man, "#!/usr/bin/env bash\n:") 93 if os.name == "posix": 94 os.chmod(help2man, os.stat(help2man).st_mode | 0o111) 95 96 def build(self): 97 self._patch_sources() 98 autotools = Autotools(self) 99 autotools.configure() 100 autotools.make() 101 102 def package(self): 103 copy(self, "COPYING", src=self.source_folder, dst=os.path.join(self.package_folder, "licenses")) 104 autotools = Autotools(self) 105 # TODO: replace by autotools.install() once https://github.com/conan-io/conan/issues/12153 fixed 106 autotools.install(args=[f"DESTDIR={unix_path(self, self.package_folder)}"]) 107 rmdir(self, os.path.join(self.package_folder, "share")) 108 109 def package_info(self): 110 self.cpp_info.libdirs = [] 111 self.cpp_info.includedirs = [] 112 113 # M4 environment variable is used by a lot of scripts as a way to override a hard-coded embedded m4 path 114 bin_ext = ".exe" if self.settings.os == "Windows" else "" 115 m4_bin = os.path.join(self.package_folder, "bin", f"m4{bin_ext}").replace("\\", "/") 116 self.runenv_info.define_path("M4", m4_bin) 117 self.buildenv_info.define_path("M4", m4_bin) 118 119 # TODO: to remove in conan v2 120 bin_path = os.path.join(self.package_folder, "bin") 121 self.output.info(f"Appending PATH environment variable: {bin_path}") 122 self.env_info.PATH.append(bin_path) 123 self.env_info.M4 = m4_bin 124 [end of recipes/m4/all/conanfile.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/recipes/m4/all/conanfile.py b/recipes/m4/all/conanfile.py --- a/recipes/m4/all/conanfile.py +++ b/recipes/m4/all/conanfile.py @@ -6,12 +6,14 @@ from conan.tools.microsoft import is_msvc, unix_path from conan.tools.scm import Version import os +import shutil -required_conan_version = ">=1.52.0" +required_conan_version = ">=1.55.0" class M4Conan(ConanFile): name = "m4" + package_type = "application" description = "GNU M4 is an implementation of the traditional Unix macro processor" topics = ("macro", "preprocessor") homepage = "https://www.gnu.org/software/m4/" @@ -34,9 +36,9 @@ def build_requirements(self): if self._settings_build.os == "Windows": - if not self.conf.get("tools.microsoft.bash:path", default=False, check_type=bool): - self.tool_requires("msys2/cci.latest") self.win_bash = True + if not self.conf.get("tools.microsoft.bash:path", check_type=str): + self.tool_requires("msys2/cci.latest") def source(self): get(self, **self.conan_data["sources"][self.version], @@ -87,11 +89,12 @@ def _patch_sources(self): apply_conandata_patches(self) - # dummy file for configure - help2man = os.path.join(self.source_folder, "help2man") - save(self, help2man, "#!/usr/bin/env bash\n:") - if os.name == "posix": - os.chmod(help2man, os.stat(help2man).st_mode | 0o111) + if shutil.which("help2man") == None: + # dummy file for configure + help2man = os.path.join(self.source_folder, "help2man") + save(self, help2man, "#!/usr/bin/env bash\n:") + if os.name == "posix": + os.chmod(help2man, os.stat(help2man).st_mode | 0o111) def build(self): self._patch_sources() @@ -102,8 +105,7 @@ def package(self): copy(self, "COPYING", src=self.source_folder, dst=os.path.join(self.package_folder, "licenses")) autotools = Autotools(self) - # TODO: replace by autotools.install() once https://github.com/conan-io/conan/issues/12153 fixed - autotools.install(args=[f"DESTDIR={unix_path(self, self.package_folder)}"]) + autotools.install() rmdir(self, os.path.join(self.package_folder, "share")) def package_info(self):
{"golden_diff": "diff --git a/recipes/m4/all/conanfile.py b/recipes/m4/all/conanfile.py\n--- a/recipes/m4/all/conanfile.py\n+++ b/recipes/m4/all/conanfile.py\n@@ -6,12 +6,14 @@\n from conan.tools.microsoft import is_msvc, unix_path\n from conan.tools.scm import Version\n import os\n+import shutil\n \n-required_conan_version = \">=1.52.0\"\n+required_conan_version = \">=1.55.0\"\n \n \n class M4Conan(ConanFile):\n name = \"m4\"\n+ package_type = \"application\"\n description = \"GNU M4 is an implementation of the traditional Unix macro processor\"\n topics = (\"macro\", \"preprocessor\")\n homepage = \"https://www.gnu.org/software/m4/\"\n@@ -34,9 +36,9 @@\n \n def build_requirements(self):\n if self._settings_build.os == \"Windows\":\n- if not self.conf.get(\"tools.microsoft.bash:path\", default=False, check_type=bool):\n- self.tool_requires(\"msys2/cci.latest\")\n self.win_bash = True\n+ if not self.conf.get(\"tools.microsoft.bash:path\", check_type=str):\n+ self.tool_requires(\"msys2/cci.latest\")\n \n def source(self):\n get(self, **self.conan_data[\"sources\"][self.version],\n@@ -87,11 +89,12 @@\n \n def _patch_sources(self):\n apply_conandata_patches(self)\n- # dummy file for configure\n- help2man = os.path.join(self.source_folder, \"help2man\")\n- save(self, help2man, \"#!/usr/bin/env bash\\n:\")\n- if os.name == \"posix\":\n- os.chmod(help2man, os.stat(help2man).st_mode | 0o111)\n+ if shutil.which(\"help2man\") == None:\n+ # dummy file for configure\n+ help2man = os.path.join(self.source_folder, \"help2man\")\n+ save(self, help2man, \"#!/usr/bin/env bash\\n:\")\n+ if os.name == \"posix\":\n+ os.chmod(help2man, os.stat(help2man).st_mode | 0o111)\n \n def build(self):\n self._patch_sources()\n@@ -102,8 +105,7 @@\n def package(self):\n copy(self, \"COPYING\", src=self.source_folder, dst=os.path.join(self.package_folder, \"licenses\"))\n autotools = Autotools(self)\n- # TODO: replace by autotools.install() once https://github.com/conan-io/conan/issues/12153 fixed\n- autotools.install(args=[f\"DESTDIR={unix_path(self, self.package_folder)}\"])\n+ autotools.install()\n rmdir(self, os.path.join(self.package_folder, \"share\"))\n \n def package_info(self):\n", "issue": "[package] m4/1.4.19: Fails test_package with latest Conan 2.0\n### Description\n\nTrying to build with the latest version of the CCI m4 recipe results in the following error when using Conan 2\r\n\r\n> -------- Installing (downloading, building) binaries... --------\r\n> cmake/3.24.2: Already installed!\r\n> m4/1.4.19: Already installed!\r\n> m4/1.4.19: Appending PATH environment variable: /home/marc/.conan2/p/4c23d7e6a6e4b9b7/p/bin\r\n> WARN: The use of 'env_info' is deprecated in Conan 2.0 and will be removed in Conan 2.X. Please, update your recipes unless you are maintaining compatibility with Conan 1.X\r\n> ERROR: m4/1.4.19: 'NoneType' object has no attribute 'append'. No default values are set for components. 
You are probably trying to manipulate a component attribute in the 'package_info' method without defining it previously\r\n> ERROR: Conan-2.0 test failed for 'm4'\n\n### Package and Environment Details\n\n* Package Name/Version: **m4/1.4.19**\r\n* Operating System+version: **Debian 10**\r\n* Compiler+version: **GCC 8**\r\n* Docker image: **N/A**\r\n* Conan version: **conan 2.0.0-dev beta5+**\r\n* Python version: **Python 3.7.3**\r\n\n\n### Conan profile\n\nProfile host:\r\n[settings]\r\narch=x86_64\r\nbuild_type=Release\r\ncompiler=gcc\r\ncompiler.libcxx=libstdc++11\r\ncompiler.version=8\r\nos=Linux\r\n[options]\r\ncmake*:bootstrap=True\r\n[tool_requires]\r\n!openssl*: cmake/3.24.2\r\n\r\nProfile build:\r\n[settings]\r\narch=x86_64\r\nbuild_type=Release\r\ncompiler=gcc\r\ncompiler.libcxx=libstdc++11\r\ncompiler.version=8\r\nos=Linux\r\n[options]\r\ncmake*:bootstrap=True\r\n[tool_requires]\r\n!openssl*: cmake/3.24.2\r\n\n\n### Steps to reproduce\n\nconan-2.0 test -pr:b tools.jinja -pr:h tools.jinja test_package m4/1.4.19\n\n### Logs\n\n<details><summary>Click to expand log</summary>\r\n\r\n```\r\nconan-2.0 test -pr:b tools.jinja -pr:h tools.jinja test_package m4/1.4.19\r\n\r\n-------- Input profiles --------\r\nProfile host:\r\n[settings]\r\narch=x86_64\r\nbuild_type=Release\r\ncompiler=gcc\r\ncompiler.libcxx=libstdc++11\r\ncompiler.version=8\r\nos=Linux\r\n[options]\r\ncmake*:bootstrap=True\r\n[tool_requires]\r\n!openssl*: cmake/3.24.2\r\n\r\nProfile build:\r\n[settings]\r\narch=x86_64\r\nbuild_type=Release\r\ncompiler=gcc\r\ncompiler.libcxx=libstdc++11\r\ncompiler.version=8\r\nos=Linux\r\n[options]\r\ncmake*:bootstrap=True\r\n[tool_requires]\r\n!openssl*: cmake/3.24.2\r\n\r\n\r\n-------- test_package: Computing dependency graph --------\r\nGraph root\r\n m4/1.4.19 (test package): /tmp/tmp.Hu84j8HryY/m4/test_package/conanfile.py\r\nBuild requirements\r\n cmake/3.24.2#623afae5289cadc0b9f11a8f43eae83b - Cache\r\n m4/1.4.19#d5e4c4ec85145f2a2bfea4bceef5e56a - Cache\r\n openssl/1.1.1s#b304462aeda9923b735e6b37368c233e - Cache\r\n\r\n-------- test_package: Computing necessary packages --------\r\nBuild requirements\r\n cmake/3.24.2#623afae5289cadc0b9f11a8f43eae83b:ccbf287e38142241463d713141c76d0c18207a9d#5e10a6d92143344f25472aa9178b24fc - Cache\r\n m4/1.4.19#d5e4c4ec85145f2a2bfea4bceef5e56a:3593751651824fb813502c69c971267624ced41a#028e06b2ec8f151ba5ee47e640cc2c5c - Cache\r\n openssl/1.1.1s#b304462aeda9923b735e6b37368c233e:896855b1e3b5961bfcc08e699116d9ed588cac00#7fbe50a5f635dbba8a47e75cf8b42165 - Skip\r\n\r\n-------- test_package: Installing packages --------\r\n\r\n-------- Installing (downloading, building) binaries... --------\r\ncmake/3.24.2: Already installed!\r\nm4/1.4.19: Already installed!\r\nm4/1.4.19: Appending PATH environment variable: /home/marc/.conan2/p/4c23d7e6a6e4b9b7/p/bin\r\nWARN: The use of 'env_info' is deprecated in Conan 2.0 and will be removed in Conan 2.X. Please, update your recipes unless you are maintaining compatibility with Conan 1.X\r\nERROR: m4/1.4.19: 'NoneType' object has no attribute 'append'. No default values are set for components. 
You are probably trying to manipulate a component attribute in the 'package_info' method without defining it previously\r\nERROR: Conan-2.0 test failed for 'm4'\r\n```\r\n\r\n</details>\r\n\n", "before_files": [{"content": "from conan import ConanFile\nfrom conan.tools.env import VirtualBuildEnv\nfrom conan.tools.files import apply_conandata_patches, copy, export_conandata_patches, get, rmdir, save\nfrom conan.tools.gnu import Autotools, AutotoolsToolchain\nfrom conan.tools.layout import basic_layout\nfrom conan.tools.microsoft import is_msvc, unix_path\nfrom conan.tools.scm import Version\nimport os\n\nrequired_conan_version = \">=1.52.0\"\n\n\nclass M4Conan(ConanFile):\n name = \"m4\"\n description = \"GNU M4 is an implementation of the traditional Unix macro processor\"\n topics = (\"macro\", \"preprocessor\")\n homepage = \"https://www.gnu.org/software/m4/\"\n url = \"https://github.com/conan-io/conan-center-index\"\n license = \"GPL-3.0-only\"\n settings = \"os\", \"arch\", \"compiler\", \"build_type\"\n\n @property\n def _settings_build(self):\n return getattr(self, \"settings_build\", self.settings)\n\n def export_sources(self):\n export_conandata_patches(self)\n\n def layout(self):\n basic_layout(self, src_folder=\"src\")\n\n def package_id(self):\n del self.info.settings.compiler\n\n def build_requirements(self):\n if self._settings_build.os == \"Windows\":\n if not self.conf.get(\"tools.microsoft.bash:path\", default=False, check_type=bool):\n self.tool_requires(\"msys2/cci.latest\")\n self.win_bash = True\n\n def source(self):\n get(self, **self.conan_data[\"sources\"][self.version],\n destination=self.source_folder, strip_root=True)\n\n def generate(self):\n env = VirtualBuildEnv(self)\n env.generate()\n\n tc = AutotoolsToolchain(self)\n if is_msvc(self):\n tc.extra_cflags.append(\"-FS\")\n # Avoid a `Assertion Failed Dialog Box` during configure with build_type=Debug\n # Visual Studio does not support the %n format flag:\n # https://docs.microsoft.com/en-us/cpp/c-runtime-library/format-specification-syntax-printf-and-wprintf-functions\n # Because the %n format is inherently insecure, it is disabled by default. If %n is encountered in a format string,\n # the invalid parameter handler is invoked, as described in Parameter Validation. 
To enable %n support, see _set_printf_count_output.\n tc.configure_args.extend([\n \"gl_cv_func_printf_directive_n=no\",\n \"gl_cv_func_snprintf_directive_n=no\",\n \"gl_cv_func_snprintf_directive_n=no\",\n ])\n if self.settings.build_type in (\"Debug\", \"RelWithDebInfo\"):\n tc.extra_ldflags.append(\"-PDB\")\n elif self.settings.compiler == \"clang\":\n if Version(self.version) < \"1.4.19\":\n tc.extra_cflags.extend([\n \"-rtlib=compiler-rt\",\n \"-Wno-unused-command-line-argument\",\n ])\n if self.settings.os == \"Windows\":\n tc.configure_args.append(\"ac_cv_func__set_invalid_parameter_handler=yes\")\n env = tc.environment()\n # help2man trick\n env.prepend_path(\"PATH\", self.source_folder)\n # handle msvc\n if is_msvc(self):\n ar_wrapper = unix_path(self, os.path.join(self.source_folder, \"build-aux\", \"ar-lib\"))\n env.define(\"CC\", \"cl -nologo\")\n env.define(\"CXX\", \"cl -nologo\")\n env.define(\"AR\", f\"{ar_wrapper} lib\")\n env.define(\"LD\", \"link\")\n env.define(\"NM\", \"dumpbin -symbols\")\n env.define(\"OBJDUMP\", \":\")\n env.define(\"RANLIB\", \":\")\n env.define(\"STRIP\", \":\")\n tc.generate(env)\n\n def _patch_sources(self):\n apply_conandata_patches(self)\n # dummy file for configure\n help2man = os.path.join(self.source_folder, \"help2man\")\n save(self, help2man, \"#!/usr/bin/env bash\\n:\")\n if os.name == \"posix\":\n os.chmod(help2man, os.stat(help2man).st_mode | 0o111)\n\n def build(self):\n self._patch_sources()\n autotools = Autotools(self)\n autotools.configure()\n autotools.make()\n\n def package(self):\n copy(self, \"COPYING\", src=self.source_folder, dst=os.path.join(self.package_folder, \"licenses\"))\n autotools = Autotools(self)\n # TODO: replace by autotools.install() once https://github.com/conan-io/conan/issues/12153 fixed\n autotools.install(args=[f\"DESTDIR={unix_path(self, self.package_folder)}\"])\n rmdir(self, os.path.join(self.package_folder, \"share\"))\n\n def package_info(self):\n self.cpp_info.libdirs = []\n self.cpp_info.includedirs = []\n\n # M4 environment variable is used by a lot of scripts as a way to override a hard-coded embedded m4 path\n bin_ext = \".exe\" if self.settings.os == \"Windows\" else \"\"\n m4_bin = os.path.join(self.package_folder, \"bin\", f\"m4{bin_ext}\").replace(\"\\\\\", \"/\")\n self.runenv_info.define_path(\"M4\", m4_bin)\n self.buildenv_info.define_path(\"M4\", m4_bin)\n\n # TODO: to remove in conan v2\n bin_path = os.path.join(self.package_folder, \"bin\")\n self.output.info(f\"Appending PATH environment variable: {bin_path}\")\n self.env_info.PATH.append(bin_path)\n self.env_info.M4 = m4_bin\n", "path": "recipes/m4/all/conanfile.py"}]}
3,470
652
gh_patches_debug_38598
rasdani/github-patches
git_diff
apache__airflow-32382
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Add DagProcessor status to health endpoint. ### Description Add DagProcessor status including latest heartbeat to health endpoint similar to Triggerer status added recently. Related PRs. https://github.com/apache/airflow/pull/31529 https://github.com/apache/airflow/pull/27755 ### Use case/motivation It helps in dag processor monitoring ### Related issues _No response_ ### Are you willing to submit a PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://github.com/apache/airflow/blob/main/CODE_OF_CONDUCT.md) </issue> <code> [start of airflow/api_connexion/schemas/health_schema.py] 1 # Licensed to the Apache Software Foundation (ASF) under one 2 # or more contributor license agreements. See the NOTICE file 3 # distributed with this work for additional information 4 # regarding copyright ownership. The ASF licenses this file 5 # to you under the Apache License, Version 2.0 (the 6 # "License"); you may not use this file except in compliance 7 # with the License. You may obtain a copy of the License at 8 # 9 # http://www.apache.org/licenses/LICENSE-2.0 10 # 11 # Unless required by applicable law or agreed to in writing, 12 # software distributed under the License is distributed on an 13 # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY 14 # KIND, either express or implied. See the License for the 15 # specific language governing permissions and limitations 16 # under the License. 17 from __future__ import annotations 18 19 from marshmallow import Schema, fields 20 21 22 class BaseInfoSchema(Schema): 23 """Base status field for metadatabase and scheduler.""" 24 25 status = fields.String(dump_only=True) 26 27 28 class MetaDatabaseInfoSchema(BaseInfoSchema): 29 """Schema for Metadatabase info.""" 30 31 32 class SchedulerInfoSchema(BaseInfoSchema): 33 """Schema for Scheduler info.""" 34 35 latest_scheduler_heartbeat = fields.String(dump_only=True) 36 37 38 class TriggererInfoSchema(BaseInfoSchema): 39 """Schema for Triggerer info.""" 40 41 latest_triggerer_heartbeat = fields.String(dump_only=True) 42 43 44 class HealthInfoSchema(Schema): 45 """Schema for the Health endpoint.""" 46 47 metadatabase = fields.Nested(MetaDatabaseInfoSchema) 48 scheduler = fields.Nested(SchedulerInfoSchema) 49 triggerer = fields.Nested(TriggererInfoSchema) 50 51 52 health_schema = HealthInfoSchema() 53 [end of airflow/api_connexion/schemas/health_schema.py] [start of airflow/api/common/airflow_health.py] 1 # Licensed to the Apache Software Foundation (ASF) under one 2 # or more contributor license agreements. See the NOTICE file 3 # distributed with this work for additional information 4 # regarding copyright ownership. The ASF licenses this file 5 # to you under the Apache License, Version 2.0 (the 6 # "License"); you may not use this file except in compliance 7 # with the License. You may obtain a copy of the License at 8 # 9 # http://www.apache.org/licenses/LICENSE-2.0 10 # 11 # Unless required by applicable law or agreed to in writing, 12 # software distributed under the License is distributed on an 13 # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY 14 # KIND, either express or implied. See the License for the 15 # specific language governing permissions and limitations 16 # under the License. 
17 from __future__ import annotations 18 19 from typing import Any 20 21 from airflow.jobs.scheduler_job_runner import SchedulerJobRunner 22 from airflow.jobs.triggerer_job_runner import TriggererJobRunner 23 24 HEALTHY = "healthy" 25 UNHEALTHY = "unhealthy" 26 27 28 def get_airflow_health() -> dict[str, Any]: 29 """Get the health for Airflow metadatabase, scheduler and triggerer.""" 30 metadatabase_status = HEALTHY 31 latest_scheduler_heartbeat = None 32 latest_triggerer_heartbeat = None 33 scheduler_status = UNHEALTHY 34 triggerer_status: str | None = UNHEALTHY 35 36 try: 37 latest_scheduler_job = SchedulerJobRunner.most_recent_job() 38 39 if latest_scheduler_job: 40 latest_scheduler_heartbeat = latest_scheduler_job.latest_heartbeat.isoformat() 41 if latest_scheduler_job.is_alive(): 42 scheduler_status = HEALTHY 43 except Exception: 44 metadatabase_status = UNHEALTHY 45 46 try: 47 latest_triggerer_job = TriggererJobRunner.most_recent_job() 48 49 if latest_triggerer_job: 50 latest_triggerer_heartbeat = latest_triggerer_job.latest_heartbeat.isoformat() 51 if latest_triggerer_job.is_alive(): 52 triggerer_status = HEALTHY 53 else: 54 triggerer_status = None 55 except Exception: 56 metadatabase_status = UNHEALTHY 57 58 airflow_health_status = { 59 "metadatabase": {"status": metadatabase_status}, 60 "scheduler": { 61 "status": scheduler_status, 62 "latest_scheduler_heartbeat": latest_scheduler_heartbeat, 63 }, 64 "triggerer": { 65 "status": triggerer_status, 66 "latest_triggerer_heartbeat": latest_triggerer_heartbeat, 67 }, 68 } 69 70 return airflow_health_status 71 [end of airflow/api/common/airflow_health.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/airflow/api/common/airflow_health.py b/airflow/api/common/airflow_health.py --- a/airflow/api/common/airflow_health.py +++ b/airflow/api/common/airflow_health.py @@ -18,6 +18,7 @@ from typing import Any +from airflow.jobs.dag_processor_job_runner import DagProcessorJobRunner from airflow.jobs.scheduler_job_runner import SchedulerJobRunner from airflow.jobs.triggerer_job_runner import TriggererJobRunner @@ -30,8 +31,10 @@ metadatabase_status = HEALTHY latest_scheduler_heartbeat = None latest_triggerer_heartbeat = None + latest_dag_processor_heartbeat = None scheduler_status = UNHEALTHY triggerer_status: str | None = UNHEALTHY + dag_processor_status: str | None = UNHEALTHY try: latest_scheduler_job = SchedulerJobRunner.most_recent_job() @@ -55,6 +58,18 @@ except Exception: metadatabase_status = UNHEALTHY + try: + latest_dag_processor_job = DagProcessorJobRunner.most_recent_job() + + if latest_dag_processor_job: + latest_dag_processor_heartbeat = latest_dag_processor_job.latest_heartbeat.isoformat() + if latest_dag_processor_job.is_alive(): + dag_processor_status = HEALTHY + else: + dag_processor_status = None + except Exception: + metadatabase_status = UNHEALTHY + airflow_health_status = { "metadatabase": {"status": metadatabase_status}, "scheduler": { @@ -65,6 +80,10 @@ "status": triggerer_status, "latest_triggerer_heartbeat": latest_triggerer_heartbeat, }, + "dag_processor": { + "status": dag_processor_status, + "latest_dag_processor_heartbeat": latest_dag_processor_heartbeat, + }, } return airflow_health_status diff --git a/airflow/api_connexion/schemas/health_schema.py b/airflow/api_connexion/schemas/health_schema.py --- a/airflow/api_connexion/schemas/health_schema.py +++ b/airflow/api_connexion/schemas/health_schema.py @@ -41,12 +41,19 @@ latest_triggerer_heartbeat = fields.String(dump_only=True) +class DagProcessorInfoSchema(BaseInfoSchema): + """Schema for DagProcessor info.""" + + latest_dag_processor_heartbeat = fields.String(dump_only=True) + + class HealthInfoSchema(Schema): """Schema for the Health endpoint.""" metadatabase = fields.Nested(MetaDatabaseInfoSchema) scheduler = fields.Nested(SchedulerInfoSchema) triggerer = fields.Nested(TriggererInfoSchema) + dag_processor = fields.Nested(DagProcessorInfoSchema) health_schema = HealthInfoSchema()
{"golden_diff": "diff --git a/airflow/api/common/airflow_health.py b/airflow/api/common/airflow_health.py\n--- a/airflow/api/common/airflow_health.py\n+++ b/airflow/api/common/airflow_health.py\n@@ -18,6 +18,7 @@\n \n from typing import Any\n \n+from airflow.jobs.dag_processor_job_runner import DagProcessorJobRunner\n from airflow.jobs.scheduler_job_runner import SchedulerJobRunner\n from airflow.jobs.triggerer_job_runner import TriggererJobRunner\n \n@@ -30,8 +31,10 @@\n metadatabase_status = HEALTHY\n latest_scheduler_heartbeat = None\n latest_triggerer_heartbeat = None\n+ latest_dag_processor_heartbeat = None\n scheduler_status = UNHEALTHY\n triggerer_status: str | None = UNHEALTHY\n+ dag_processor_status: str | None = UNHEALTHY\n \n try:\n latest_scheduler_job = SchedulerJobRunner.most_recent_job()\n@@ -55,6 +58,18 @@\n except Exception:\n metadatabase_status = UNHEALTHY\n \n+ try:\n+ latest_dag_processor_job = DagProcessorJobRunner.most_recent_job()\n+\n+ if latest_dag_processor_job:\n+ latest_dag_processor_heartbeat = latest_dag_processor_job.latest_heartbeat.isoformat()\n+ if latest_dag_processor_job.is_alive():\n+ dag_processor_status = HEALTHY\n+ else:\n+ dag_processor_status = None\n+ except Exception:\n+ metadatabase_status = UNHEALTHY\n+\n airflow_health_status = {\n \"metadatabase\": {\"status\": metadatabase_status},\n \"scheduler\": {\n@@ -65,6 +80,10 @@\n \"status\": triggerer_status,\n \"latest_triggerer_heartbeat\": latest_triggerer_heartbeat,\n },\n+ \"dag_processor\": {\n+ \"status\": dag_processor_status,\n+ \"latest_dag_processor_heartbeat\": latest_dag_processor_heartbeat,\n+ },\n }\n \n return airflow_health_status\ndiff --git a/airflow/api_connexion/schemas/health_schema.py b/airflow/api_connexion/schemas/health_schema.py\n--- a/airflow/api_connexion/schemas/health_schema.py\n+++ b/airflow/api_connexion/schemas/health_schema.py\n@@ -41,12 +41,19 @@\n latest_triggerer_heartbeat = fields.String(dump_only=True)\n \n \n+class DagProcessorInfoSchema(BaseInfoSchema):\n+ \"\"\"Schema for DagProcessor info.\"\"\"\n+\n+ latest_dag_processor_heartbeat = fields.String(dump_only=True)\n+\n+\n class HealthInfoSchema(Schema):\n \"\"\"Schema for the Health endpoint.\"\"\"\n \n metadatabase = fields.Nested(MetaDatabaseInfoSchema)\n scheduler = fields.Nested(SchedulerInfoSchema)\n triggerer = fields.Nested(TriggererInfoSchema)\n+ dag_processor = fields.Nested(DagProcessorInfoSchema)\n \n \n health_schema = HealthInfoSchema()\n", "issue": "Add DagProcessor status to health endpoint.\n### Description\n\nAdd DagProcessor status including latest heartbeat to health endpoint similar to Triggerer status added recently. Related PRs.\r\n\r\nhttps://github.com/apache/airflow/pull/31529\r\nhttps://github.com/apache/airflow/pull/27755\n\n### Use case/motivation\n\nIt helps in dag processor monitoring \n\n### Related issues\n\n_No response_\n\n### Are you willing to submit a PR?\n\n- [X] Yes I am willing to submit a PR!\n\n### Code of Conduct\n\n- [X] I agree to follow this project's [Code of Conduct](https://github.com/apache/airflow/blob/main/CODE_OF_CONDUCT.md)\n\n", "before_files": [{"content": "# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. 
You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\nfrom __future__ import annotations\n\nfrom marshmallow import Schema, fields\n\n\nclass BaseInfoSchema(Schema):\n \"\"\"Base status field for metadatabase and scheduler.\"\"\"\n\n status = fields.String(dump_only=True)\n\n\nclass MetaDatabaseInfoSchema(BaseInfoSchema):\n \"\"\"Schema for Metadatabase info.\"\"\"\n\n\nclass SchedulerInfoSchema(BaseInfoSchema):\n \"\"\"Schema for Scheduler info.\"\"\"\n\n latest_scheduler_heartbeat = fields.String(dump_only=True)\n\n\nclass TriggererInfoSchema(BaseInfoSchema):\n \"\"\"Schema for Triggerer info.\"\"\"\n\n latest_triggerer_heartbeat = fields.String(dump_only=True)\n\n\nclass HealthInfoSchema(Schema):\n \"\"\"Schema for the Health endpoint.\"\"\"\n\n metadatabase = fields.Nested(MetaDatabaseInfoSchema)\n scheduler = fields.Nested(SchedulerInfoSchema)\n triggerer = fields.Nested(TriggererInfoSchema)\n\n\nhealth_schema = HealthInfoSchema()\n", "path": "airflow/api_connexion/schemas/health_schema.py"}, {"content": "# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. 
See the License for the\n# specific language governing permissions and limitations\n# under the License.\nfrom __future__ import annotations\n\nfrom typing import Any\n\nfrom airflow.jobs.scheduler_job_runner import SchedulerJobRunner\nfrom airflow.jobs.triggerer_job_runner import TriggererJobRunner\n\nHEALTHY = \"healthy\"\nUNHEALTHY = \"unhealthy\"\n\n\ndef get_airflow_health() -> dict[str, Any]:\n \"\"\"Get the health for Airflow metadatabase, scheduler and triggerer.\"\"\"\n metadatabase_status = HEALTHY\n latest_scheduler_heartbeat = None\n latest_triggerer_heartbeat = None\n scheduler_status = UNHEALTHY\n triggerer_status: str | None = UNHEALTHY\n\n try:\n latest_scheduler_job = SchedulerJobRunner.most_recent_job()\n\n if latest_scheduler_job:\n latest_scheduler_heartbeat = latest_scheduler_job.latest_heartbeat.isoformat()\n if latest_scheduler_job.is_alive():\n scheduler_status = HEALTHY\n except Exception:\n metadatabase_status = UNHEALTHY\n\n try:\n latest_triggerer_job = TriggererJobRunner.most_recent_job()\n\n if latest_triggerer_job:\n latest_triggerer_heartbeat = latest_triggerer_job.latest_heartbeat.isoformat()\n if latest_triggerer_job.is_alive():\n triggerer_status = HEALTHY\n else:\n triggerer_status = None\n except Exception:\n metadatabase_status = UNHEALTHY\n\n airflow_health_status = {\n \"metadatabase\": {\"status\": metadatabase_status},\n \"scheduler\": {\n \"status\": scheduler_status,\n \"latest_scheduler_heartbeat\": latest_scheduler_heartbeat,\n },\n \"triggerer\": {\n \"status\": triggerer_status,\n \"latest_triggerer_heartbeat\": latest_triggerer_heartbeat,\n },\n }\n\n return airflow_health_status\n", "path": "airflow/api/common/airflow_health.py"}]}
1,867
652
gh_patches_debug_31339
rasdani/github-patches
git_diff
pwndbg__pwndbg-1853
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Better exception handling for unmet dependencies Some of our users update Pwndbg only by `git pull`ing the newest version and not re-running `setup.sh`. If that happens and when we updated our dependencies recently, the user may end up with broken Pwndbg such as (likely) happened in https://github.com/pwndbg/pwndbg/issues/1790. We should do the two following things: 1) When we detect/handle an exception in the [`pwndbg/exception.py:handle` function]( https://github.com/pwndbg/pwndbg/blob/de4acb2f40da3b0c97353a8b680ffa6440346f7b/pwndbg/exception.py#L61-L96), we should check the installed dependencies versions against the required ones and if they do not match, we should inform the user to update them. 2) We may want to display installed dependencies versions within the `bugreport` command output? </issue> <code> [start of pwndbg/exception.py] 1 from __future__ import annotations 2 3 import functools 4 import sys 5 import traceback 6 7 import gdb 8 9 import pwndbg.lib.cache 10 import pwndbg.lib.stdio 11 from pwndbg.color import message 12 from pwndbg.gdblib import config 13 14 with pwndbg.lib.stdio.stdio: 15 try: 16 import ipdb as pdb 17 except ImportError: 18 import pdb 19 try: 20 from rich.console import Console 21 22 _rich_console = Console() 23 except ImportError: 24 _rich_console = None 25 26 verbose = config.add_param( 27 "exception-verbose", 28 False, 29 "whether to print a full stacktrace for exceptions raised in Pwndbg commands", 30 ) 31 debug = config.add_param( 32 "exception-debugger", False, "whether to debug exceptions raised in Pwndbg commands" 33 ) 34 35 36 @pwndbg.lib.cache.cache_until("forever") 37 def inform_report_issue(exception_msg) -> None: 38 """ 39 Informs user that he can report an issue. 40 The use of caching makes it reporting only once for a given exception message. 41 """ 42 print( 43 message.notice( 44 "If that is an issue, you can report it on https://github.com/pwndbg/pwndbg/issues\n" 45 "(Please don't forget to search if it hasn't been reported before)\n" 46 "To generate the report and open a browser, you may run " 47 ) 48 + message.hint("`bugreport --run-browser`") 49 + message.notice("\nPS: Pull requests are welcome") 50 ) 51 52 53 def inform_verbose_and_debug() -> None: 54 print( 55 message.notice("For more info invoke `") 56 + message.hint("set exception-verbose on") 57 + message.notice("` and rerun the command\nor debug it by yourself with `") 58 + message.hint("set exception-debugger on") 59 + message.notice("`") 60 ) 61 62 63 def handle(name="Error"): 64 """Displays an exception to the user, optionally displaying a full traceback 65 and spawning an interactive post-moretem debugger. 66 67 Notes: 68 - ``set exception-verbose on`` enables stack traces. 69 - ``set exception-debugger on`` enables the post-mortem debugger. 70 """ 71 72 # This is for unit tests so they fail on exceptions instead of displaying them. 
73 if getattr(sys, "_pwndbg_unittest_run", False) is True: 74 E, V, T = sys.exc_info() 75 e = E(V) 76 e.__traceback__ = T 77 raise e 78 79 # Display the error 80 if debug or verbose: 81 exception_msg = traceback.format_exc() 82 if _rich_console: 83 _rich_console.print_exception() 84 else: 85 print(exception_msg) 86 inform_report_issue(exception_msg) 87 88 else: 89 exc_type, exc_value, exc_traceback = sys.exc_info() 90 91 print(message.error(f"Exception occurred: {name}: {exc_value} ({exc_type})")) 92 93 inform_verbose_and_debug() 94 95 # Break into the interactive debugger 96 if debug: 97 with pwndbg.lib.stdio.stdio: 98 pdb.post_mortem() 99 100 101 @functools.wraps(pdb.set_trace) 102 def set_trace() -> None: 103 """Enable sane debugging in Pwndbg by switching to the "real" stdio.""" 104 debugger = pdb.Pdb( 105 stdin=sys.__stdin__, stdout=sys.__stdout__, skip=["pwndbg.lib.stdio", "pwndbg.exception"] 106 ) 107 debugger.set_trace() 108 109 110 pdb.set_trace = set_trace 111 112 113 @config.trigger(verbose, debug) 114 def update() -> None: 115 if verbose or debug: 116 command = "set python print-stack full" 117 else: 118 command = "set python print-stack message" 119 120 gdb.execute(command, from_tty=True, to_string=True) 121 [end of pwndbg/exception.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/pwndbg/exception.py b/pwndbg/exception.py --- a/pwndbg/exception.py +++ b/pwndbg/exception.py @@ -1,10 +1,12 @@ from __future__ import annotations import functools +import os import sys import traceback import gdb +import pkg_resources import pwndbg.lib.cache import pwndbg.lib.stdio @@ -33,6 +35,41 @@ ) +def inform_unmet_dependencies(errors) -> None: + """ + Informs user about unmet dependencies + """ + msg = message.error("You appear to have unmet Pwndbg dependencies.\n") + for e in errors: + if isinstance(e, pkg_resources.DistributionNotFound): + msg += message.notice(f"- required {e.args[0]}, but not installed\n") + else: + msg += message.notice(f"- required {e.args[1]}, installed: {e.args[0]}\n") + msg += message.notice("Consider running: ") + msg += message.hint("`setup.sh` ") + msg += message.notice("from Pwndbg project directory.\n") + print(msg) + + [email protected]_until("forever") +def check_dependencies(): + """ + Checks if there are any unmet dependencies in requirements.txt + """ + project_path = os.path.dirname(os.path.abspath(__file__)) + requirements_path = os.path.join(project_path, os.pardir, "requirements.txt") + with open(requirements_path, "r") as f: + errors = [] + for line in f.readlines(): + try: + pkg_resources.require(line) + except (pkg_resources.VersionConflict, pkg_resources.DistributionNotFound) as e: + errors.append(e) + + if errors: + inform_unmet_dependencies(errors) + + @pwndbg.lib.cache.cache_until("forever") def inform_report_issue(exception_msg) -> None: """ @@ -76,6 +113,9 @@ e.__traceback__ = T raise e + # Check dependencies against requirements.txt and warn user + check_dependencies() + # Display the error if debug or verbose: exception_msg = traceback.format_exc()
{"golden_diff": "diff --git a/pwndbg/exception.py b/pwndbg/exception.py\n--- a/pwndbg/exception.py\n+++ b/pwndbg/exception.py\n@@ -1,10 +1,12 @@\n from __future__ import annotations\n \n import functools\n+import os\n import sys\n import traceback\n \n import gdb\n+import pkg_resources\n \n import pwndbg.lib.cache\n import pwndbg.lib.stdio\n@@ -33,6 +35,41 @@\n )\n \n \n+def inform_unmet_dependencies(errors) -> None:\n+ \"\"\"\n+ Informs user about unmet dependencies\n+ \"\"\"\n+ msg = message.error(\"You appear to have unmet Pwndbg dependencies.\\n\")\n+ for e in errors:\n+ if isinstance(e, pkg_resources.DistributionNotFound):\n+ msg += message.notice(f\"- required {e.args[0]}, but not installed\\n\")\n+ else:\n+ msg += message.notice(f\"- required {e.args[1]}, installed: {e.args[0]}\\n\")\n+ msg += message.notice(\"Consider running: \")\n+ msg += message.hint(\"`setup.sh` \")\n+ msg += message.notice(\"from Pwndbg project directory.\\n\")\n+ print(msg)\n+\n+\[email protected]_until(\"forever\")\n+def check_dependencies():\n+ \"\"\"\n+ Checks if there are any unmet dependencies in requirements.txt\n+ \"\"\"\n+ project_path = os.path.dirname(os.path.abspath(__file__))\n+ requirements_path = os.path.join(project_path, os.pardir, \"requirements.txt\")\n+ with open(requirements_path, \"r\") as f:\n+ errors = []\n+ for line in f.readlines():\n+ try:\n+ pkg_resources.require(line)\n+ except (pkg_resources.VersionConflict, pkg_resources.DistributionNotFound) as e:\n+ errors.append(e)\n+\n+ if errors:\n+ inform_unmet_dependencies(errors)\n+\n+\n @pwndbg.lib.cache.cache_until(\"forever\")\n def inform_report_issue(exception_msg) -> None:\n \"\"\"\n@@ -76,6 +113,9 @@\n e.__traceback__ = T\n raise e\n \n+ # Check dependencies against requirements.txt and warn user\n+ check_dependencies()\n+\n # Display the error\n if debug or verbose:\n exception_msg = traceback.format_exc()\n", "issue": "Better exception handling for unmet dependencies\nSome of our users update Pwndbg only by `git pull`ing the newest version and not re-running `setup.sh`. If that happens and when we updated our dependencies recently, the user may end up with broken Pwndbg such as (likely) happened in https://github.com/pwndbg/pwndbg/issues/1790.\r\n\r\nWe should do the two following things:\r\n1) When we detect/handle an exception in the [`pwndbg/exception.py:handle` function]( https://github.com/pwndbg/pwndbg/blob/de4acb2f40da3b0c97353a8b680ffa6440346f7b/pwndbg/exception.py#L61-L96), we should check the installed dependencies versions against the required ones and if they do not match, we should inform the user to update them.\r\n2) We may want to display installed dependencies versions within the `bugreport` command output? 
\n", "before_files": [{"content": "from __future__ import annotations\n\nimport functools\nimport sys\nimport traceback\n\nimport gdb\n\nimport pwndbg.lib.cache\nimport pwndbg.lib.stdio\nfrom pwndbg.color import message\nfrom pwndbg.gdblib import config\n\nwith pwndbg.lib.stdio.stdio:\n try:\n import ipdb as pdb\n except ImportError:\n import pdb\n try:\n from rich.console import Console\n\n _rich_console = Console()\n except ImportError:\n _rich_console = None\n\nverbose = config.add_param(\n \"exception-verbose\",\n False,\n \"whether to print a full stacktrace for exceptions raised in Pwndbg commands\",\n)\ndebug = config.add_param(\n \"exception-debugger\", False, \"whether to debug exceptions raised in Pwndbg commands\"\n)\n\n\[email protected]_until(\"forever\")\ndef inform_report_issue(exception_msg) -> None:\n \"\"\"\n Informs user that he can report an issue.\n The use of caching makes it reporting only once for a given exception message.\n \"\"\"\n print(\n message.notice(\n \"If that is an issue, you can report it on https://github.com/pwndbg/pwndbg/issues\\n\"\n \"(Please don't forget to search if it hasn't been reported before)\\n\"\n \"To generate the report and open a browser, you may run \"\n )\n + message.hint(\"`bugreport --run-browser`\")\n + message.notice(\"\\nPS: Pull requests are welcome\")\n )\n\n\ndef inform_verbose_and_debug() -> None:\n print(\n message.notice(\"For more info invoke `\")\n + message.hint(\"set exception-verbose on\")\n + message.notice(\"` and rerun the command\\nor debug it by yourself with `\")\n + message.hint(\"set exception-debugger on\")\n + message.notice(\"`\")\n )\n\n\ndef handle(name=\"Error\"):\n \"\"\"Displays an exception to the user, optionally displaying a full traceback\n and spawning an interactive post-moretem debugger.\n\n Notes:\n - ``set exception-verbose on`` enables stack traces.\n - ``set exception-debugger on`` enables the post-mortem debugger.\n \"\"\"\n\n # This is for unit tests so they fail on exceptions instead of displaying them.\n if getattr(sys, \"_pwndbg_unittest_run\", False) is True:\n E, V, T = sys.exc_info()\n e = E(V)\n e.__traceback__ = T\n raise e\n\n # Display the error\n if debug or verbose:\n exception_msg = traceback.format_exc()\n if _rich_console:\n _rich_console.print_exception()\n else:\n print(exception_msg)\n inform_report_issue(exception_msg)\n\n else:\n exc_type, exc_value, exc_traceback = sys.exc_info()\n\n print(message.error(f\"Exception occurred: {name}: {exc_value} ({exc_type})\"))\n\n inform_verbose_and_debug()\n\n # Break into the interactive debugger\n if debug:\n with pwndbg.lib.stdio.stdio:\n pdb.post_mortem()\n\n\[email protected](pdb.set_trace)\ndef set_trace() -> None:\n \"\"\"Enable sane debugging in Pwndbg by switching to the \"real\" stdio.\"\"\"\n debugger = pdb.Pdb(\n stdin=sys.__stdin__, stdout=sys.__stdout__, skip=[\"pwndbg.lib.stdio\", \"pwndbg.exception\"]\n )\n debugger.set_trace()\n\n\npdb.set_trace = set_trace\n\n\[email protected](verbose, debug)\ndef update() -> None:\n if verbose or debug:\n command = \"set python print-stack full\"\n else:\n command = \"set python print-stack message\"\n\n gdb.execute(command, from_tty=True, to_string=True)\n", "path": "pwndbg/exception.py"}]}
1,818
506
gh_patches_debug_19367
rasdani/github-patches
git_diff
pypa__pip-2028
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Use of uninitialised variable in pip/commands/list.py The BestVersionAlreadyInstalled case of find_packages_latests_versions() does not set the variable remote_version_parsed, thus it could be used uninitialised. </issue> <code> [start of pip/commands/list.py] 1 from __future__ import absolute_import 2 3 import logging 4 import warnings 5 6 from pip.basecommand import Command 7 from pip.exceptions import DistributionNotFound, BestVersionAlreadyInstalled 8 from pip.index import PackageFinder 9 from pip.req import InstallRequirement 10 from pip.utils import get_installed_distributions, dist_is_editable 11 from pip.utils.deprecation import RemovedInPip17Warning 12 from pip.cmdoptions import make_option_group, index_group 13 14 15 logger = logging.getLogger(__name__) 16 17 18 class ListCommand(Command): 19 """ 20 List installed packages, including editables. 21 22 Packages are listed in a case-insensitive sorted order. 23 """ 24 name = 'list' 25 usage = """ 26 %prog [options]""" 27 summary = 'List installed packages.' 28 29 def __init__(self, *args, **kw): 30 super(ListCommand, self).__init__(*args, **kw) 31 32 cmd_opts = self.cmd_opts 33 34 cmd_opts.add_option( 35 '-o', '--outdated', 36 action='store_true', 37 default=False, 38 help='List outdated packages (excluding editables)') 39 cmd_opts.add_option( 40 '-u', '--uptodate', 41 action='store_true', 42 default=False, 43 help='List uptodate packages (excluding editables)') 44 cmd_opts.add_option( 45 '-e', '--editable', 46 action='store_true', 47 default=False, 48 help='List editable projects.') 49 cmd_opts.add_option( 50 '-l', '--local', 51 action='store_true', 52 default=False, 53 help=('If in a virtualenv that has global access, do not list ' 54 'globally-installed packages.'), 55 ) 56 57 cmd_opts.add_option( 58 '--pre', 59 action='store_true', 60 default=False, 61 help=("Include pre-release and development versions. By default, " 62 "pip only finds stable versions."), 63 ) 64 65 index_opts = make_option_group(index_group, self.parser) 66 67 self.parser.insert_option_group(0, index_opts) 68 self.parser.insert_option_group(0, cmd_opts) 69 70 def _build_package_finder(self, options, index_urls, session): 71 """ 72 Create a package finder appropriate to this list command. 
73 """ 74 return PackageFinder( 75 find_links=options.find_links, 76 index_urls=index_urls, 77 allow_external=options.allow_external, 78 allow_unverified=options.allow_unverified, 79 allow_all_external=options.allow_all_external, 80 allow_all_prereleases=options.pre, 81 process_dependency_links=options.process_dependency_links, 82 session=session, 83 ) 84 85 def run(self, options, args): 86 if options.outdated: 87 self.run_outdated(options) 88 elif options.uptodate: 89 self.run_uptodate(options) 90 elif options.editable: 91 self.run_editables(options) 92 else: 93 self.run_listing(options) 94 95 def run_outdated(self, options): 96 for dist, remote_version_raw, remote_version_parsed in \ 97 self.find_packages_latests_versions(options): 98 if remote_version_parsed > dist.parsed_version: 99 logger.info( 100 '%s (Current: %s Latest: %s)', 101 dist.project_name, dist.version, remote_version_raw, 102 ) 103 104 def find_packages_latests_versions(self, options): 105 index_urls = [options.index_url] + options.extra_index_urls 106 if options.no_index: 107 logger.info('Ignoring indexes: %s', ','.join(index_urls)) 108 index_urls = [] 109 110 if options.use_mirrors: 111 warnings.warn( 112 "--use-mirrors has been deprecated and will be removed in the " 113 "future. Explicit uses of --index-url and/or --extra-index-url" 114 " is suggested.", 115 RemovedInPip17Warning, 116 ) 117 118 if options.mirrors: 119 warnings.warn( 120 "--mirrors has been deprecated and will be removed in the " 121 "future. Explicit uses of --index-url and/or --extra-index-url" 122 " is suggested.", 123 RemovedInPip17Warning, 124 ) 125 index_urls += options.mirrors 126 127 dependency_links = [] 128 for dist in get_installed_distributions(local_only=options.local): 129 if dist.has_metadata('dependency_links.txt'): 130 dependency_links.extend( 131 dist.get_metadata_lines('dependency_links.txt'), 132 ) 133 134 with self._build_session(options) as session: 135 finder = self._build_package_finder(options, index_urls, session) 136 finder.add_dependency_links(dependency_links) 137 138 installed_packages = get_installed_distributions( 139 local_only=options.local, 140 include_editables=False, 141 ) 142 for dist in installed_packages: 143 req = InstallRequirement.from_line(dist.key, None) 144 try: 145 link = finder.find_requirement(req, True) 146 147 # If link is None, means installed version is most 148 # up-to-date 149 if link is None: 150 continue 151 except DistributionNotFound: 152 continue 153 except BestVersionAlreadyInstalled: 154 remote_version = req.installed_version 155 else: 156 # It might be a good idea that link or finder had a public 157 # method that returned version 158 remote_version = finder._link_package_versions( 159 link, req.name 160 )[0] 161 remote_version_raw = remote_version[2] 162 remote_version_parsed = remote_version[0] 163 yield dist, remote_version_raw, remote_version_parsed 164 165 def run_listing(self, options): 166 installed_packages = get_installed_distributions( 167 local_only=options.local, 168 ) 169 self.output_package_listing(installed_packages) 170 171 def run_editables(self, options): 172 installed_packages = get_installed_distributions( 173 local_only=options.local, 174 editables_only=True, 175 ) 176 self.output_package_listing(installed_packages) 177 178 def output_package_listing(self, installed_packages): 179 installed_packages = sorted( 180 installed_packages, 181 key=lambda dist: dist.project_name.lower(), 182 ) 183 for dist in installed_packages: 184 if dist_is_editable(dist): 185 line = '%s 
(%s, %s)' % ( 186 dist.project_name, 187 dist.version, 188 dist.location, 189 ) 190 else: 191 line = '%s (%s)' % (dist.project_name, dist.version) 192 logger.info(line) 193 194 def run_uptodate(self, options): 195 uptodate = [] 196 for dist, remote_version_raw, remote_version_parsed in \ 197 self.find_packages_latests_versions(options): 198 if dist.parsed_version == remote_version_parsed: 199 uptodate.append(dist) 200 self.output_package_listing(uptodate) 201 [end of pip/commands/list.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/pip/commands/list.py b/pip/commands/list.py --- a/pip/commands/list.py +++ b/pip/commands/list.py @@ -4,7 +4,7 @@ import warnings from pip.basecommand import Command -from pip.exceptions import DistributionNotFound, BestVersionAlreadyInstalled +from pip.exceptions import DistributionNotFound from pip.index import PackageFinder from pip.req import InstallRequirement from pip.utils import get_installed_distributions, dist_is_editable @@ -150,8 +150,6 @@ continue except DistributionNotFound: continue - except BestVersionAlreadyInstalled: - remote_version = req.installed_version else: # It might be a good idea that link or finder had a public # method that returned version
{"golden_diff": "diff --git a/pip/commands/list.py b/pip/commands/list.py\n--- a/pip/commands/list.py\n+++ b/pip/commands/list.py\n@@ -4,7 +4,7 @@\n import warnings\n \n from pip.basecommand import Command\n-from pip.exceptions import DistributionNotFound, BestVersionAlreadyInstalled\n+from pip.exceptions import DistributionNotFound\n from pip.index import PackageFinder\n from pip.req import InstallRequirement\n from pip.utils import get_installed_distributions, dist_is_editable\n@@ -150,8 +150,6 @@\n continue\n except DistributionNotFound:\n continue\n- except BestVersionAlreadyInstalled:\n- remote_version = req.installed_version\n else:\n # It might be a good idea that link or finder had a public\n # method that returned version\n", "issue": "Use of uninitialised variable in pip/commands/list.py\nThe BestVersionAlreadyInstalled case of find_packages_latests_versions() does not set the variable remote_version_parsed, thus it could be used uninitialised.\n\n", "before_files": [{"content": "from __future__ import absolute_import\n\nimport logging\nimport warnings\n\nfrom pip.basecommand import Command\nfrom pip.exceptions import DistributionNotFound, BestVersionAlreadyInstalled\nfrom pip.index import PackageFinder\nfrom pip.req import InstallRequirement\nfrom pip.utils import get_installed_distributions, dist_is_editable\nfrom pip.utils.deprecation import RemovedInPip17Warning\nfrom pip.cmdoptions import make_option_group, index_group\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass ListCommand(Command):\n \"\"\"\n List installed packages, including editables.\n\n Packages are listed in a case-insensitive sorted order.\n \"\"\"\n name = 'list'\n usage = \"\"\"\n %prog [options]\"\"\"\n summary = 'List installed packages.'\n\n def __init__(self, *args, **kw):\n super(ListCommand, self).__init__(*args, **kw)\n\n cmd_opts = self.cmd_opts\n\n cmd_opts.add_option(\n '-o', '--outdated',\n action='store_true',\n default=False,\n help='List outdated packages (excluding editables)')\n cmd_opts.add_option(\n '-u', '--uptodate',\n action='store_true',\n default=False,\n help='List uptodate packages (excluding editables)')\n cmd_opts.add_option(\n '-e', '--editable',\n action='store_true',\n default=False,\n help='List editable projects.')\n cmd_opts.add_option(\n '-l', '--local',\n action='store_true',\n default=False,\n help=('If in a virtualenv that has global access, do not list '\n 'globally-installed packages.'),\n )\n\n cmd_opts.add_option(\n '--pre',\n action='store_true',\n default=False,\n help=(\"Include pre-release and development versions. 
By default, \"\n \"pip only finds stable versions.\"),\n )\n\n index_opts = make_option_group(index_group, self.parser)\n\n self.parser.insert_option_group(0, index_opts)\n self.parser.insert_option_group(0, cmd_opts)\n\n def _build_package_finder(self, options, index_urls, session):\n \"\"\"\n Create a package finder appropriate to this list command.\n \"\"\"\n return PackageFinder(\n find_links=options.find_links,\n index_urls=index_urls,\n allow_external=options.allow_external,\n allow_unverified=options.allow_unverified,\n allow_all_external=options.allow_all_external,\n allow_all_prereleases=options.pre,\n process_dependency_links=options.process_dependency_links,\n session=session,\n )\n\n def run(self, options, args):\n if options.outdated:\n self.run_outdated(options)\n elif options.uptodate:\n self.run_uptodate(options)\n elif options.editable:\n self.run_editables(options)\n else:\n self.run_listing(options)\n\n def run_outdated(self, options):\n for dist, remote_version_raw, remote_version_parsed in \\\n self.find_packages_latests_versions(options):\n if remote_version_parsed > dist.parsed_version:\n logger.info(\n '%s (Current: %s Latest: %s)',\n dist.project_name, dist.version, remote_version_raw,\n )\n\n def find_packages_latests_versions(self, options):\n index_urls = [options.index_url] + options.extra_index_urls\n if options.no_index:\n logger.info('Ignoring indexes: %s', ','.join(index_urls))\n index_urls = []\n\n if options.use_mirrors:\n warnings.warn(\n \"--use-mirrors has been deprecated and will be removed in the \"\n \"future. Explicit uses of --index-url and/or --extra-index-url\"\n \" is suggested.\",\n RemovedInPip17Warning,\n )\n\n if options.mirrors:\n warnings.warn(\n \"--mirrors has been deprecated and will be removed in the \"\n \"future. 
Explicit uses of --index-url and/or --extra-index-url\"\n \" is suggested.\",\n RemovedInPip17Warning,\n )\n index_urls += options.mirrors\n\n dependency_links = []\n for dist in get_installed_distributions(local_only=options.local):\n if dist.has_metadata('dependency_links.txt'):\n dependency_links.extend(\n dist.get_metadata_lines('dependency_links.txt'),\n )\n\n with self._build_session(options) as session:\n finder = self._build_package_finder(options, index_urls, session)\n finder.add_dependency_links(dependency_links)\n\n installed_packages = get_installed_distributions(\n local_only=options.local,\n include_editables=False,\n )\n for dist in installed_packages:\n req = InstallRequirement.from_line(dist.key, None)\n try:\n link = finder.find_requirement(req, True)\n\n # If link is None, means installed version is most\n # up-to-date\n if link is None:\n continue\n except DistributionNotFound:\n continue\n except BestVersionAlreadyInstalled:\n remote_version = req.installed_version\n else:\n # It might be a good idea that link or finder had a public\n # method that returned version\n remote_version = finder._link_package_versions(\n link, req.name\n )[0]\n remote_version_raw = remote_version[2]\n remote_version_parsed = remote_version[0]\n yield dist, remote_version_raw, remote_version_parsed\n\n def run_listing(self, options):\n installed_packages = get_installed_distributions(\n local_only=options.local,\n )\n self.output_package_listing(installed_packages)\n\n def run_editables(self, options):\n installed_packages = get_installed_distributions(\n local_only=options.local,\n editables_only=True,\n )\n self.output_package_listing(installed_packages)\n\n def output_package_listing(self, installed_packages):\n installed_packages = sorted(\n installed_packages,\n key=lambda dist: dist.project_name.lower(),\n )\n for dist in installed_packages:\n if dist_is_editable(dist):\n line = '%s (%s, %s)' % (\n dist.project_name,\n dist.version,\n dist.location,\n )\n else:\n line = '%s (%s)' % (dist.project_name, dist.version)\n logger.info(line)\n\n def run_uptodate(self, options):\n uptodate = []\n for dist, remote_version_raw, remote_version_parsed in \\\n self.find_packages_latests_versions(options):\n if dist.parsed_version == remote_version_parsed:\n uptodate.append(dist)\n self.output_package_listing(uptodate)\n", "path": "pip/commands/list.py"}]}
2,430
173
gh_patches_debug_13721
rasdani/github-patches
git_diff
mdn__kuma-6226
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> SSR Sentry error reporting At the time of writing, 7 times this has happened: ``` HTTPError: 500 Server Error: Internal Server Error for url: http://ssr/ssr/SPA File "django/core/handlers/exception.py", line 41, in inner response = get_response(request) File "django/core/handlers/base.py", line 187, in _get_response response = self.process_exception_by_middleware(e, request) File "django/core/handlers/base.py", line 185, in _get_response response = wrapped_callback(request, *callback_args, **callback_kwargs) File "newrelic/hooks/framework_django.py", line 544, in wrapper return wrapped(*args, **kwargs) File "django/views/decorators/cache.py", line 57, in _wrapped_view_func response = view_func(request, *args, **kwargs) File "django/views/decorators/http.py", line 40, in inner return func(request, *args, **kwargs) File "ratelimit/decorators.py", line 30, in _wrapped return fn(*args, **kw) File "kuma/search/views.py", line 34, in search return render(request, 'search/react.html', context) File "django/shortcuts.py", line 30, in render content = loader.render_to_string(template_name, context, request, using=using) File "django/template/loader.py", line 68, in render_to_string return template.render(context, request) File "django_jinja/backend.py", line 106, in render return mark_safe(self.template.render(context)) File "newrelic/api/function_trace.py", line 121, in dynamic_wrapper return wrapped(*args, **kwargs) File "jinja2/environment.py", line 1008, in render return self.environment.handle_exception(exc_info, True) File "jinja2/environment.py", line 780, in handle_exception reraise(exc_type, exc_value, tb) File "/app/kuma/search/jinja2/search/react.html", line 3, in top-level template code {% set query = request.GET.get('q') %} File "/app/jinja2/react_base.html", line 103, in top-level template code {% block document_head %}{% endblock %} File "/app/kuma/search/jinja2/search/react.html", line 20, in block "document_head" results)|safe }} File "kuma/wiki/templatetags/ssr.py", line 50, in render_react return server_side_render(component_name, data) File "kuma/wiki/templatetags/ssr.py", line 133, in server_side_render response.raise_for_status() File "requests/models.py", line 940, in raise_for_status raise HTTPError(http_error_msg, response=self) ``` https://sentry.prod.mozaws.net/operations/mdn-prod/issues/6448273/?query=is:unresolved It's "fine". The SSR service is bound to be imperfect. What's worrying is that there are no Node errors in Sentry. It's just too unlikely that there have been no exceptions in the SSR Node service. </issue> <code> [start of kuma/wiki/templatetags/ssr.py] 1 2 3 import json 4 import os 5 from functools import lru_cache 6 7 import requests 8 import requests.exceptions 9 from django.conf import settings 10 from django_jinja import library 11 12 13 @lru_cache() 14 def get_localization_data(locale): 15 """ 16 Read the frontend string catalog for the specified locale, parse 17 it as JSON, and return the resulting dict. The returned values 18 are cached so that we don't have to read files all the time. 
19 """ 20 path = os.path.join(settings.BASE_DIR, 21 'static', 'jsi18n', 22 locale, 'react.json') 23 with open(path, 'r') as f: 24 return json.load(f) 25 26 27 @library.global_function 28 def render_react(component_name, locale, url, document_data, ssr=True): 29 """ 30 Render a script tag to define the data and any other HTML tags needed 31 to enable the display of a React-based UI. By default, this does 32 server side rendering, falling back to client-side rendering if 33 the SSR attempt fails. Pass False as the second argument to do 34 client-side rendering unconditionally. 35 36 Note that we are not defining a generic Jinja template tag here. 37 The code in this file is specific to Kuma's React-based UI. 38 """ 39 localization_data = get_localization_data(locale) 40 41 data = { 42 'locale': locale, 43 'stringCatalog': localization_data['catalog'], 44 'pluralExpression': localization_data['plural'], 45 'url': url, 46 'documentData': document_data, 47 } 48 49 if ssr: 50 return server_side_render(component_name, data) 51 else: 52 return client_side_render(component_name, data) 53 54 55 def _render(component_name, html, script, needs_serialization=False): 56 """A utility function used by both client side and server side rendering. 57 Returns a string that includes the specified HTML and a serialized 58 form of the state dict, in the format expected by the client-side code 59 in kuma/javascript/src/index.jsx. 60 """ 61 if needs_serialization: 62 assert isinstance(script, dict), type(script) 63 script = json.dumps(script).replace('</', '<\\/') 64 else: 65 script = 'JSON.parse({})'.format(script) 66 67 return ( 68 '<div id="react-container" data-component-name="{}">{}</div>\n' 69 '<script>window._react_data = {};</script>\n' 70 ).format(component_name, html, script) 71 72 73 def client_side_render(component_name, data): 74 """ 75 Output an empty <div> and a script with complete state so that 76 the UI can be rendered on the client-side. 77 """ 78 return _render(component_name, '', data, needs_serialization=True) 79 80 81 def server_side_render(component_name, data): 82 """ 83 Pre-render the React UI to HTML and output it in a <div>, and then 84 also pass the necessary serialized state in a <script> so that 85 React on the client side can sync itself with the pre-rendred HTML. 86 87 If any exceptions are thrown during the server-side rendering, we 88 fall back to client-side rendering instead. 89 """ 90 url = '{}/{}'.format(settings.SSR_URL, component_name) 91 timeout = settings.SSR_TIMEOUT 92 # Try server side rendering 93 try: 94 # POST the document data as JSON to the SSR server and we 95 # should get HTML text (encoded as plain text) in the body 96 # of the response 97 response = requests.post(url, 98 headers={'Content-Type': 'application/json'}, 99 data=json.dumps(data).encode('utf8'), 100 timeout=timeout) 101 102 # Even though we've got fully rendered HTML now, we still need to 103 # send the document data along with it so that React can sync its 104 # state on the client side with what is in the HTML. When rendering 105 # a document page, the data includes long strings of HTML that 106 # we can get away without duplicating. So as an optimization when 107 # component_name is "document", we're going to make a copy of the 108 # data (because the original belongs to our caller) and delete those 109 # strings from the copy. 110 # 111 # WARNING: This optimization can save 20kb in data transfer 112 # for typical pages, but it requires us to be very careful on 113 # the frontend. 
If any components render conditionally based on 114 # the state of bodyHTML, tocHTML or quickLinkHTML, then they will 115 # render differently on the client than during SSR, and the hydrate 116 # will not just work cleanly, and those components will re-render 117 # with empty strings. This has already caused Bug 1558308, and 118 # I've commented it out because the benefit in file size doesn't 119 # seem worth the risk of client-side bugs. 120 # 121 # As an alternative, it ought to be possible to extract the HTML 122 # strings from the SSR'ed document and rebuild the document object 123 # on the client right before we call hydrate(). So if you uncomment 124 # the lines below, you should also edit kuma/javascript/src/index.jsx 125 # to extract the HTML from the document as well. 126 # 127 # if component_name == 'document': 128 # data = data.copy() 129 # data['documentData'] = data['documentData'].copy() 130 # data['documentData'].update(bodyHTML='', 131 # tocHTML='', 132 # quickLinksHTML='') 133 response.raise_for_status() 134 result = response.json() 135 return _render(component_name, result['html'], result['script']) 136 137 except requests.exceptions.ConnectionError: 138 print("Connection error contacting SSR server.") 139 print("Falling back to client side rendering.") 140 return client_side_render(component_name, data) 141 except requests.exceptions.ReadTimeout: 142 print("Timeout contacting SSR server.") 143 print("Falling back to client side rendering.") 144 return client_side_render(component_name, data) 145 [end of kuma/wiki/templatetags/ssr.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/kuma/wiki/templatetags/ssr.py b/kuma/wiki/templatetags/ssr.py --- a/kuma/wiki/templatetags/ssr.py +++ b/kuma/wiki/templatetags/ssr.py @@ -134,11 +134,7 @@ result = response.json() return _render(component_name, result['html'], result['script']) - except requests.exceptions.ConnectionError: - print("Connection error contacting SSR server.") - print("Falling back to client side rendering.") - return client_side_render(component_name, data) - except requests.exceptions.ReadTimeout: - print("Timeout contacting SSR server.") + except requests.exceptions.RequestException as exception: + print(f"{exception.__class__} error contacting SSR server.") print("Falling back to client side rendering.") return client_side_render(component_name, data)
{"golden_diff": "diff --git a/kuma/wiki/templatetags/ssr.py b/kuma/wiki/templatetags/ssr.py\n--- a/kuma/wiki/templatetags/ssr.py\n+++ b/kuma/wiki/templatetags/ssr.py\n@@ -134,11 +134,7 @@\n result = response.json()\n return _render(component_name, result['html'], result['script'])\n \n- except requests.exceptions.ConnectionError:\n- print(\"Connection error contacting SSR server.\")\n- print(\"Falling back to client side rendering.\")\n- return client_side_render(component_name, data)\n- except requests.exceptions.ReadTimeout:\n- print(\"Timeout contacting SSR server.\")\n+ except requests.exceptions.RequestException as exception:\n+ print(f\"{exception.__class__} error contacting SSR server.\")\n print(\"Falling back to client side rendering.\")\n return client_side_render(component_name, data)\n", "issue": "SSR Sentry error reporting\nAt the time of writing, 7 times this has happened:\r\n```\r\nHTTPError: 500 Server Error: Internal Server Error for url: http://ssr/ssr/SPA\r\n File \"django/core/handlers/exception.py\", line 41, in inner\r\n response = get_response(request)\r\n File \"django/core/handlers/base.py\", line 187, in _get_response\r\n response = self.process_exception_by_middleware(e, request)\r\n File \"django/core/handlers/base.py\", line 185, in _get_response\r\n response = wrapped_callback(request, *callback_args, **callback_kwargs)\r\n File \"newrelic/hooks/framework_django.py\", line 544, in wrapper\r\n return wrapped(*args, **kwargs)\r\n File \"django/views/decorators/cache.py\", line 57, in _wrapped_view_func\r\n response = view_func(request, *args, **kwargs)\r\n File \"django/views/decorators/http.py\", line 40, in inner\r\n return func(request, *args, **kwargs)\r\n File \"ratelimit/decorators.py\", line 30, in _wrapped\r\n return fn(*args, **kw)\r\n File \"kuma/search/views.py\", line 34, in search\r\n return render(request, 'search/react.html', context)\r\n File \"django/shortcuts.py\", line 30, in render\r\n content = loader.render_to_string(template_name, context, request, using=using)\r\n File \"django/template/loader.py\", line 68, in render_to_string\r\n return template.render(context, request)\r\n File \"django_jinja/backend.py\", line 106, in render\r\n return mark_safe(self.template.render(context))\r\n File \"newrelic/api/function_trace.py\", line 121, in dynamic_wrapper\r\n return wrapped(*args, **kwargs)\r\n File \"jinja2/environment.py\", line 1008, in render\r\n return self.environment.handle_exception(exc_info, True)\r\n File \"jinja2/environment.py\", line 780, in handle_exception\r\n reraise(exc_type, exc_value, tb)\r\n File \"/app/kuma/search/jinja2/search/react.html\", line 3, in top-level template code\r\n {% set query = request.GET.get('q') %}\r\n File \"/app/jinja2/react_base.html\", line 103, in top-level template code\r\n {% block document_head %}{% endblock %}\r\n File \"/app/kuma/search/jinja2/search/react.html\", line 20, in block \"document_head\"\r\n results)|safe }}\r\n File \"kuma/wiki/templatetags/ssr.py\", line 50, in render_react\r\n return server_side_render(component_name, data)\r\n File \"kuma/wiki/templatetags/ssr.py\", line 133, in server_side_render\r\n response.raise_for_status()\r\n File \"requests/models.py\", line 940, in raise_for_status\r\n raise HTTPError(http_error_msg, response=self)\r\n```\r\nhttps://sentry.prod.mozaws.net/operations/mdn-prod/issues/6448273/?query=is:unresolved\r\n\r\nIt's \"fine\". The SSR service is bound to be imperfect. What's worrying is that there are no Node errors in Sentry. 
It's just too unlikely that there have been no exceptions in the SSR Node service. \n", "before_files": [{"content": "\n\nimport json\nimport os\nfrom functools import lru_cache\n\nimport requests\nimport requests.exceptions\nfrom django.conf import settings\nfrom django_jinja import library\n\n\n@lru_cache()\ndef get_localization_data(locale):\n \"\"\"\n Read the frontend string catalog for the specified locale, parse\n it as JSON, and return the resulting dict. The returned values\n are cached so that we don't have to read files all the time.\n \"\"\"\n path = os.path.join(settings.BASE_DIR,\n 'static', 'jsi18n',\n locale, 'react.json')\n with open(path, 'r') as f:\n return json.load(f)\n\n\[email protected]_function\ndef render_react(component_name, locale, url, document_data, ssr=True):\n \"\"\"\n Render a script tag to define the data and any other HTML tags needed\n to enable the display of a React-based UI. By default, this does\n server side rendering, falling back to client-side rendering if\n the SSR attempt fails. Pass False as the second argument to do\n client-side rendering unconditionally.\n\n Note that we are not defining a generic Jinja template tag here.\n The code in this file is specific to Kuma's React-based UI.\n \"\"\"\n localization_data = get_localization_data(locale)\n\n data = {\n 'locale': locale,\n 'stringCatalog': localization_data['catalog'],\n 'pluralExpression': localization_data['plural'],\n 'url': url,\n 'documentData': document_data,\n }\n\n if ssr:\n return server_side_render(component_name, data)\n else:\n return client_side_render(component_name, data)\n\n\ndef _render(component_name, html, script, needs_serialization=False):\n \"\"\"A utility function used by both client side and server side rendering.\n Returns a string that includes the specified HTML and a serialized\n form of the state dict, in the format expected by the client-side code\n in kuma/javascript/src/index.jsx.\n \"\"\"\n if needs_serialization:\n assert isinstance(script, dict), type(script)\n script = json.dumps(script).replace('</', '<\\\\/')\n else:\n script = 'JSON.parse({})'.format(script)\n\n return (\n '<div id=\"react-container\" data-component-name=\"{}\">{}</div>\\n'\n '<script>window._react_data = {};</script>\\n'\n ).format(component_name, html, script)\n\n\ndef client_side_render(component_name, data):\n \"\"\"\n Output an empty <div> and a script with complete state so that\n the UI can be rendered on the client-side.\n \"\"\"\n return _render(component_name, '', data, needs_serialization=True)\n\n\ndef server_side_render(component_name, data):\n \"\"\"\n Pre-render the React UI to HTML and output it in a <div>, and then\n also pass the necessary serialized state in a <script> so that\n React on the client side can sync itself with the pre-rendred HTML.\n\n If any exceptions are thrown during the server-side rendering, we\n fall back to client-side rendering instead.\n \"\"\"\n url = '{}/{}'.format(settings.SSR_URL, component_name)\n timeout = settings.SSR_TIMEOUT\n # Try server side rendering\n try:\n # POST the document data as JSON to the SSR server and we\n # should get HTML text (encoded as plain text) in the body\n # of the response\n response = requests.post(url,\n headers={'Content-Type': 'application/json'},\n data=json.dumps(data).encode('utf8'),\n timeout=timeout)\n\n # Even though we've got fully rendered HTML now, we still need to\n # send the document data along with it so that React can sync its\n # state on the client side with what is in the HTML. 
When rendering\n # a document page, the data includes long strings of HTML that\n # we can get away without duplicating. So as an optimization when\n # component_name is \"document\", we're going to make a copy of the\n # data (because the original belongs to our caller) and delete those\n # strings from the copy.\n #\n # WARNING: This optimization can save 20kb in data transfer\n # for typical pages, but it requires us to be very careful on\n # the frontend. If any components render conditionally based on\n # the state of bodyHTML, tocHTML or quickLinkHTML, then they will\n # render differently on the client than during SSR, and the hydrate\n # will not just work cleanly, and those components will re-render\n # with empty strings. This has already caused Bug 1558308, and\n # I've commented it out because the benefit in file size doesn't\n # seem worth the risk of client-side bugs.\n #\n # As an alternative, it ought to be possible to extract the HTML\n # strings from the SSR'ed document and rebuild the document object\n # on the client right before we call hydrate(). So if you uncomment\n # the lines below, you should also edit kuma/javascript/src/index.jsx\n # to extract the HTML from the document as well.\n #\n # if component_name == 'document':\n # data = data.copy()\n # data['documentData'] = data['documentData'].copy()\n # data['documentData'].update(bodyHTML='',\n # tocHTML='',\n # quickLinksHTML='')\n response.raise_for_status()\n result = response.json()\n return _render(component_name, result['html'], result['script'])\n\n except requests.exceptions.ConnectionError:\n print(\"Connection error contacting SSR server.\")\n print(\"Falling back to client side rendering.\")\n return client_side_render(component_name, data)\n except requests.exceptions.ReadTimeout:\n print(\"Timeout contacting SSR server.\")\n print(\"Falling back to client side rendering.\")\n return client_side_render(component_name, data)\n", "path": "kuma/wiki/templatetags/ssr.py"}]}
2,909
197
gh_patches_debug_42461
rasdani/github-patches
git_diff
python-gitlab__python-gitlab-2773
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> ProjectMergeRequestApprovalRule.save() throws 404 ## Description of the problem, including code/CLI snippet `gl.project.get(proj_id).merge_requests.get(mr_iid).approval_rules.get(rule_id).save()` This example is an MVP example; actually making changes to the rule object before calling `.save()` doesn't change the behaviour ## Expected Behavior The function should succeed silently, returning `None` ## Actual Behavior `gitlab.exceptions.GitlabUpdateError: 404: 404 Not found` is thrown. Trying it with debug mode on, it appears as though the root cause of the issue is that when the CLI invokes `/projects/:id/merge_requests/:merge_request_iid/approval_rules/:approval_rule_id` in the API, `:id` (i.e. project ID) is passed where the URL expects `:approval_rule_id`, as can be seen from this debug output (anonymized to remove sensitive information) ``` >>> rule.save() DEBUG:urllib3.connectionpool:Resetting dropped connection: mygitlab.example.com send: b'PUT /api/v4/projects/93/merge_requests/1/approval_rules/93 HTTP/1.1\r\nHost: mygitlab.example.com\r\nUser-Agent: python-gitlab/3.14.0\r\nAccept-Encoding: gzip, deflate\r\nAccept: */*\r\nConnection: keep-alive\r\nPRIVATE-TOKEN: TOKEN\r\nContent-type: application/json\r\nContent-Length: 768\r\n\r\n' send: b'{"id": "93", "merge_request_iid": "1", "approval_rule_id": 89, "name": "testrule", "approvals_required": 1, "users": ["{\\"id\\": 168, \\"username\\": \\"myusername\\", \\"name\\": \\"My Name\\", \\"state\\": \\"active\\", \\"avatar_url\\": \\"https://secure.gravatar.com/avatar/8306d9f17d1c91970c2447b61c7a9f29?s=80&d=identicon\\", \\"web_url\\": \\"https://mygitlab.example.com/myusername\\", \\"created_at\\": \\"2023-03-29T14:30:13.371Z\\", \\"bio\\": \\"\\", \\"location\\": null, \\"public_email\\": null, \\"skype\\": \\"\\", \\"linkedin\\": \\"\\", \\"twitter\\": \\"\\", \\"website_url\\": \\"\\", \\"organization\\": null, \\"job_title\\": \\"\\", \\"pronouns\\": null, \\"bot\\": false, \\"work_information\\": null, \\"followers\\": 0, \\"following\\": 0, \\"is_followed\\": false, \\"local_time\\": null}"]}' reply: 'HTTP/1.1 404 Not Found\r\n' ``` ## Specifications - python-gitlab version: 3.14.0 - API version you are using (v3/v4): v4 - Gitlab server version (or gitlab.com): 15.7.2-ee </issue> <code> [start of gitlab/v4/objects/merge_request_approvals.py] 1 from typing import Any, cast, Dict, List, Optional, TYPE_CHECKING, Union 2 3 from gitlab import exceptions as exc 4 from gitlab.base import RESTManager, RESTObject 5 from gitlab.mixins import ( 6 CreateMixin, 7 CRUDMixin, 8 DeleteMixin, 9 GetWithoutIdMixin, 10 ListMixin, 11 ObjectDeleteMixin, 12 SaveMixin, 13 UpdateMethod, 14 UpdateMixin, 15 ) 16 from gitlab.types import RequiredOptional 17 18 __all__ = [ 19 "ProjectApproval", 20 "ProjectApprovalManager", 21 "ProjectApprovalRule", 22 "ProjectApprovalRuleManager", 23 "ProjectMergeRequestApproval", 24 "ProjectMergeRequestApprovalManager", 25 "ProjectMergeRequestApprovalRule", 26 "ProjectMergeRequestApprovalRuleManager", 27 "ProjectMergeRequestApprovalState", 28 "ProjectMergeRequestApprovalStateManager", 29 ] 30 31 32 class ProjectApproval(SaveMixin, RESTObject): 33 _id_attr = None 34 35 36 class ProjectApprovalManager(GetWithoutIdMixin, UpdateMixin, RESTManager): 37 _path = "/projects/{project_id}/approvals" 38 _obj_cls = ProjectApproval 39 _from_parent_attrs = {"project_id": "id"} 40 _update_attrs = RequiredOptional( 41 optional=( 42 
"approvals_before_merge", 43 "reset_approvals_on_push", 44 "disable_overriding_approvers_per_merge_request", 45 "merge_requests_author_approval", 46 "merge_requests_disable_committers_approval", 47 ), 48 ) 49 _update_method = UpdateMethod.POST 50 51 def get(self, **kwargs: Any) -> ProjectApproval: 52 return cast(ProjectApproval, super().get(**kwargs)) 53 54 55 class ProjectApprovalRule(SaveMixin, ObjectDeleteMixin, RESTObject): 56 _id_attr = "id" 57 58 59 class ProjectApprovalRuleManager( 60 ListMixin, CreateMixin, UpdateMixin, DeleteMixin, RESTManager 61 ): 62 _path = "/projects/{project_id}/approval_rules" 63 _obj_cls = ProjectApprovalRule 64 _from_parent_attrs = {"project_id": "id"} 65 _create_attrs = RequiredOptional( 66 required=("name", "approvals_required"), 67 optional=("user_ids", "group_ids", "protected_branch_ids", "usernames"), 68 ) 69 70 71 class ProjectMergeRequestApproval(SaveMixin, RESTObject): 72 _id_attr = None 73 74 75 class ProjectMergeRequestApprovalManager(GetWithoutIdMixin, UpdateMixin, RESTManager): 76 _path = "/projects/{project_id}/merge_requests/{mr_iid}/approvals" 77 _obj_cls = ProjectMergeRequestApproval 78 _from_parent_attrs = {"project_id": "project_id", "mr_iid": "iid"} 79 _update_attrs = RequiredOptional(required=("approvals_required",)) 80 _update_method = UpdateMethod.POST 81 82 def get(self, **kwargs: Any) -> ProjectMergeRequestApproval: 83 return cast(ProjectMergeRequestApproval, super().get(**kwargs)) 84 85 @exc.on_http_error(exc.GitlabUpdateError) 86 def set_approvers( 87 self, 88 approvals_required: int, 89 approver_ids: Optional[List[int]] = None, 90 approver_group_ids: Optional[List[int]] = None, 91 approval_rule_name: str = "name", 92 **kwargs: Any, 93 ) -> RESTObject: 94 """Change MR-level allowed approvers and approver groups. 95 96 Args: 97 approvals_required: The number of required approvals for this rule 98 approver_ids: User IDs that can approve MRs 99 approver_group_ids: Group IDs whose members can approve MRs 100 101 Raises: 102 GitlabAuthenticationError: If authentication is not correct 103 GitlabUpdateError: If the server failed to perform the request 104 """ 105 approver_ids = approver_ids or [] 106 approver_group_ids = approver_group_ids or [] 107 108 data = { 109 "name": approval_rule_name, 110 "approvals_required": approvals_required, 111 "rule_type": "regular", 112 "user_ids": approver_ids, 113 "group_ids": approver_group_ids, 114 } 115 if TYPE_CHECKING: 116 assert self._parent is not None 117 approval_rules: ProjectMergeRequestApprovalRuleManager = ( 118 self._parent.approval_rules 119 ) 120 # update any existing approval rule matching the name 121 existing_approval_rules = approval_rules.list() 122 for ar in existing_approval_rules: 123 if ar.name == approval_rule_name: 124 ar.user_ids = data["user_ids"] 125 ar.approvals_required = data["approvals_required"] 126 ar.group_ids = data["group_ids"] 127 ar.save() 128 return ar 129 # if there was no rule matching the rule name, create a new one 130 return approval_rules.create(data=data, **kwargs) 131 132 133 class ProjectMergeRequestApprovalRule(SaveMixin, ObjectDeleteMixin, RESTObject): 134 _repr_attr = "name" 135 id: int 136 approval_rule_id: int 137 merge_request_iid: int 138 139 @exc.on_http_error(exc.GitlabUpdateError) 140 def save(self, **kwargs: Any) -> None: 141 """Save the changes made to the object to the server. 142 143 The object is updated to match what the server returns. 144 145 Args: 146 **kwargs: Extra options to send to the server (e.g. 
sudo) 147 148 Raise: 149 GitlabAuthenticationError: If authentication is not correct 150 GitlabUpdateError: If the server cannot perform the request 151 """ 152 # There is a mismatch between the name of our id attribute and the put 153 # REST API name for the project_id, so we override it here. 154 self.approval_rule_id = self.id 155 self.merge_request_iid = self._parent_attrs["mr_iid"] 156 self.id = self._parent_attrs["project_id"] 157 # save will update self.id with the result from the server, so no need 158 # to overwrite with what it was before we overwrote it. 159 SaveMixin.save(self, **kwargs) 160 161 162 class ProjectMergeRequestApprovalRuleManager(CRUDMixin, RESTManager): 163 _path = "/projects/{project_id}/merge_requests/{mr_iid}/approval_rules" 164 _obj_cls = ProjectMergeRequestApprovalRule 165 _from_parent_attrs = {"project_id": "project_id", "mr_iid": "iid"} 166 _update_attrs = RequiredOptional( 167 required=( 168 "id", 169 "merge_request_iid", 170 "approval_rule_id", 171 "name", 172 "approvals_required", 173 ), 174 optional=("user_ids", "group_ids"), 175 ) 176 # Important: When approval_project_rule_id is set, the name, users and 177 # groups of project-level rule will be copied. The approvals_required 178 # specified will be used. 179 _create_attrs = RequiredOptional( 180 required=("id", "merge_request_iid", "name", "approvals_required"), 181 optional=("approval_project_rule_id", "user_ids", "group_ids"), 182 ) 183 184 def get( 185 self, id: Union[str, int], lazy: bool = False, **kwargs: Any 186 ) -> ProjectMergeRequestApprovalRule: 187 return cast( 188 ProjectMergeRequestApprovalRule, super().get(id=id, lazy=lazy, **kwargs) 189 ) 190 191 def create( 192 self, data: Optional[Dict[str, Any]] = None, **kwargs: Any 193 ) -> RESTObject: 194 """Create a new object. 195 196 Args: 197 data: Parameters to send to the server to create the 198 resource 199 **kwargs: Extra options to send to the server (e.g. sudo or 200 'ref_name', 'stage', 'name', 'all') 201 202 Raises: 203 GitlabAuthenticationError: If authentication is not correct 204 GitlabCreateError: If the server cannot perform the request 205 206 Returns: 207 A new instance of the manage object class build with 208 the data sent by the server 209 """ 210 if TYPE_CHECKING: 211 assert data is not None 212 new_data = data.copy() 213 new_data["id"] = self._from_parent_attrs["project_id"] 214 new_data["merge_request_iid"] = self._from_parent_attrs["mr_iid"] 215 return CreateMixin.create(self, new_data, **kwargs) 216 217 218 class ProjectMergeRequestApprovalState(RESTObject): 219 pass 220 221 222 class ProjectMergeRequestApprovalStateManager(GetWithoutIdMixin, RESTManager): 223 _path = "/projects/{project_id}/merge_requests/{mr_iid}/approval_state" 224 _obj_cls = ProjectMergeRequestApprovalState 225 _from_parent_attrs = {"project_id": "project_id", "mr_iid": "iid"} 226 227 def get(self, **kwargs: Any) -> ProjectMergeRequestApprovalState: 228 return cast(ProjectMergeRequestApprovalState, super().get(**kwargs)) 229 [end of gitlab/v4/objects/merge_request_approvals.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/gitlab/v4/objects/merge_request_approvals.py b/gitlab/v4/objects/merge_request_approvals.py --- a/gitlab/v4/objects/merge_request_approvals.py +++ b/gitlab/v4/objects/merge_request_approvals.py @@ -1,4 +1,4 @@ -from typing import Any, cast, Dict, List, Optional, TYPE_CHECKING, Union +from typing import Any, cast, List, Optional, TYPE_CHECKING, Union from gitlab import exceptions as exc from gitlab.base import RESTManager, RESTObject @@ -132,42 +132,16 @@ class ProjectMergeRequestApprovalRule(SaveMixin, ObjectDeleteMixin, RESTObject): _repr_attr = "name" - id: int - approval_rule_id: int - merge_request_iid: int - - @exc.on_http_error(exc.GitlabUpdateError) - def save(self, **kwargs: Any) -> None: - """Save the changes made to the object to the server. - - The object is updated to match what the server returns. - - Args: - **kwargs: Extra options to send to the server (e.g. sudo) - - Raise: - GitlabAuthenticationError: If authentication is not correct - GitlabUpdateError: If the server cannot perform the request - """ - # There is a mismatch between the name of our id attribute and the put - # REST API name for the project_id, so we override it here. - self.approval_rule_id = self.id - self.merge_request_iid = self._parent_attrs["mr_iid"] - self.id = self._parent_attrs["project_id"] - # save will update self.id with the result from the server, so no need - # to overwrite with what it was before we overwrote it. - SaveMixin.save(self, **kwargs) class ProjectMergeRequestApprovalRuleManager(CRUDMixin, RESTManager): - _path = "/projects/{project_id}/merge_requests/{mr_iid}/approval_rules" + _path = "/projects/{project_id}/merge_requests/{merge_request_iid}/approval_rules" _obj_cls = ProjectMergeRequestApprovalRule - _from_parent_attrs = {"project_id": "project_id", "mr_iid": "iid"} + _from_parent_attrs = {"project_id": "project_id", "merge_request_iid": "iid"} _update_attrs = RequiredOptional( required=( "id", "merge_request_iid", - "approval_rule_id", "name", "approvals_required", ), @@ -177,7 +151,7 @@ # groups of project-level rule will be copied. The approvals_required # specified will be used. _create_attrs = RequiredOptional( - required=("id", "merge_request_iid", "name", "approvals_required"), + required=("name", "approvals_required"), optional=("approval_project_rule_id", "user_ids", "group_ids"), ) @@ -188,32 +162,6 @@ ProjectMergeRequestApprovalRule, super().get(id=id, lazy=lazy, **kwargs) ) - def create( - self, data: Optional[Dict[str, Any]] = None, **kwargs: Any - ) -> RESTObject: - """Create a new object. - - Args: - data: Parameters to send to the server to create the - resource - **kwargs: Extra options to send to the server (e.g. sudo or - 'ref_name', 'stage', 'name', 'all') - - Raises: - GitlabAuthenticationError: If authentication is not correct - GitlabCreateError: If the server cannot perform the request - - Returns: - A new instance of the manage object class build with - the data sent by the server - """ - if TYPE_CHECKING: - assert data is not None - new_data = data.copy() - new_data["id"] = self._from_parent_attrs["project_id"] - new_data["merge_request_iid"] = self._from_parent_attrs["mr_iid"] - return CreateMixin.create(self, new_data, **kwargs) - class ProjectMergeRequestApprovalState(RESTObject): pass
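As a rough illustration of why the renamed path parameter in the diff above fixes the 404, the toy snippet below builds the PUT URL the way a path template plus parent attributes would. It is plain Python rather than python-gitlab internals, and the ids 93, 1, and 89 are taken from the debug output quoted in the issue.

```python
# Plain-Python stand-in for how a path template plus parent attributes form the URL.
PATH = "/projects/{project_id}/merge_requests/{merge_request_iid}/approval_rules"
parent_attrs = {"project_id": 93, "merge_request_iid": 1}


def rule_url(rule_id: int) -> str:
    """Build the PUT target for one approval rule."""
    return PATH.format(**parent_attrs) + f"/{rule_id}"


# The old save() override replaced the rule id with the project id (93),
# producing .../approval_rules/93 and a 404; the real rule id is 89.
assert rule_url(93) == "/projects/93/merge_requests/1/approval_rules/93"
assert rule_url(89) == "/projects/93/merge_requests/1/approval_rules/89"
```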
{"golden_diff": "diff --git a/gitlab/v4/objects/merge_request_approvals.py b/gitlab/v4/objects/merge_request_approvals.py\n--- a/gitlab/v4/objects/merge_request_approvals.py\n+++ b/gitlab/v4/objects/merge_request_approvals.py\n@@ -1,4 +1,4 @@\n-from typing import Any, cast, Dict, List, Optional, TYPE_CHECKING, Union\n+from typing import Any, cast, List, Optional, TYPE_CHECKING, Union\n \n from gitlab import exceptions as exc\n from gitlab.base import RESTManager, RESTObject\n@@ -132,42 +132,16 @@\n \n class ProjectMergeRequestApprovalRule(SaveMixin, ObjectDeleteMixin, RESTObject):\n _repr_attr = \"name\"\n- id: int\n- approval_rule_id: int\n- merge_request_iid: int\n-\n- @exc.on_http_error(exc.GitlabUpdateError)\n- def save(self, **kwargs: Any) -> None:\n- \"\"\"Save the changes made to the object to the server.\n-\n- The object is updated to match what the server returns.\n-\n- Args:\n- **kwargs: Extra options to send to the server (e.g. sudo)\n-\n- Raise:\n- GitlabAuthenticationError: If authentication is not correct\n- GitlabUpdateError: If the server cannot perform the request\n- \"\"\"\n- # There is a mismatch between the name of our id attribute and the put\n- # REST API name for the project_id, so we override it here.\n- self.approval_rule_id = self.id\n- self.merge_request_iid = self._parent_attrs[\"mr_iid\"]\n- self.id = self._parent_attrs[\"project_id\"]\n- # save will update self.id with the result from the server, so no need\n- # to overwrite with what it was before we overwrote it.\n- SaveMixin.save(self, **kwargs)\n \n \n class ProjectMergeRequestApprovalRuleManager(CRUDMixin, RESTManager):\n- _path = \"/projects/{project_id}/merge_requests/{mr_iid}/approval_rules\"\n+ _path = \"/projects/{project_id}/merge_requests/{merge_request_iid}/approval_rules\"\n _obj_cls = ProjectMergeRequestApprovalRule\n- _from_parent_attrs = {\"project_id\": \"project_id\", \"mr_iid\": \"iid\"}\n+ _from_parent_attrs = {\"project_id\": \"project_id\", \"merge_request_iid\": \"iid\"}\n _update_attrs = RequiredOptional(\n required=(\n \"id\",\n \"merge_request_iid\",\n- \"approval_rule_id\",\n \"name\",\n \"approvals_required\",\n ),\n@@ -177,7 +151,7 @@\n # groups of project-level rule will be copied. The approvals_required\n # specified will be used.\n _create_attrs = RequiredOptional(\n- required=(\"id\", \"merge_request_iid\", \"name\", \"approvals_required\"),\n+ required=(\"name\", \"approvals_required\"),\n optional=(\"approval_project_rule_id\", \"user_ids\", \"group_ids\"),\n )\n \n@@ -188,32 +162,6 @@\n ProjectMergeRequestApprovalRule, super().get(id=id, lazy=lazy, **kwargs)\n )\n \n- def create(\n- self, data: Optional[Dict[str, Any]] = None, **kwargs: Any\n- ) -> RESTObject:\n- \"\"\"Create a new object.\n-\n- Args:\n- data: Parameters to send to the server to create the\n- resource\n- **kwargs: Extra options to send to the server (e.g. 
sudo or\n- 'ref_name', 'stage', 'name', 'all')\n-\n- Raises:\n- GitlabAuthenticationError: If authentication is not correct\n- GitlabCreateError: If the server cannot perform the request\n-\n- Returns:\n- A new instance of the manage object class build with\n- the data sent by the server\n- \"\"\"\n- if TYPE_CHECKING:\n- assert data is not None\n- new_data = data.copy()\n- new_data[\"id\"] = self._from_parent_attrs[\"project_id\"]\n- new_data[\"merge_request_iid\"] = self._from_parent_attrs[\"mr_iid\"]\n- return CreateMixin.create(self, new_data, **kwargs)\n-\n \n class ProjectMergeRequestApprovalState(RESTObject):\n pass\n", "issue": "ProjectMergeRequestApprovalRule.save() throws 404\n## Description of the problem, including code/CLI snippet\r\n`gl.project.get(proj_id).merge_requests.get(mr_iid).approval_rules.get(rule_id).save()`\r\n\r\nThis example is an MVP example; actually making changes to the rule object before calling `.save()` doesn't change the behaviour\r\n\r\n## Expected Behavior\r\nThe function should succeed silently, returning `None`\r\n\r\n## Actual Behavior\r\n`gitlab.exceptions.GitlabUpdateError: 404: 404 Not found` is thrown. Trying it with debug mode on, it appears as though the root cause of the issue is that when the CLI invokes `/projects/:id/merge_requests/:merge_request_iid/approval_rules/:approval_rule_id` in the API, `:id` (i.e. project ID) is passed where the URL expects `:approval_rule_id`, as can be seen from this debug output (anonymized to remove sensitive information)\r\n\r\n```\r\n>>> rule.save()\r\nDEBUG:urllib3.connectionpool:Resetting dropped connection: mygitlab.example.com\r\nsend: b'PUT /api/v4/projects/93/merge_requests/1/approval_rules/93 HTTP/1.1\\r\\nHost: mygitlab.example.com\\r\\nUser-Agent: python-gitlab/3.14.0\\r\\nAccept-Encoding: gzip, deflate\\r\\nAccept: */*\\r\\nConnection: keep-alive\\r\\nPRIVATE-TOKEN: TOKEN\\r\\nContent-type: application/json\\r\\nContent-Length: 768\\r\\n\\r\\n'\r\nsend: b'{\"id\": \"93\", \"merge_request_iid\": \"1\", \"approval_rule_id\": 89, \"name\": \"testrule\", \"approvals_required\": 1, \"users\": [\"{\\\\\"id\\\\\": 168, \\\\\"username\\\\\": \\\\\"myusername\\\\\", \\\\\"name\\\\\": \\\\\"My Name\\\\\", \\\\\"state\\\\\": \\\\\"active\\\\\", \\\\\"avatar_url\\\\\": \\\\\"https://secure.gravatar.com/avatar/8306d9f17d1c91970c2447b61c7a9f29?s=80&d=identicon\\\\\", \\\\\"web_url\\\\\": \\\\\"https://mygitlab.example.com/myusername\\\\\", \\\\\"created_at\\\\\": \\\\\"2023-03-29T14:30:13.371Z\\\\\", \\\\\"bio\\\\\": \\\\\"\\\\\", \\\\\"location\\\\\": null, \\\\\"public_email\\\\\": null, \\\\\"skype\\\\\": \\\\\"\\\\\", \\\\\"linkedin\\\\\": \\\\\"\\\\\", \\\\\"twitter\\\\\": \\\\\"\\\\\", \\\\\"website_url\\\\\": \\\\\"\\\\\", \\\\\"organization\\\\\": null, \\\\\"job_title\\\\\": \\\\\"\\\\\", \\\\\"pronouns\\\\\": null, \\\\\"bot\\\\\": false, \\\\\"work_information\\\\\": null, \\\\\"followers\\\\\": 0, \\\\\"following\\\\\": 0, \\\\\"is_followed\\\\\": false, \\\\\"local_time\\\\\": null}\"]}'\r\nreply: 'HTTP/1.1 404 Not Found\\r\\n'\r\n```\r\n\r\n## Specifications\r\n\r\n - python-gitlab version: 3.14.0\r\n - API version you are using (v3/v4): v4\r\n - Gitlab server version (or gitlab.com): 15.7.2-ee\r\n\n", "before_files": [{"content": "from typing import Any, cast, Dict, List, Optional, TYPE_CHECKING, Union\n\nfrom gitlab import exceptions as exc\nfrom gitlab.base import RESTManager, RESTObject\nfrom gitlab.mixins import (\n CreateMixin,\n CRUDMixin,\n DeleteMixin,\n GetWithoutIdMixin,\n ListMixin,\n 
ObjectDeleteMixin,\n SaveMixin,\n UpdateMethod,\n UpdateMixin,\n)\nfrom gitlab.types import RequiredOptional\n\n__all__ = [\n \"ProjectApproval\",\n \"ProjectApprovalManager\",\n \"ProjectApprovalRule\",\n \"ProjectApprovalRuleManager\",\n \"ProjectMergeRequestApproval\",\n \"ProjectMergeRequestApprovalManager\",\n \"ProjectMergeRequestApprovalRule\",\n \"ProjectMergeRequestApprovalRuleManager\",\n \"ProjectMergeRequestApprovalState\",\n \"ProjectMergeRequestApprovalStateManager\",\n]\n\n\nclass ProjectApproval(SaveMixin, RESTObject):\n _id_attr = None\n\n\nclass ProjectApprovalManager(GetWithoutIdMixin, UpdateMixin, RESTManager):\n _path = \"/projects/{project_id}/approvals\"\n _obj_cls = ProjectApproval\n _from_parent_attrs = {\"project_id\": \"id\"}\n _update_attrs = RequiredOptional(\n optional=(\n \"approvals_before_merge\",\n \"reset_approvals_on_push\",\n \"disable_overriding_approvers_per_merge_request\",\n \"merge_requests_author_approval\",\n \"merge_requests_disable_committers_approval\",\n ),\n )\n _update_method = UpdateMethod.POST\n\n def get(self, **kwargs: Any) -> ProjectApproval:\n return cast(ProjectApproval, super().get(**kwargs))\n\n\nclass ProjectApprovalRule(SaveMixin, ObjectDeleteMixin, RESTObject):\n _id_attr = \"id\"\n\n\nclass ProjectApprovalRuleManager(\n ListMixin, CreateMixin, UpdateMixin, DeleteMixin, RESTManager\n):\n _path = \"/projects/{project_id}/approval_rules\"\n _obj_cls = ProjectApprovalRule\n _from_parent_attrs = {\"project_id\": \"id\"}\n _create_attrs = RequiredOptional(\n required=(\"name\", \"approvals_required\"),\n optional=(\"user_ids\", \"group_ids\", \"protected_branch_ids\", \"usernames\"),\n )\n\n\nclass ProjectMergeRequestApproval(SaveMixin, RESTObject):\n _id_attr = None\n\n\nclass ProjectMergeRequestApprovalManager(GetWithoutIdMixin, UpdateMixin, RESTManager):\n _path = \"/projects/{project_id}/merge_requests/{mr_iid}/approvals\"\n _obj_cls = ProjectMergeRequestApproval\n _from_parent_attrs = {\"project_id\": \"project_id\", \"mr_iid\": \"iid\"}\n _update_attrs = RequiredOptional(required=(\"approvals_required\",))\n _update_method = UpdateMethod.POST\n\n def get(self, **kwargs: Any) -> ProjectMergeRequestApproval:\n return cast(ProjectMergeRequestApproval, super().get(**kwargs))\n\n @exc.on_http_error(exc.GitlabUpdateError)\n def set_approvers(\n self,\n approvals_required: int,\n approver_ids: Optional[List[int]] = None,\n approver_group_ids: Optional[List[int]] = None,\n approval_rule_name: str = \"name\",\n **kwargs: Any,\n ) -> RESTObject:\n \"\"\"Change MR-level allowed approvers and approver groups.\n\n Args:\n approvals_required: The number of required approvals for this rule\n approver_ids: User IDs that can approve MRs\n approver_group_ids: Group IDs whose members can approve MRs\n\n Raises:\n GitlabAuthenticationError: If authentication is not correct\n GitlabUpdateError: If the server failed to perform the request\n \"\"\"\n approver_ids = approver_ids or []\n approver_group_ids = approver_group_ids or []\n\n data = {\n \"name\": approval_rule_name,\n \"approvals_required\": approvals_required,\n \"rule_type\": \"regular\",\n \"user_ids\": approver_ids,\n \"group_ids\": approver_group_ids,\n }\n if TYPE_CHECKING:\n assert self._parent is not None\n approval_rules: ProjectMergeRequestApprovalRuleManager = (\n self._parent.approval_rules\n )\n # update any existing approval rule matching the name\n existing_approval_rules = approval_rules.list()\n for ar in existing_approval_rules:\n if ar.name == approval_rule_name:\n 
ar.user_ids = data[\"user_ids\"]\n ar.approvals_required = data[\"approvals_required\"]\n ar.group_ids = data[\"group_ids\"]\n ar.save()\n return ar\n # if there was no rule matching the rule name, create a new one\n return approval_rules.create(data=data, **kwargs)\n\n\nclass ProjectMergeRequestApprovalRule(SaveMixin, ObjectDeleteMixin, RESTObject):\n _repr_attr = \"name\"\n id: int\n approval_rule_id: int\n merge_request_iid: int\n\n @exc.on_http_error(exc.GitlabUpdateError)\n def save(self, **kwargs: Any) -> None:\n \"\"\"Save the changes made to the object to the server.\n\n The object is updated to match what the server returns.\n\n Args:\n **kwargs: Extra options to send to the server (e.g. sudo)\n\n Raise:\n GitlabAuthenticationError: If authentication is not correct\n GitlabUpdateError: If the server cannot perform the request\n \"\"\"\n # There is a mismatch between the name of our id attribute and the put\n # REST API name for the project_id, so we override it here.\n self.approval_rule_id = self.id\n self.merge_request_iid = self._parent_attrs[\"mr_iid\"]\n self.id = self._parent_attrs[\"project_id\"]\n # save will update self.id with the result from the server, so no need\n # to overwrite with what it was before we overwrote it.\n SaveMixin.save(self, **kwargs)\n\n\nclass ProjectMergeRequestApprovalRuleManager(CRUDMixin, RESTManager):\n _path = \"/projects/{project_id}/merge_requests/{mr_iid}/approval_rules\"\n _obj_cls = ProjectMergeRequestApprovalRule\n _from_parent_attrs = {\"project_id\": \"project_id\", \"mr_iid\": \"iid\"}\n _update_attrs = RequiredOptional(\n required=(\n \"id\",\n \"merge_request_iid\",\n \"approval_rule_id\",\n \"name\",\n \"approvals_required\",\n ),\n optional=(\"user_ids\", \"group_ids\"),\n )\n # Important: When approval_project_rule_id is set, the name, users and\n # groups of project-level rule will be copied. The approvals_required\n # specified will be used.\n _create_attrs = RequiredOptional(\n required=(\"id\", \"merge_request_iid\", \"name\", \"approvals_required\"),\n optional=(\"approval_project_rule_id\", \"user_ids\", \"group_ids\"),\n )\n\n def get(\n self, id: Union[str, int], lazy: bool = False, **kwargs: Any\n ) -> ProjectMergeRequestApprovalRule:\n return cast(\n ProjectMergeRequestApprovalRule, super().get(id=id, lazy=lazy, **kwargs)\n )\n\n def create(\n self, data: Optional[Dict[str, Any]] = None, **kwargs: Any\n ) -> RESTObject:\n \"\"\"Create a new object.\n\n Args:\n data: Parameters to send to the server to create the\n resource\n **kwargs: Extra options to send to the server (e.g. 
sudo or\n 'ref_name', 'stage', 'name', 'all')\n\n Raises:\n GitlabAuthenticationError: If authentication is not correct\n GitlabCreateError: If the server cannot perform the request\n\n Returns:\n A new instance of the manage object class build with\n the data sent by the server\n \"\"\"\n if TYPE_CHECKING:\n assert data is not None\n new_data = data.copy()\n new_data[\"id\"] = self._from_parent_attrs[\"project_id\"]\n new_data[\"merge_request_iid\"] = self._from_parent_attrs[\"mr_iid\"]\n return CreateMixin.create(self, new_data, **kwargs)\n\n\nclass ProjectMergeRequestApprovalState(RESTObject):\n pass\n\n\nclass ProjectMergeRequestApprovalStateManager(GetWithoutIdMixin, RESTManager):\n _path = \"/projects/{project_id}/merge_requests/{mr_iid}/approval_state\"\n _obj_cls = ProjectMergeRequestApprovalState\n _from_parent_attrs = {\"project_id\": \"project_id\", \"mr_iid\": \"iid\"}\n\n def get(self, **kwargs: Any) -> ProjectMergeRequestApprovalState:\n return cast(ProjectMergeRequestApprovalState, super().get(**kwargs))\n", "path": "gitlab/v4/objects/merge_request_approvals.py"}]}
3,734
970
gh_patches_debug_5461
rasdani/github-patches
git_diff
pyodide__pyodide-2913
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> https://pyodide.org/en/latest/console.html doesn't show latest `main` version ## 🐛 Bug The https://pyodide.org/en/latest/console.html console is stuck at `v0.21.0a3`. I believe this is because the version wasn't bumped to the next 'dev' version after the `v0.21.0a3` release, so somebody probably needs to run ``` ./tools/bump_version.py --new-version 0.21.0.dev0 ``` Without `dev` in the version, the documentation's console uses the release: https://github.com/pyodide/pyodide/blob/7231cab3ffc83f6221fafb7458f9b223d2a7c759/docs/conf.py#L20-L23 ### To Reproduce Go to https://pyodide.org/en/latest/console.html and load a package added since v0.21.0a3, e.g., `import rebound` </issue> <code> [start of docs/conf.py] 1 # Configuration file for the Sphinx documentation builder. 2 3 # -- Path setup -------------------------------------------------------------- 4 5 import atexit 6 import os 7 import shutil 8 import subprocess 9 import sys 10 from pathlib import Path 11 from typing import Any 12 from unittest import mock 13 14 # -- Project information ----------------------------------------------------- 15 16 project = "Pyodide" 17 copyright = "2019-2022, Pyodide contributors and Mozilla" 18 pyodide_version = "0.21.0a3" 19 20 if ".dev" in pyodide_version: 21 CDN_URL = "https://cdn.jsdelivr.net/pyodide/dev/full/" 22 else: 23 CDN_URL = f"https://cdn.jsdelivr.net/pyodide/v{pyodide_version}/full/" 24 25 # -- General configuration --------------------------------------------------- 26 27 # If your documentation needs a minimal Sphinx version, state it here. 28 # 29 # needs_sphinx = '1.0' 30 31 extensions = [ 32 "sphinx.ext.autodoc", 33 "sphinx.ext.autosummary", 34 "sphinxcontrib.napoleon", 35 "myst_parser", 36 "sphinx_js", 37 "autodocsumm", 38 "sphinx_panels", 39 "sphinx_pyodide", 40 "sphinx_argparse_cli", 41 "versionwarning.extension", 42 "sphinx_issues", 43 ] 44 45 myst_enable_extensions = ["substitution"] 46 47 js_language = "typescript" 48 jsdoc_config_path = "../src/js/tsconfig.json" 49 root_for_relative_js_paths = "../src/" 50 issues_github_path = "pyodide/pyodide" 51 52 versionwarning_messages = { 53 "latest": ( 54 "This is the development version of the documentation. " 55 'See <a href="https://pyodide.org/">here</a> for latest stable ' 56 "documentation. Please do not use Pyodide with non " 57 "versioned (`dev`) URLs from the CDN for deployed applications!" 58 ) 59 } 60 versionwarning_body_selector = "#main-content > div" 61 62 autosummary_generate = True 63 autodoc_default_flags = ["members", "inherited-members"] 64 65 # Add modules to be mocked. 66 mock_modules = ["ruamel.yaml", "tomli"] 67 68 # Add any paths that contain templates here, relative to this directory. 69 templates_path = ["_templates"] 70 71 # The suffix(es) of source filenames. 72 source_suffix = [".rst", ".md"] 73 74 # The master toctree document. 75 master_doc = "index" 76 77 # The language for content autogenerated by Sphinx. 78 language = None 79 80 # List of patterns, relative to source directory, that match files and 81 # directories to ignore when looking for source files. 82 exclude_patterns = ["_build", "Thumbs.db", ".DS_Store", "README.md"] 83 84 # The name of the Pygments (syntax highlighting) style to use. 85 pygments_style = None 86 87 # -- Options for HTML output ------------------------------------------------- 88 89 # The theme to use for HTML and HTML Help pages. 
See the documentation for 90 # a list of builtin themes. 91 # 92 html_theme = "sphinx_book_theme" 93 html_logo = "_static/img/pyodide-logo.png" 94 95 # theme-specific options 96 html_theme_options: dict[str, Any] = {} 97 98 # paths that contain custom static files (such as style sheets) 99 html_static_path = ["_static"] 100 101 102 html_css_files = [ 103 "css/pyodide.css", 104 ] 105 106 # Custom sidebar templates, must be a dictionary that maps document names 107 # to template names. 108 # html_sidebars = {} 109 110 # -- Options for HTMLHelp output --------------------------------------------- 111 112 # Output file base name for HTML help builder. 113 htmlhelp_basename = "Pyodidedoc" 114 115 # A list of files that should not be packed into the epub file. 116 epub_exclude_files = ["search.html"] 117 118 119 def delete_attrs(cls): 120 for name in dir(cls): 121 if not name.startswith("_"): 122 try: 123 delattr(cls, name) 124 except Exception: 125 pass 126 127 128 # Try not to cause side effects if we are imported incidentally. 129 130 try: 131 import sphinx 132 133 IN_SPHINX = hasattr(sphinx, "application") 134 except ImportError: 135 IN_SPHINX = False 136 137 IN_READTHEDOCS = "READTHEDOCS" in os.environ 138 139 if IN_READTHEDOCS: 140 env = {"PYODIDE_BASE_URL": CDN_URL} 141 os.makedirs("_build/html", exist_ok=True) 142 res = subprocess.check_output( 143 ["make", "-C", "..", "docs/_build/html/console.html"], 144 env=env, 145 stderr=subprocess.STDOUT, 146 encoding="utf-8", 147 ) 148 print(res) 149 150 if IN_SPHINX: 151 # Compatibility shims. sphinx-js and sphinxcontrib-napoleon have not been updated for Python 3.10 152 import collections 153 from typing import Callable, Mapping 154 155 collections.Mapping = Mapping # type: ignore[attr-defined] 156 collections.Callable = Callable # type: ignore[attr-defined] 157 158 base_dir = Path(__file__).resolve().parent.parent 159 path_dirs = [ 160 str(base_dir), 161 str(base_dir / "pyodide-build"), 162 str(base_dir / "docs/sphinx_pyodide"), 163 str(base_dir / "src/py"), 164 str(base_dir / "packages/micropip/src"), 165 ] 166 sys.path = path_dirs + sys.path 167 168 import micropip # noqa: F401 169 import pyodide 170 171 # We hacked it so that autodoc will look for submodules, but only if we import 172 # them here. TODO: look these up in the source directory? 173 import pyodide.code 174 import pyodide.console 175 import pyodide.ffi.wrappers 176 import pyodide.http 177 import pyodide.webloop 178 179 # The full version, including alpha/beta/rc tags. 180 release = version = pyodide.__version__ 181 html_title = f"Version {version}" 182 183 shutil.copy("../src/core/pyproxy.ts", "../src/js/pyproxy.gen.ts") 184 shutil.copy("../src/core/error_handling.ts", "../src/js/error_handling.gen.ts") 185 js_source_path = [str(x) for x in Path("../src/js").glob("*.ts")] 186 187 def remove_pyproxy_gen_ts(): 188 Path("../src/js/pyproxy.gen.ts").unlink(missing_ok=True) 189 190 atexit.register(remove_pyproxy_gen_ts) 191 192 os.environ["PATH"] += f':{str(Path("../src/js/node_modules/.bin").resolve())}' 193 print(os.environ["PATH"]) 194 if IN_READTHEDOCS: 195 subprocess.run(["npm", "ci"], cwd="../src/js") 196 elif not shutil.which("typedoc"): 197 raise Exception( 198 "Before building the Pyodide docs you must run 'npm install' in 'src/js'." 
199 ) 200 201 # Prevent API docs for webloop methods: they are the same as for base event loop 202 # and it clutters api docs too much 203 delete_attrs(pyodide.webloop.WebLoop) 204 delete_attrs(pyodide.webloop.WebLoopPolicy) 205 delete_attrs(pyodide.console.PyodideConsole) 206 207 for module in mock_modules: 208 sys.modules[module] = mock.Mock() 209 210 211 # https://github.com/sphinx-doc/sphinx/issues/4054 212 def globalReplace(app, docname, source): 213 result = source[0] 214 for key in app.config.global_replacements: 215 result = result.replace(key, app.config.global_replacements[key]) 216 source[0] = result 217 218 219 global_replacements = {"{{PYODIDE_CDN_URL}}": CDN_URL} 220 221 222 def setup(app): 223 app.add_config_value("global_replacements", {}, True) 224 app.connect("source-read", globalReplace) 225 [end of docs/conf.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/docs/conf.py b/docs/conf.py
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -17,7 +17,7 @@
 copyright = "2019-2022, Pyodide contributors and Mozilla"
 pyodide_version = "0.21.0a3"
 
-if ".dev" in pyodide_version:
+if ".dev" in pyodide_version or os.environ.get("READTHEDOCS_VERSION") == "latest":
     CDN_URL = "https://cdn.jsdelivr.net/pyodide/dev/full/"
 else:
     CDN_URL = f"https://cdn.jsdelivr.net/pyodide/v{pyodide_version}/full/"
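Restated outside of Sphinx, the patched selection logic amounts to the small helper below. It is a sketch for clarity; only the `READTHEDOCS_VERSION` environment variable already used in the diff is assumed.

```python
import os


def pyodide_cdn_url(pyodide_version: str) -> str:
    """Use the dev CDN for dev versions or for the Read the Docs 'latest' build."""
    on_rtd_latest = os.environ.get("READTHEDOCS_VERSION") == "latest"
    if ".dev" in pyodide_version or on_rtd_latest:
        return "https://cdn.jsdelivr.net/pyodide/dev/full/"
    return f"https://cdn.jsdelivr.net/pyodide/v{pyodide_version}/full/"


# Behaviour under the two builds described in the issue:
os.environ["READTHEDOCS_VERSION"] = "latest"
assert pyodide_cdn_url("0.21.0a3").endswith("/dev/full/")
os.environ["READTHEDOCS_VERSION"] = "stable"
assert pyodide_cdn_url("0.21.0a3") == "https://cdn.jsdelivr.net/pyodide/v0.21.0a3/full/"
```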
{"golden_diff": "diff --git a/docs/conf.py b/docs/conf.py\n--- a/docs/conf.py\n+++ b/docs/conf.py\n@@ -17,7 +17,7 @@\n copyright = \"2019-2022, Pyodide contributors and Mozilla\"\n pyodide_version = \"0.21.0a3\"\n \n-if \".dev\" in pyodide_version:\n+if \".dev\" in pyodide_version or os.environ.get(\"READTHEDOCS_VERSION\") == \"latest\":\n CDN_URL = \"https://cdn.jsdelivr.net/pyodide/dev/full/\"\n else:\n CDN_URL = f\"https://cdn.jsdelivr.net/pyodide/v{pyodide_version}/full/\"\n", "issue": "https://pyodide.org/en/latest/console.html doesn't show latest `main` version\n## \ud83d\udc1b Bug\r\n\r\nThe https://pyodide.org/en/latest/console.html console is stuck at `v0.21.0a3`. I believe this is because the version wasn't bumped to the next 'dev' version after the `v0.21.0a3` release, so somebody probably needs to run\r\n```\r\n./tools/bump_version.py --new-version 0.21.0.dev0\r\n\r\n```\r\nWithout `dev` in the version, the documentation's console uses the release:\r\nhttps://github.com/pyodide/pyodide/blob/7231cab3ffc83f6221fafb7458f9b223d2a7c759/docs/conf.py#L20-L23\r\n\r\n### To Reproduce\r\n\r\nGo to https://pyodide.org/en/latest/console.html and load a package added since v0.21.0a3, e.g., `import rebound`\r\n\r\n\n", "before_files": [{"content": "# Configuration file for the Sphinx documentation builder.\n\n# -- Path setup --------------------------------------------------------------\n\nimport atexit\nimport os\nimport shutil\nimport subprocess\nimport sys\nfrom pathlib import Path\nfrom typing import Any\nfrom unittest import mock\n\n# -- Project information -----------------------------------------------------\n\nproject = \"Pyodide\"\ncopyright = \"2019-2022, Pyodide contributors and Mozilla\"\npyodide_version = \"0.21.0a3\"\n\nif \".dev\" in pyodide_version:\n CDN_URL = \"https://cdn.jsdelivr.net/pyodide/dev/full/\"\nelse:\n CDN_URL = f\"https://cdn.jsdelivr.net/pyodide/v{pyodide_version}/full/\"\n\n# -- General configuration ---------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n#\n# needs_sphinx = '1.0'\n\nextensions = [\n \"sphinx.ext.autodoc\",\n \"sphinx.ext.autosummary\",\n \"sphinxcontrib.napoleon\",\n \"myst_parser\",\n \"sphinx_js\",\n \"autodocsumm\",\n \"sphinx_panels\",\n \"sphinx_pyodide\",\n \"sphinx_argparse_cli\",\n \"versionwarning.extension\",\n \"sphinx_issues\",\n]\n\nmyst_enable_extensions = [\"substitution\"]\n\njs_language = \"typescript\"\njsdoc_config_path = \"../src/js/tsconfig.json\"\nroot_for_relative_js_paths = \"../src/\"\nissues_github_path = \"pyodide/pyodide\"\n\nversionwarning_messages = {\n \"latest\": (\n \"This is the development version of the documentation. \"\n 'See <a href=\"https://pyodide.org/\">here</a> for latest stable '\n \"documentation. 
Please do not use Pyodide with non \"\n \"versioned (`dev`) URLs from the CDN for deployed applications!\"\n )\n}\nversionwarning_body_selector = \"#main-content > div\"\n\nautosummary_generate = True\nautodoc_default_flags = [\"members\", \"inherited-members\"]\n\n# Add modules to be mocked.\nmock_modules = [\"ruamel.yaml\", \"tomli\"]\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = [\"_templates\"]\n\n# The suffix(es) of source filenames.\nsource_suffix = [\".rst\", \".md\"]\n\n# The master toctree document.\nmaster_doc = \"index\"\n\n# The language for content autogenerated by Sphinx.\nlanguage = None\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\nexclude_patterns = [\"_build\", \"Thumbs.db\", \".DS_Store\", \"README.md\"]\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = None\n\n# -- Options for HTML output -------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\nhtml_theme = \"sphinx_book_theme\"\nhtml_logo = \"_static/img/pyodide-logo.png\"\n\n# theme-specific options\nhtml_theme_options: dict[str, Any] = {}\n\n# paths that contain custom static files (such as style sheets)\nhtml_static_path = [\"_static\"]\n\n\nhtml_css_files = [\n \"css/pyodide.css\",\n]\n\n# Custom sidebar templates, must be a dictionary that maps document names\n# to template names.\n# html_sidebars = {}\n\n# -- Options for HTMLHelp output ---------------------------------------------\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = \"Pyodidedoc\"\n\n# A list of files that should not be packed into the epub file.\nepub_exclude_files = [\"search.html\"]\n\n\ndef delete_attrs(cls):\n for name in dir(cls):\n if not name.startswith(\"_\"):\n try:\n delattr(cls, name)\n except Exception:\n pass\n\n\n# Try not to cause side effects if we are imported incidentally.\n\ntry:\n import sphinx\n\n IN_SPHINX = hasattr(sphinx, \"application\")\nexcept ImportError:\n IN_SPHINX = False\n\nIN_READTHEDOCS = \"READTHEDOCS\" in os.environ\n\nif IN_READTHEDOCS:\n env = {\"PYODIDE_BASE_URL\": CDN_URL}\n os.makedirs(\"_build/html\", exist_ok=True)\n res = subprocess.check_output(\n [\"make\", \"-C\", \"..\", \"docs/_build/html/console.html\"],\n env=env,\n stderr=subprocess.STDOUT,\n encoding=\"utf-8\",\n )\n print(res)\n\nif IN_SPHINX:\n # Compatibility shims. sphinx-js and sphinxcontrib-napoleon have not been updated for Python 3.10\n import collections\n from typing import Callable, Mapping\n\n collections.Mapping = Mapping # type: ignore[attr-defined]\n collections.Callable = Callable # type: ignore[attr-defined]\n\n base_dir = Path(__file__).resolve().parent.parent\n path_dirs = [\n str(base_dir),\n str(base_dir / \"pyodide-build\"),\n str(base_dir / \"docs/sphinx_pyodide\"),\n str(base_dir / \"src/py\"),\n str(base_dir / \"packages/micropip/src\"),\n ]\n sys.path = path_dirs + sys.path\n\n import micropip # noqa: F401\n import pyodide\n\n # We hacked it so that autodoc will look for submodules, but only if we import\n # them here. 
TODO: look these up in the source directory?\n import pyodide.code\n import pyodide.console\n import pyodide.ffi.wrappers\n import pyodide.http\n import pyodide.webloop\n\n # The full version, including alpha/beta/rc tags.\n release = version = pyodide.__version__\n html_title = f\"Version {version}\"\n\n shutil.copy(\"../src/core/pyproxy.ts\", \"../src/js/pyproxy.gen.ts\")\n shutil.copy(\"../src/core/error_handling.ts\", \"../src/js/error_handling.gen.ts\")\n js_source_path = [str(x) for x in Path(\"../src/js\").glob(\"*.ts\")]\n\n def remove_pyproxy_gen_ts():\n Path(\"../src/js/pyproxy.gen.ts\").unlink(missing_ok=True)\n\n atexit.register(remove_pyproxy_gen_ts)\n\n os.environ[\"PATH\"] += f':{str(Path(\"../src/js/node_modules/.bin\").resolve())}'\n print(os.environ[\"PATH\"])\n if IN_READTHEDOCS:\n subprocess.run([\"npm\", \"ci\"], cwd=\"../src/js\")\n elif not shutil.which(\"typedoc\"):\n raise Exception(\n \"Before building the Pyodide docs you must run 'npm install' in 'src/js'.\"\n )\n\n # Prevent API docs for webloop methods: they are the same as for base event loop\n # and it clutters api docs too much\n delete_attrs(pyodide.webloop.WebLoop)\n delete_attrs(pyodide.webloop.WebLoopPolicy)\n delete_attrs(pyodide.console.PyodideConsole)\n\n for module in mock_modules:\n sys.modules[module] = mock.Mock()\n\n\n# https://github.com/sphinx-doc/sphinx/issues/4054\ndef globalReplace(app, docname, source):\n result = source[0]\n for key in app.config.global_replacements:\n result = result.replace(key, app.config.global_replacements[key])\n source[0] = result\n\n\nglobal_replacements = {\"{{PYODIDE_CDN_URL}}\": CDN_URL}\n\n\ndef setup(app):\n app.add_config_value(\"global_replacements\", {}, True)\n app.connect(\"source-read\", globalReplace)\n", "path": "docs/conf.py"}]}
2,976
148
gh_patches_debug_41319
rasdani/github-patches
git_diff
pytorch__ignite-1328
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Update SSIM metric ## 🚀 Feature Current implementation of [SSIM](https://github.com/pytorch/ignite/blob/master/ignite/metrics/ssim.py) does not use torch tensor as internal storage and hasn't device in the constructor. Following #1238 we expect all metrics to have a similar structure. @n2cholas , could you please work on that ? </issue> <code> [start of ignite/metrics/ssim.py] 1 from typing import Callable, Sequence, Union 2 3 import torch 4 import torch.nn.functional as F 5 6 from ignite.exceptions import NotComputableError 7 from ignite.metrics.metric import Metric, reinit__is_reduced, sync_all_reduce 8 9 __all__ = ["SSIM"] 10 11 12 class SSIM(Metric): 13 """ 14 Computes Structual Similarity Index Measure 15 16 Args: 17 data_range (int or float): Range of the image. Typically, ``1.0`` or ``255``. 18 kernel_size (int or list or tuple of int): Size of the kernel. Default: (11, 11) 19 sigma (float or list or tuple of float): Standard deviation of the gaussian kernel. 20 Argument is used if ``gaussian=True``. Default: (1.5, 1.5) 21 k1 (float): Parameter of SSIM. Default: 0.01 22 k2 (float): Parameter of SSIM. Default: 0.03 23 gaussian (bool): ``True`` to use gaussian kernel, ``False`` to use uniform kernel 24 output_transform (callable, optional): A callable that is used to transform the 25 :class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the 26 form expected by the metric. 27 28 Example: 29 30 To use with ``Engine`` and ``process_function``, simply attach the metric instance to the engine. 31 The output of the engine's ``process_function`` needs to be in the format of 32 ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y, ...}``. 33 34 ``y_pred`` and ``y`` can be un-normalized or normalized image tensors. Depending on that, the user might need 35 to adjust ``data_range``. ``y_pred`` and ``y`` should have the same shape. 36 37 .. code-block:: python 38 39 def process_function(engine, batch): 40 # ... 41 return y_pred, y 42 engine = Engine(process_function) 43 metric = SSIM(data_range=1.0) 44 metric.attach(engine, "ssim") 45 """ 46 47 def __init__( 48 self, 49 data_range: Union[int, float], 50 kernel_size: Union[int, Sequence[int]] = (11, 11), 51 sigma: Union[float, Sequence[float]] = (1.5, 1.5), 52 k1: float = 0.01, 53 k2: float = 0.03, 54 gaussian: bool = True, 55 output_transform: Callable = lambda x: x, 56 ): 57 if isinstance(kernel_size, int): 58 self.kernel_size = [kernel_size, kernel_size] 59 elif isinstance(kernel_size, Sequence): 60 self.kernel_size = kernel_size 61 else: 62 raise ValueError("Argument kernel_size should be either int or a sequence of int.") 63 64 if isinstance(sigma, float): 65 self.sigma = [sigma, sigma] 66 elif isinstance(sigma, Sequence): 67 self.sigma = sigma 68 else: 69 raise ValueError("Argument sigma should be either float or a sequence of float.") 70 71 if any(x % 2 == 0 or x <= 0 for x in self.kernel_size): 72 raise ValueError("Expected kernel_size to have odd positive number. Got {}.".format(kernel_size)) 73 74 if any(y <= 0 for y in self.sigma): 75 raise ValueError("Expected sigma to have positive number. 
Got {}.".format(sigma)) 76 77 self.gaussian = gaussian 78 self.c1 = (k1 * data_range) ** 2 79 self.c2 = (k2 * data_range) ** 2 80 self.pad_h = (self.kernel_size[0] - 1) // 2 81 self.pad_w = (self.kernel_size[1] - 1) // 2 82 self._kernel = self._gaussian_or_uniform_kernel(kernel_size=self.kernel_size, sigma=self.sigma) 83 super(SSIM, self).__init__(output_transform=output_transform) 84 85 @reinit__is_reduced 86 def reset(self) -> None: 87 self._sum_of_batchwise_ssim = 0.0 88 self._num_examples = 0 89 self._kernel = self._gaussian_or_uniform_kernel(kernel_size=self.kernel_size, sigma=self.sigma) 90 91 def _uniform(self, kernel_size): 92 max, min = 2.5, -2.5 93 kernel = torch.arange(start=(1 - kernel_size) / 2, end=(1 + kernel_size) / 2, step=1, dtype=torch.float32) 94 for i, j in enumerate(kernel): 95 if min <= j <= max: 96 kernel[i] = 1 / (max - min) 97 else: 98 kernel[i] = 0 99 100 return kernel.unsqueeze(dim=0) # (1, kernel_size) 101 102 def _gaussian(self, kernel_size, sigma): 103 kernel = torch.arange(start=(1 - kernel_size) / 2, end=(1 + kernel_size) / 2, step=1, dtype=torch.float32) 104 gauss = torch.exp(-kernel.pow(2) / (2 * pow(sigma, 2))) 105 return (gauss / gauss.sum()).unsqueeze(dim=0) # (1, kernel_size) 106 107 def _gaussian_or_uniform_kernel(self, kernel_size, sigma): 108 if self.gaussian: 109 kernel_x = self._gaussian(kernel_size[0], sigma[0]) 110 kernel_y = self._gaussian(kernel_size[1], sigma[1]) 111 else: 112 kernel_x = self._uniform(kernel_size[0]) 113 kernel_y = self._uniform(kernel_size[1]) 114 115 return torch.matmul(kernel_x.t(), kernel_y) # (kernel_size, 1) * (1, kernel_size) 116 117 @reinit__is_reduced 118 def update(self, output: Sequence[torch.Tensor]) -> None: 119 y_pred, y = output[0].detach(), output[1].detach() 120 121 if y_pred.dtype != y.dtype: 122 raise TypeError( 123 "Expected y_pred and y to have the same data type. Got y_pred: {} and y: {}.".format( 124 y_pred.dtype, y.dtype 125 ) 126 ) 127 128 if y_pred.shape != y.shape: 129 raise ValueError( 130 "Expected y_pred and y to have the same shape. Got y_pred: {} and y: {}.".format(y_pred.shape, y.shape) 131 ) 132 133 if len(y_pred.shape) != 4 or len(y.shape) != 4: 134 raise ValueError( 135 "Expected y_pred and y to have BxCxHxW shape. 
Got y_pred: {} and y: {}.".format(y_pred.shape, y.shape) 136 ) 137 138 channel = y_pred.size(1) 139 if len(self._kernel.shape) < 4: 140 self._kernel = self._kernel.expand(channel, 1, -1, -1).to(device=y_pred.device) 141 142 y_pred = F.pad(y_pred, (self.pad_w, self.pad_w, self.pad_h, self.pad_h), mode="reflect") 143 y = F.pad(y, (self.pad_w, self.pad_w, self.pad_h, self.pad_h), mode="reflect") 144 145 input_list = torch.cat([y_pred, y, y_pred * y_pred, y * y, y_pred * y]) 146 outputs = F.conv2d(input_list, self._kernel, groups=channel) 147 148 output_list = [outputs[x * y_pred.size(0) : (x + 1) * y_pred.size(0)] for x in range(len(outputs))] 149 150 mu_pred_sq = output_list[0].pow(2) 151 mu_target_sq = output_list[1].pow(2) 152 mu_pred_target = output_list[0] * output_list[1] 153 154 sigma_pred_sq = output_list[2] - mu_pred_sq 155 sigma_target_sq = output_list[3] - mu_target_sq 156 sigma_pred_target = output_list[4] - mu_pred_target 157 158 a1 = 2 * mu_pred_target + self.c1 159 a2 = 2 * sigma_pred_target + self.c2 160 b1 = mu_pred_sq + mu_target_sq + self.c1 161 b2 = sigma_pred_sq + sigma_target_sq + self.c2 162 163 ssim_idx = (a1 * a2) / (b1 * b2) 164 self._sum_of_batchwise_ssim += torch.mean(ssim_idx, (1, 2, 3), dtype=torch.float64) 165 self._num_examples += y.shape[0] 166 167 @sync_all_reduce("_sum_of_batchwise_ssim", "_num_examples") 168 def compute(self) -> torch.Tensor: 169 if self._num_examples == 0: 170 raise NotComputableError("SSIM must have at least one example before it can be computed.") 171 return torch.sum(self._sum_of_batchwise_ssim / self._num_examples) 172 [end of ignite/metrics/ssim.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/ignite/metrics/ssim.py b/ignite/metrics/ssim.py --- a/ignite/metrics/ssim.py +++ b/ignite/metrics/ssim.py @@ -53,6 +53,7 @@ k2: float = 0.03, gaussian: bool = True, output_transform: Callable = lambda x: x, + device: Union[str, torch.device] = torch.device("cpu"), ): if isinstance(kernel_size, int): self.kernel_size = [kernel_size, kernel_size] @@ -74,23 +75,24 @@ if any(y <= 0 for y in self.sigma): raise ValueError("Expected sigma to have positive number. Got {}.".format(sigma)) + super(SSIM, self).__init__(output_transform=output_transform, device=device) self.gaussian = gaussian self.c1 = (k1 * data_range) ** 2 self.c2 = (k2 * data_range) ** 2 self.pad_h = (self.kernel_size[0] - 1) // 2 self.pad_w = (self.kernel_size[1] - 1) // 2 self._kernel = self._gaussian_or_uniform_kernel(kernel_size=self.kernel_size, sigma=self.sigma) - super(SSIM, self).__init__(output_transform=output_transform) @reinit__is_reduced def reset(self) -> None: - self._sum_of_batchwise_ssim = 0.0 + self._sum_of_batchwise_ssim = 0.0 # Not a tensor because batch size is not known in advance. self._num_examples = 0 self._kernel = self._gaussian_or_uniform_kernel(kernel_size=self.kernel_size, sigma=self.sigma) def _uniform(self, kernel_size): max, min = 2.5, -2.5 - kernel = torch.arange(start=(1 - kernel_size) / 2, end=(1 + kernel_size) / 2, step=1, dtype=torch.float32) + ksize_half = (kernel_size - 1) * 0.5 + kernel = torch.linspace(-ksize_half, ksize_half, steps=kernel_size, device=self._device) for i, j in enumerate(kernel): if min <= j <= max: kernel[i] = 1 / (max - min) @@ -100,8 +102,9 @@ return kernel.unsqueeze(dim=0) # (1, kernel_size) def _gaussian(self, kernel_size, sigma): - kernel = torch.arange(start=(1 - kernel_size) / 2, end=(1 + kernel_size) / 2, step=1, dtype=torch.float32) - gauss = torch.exp(-kernel.pow(2) / (2 * pow(sigma, 2))) + ksize_half = (kernel_size - 1) * 0.5 + kernel = torch.linspace(-ksize_half, ksize_half, steps=kernel_size, device=self._device) + gauss = torch.exp(-0.5 * (kernel / sigma).pow(2)) return (gauss / gauss.sum()).unsqueeze(dim=0) # (1, kernel_size) def _gaussian_or_uniform_kernel(self, kernel_size, sigma): @@ -161,7 +164,7 @@ b2 = sigma_pred_sq + sigma_target_sq + self.c2 ssim_idx = (a1 * a2) / (b1 * b2) - self._sum_of_batchwise_ssim += torch.mean(ssim_idx, (1, 2, 3), dtype=torch.float64) + self._sum_of_batchwise_ssim += torch.mean(ssim_idx, (1, 2, 3), dtype=torch.float64).to(self._device) self._num_examples += y.shape[0] @sync_all_reduce("_sum_of_batchwise_ssim", "_num_examples")
{"golden_diff": "diff --git a/ignite/metrics/ssim.py b/ignite/metrics/ssim.py\n--- a/ignite/metrics/ssim.py\n+++ b/ignite/metrics/ssim.py\n@@ -53,6 +53,7 @@\n k2: float = 0.03,\n gaussian: bool = True,\n output_transform: Callable = lambda x: x,\n+ device: Union[str, torch.device] = torch.device(\"cpu\"),\n ):\n if isinstance(kernel_size, int):\n self.kernel_size = [kernel_size, kernel_size]\n@@ -74,23 +75,24 @@\n if any(y <= 0 for y in self.sigma):\n raise ValueError(\"Expected sigma to have positive number. Got {}.\".format(sigma))\n \n+ super(SSIM, self).__init__(output_transform=output_transform, device=device)\n self.gaussian = gaussian\n self.c1 = (k1 * data_range) ** 2\n self.c2 = (k2 * data_range) ** 2\n self.pad_h = (self.kernel_size[0] - 1) // 2\n self.pad_w = (self.kernel_size[1] - 1) // 2\n self._kernel = self._gaussian_or_uniform_kernel(kernel_size=self.kernel_size, sigma=self.sigma)\n- super(SSIM, self).__init__(output_transform=output_transform)\n \n @reinit__is_reduced\n def reset(self) -> None:\n- self._sum_of_batchwise_ssim = 0.0\n+ self._sum_of_batchwise_ssim = 0.0 # Not a tensor because batch size is not known in advance.\n self._num_examples = 0\n self._kernel = self._gaussian_or_uniform_kernel(kernel_size=self.kernel_size, sigma=self.sigma)\n \n def _uniform(self, kernel_size):\n max, min = 2.5, -2.5\n- kernel = torch.arange(start=(1 - kernel_size) / 2, end=(1 + kernel_size) / 2, step=1, dtype=torch.float32)\n+ ksize_half = (kernel_size - 1) * 0.5\n+ kernel = torch.linspace(-ksize_half, ksize_half, steps=kernel_size, device=self._device)\n for i, j in enumerate(kernel):\n if min <= j <= max:\n kernel[i] = 1 / (max - min)\n@@ -100,8 +102,9 @@\n return kernel.unsqueeze(dim=0) # (1, kernel_size)\n \n def _gaussian(self, kernel_size, sigma):\n- kernel = torch.arange(start=(1 - kernel_size) / 2, end=(1 + kernel_size) / 2, step=1, dtype=torch.float32)\n- gauss = torch.exp(-kernel.pow(2) / (2 * pow(sigma, 2)))\n+ ksize_half = (kernel_size - 1) * 0.5\n+ kernel = torch.linspace(-ksize_half, ksize_half, steps=kernel_size, device=self._device)\n+ gauss = torch.exp(-0.5 * (kernel / sigma).pow(2))\n return (gauss / gauss.sum()).unsqueeze(dim=0) # (1, kernel_size)\n \n def _gaussian_or_uniform_kernel(self, kernel_size, sigma):\n@@ -161,7 +164,7 @@\n b2 = sigma_pred_sq + sigma_target_sq + self.c2\n \n ssim_idx = (a1 * a2) / (b1 * b2)\n- self._sum_of_batchwise_ssim += torch.mean(ssim_idx, (1, 2, 3), dtype=torch.float64)\n+ self._sum_of_batchwise_ssim += torch.mean(ssim_idx, (1, 2, 3), dtype=torch.float64).to(self._device)\n self._num_examples += y.shape[0]\n \n @sync_all_reduce(\"_sum_of_batchwise_ssim\", \"_num_examples\")\n", "issue": "Update SSIM metric\n## \ud83d\ude80 Feature\r\n\r\nCurrent implementation of [SSIM](https://github.com/pytorch/ignite/blob/master/ignite/metrics/ssim.py) does not use torch tensor as internal storage and hasn't device in the constructor. Following #1238 we expect all metrics to have a similar structure.\r\n\r\n@n2cholas , could you please work on that ?\n", "before_files": [{"content": "from typing import Callable, Sequence, Union\n\nimport torch\nimport torch.nn.functional as F\n\nfrom ignite.exceptions import NotComputableError\nfrom ignite.metrics.metric import Metric, reinit__is_reduced, sync_all_reduce\n\n__all__ = [\"SSIM\"]\n\n\nclass SSIM(Metric):\n \"\"\"\n Computes Structual Similarity Index Measure\n\n Args:\n data_range (int or float): Range of the image. 
Typically, ``1.0`` or ``255``.\n kernel_size (int or list or tuple of int): Size of the kernel. Default: (11, 11)\n sigma (float or list or tuple of float): Standard deviation of the gaussian kernel.\n Argument is used if ``gaussian=True``. Default: (1.5, 1.5)\n k1 (float): Parameter of SSIM. Default: 0.01\n k2 (float): Parameter of SSIM. Default: 0.03\n gaussian (bool): ``True`` to use gaussian kernel, ``False`` to use uniform kernel\n output_transform (callable, optional): A callable that is used to transform the\n :class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the\n form expected by the metric.\n\n Example:\n\n To use with ``Engine`` and ``process_function``, simply attach the metric instance to the engine.\n The output of the engine's ``process_function`` needs to be in the format of\n ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y, ...}``.\n\n ``y_pred`` and ``y`` can be un-normalized or normalized image tensors. Depending on that, the user might need\n to adjust ``data_range``. ``y_pred`` and ``y`` should have the same shape.\n\n .. code-block:: python\n\n def process_function(engine, batch):\n # ...\n return y_pred, y\n engine = Engine(process_function)\n metric = SSIM(data_range=1.0)\n metric.attach(engine, \"ssim\")\n \"\"\"\n\n def __init__(\n self,\n data_range: Union[int, float],\n kernel_size: Union[int, Sequence[int]] = (11, 11),\n sigma: Union[float, Sequence[float]] = (1.5, 1.5),\n k1: float = 0.01,\n k2: float = 0.03,\n gaussian: bool = True,\n output_transform: Callable = lambda x: x,\n ):\n if isinstance(kernel_size, int):\n self.kernel_size = [kernel_size, kernel_size]\n elif isinstance(kernel_size, Sequence):\n self.kernel_size = kernel_size\n else:\n raise ValueError(\"Argument kernel_size should be either int or a sequence of int.\")\n\n if isinstance(sigma, float):\n self.sigma = [sigma, sigma]\n elif isinstance(sigma, Sequence):\n self.sigma = sigma\n else:\n raise ValueError(\"Argument sigma should be either float or a sequence of float.\")\n\n if any(x % 2 == 0 or x <= 0 for x in self.kernel_size):\n raise ValueError(\"Expected kernel_size to have odd positive number. Got {}.\".format(kernel_size))\n\n if any(y <= 0 for y in self.sigma):\n raise ValueError(\"Expected sigma to have positive number. 
Got {}.\".format(sigma))\n\n self.gaussian = gaussian\n self.c1 = (k1 * data_range) ** 2\n self.c2 = (k2 * data_range) ** 2\n self.pad_h = (self.kernel_size[0] - 1) // 2\n self.pad_w = (self.kernel_size[1] - 1) // 2\n self._kernel = self._gaussian_or_uniform_kernel(kernel_size=self.kernel_size, sigma=self.sigma)\n super(SSIM, self).__init__(output_transform=output_transform)\n\n @reinit__is_reduced\n def reset(self) -> None:\n self._sum_of_batchwise_ssim = 0.0\n self._num_examples = 0\n self._kernel = self._gaussian_or_uniform_kernel(kernel_size=self.kernel_size, sigma=self.sigma)\n\n def _uniform(self, kernel_size):\n max, min = 2.5, -2.5\n kernel = torch.arange(start=(1 - kernel_size) / 2, end=(1 + kernel_size) / 2, step=1, dtype=torch.float32)\n for i, j in enumerate(kernel):\n if min <= j <= max:\n kernel[i] = 1 / (max - min)\n else:\n kernel[i] = 0\n\n return kernel.unsqueeze(dim=0) # (1, kernel_size)\n\n def _gaussian(self, kernel_size, sigma):\n kernel = torch.arange(start=(1 - kernel_size) / 2, end=(1 + kernel_size) / 2, step=1, dtype=torch.float32)\n gauss = torch.exp(-kernel.pow(2) / (2 * pow(sigma, 2)))\n return (gauss / gauss.sum()).unsqueeze(dim=0) # (1, kernel_size)\n\n def _gaussian_or_uniform_kernel(self, kernel_size, sigma):\n if self.gaussian:\n kernel_x = self._gaussian(kernel_size[0], sigma[0])\n kernel_y = self._gaussian(kernel_size[1], sigma[1])\n else:\n kernel_x = self._uniform(kernel_size[0])\n kernel_y = self._uniform(kernel_size[1])\n\n return torch.matmul(kernel_x.t(), kernel_y) # (kernel_size, 1) * (1, kernel_size)\n\n @reinit__is_reduced\n def update(self, output: Sequence[torch.Tensor]) -> None:\n y_pred, y = output[0].detach(), output[1].detach()\n\n if y_pred.dtype != y.dtype:\n raise TypeError(\n \"Expected y_pred and y to have the same data type. Got y_pred: {} and y: {}.\".format(\n y_pred.dtype, y.dtype\n )\n )\n\n if y_pred.shape != y.shape:\n raise ValueError(\n \"Expected y_pred and y to have the same shape. Got y_pred: {} and y: {}.\".format(y_pred.shape, y.shape)\n )\n\n if len(y_pred.shape) != 4 or len(y.shape) != 4:\n raise ValueError(\n \"Expected y_pred and y to have BxCxHxW shape. 
Got y_pred: {} and y: {}.\".format(y_pred.shape, y.shape)\n )\n\n channel = y_pred.size(1)\n if len(self._kernel.shape) < 4:\n self._kernel = self._kernel.expand(channel, 1, -1, -1).to(device=y_pred.device)\n\n y_pred = F.pad(y_pred, (self.pad_w, self.pad_w, self.pad_h, self.pad_h), mode=\"reflect\")\n y = F.pad(y, (self.pad_w, self.pad_w, self.pad_h, self.pad_h), mode=\"reflect\")\n\n input_list = torch.cat([y_pred, y, y_pred * y_pred, y * y, y_pred * y])\n outputs = F.conv2d(input_list, self._kernel, groups=channel)\n\n output_list = [outputs[x * y_pred.size(0) : (x + 1) * y_pred.size(0)] for x in range(len(outputs))]\n\n mu_pred_sq = output_list[0].pow(2)\n mu_target_sq = output_list[1].pow(2)\n mu_pred_target = output_list[0] * output_list[1]\n\n sigma_pred_sq = output_list[2] - mu_pred_sq\n sigma_target_sq = output_list[3] - mu_target_sq\n sigma_pred_target = output_list[4] - mu_pred_target\n\n a1 = 2 * mu_pred_target + self.c1\n a2 = 2 * sigma_pred_target + self.c2\n b1 = mu_pred_sq + mu_target_sq + self.c1\n b2 = sigma_pred_sq + sigma_target_sq + self.c2\n\n ssim_idx = (a1 * a2) / (b1 * b2)\n self._sum_of_batchwise_ssim += torch.mean(ssim_idx, (1, 2, 3), dtype=torch.float64)\n self._num_examples += y.shape[0]\n\n @sync_all_reduce(\"_sum_of_batchwise_ssim\", \"_num_examples\")\n def compute(self) -> torch.Tensor:\n if self._num_examples == 0:\n raise NotComputableError(\"SSIM must have at least one example before it can be computed.\")\n return torch.sum(self._sum_of_batchwise_ssim / self._num_examples)\n", "path": "ignite/metrics/ssim.py"}]}
2952
883
gh_patches_debug_18840
rasdani/github-patches
git_diff
pyro-ppl__numpyro-1325
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> compat module: compat.infer.MCMC broken Calling ```python compat.infer.MCMC(kernel, num_warmup=100, num_samples=1000) ``` throws an error since no kwargs are handed to `mcmc.MCMC`. Also, shouldnt the correct `compat` call be ```python compat.infer.MCMC(kernel, warmup_steps=100, num_samples=1000) ``` Thanks :) </issue> <code> [start of numpyro/compat/infer.py] 1 # Copyright Contributors to the Pyro project. 2 # SPDX-License-Identifier: Apache-2.0 3 4 import math 5 6 from jax import jit 7 8 import numpyro 9 from numpyro.compat.pyro import get_param_store 10 from numpyro.infer import elbo, hmc, mcmc, svi 11 12 13 class HMC(hmc.HMC): 14 def __init__( 15 self, 16 model=None, 17 potential_fn=None, 18 step_size=1, 19 adapt_step_size=True, 20 adapt_mass_matrix=True, 21 full_mass=False, 22 use_multinomial_sampling=True, 23 transforms=None, 24 max_plate_nesting=None, 25 jit_compile=False, 26 jit_options=None, 27 ignore_jit_warnings=False, 28 trajectory_length=2 * math.pi, 29 target_accept_prob=0.8, 30 ): 31 super(HMC, self).__init__( 32 model=model, 33 potential_fn=potential_fn, 34 step_size=step_size, 35 adapt_step_size=adapt_step_size, 36 adapt_mass_matrix=adapt_mass_matrix, 37 dense_mass=full_mass, 38 target_accept_prob=target_accept_prob, 39 trajectory_length=trajectory_length, 40 ) 41 42 43 class NUTS(hmc.NUTS): 44 def __init__( 45 self, 46 model=None, 47 potential_fn=None, 48 step_size=1, 49 adapt_step_size=True, 50 adapt_mass_matrix=True, 51 full_mass=False, 52 use_multinomial_sampling=True, 53 transforms=None, 54 max_plate_nesting=None, 55 jit_compile=False, 56 jit_options=None, 57 ignore_jit_warnings=False, 58 trajectory_length=2 * math.pi, 59 target_accept_prob=0.8, 60 max_tree_depth=10, 61 ): 62 if potential_fn is not None: 63 raise ValueError( 64 "Only `model` argument is supported in generic module;" 65 " `potential_fn` is not supported." 
66 ) 67 super(NUTS, self).__init__( 68 model=model, 69 potential_fn=potential_fn, 70 step_size=step_size, 71 adapt_step_size=adapt_step_size, 72 adapt_mass_matrix=adapt_mass_matrix, 73 dense_mass=full_mass, 74 target_accept_prob=target_accept_prob, 75 trajectory_length=trajectory_length, 76 max_tree_depth=max_tree_depth, 77 ) 78 79 80 class MCMC(object): 81 def __init__( 82 self, 83 kernel, 84 num_samples, 85 num_warmup=None, 86 initial_params=None, 87 num_chains=1, 88 hook_fn=None, 89 mp_context=None, 90 disable_progbar=False, 91 disable_validation=True, 92 transforms=None, 93 ): 94 if num_warmup is None: 95 num_warmup = num_samples 96 self._initial_params = initial_params 97 self._mcmc = mcmc.MCMC( 98 kernel, 99 num_warmup, 100 num_samples, 101 num_chains=num_chains, 102 progress_bar=(not disable_progbar), 103 ) 104 105 def run(self, *args, rng_key=None, **kwargs): 106 if rng_key is None: 107 rng_key = numpyro.prng_key() 108 self._mcmc.run(rng_key, *args, init_params=self._initial_params, **kwargs) 109 110 def get_samples(self, num_samples=None, group_by_chain=False): 111 if num_samples is not None: 112 raise ValueError("`num_samples` arg unsupported in NumPyro.") 113 return self._mcmc.get_samples(group_by_chain=group_by_chain) 114 115 def summary(self, prob=0.9): 116 self._mcmc.print_summary() 117 118 119 class SVI(svi.SVI): 120 def __init__( 121 self, 122 model, 123 guide, 124 optim, 125 loss, 126 loss_and_grads=None, 127 num_samples=10, 128 num_steps=0, 129 **kwargs 130 ): 131 super(SVI, self).__init__(model=model, guide=guide, optim=optim, loss=loss) 132 self.svi_state = None 133 134 def evaluate_loss(self, *args, **kwargs): 135 return self.evaluate(self.svi_state, *args, **kwargs) 136 137 def step(self, *args, rng_key=None, **kwargs): 138 if self.svi_state is None: 139 if rng_key is None: 140 rng_key = numpyro.prng_key() 141 self.svi_state = self.init(rng_key, *args, **kwargs) 142 try: 143 self.svi_state, loss = jit(self.update)(self.svi_state, *args, **kwargs) 144 except TypeError as e: 145 if "not a valid JAX type" in str(e): 146 raise TypeError( 147 "NumPyro backend requires args, kwargs to be arrays or tuples, " 148 "dicts of arrays." 149 ) from e 150 else: 151 raise e 152 params = jit(super(SVI, self).get_params)(self.svi_state) 153 get_param_store().update(params) 154 return loss 155 156 def get_params(self): 157 return super(SVI, self).get_params(self.svi_state) 158 159 160 class Trace_ELBO(elbo.Trace_ELBO): 161 def __init__( 162 self, 163 num_particles=1, 164 max_plate_nesting=float("inf"), 165 max_iarange_nesting=None, # DEPRECATED 166 vectorize_particles=False, 167 strict_enumeration_warning=True, 168 ignore_jit_warnings=False, 169 jit_options=None, 170 retain_graph=None, 171 tail_adaptive_beta=-1.0, 172 ): 173 super(Trace_ELBO, self).__init__(num_particles=num_particles) 174 175 176 # JIT is enabled by default 177 JitTrace_ELBO = Trace_ELBO 178 [end of numpyro/compat/infer.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/numpyro/compat/infer.py b/numpyro/compat/infer.py --- a/numpyro/compat/infer.py +++ b/numpyro/compat/infer.py @@ -82,7 +82,7 @@ self, kernel, num_samples, - num_warmup=None, + warmup_steps=None, initial_params=None, num_chains=1, hook_fn=None, @@ -91,13 +91,13 @@ disable_validation=True, transforms=None, ): - if num_warmup is None: - num_warmup = num_samples + if warmup_steps is None: + warmup_steps = num_samples self._initial_params = initial_params self._mcmc = mcmc.MCMC( kernel, - num_warmup, - num_samples, + num_warmup=warmup_steps, + num_samples=num_samples, num_chains=num_chains, progress_bar=(not disable_progbar), )
{"golden_diff": "diff --git a/numpyro/compat/infer.py b/numpyro/compat/infer.py\n--- a/numpyro/compat/infer.py\n+++ b/numpyro/compat/infer.py\n@@ -82,7 +82,7 @@\n self,\n kernel,\n num_samples,\n- num_warmup=None,\n+ warmup_steps=None,\n initial_params=None,\n num_chains=1,\n hook_fn=None,\n@@ -91,13 +91,13 @@\n disable_validation=True,\n transforms=None,\n ):\n- if num_warmup is None:\n- num_warmup = num_samples\n+ if warmup_steps is None:\n+ warmup_steps = num_samples\n self._initial_params = initial_params\n self._mcmc = mcmc.MCMC(\n kernel,\n- num_warmup,\n- num_samples,\n+ num_warmup=warmup_steps,\n+ num_samples=num_samples,\n num_chains=num_chains,\n progress_bar=(not disable_progbar),\n )\n", "issue": "compat module: compat.infer.MCMC broken\nCalling\r\n```python\r\ncompat.infer.MCMC(kernel, num_warmup=100, num_samples=1000)\r\n```\r\nthrows an error since no kwargs are handed to `mcmc.MCMC`. Also, shouldnt the correct `compat` call be\r\n```python\r\ncompat.infer.MCMC(kernel, warmup_steps=100, num_samples=1000)\r\n```\r\nThanks :)\n", "before_files": [{"content": "# Copyright Contributors to the Pyro project.\n# SPDX-License-Identifier: Apache-2.0\n\nimport math\n\nfrom jax import jit\n\nimport numpyro\nfrom numpyro.compat.pyro import get_param_store\nfrom numpyro.infer import elbo, hmc, mcmc, svi\n\n\nclass HMC(hmc.HMC):\n def __init__(\n self,\n model=None,\n potential_fn=None,\n step_size=1,\n adapt_step_size=True,\n adapt_mass_matrix=True,\n full_mass=False,\n use_multinomial_sampling=True,\n transforms=None,\n max_plate_nesting=None,\n jit_compile=False,\n jit_options=None,\n ignore_jit_warnings=False,\n trajectory_length=2 * math.pi,\n target_accept_prob=0.8,\n ):\n super(HMC, self).__init__(\n model=model,\n potential_fn=potential_fn,\n step_size=step_size,\n adapt_step_size=adapt_step_size,\n adapt_mass_matrix=adapt_mass_matrix,\n dense_mass=full_mass,\n target_accept_prob=target_accept_prob,\n trajectory_length=trajectory_length,\n )\n\n\nclass NUTS(hmc.NUTS):\n def __init__(\n self,\n model=None,\n potential_fn=None,\n step_size=1,\n adapt_step_size=True,\n adapt_mass_matrix=True,\n full_mass=False,\n use_multinomial_sampling=True,\n transforms=None,\n max_plate_nesting=None,\n jit_compile=False,\n jit_options=None,\n ignore_jit_warnings=False,\n trajectory_length=2 * math.pi,\n target_accept_prob=0.8,\n max_tree_depth=10,\n ):\n if potential_fn is not None:\n raise ValueError(\n \"Only `model` argument is supported in generic module;\"\n \" `potential_fn` is not supported.\"\n )\n super(NUTS, self).__init__(\n model=model,\n potential_fn=potential_fn,\n step_size=step_size,\n adapt_step_size=adapt_step_size,\n adapt_mass_matrix=adapt_mass_matrix,\n dense_mass=full_mass,\n target_accept_prob=target_accept_prob,\n trajectory_length=trajectory_length,\n max_tree_depth=max_tree_depth,\n )\n\n\nclass MCMC(object):\n def __init__(\n self,\n kernel,\n num_samples,\n num_warmup=None,\n initial_params=None,\n num_chains=1,\n hook_fn=None,\n mp_context=None,\n disable_progbar=False,\n disable_validation=True,\n transforms=None,\n ):\n if num_warmup is None:\n num_warmup = num_samples\n self._initial_params = initial_params\n self._mcmc = mcmc.MCMC(\n kernel,\n num_warmup,\n num_samples,\n num_chains=num_chains,\n progress_bar=(not disable_progbar),\n )\n\n def run(self, *args, rng_key=None, **kwargs):\n if rng_key is None:\n rng_key = numpyro.prng_key()\n self._mcmc.run(rng_key, *args, init_params=self._initial_params, **kwargs)\n\n def get_samples(self, num_samples=None, 
group_by_chain=False):\n if num_samples is not None:\n raise ValueError(\"`num_samples` arg unsupported in NumPyro.\")\n return self._mcmc.get_samples(group_by_chain=group_by_chain)\n\n def summary(self, prob=0.9):\n self._mcmc.print_summary()\n\n\nclass SVI(svi.SVI):\n def __init__(\n self,\n model,\n guide,\n optim,\n loss,\n loss_and_grads=None,\n num_samples=10,\n num_steps=0,\n **kwargs\n ):\n super(SVI, self).__init__(model=model, guide=guide, optim=optim, loss=loss)\n self.svi_state = None\n\n def evaluate_loss(self, *args, **kwargs):\n return self.evaluate(self.svi_state, *args, **kwargs)\n\n def step(self, *args, rng_key=None, **kwargs):\n if self.svi_state is None:\n if rng_key is None:\n rng_key = numpyro.prng_key()\n self.svi_state = self.init(rng_key, *args, **kwargs)\n try:\n self.svi_state, loss = jit(self.update)(self.svi_state, *args, **kwargs)\n except TypeError as e:\n if \"not a valid JAX type\" in str(e):\n raise TypeError(\n \"NumPyro backend requires args, kwargs to be arrays or tuples, \"\n \"dicts of arrays.\"\n ) from e\n else:\n raise e\n params = jit(super(SVI, self).get_params)(self.svi_state)\n get_param_store().update(params)\n return loss\n\n def get_params(self):\n return super(SVI, self).get_params(self.svi_state)\n\n\nclass Trace_ELBO(elbo.Trace_ELBO):\n def __init__(\n self,\n num_particles=1,\n max_plate_nesting=float(\"inf\"),\n max_iarange_nesting=None, # DEPRECATED\n vectorize_particles=False,\n strict_enumeration_warning=True,\n ignore_jit_warnings=False,\n jit_options=None,\n retain_graph=None,\n tail_adaptive_beta=-1.0,\n ):\n super(Trace_ELBO, self).__init__(num_particles=num_particles)\n\n\n# JIT is enabled by default\nJitTrace_ELBO = Trace_ELBO\n", "path": "numpyro/compat/infer.py"}]}
2249
234
gh_patches_debug_8297
rasdani/github-patches
git_diff
acl-org__acl-anthology-2133
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Correction to Anthology ID 2022.clpsych-1.9 Metadata correction: author’s name is listed as Aren, but should be Areán (as shown in the pdf) </issue> <code> [start of bin/add_revision.py] 1 #! /usr/bin/env python3 2 # -*- coding: utf-8 -*- 3 # 4 # Copyright 2019 Matt Post <[email protected]> 5 # 6 # Licensed under the Apache License, Version 2.0 (the "License"); 7 # you may not use this file except in compliance with the License. 8 # You may obtain a copy of the License at 9 # 10 # http://www.apache.org/licenses/LICENSE-2.0 11 # 12 # Unless required by applicable law or agreed to in writing, software 13 # distributed under the License is distributed on an "AS IS" BASIS, 14 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 # See the License for the specific language governing permissions and 16 # limitations under the License. 17 18 """ 19 Used to add revisions to the Anthology. 20 Assumes all files have a base format like ANTHOLOGY_ROOT/P/P18/P18-1234.pdf format. 21 The revision process is as follows. 22 23 - The original paper is named as above. 24 - When a first revision is created, the original paper is archived to PYY-XXXXv1.pdf. 25 - The new revision is copied to PYY-XXXXvN, where N is the next revision ID (usually 2). 26 The new revision is also copied to PYY-XXXX.pdf. 27 This causes it to be returned by the anthology when the base paper format is queried. 28 29 Usage: 30 31 add_revision.py [-e] paper_id URL_OR_PATH.pdf "Short explanation". 32 33 `-e` denotes erratum instead of revision. 34 By default, a dry run happens. 35 When you are ready, add `--do`. 36 """ 37 38 import argparse 39 import filetype 40 import os 41 import shutil 42 import ssl 43 import sys 44 import tempfile 45 46 from anthology.utils import ( 47 deconstruct_anthology_id, 48 make_simple_element, 49 indent, 50 compute_hash_from_file, 51 infer_url, 52 is_newstyle_id, 53 retrieve_url, 54 get_pdf_dir, 55 get_xml_file, 56 ) 57 from anthology.data import ( 58 PDF_LOCATION_TEMPLATE, 59 ANTHOLOGY_FILE_DIR, 60 ) 61 62 import lxml.etree as ET 63 import urllib.request 64 65 from datetime import datetime 66 67 68 def validate_file_type(path): 69 """Ensure downloaded file mime type matches its extension (e.g., PDF)""" 70 detected = filetype.guess(path) 71 if detected is None or not detected.mime.endswith(detected.extension): 72 mime_type = 'UNKNOWN' if detected is None else detected.mime 73 print( 74 f"FATAL: file {path} has MIME type {mime_type}", 75 file=sys.stderr, 76 ) 77 sys.exit(1) 78 79 80 def add_revision( 81 anth_id, pdf_path, explanation, change_type="revision", dry_run=True, date=None 82 ): 83 """ 84 Takes an Anthology ID. It then adds a revision to the Anthology XML, 85 updating and writing the XML file, and copies the PDFs into place. 86 For PDFs, the revised PDF is saved to {anth_id}.pdf and {anth_id}v{version}.pdf. 87 For the first revision, we first copy {anth_id}.pdf to {anth_id}v1.pdf. 
88 """ 89 if date is None: 90 now = datetime.now() 91 date = f"{now.year}-{now.month:02d}-{now.day:02d}" 92 93 def maybe_copy(file_from, file_to): 94 if not dry_run: 95 print("-> Copying from {} -> {}".format(file_from, file_to), file=sys.stderr) 96 shutil.copy(file_from, file_to) 97 os.chmod(file_to, 0o644) 98 else: 99 print( 100 "-> DRY RUN: Copying from {} -> {}".format(file_from, file_to), 101 file=sys.stderr, 102 ) 103 104 # The new version 105 revno = None 106 107 change_letter = "e" if change_type == "erratum" else "v" 108 109 checksum = compute_hash_from_file(pdf_path) 110 111 # Files for old-style IDs are stored under anthology-files/pdf/P/P19/* 112 # Files for new-style IDs are stored under anthology-files/pdf/2020.acl/* 113 output_dir = get_pdf_dir(anth_id) 114 115 # Make sure directory exists 116 if not os.path.exists(output_dir): 117 print(f"-> Creating directory {output_dir}", file=sys.stderr) 118 os.makedirs(output_dir) 119 120 canonical_path = os.path.join(output_dir, f"{anth_id}.pdf") 121 122 # Update XML 123 xml_file = get_xml_file(anth_id) 124 collection_id, volume_id, paper_id = deconstruct_anthology_id(anth_id) 125 tree = ET.parse(xml_file) 126 if paper_id == "0": 127 paper = tree.getroot().find(f"./volume[@id='{volume_id}']/frontmatter") 128 else: 129 paper = tree.getroot().find( 130 f"./volume[@id='{volume_id}']/paper[@id='{paper_id}']" 131 ) 132 if paper is not None: 133 revisions = paper.findall(change_type) 134 revno = 1 if change_type == "erratum" else 2 135 for revision in revisions: 136 revno = int(revision.attrib["id"]) + 1 137 138 if not dry_run: 139 # Update the URL hash on the <url> tag 140 url = paper.find("./url") 141 if url is not None: 142 url.attrib["hash"] = checksum 143 144 if change_type == "revision" and revno == 2: 145 if paper.find("./url") is not None: 146 current_version_url = infer_url(paper.find("./url").text) + ".pdf" 147 148 # Download original file 149 # There are no versioned files the first time around, so create the first one 150 # (essentially backing up the original version) 151 revised_file_v1_path = os.path.join( 152 output_dir, f"{anth_id}{change_letter}1.pdf" 153 ) 154 155 retrieve_url(current_version_url, revised_file_v1_path) 156 validate_file_type(revised_file_v1_path) 157 158 old_checksum = compute_hash_from_file(revised_file_v1_path) 159 160 # First revision requires making the original version explicit 161 revision = make_simple_element( 162 change_type, 163 None, 164 attrib={ 165 "id": "1", 166 "href": f"{anth_id}{change_letter}1", 167 "hash": old_checksum, 168 }, 169 parent=paper, 170 ) 171 172 revision = make_simple_element( 173 change_type, 174 explanation, 175 attrib={ 176 "id": str(revno), 177 "href": f"{anth_id}{change_letter}{revno}", 178 "hash": checksum, 179 "date": date, 180 }, 181 parent=paper, 182 ) 183 indent(tree.getroot()) 184 185 tree.write(xml_file, encoding="UTF-8", xml_declaration=True) 186 print( 187 f'-> Added {change_type} node "{revision.text}" to XML', file=sys.stderr 188 ) 189 190 else: 191 print( 192 f"-> FATAL: paper ID {anth_id} not found in the Anthology", 193 file=sys.stderr, 194 ) 195 sys.exit(1) 196 197 revised_file_versioned_path = os.path.join( 198 output_dir, f"{anth_id}{change_letter}{revno}.pdf" 199 ) 200 201 # Copy the file to the versioned path 202 maybe_copy(pdf_path, revised_file_versioned_path) 203 204 # Copy it over the canonical path 205 if change_type == "revision": 206 maybe_copy(pdf_path, canonical_path) 207 208 209 def main(args): 210 change_type = "erratum" if args.erratum 
else "revision" 211 212 print(f"Processing {change_type} to {args.anthology_id}...") 213 214 # TODO: make sure path exists, or download URL to temp file 215 if args.path.startswith("http"): 216 _, input_file_path = tempfile.mkstemp() 217 retrieve_url(args.path, input_file_path) 218 else: 219 input_file_path = args.path 220 221 validate_file_type(input_file_path) 222 223 add_revision( 224 args.anthology_id, 225 input_file_path, 226 args.explanation, 227 change_type=change_type, 228 dry_run=args.dry_run, 229 ) 230 231 if args.path.startswith("http"): 232 os.remove(input_file_path) 233 234 235 if __name__ == "__main__": 236 parser = argparse.ArgumentParser() 237 parser.add_argument( 238 "anthology_id", help="The Anthology paper ID to revise (e.g., P18-1001)" 239 ) 240 parser.add_argument( 241 "path", type=str, help="Path to the revised paper ID (can be URL)" 242 ) 243 parser.add_argument("explanation", help="Brief description of the changes.") 244 parser.add_argument( 245 "--erratum", 246 "-e", 247 action="store_true", 248 help="This is an erratum instead of a revision.", 249 ) 250 now = datetime.now() 251 today = f"{now.year}-{now.month:02d}-{now.day:02d}" 252 parser.add_argument( 253 "--date", 254 "-d", 255 type=str, 256 default=today, 257 help="The date of the revision (ISO 8601 format)", 258 ) 259 parser.add_argument( 260 "--dry-run", "-n", action="store_true", default=False, help="Just a dry run." 261 ) 262 args = parser.parse_args() 263 264 main(args) 265 [end of bin/add_revision.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/bin/add_revision.py b/bin/add_revision.py --- a/bin/add_revision.py +++ b/bin/add_revision.py @@ -137,9 +137,10 @@ if not dry_run: # Update the URL hash on the <url> tag - url = paper.find("./url") - if url is not None: - url.attrib["hash"] = checksum + if change_type != "erratum": + url = paper.find("./url") + if url is not None: + url.attrib["hash"] = checksum if change_type == "revision" and revno == 2: if paper.find("./url") is not None:
{"golden_diff": "diff --git a/bin/add_revision.py b/bin/add_revision.py\n--- a/bin/add_revision.py\n+++ b/bin/add_revision.py\n@@ -137,9 +137,10 @@\n \n if not dry_run:\n # Update the URL hash on the <url> tag\n- url = paper.find(\"./url\")\n- if url is not None:\n- url.attrib[\"hash\"] = checksum\n+ if change_type != \"erratum\":\n+ url = paper.find(\"./url\")\n+ if url is not None:\n+ url.attrib[\"hash\"] = checksum\n \n if change_type == \"revision\" and revno == 2:\n if paper.find(\"./url\") is not None:\n", "issue": "Correction to Anthology ID 2022.clpsych-1.9\nMetadata correction:\r\nauthor\u2019s name is listed as Aren, but should be Are\u00e1n (as shown in the pdf)\n", "before_files": [{"content": "#! /usr/bin/env python3\n# -*- coding: utf-8 -*-\n#\n# Copyright 2019 Matt Post <[email protected]>\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nUsed to add revisions to the Anthology.\nAssumes all files have a base format like ANTHOLOGY_ROOT/P/P18/P18-1234.pdf format.\nThe revision process is as follows.\n\n- The original paper is named as above.\n- When a first revision is created, the original paper is archived to PYY-XXXXv1.pdf.\n- The new revision is copied to PYY-XXXXvN, where N is the next revision ID (usually 2).\n The new revision is also copied to PYY-XXXX.pdf.\n This causes it to be returned by the anthology when the base paper format is queried.\n\nUsage:\n\n add_revision.py [-e] paper_id URL_OR_PATH.pdf \"Short explanation\".\n\n`-e` denotes erratum instead of revision.\nBy default, a dry run happens.\nWhen you are ready, add `--do`.\n\"\"\"\n\nimport argparse\nimport filetype\nimport os\nimport shutil\nimport ssl\nimport sys\nimport tempfile\n\nfrom anthology.utils import (\n deconstruct_anthology_id,\n make_simple_element,\n indent,\n compute_hash_from_file,\n infer_url,\n is_newstyle_id,\n retrieve_url,\n get_pdf_dir,\n get_xml_file,\n)\nfrom anthology.data import (\n PDF_LOCATION_TEMPLATE,\n ANTHOLOGY_FILE_DIR,\n)\n\nimport lxml.etree as ET\nimport urllib.request\n\nfrom datetime import datetime\n\n\ndef validate_file_type(path):\n \"\"\"Ensure downloaded file mime type matches its extension (e.g., PDF)\"\"\"\n detected = filetype.guess(path)\n if detected is None or not detected.mime.endswith(detected.extension):\n mime_type = 'UNKNOWN' if detected is None else detected.mime\n print(\n f\"FATAL: file {path} has MIME type {mime_type}\",\n file=sys.stderr,\n )\n sys.exit(1)\n\n\ndef add_revision(\n anth_id, pdf_path, explanation, change_type=\"revision\", dry_run=True, date=None\n):\n \"\"\"\n Takes an Anthology ID. 
It then adds a revision to the Anthology XML,\n updating and writing the XML file, and copies the PDFs into place.\n For PDFs, the revised PDF is saved to {anth_id}.pdf and {anth_id}v{version}.pdf.\n For the first revision, we first copy {anth_id}.pdf to {anth_id}v1.pdf.\n \"\"\"\n if date is None:\n now = datetime.now()\n date = f\"{now.year}-{now.month:02d}-{now.day:02d}\"\n\n def maybe_copy(file_from, file_to):\n if not dry_run:\n print(\"-> Copying from {} -> {}\".format(file_from, file_to), file=sys.stderr)\n shutil.copy(file_from, file_to)\n os.chmod(file_to, 0o644)\n else:\n print(\n \"-> DRY RUN: Copying from {} -> {}\".format(file_from, file_to),\n file=sys.stderr,\n )\n\n # The new version\n revno = None\n\n change_letter = \"e\" if change_type == \"erratum\" else \"v\"\n\n checksum = compute_hash_from_file(pdf_path)\n\n # Files for old-style IDs are stored under anthology-files/pdf/P/P19/*\n # Files for new-style IDs are stored under anthology-files/pdf/2020.acl/*\n output_dir = get_pdf_dir(anth_id)\n\n # Make sure directory exists\n if not os.path.exists(output_dir):\n print(f\"-> Creating directory {output_dir}\", file=sys.stderr)\n os.makedirs(output_dir)\n\n canonical_path = os.path.join(output_dir, f\"{anth_id}.pdf\")\n\n # Update XML\n xml_file = get_xml_file(anth_id)\n collection_id, volume_id, paper_id = deconstruct_anthology_id(anth_id)\n tree = ET.parse(xml_file)\n if paper_id == \"0\":\n paper = tree.getroot().find(f\"./volume[@id='{volume_id}']/frontmatter\")\n else:\n paper = tree.getroot().find(\n f\"./volume[@id='{volume_id}']/paper[@id='{paper_id}']\"\n )\n if paper is not None:\n revisions = paper.findall(change_type)\n revno = 1 if change_type == \"erratum\" else 2\n for revision in revisions:\n revno = int(revision.attrib[\"id\"]) + 1\n\n if not dry_run:\n # Update the URL hash on the <url> tag\n url = paper.find(\"./url\")\n if url is not None:\n url.attrib[\"hash\"] = checksum\n\n if change_type == \"revision\" and revno == 2:\n if paper.find(\"./url\") is not None:\n current_version_url = infer_url(paper.find(\"./url\").text) + \".pdf\"\n\n # Download original file\n # There are no versioned files the first time around, so create the first one\n # (essentially backing up the original version)\n revised_file_v1_path = os.path.join(\n output_dir, f\"{anth_id}{change_letter}1.pdf\"\n )\n\n retrieve_url(current_version_url, revised_file_v1_path)\n validate_file_type(revised_file_v1_path)\n\n old_checksum = compute_hash_from_file(revised_file_v1_path)\n\n # First revision requires making the original version explicit\n revision = make_simple_element(\n change_type,\n None,\n attrib={\n \"id\": \"1\",\n \"href\": f\"{anth_id}{change_letter}1\",\n \"hash\": old_checksum,\n },\n parent=paper,\n )\n\n revision = make_simple_element(\n change_type,\n explanation,\n attrib={\n \"id\": str(revno),\n \"href\": f\"{anth_id}{change_letter}{revno}\",\n \"hash\": checksum,\n \"date\": date,\n },\n parent=paper,\n )\n indent(tree.getroot())\n\n tree.write(xml_file, encoding=\"UTF-8\", xml_declaration=True)\n print(\n f'-> Added {change_type} node \"{revision.text}\" to XML', file=sys.stderr\n )\n\n else:\n print(\n f\"-> FATAL: paper ID {anth_id} not found in the Anthology\",\n file=sys.stderr,\n )\n sys.exit(1)\n\n revised_file_versioned_path = os.path.join(\n output_dir, f\"{anth_id}{change_letter}{revno}.pdf\"\n )\n\n # Copy the file to the versioned path\n maybe_copy(pdf_path, revised_file_versioned_path)\n\n # Copy it over the canonical path\n if change_type == 
\"revision\":\n maybe_copy(pdf_path, canonical_path)\n\n\ndef main(args):\n change_type = \"erratum\" if args.erratum else \"revision\"\n\n print(f\"Processing {change_type} to {args.anthology_id}...\")\n\n # TODO: make sure path exists, or download URL to temp file\n if args.path.startswith(\"http\"):\n _, input_file_path = tempfile.mkstemp()\n retrieve_url(args.path, input_file_path)\n else:\n input_file_path = args.path\n\n validate_file_type(input_file_path)\n\n add_revision(\n args.anthology_id,\n input_file_path,\n args.explanation,\n change_type=change_type,\n dry_run=args.dry_run,\n )\n\n if args.path.startswith(\"http\"):\n os.remove(input_file_path)\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"anthology_id\", help=\"The Anthology paper ID to revise (e.g., P18-1001)\"\n )\n parser.add_argument(\n \"path\", type=str, help=\"Path to the revised paper ID (can be URL)\"\n )\n parser.add_argument(\"explanation\", help=\"Brief description of the changes.\")\n parser.add_argument(\n \"--erratum\",\n \"-e\",\n action=\"store_true\",\n help=\"This is an erratum instead of a revision.\",\n )\n now = datetime.now()\n today = f\"{now.year}-{now.month:02d}-{now.day:02d}\"\n parser.add_argument(\n \"--date\",\n \"-d\",\n type=str,\n default=today,\n help=\"The date of the revision (ISO 8601 format)\",\n )\n parser.add_argument(\n \"--dry-run\", \"-n\", action=\"store_true\", default=False, help=\"Just a dry run.\"\n )\n args = parser.parse_args()\n\n main(args)\n", "path": "bin/add_revision.py"}]}
3262
153
gh_patches_debug_4953
rasdani/github-patches
git_diff
getnikola__nikola-2998
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Theme meta file ignored_assets key should ignore whitespace ### Environment (if reporting a bug) **Python Version:** 3.5.4 **Nikola Version:** 8.0.0dev HEAD **Operating System:** gentoo GNU/Linux ### Description: Current behaviour: The ignored_assets key in theme meta files takes a comma-separated list of filenames, that must not contain leading or trailing whitespace because it's considered to be part of the filename. Expected behaviour: The comma-separated list may contain the usual amount of whitespace: foo, bar, baz, etc </issue> <code> [start of nikola/plugins/task/copy_assets.py] 1 # -*- coding: utf-8 -*- 2 3 # Copyright © 2012-2018 Roberto Alsina and others. 4 5 # Permission is hereby granted, free of charge, to any 6 # person obtaining a copy of this software and associated 7 # documentation files (the "Software"), to deal in the 8 # Software without restriction, including without limitation 9 # the rights to use, copy, modify, merge, publish, 10 # distribute, sublicense, and/or sell copies of the 11 # Software, and to permit persons to whom the Software is 12 # furnished to do so, subject to the following conditions: 13 # 14 # The above copyright notice and this permission notice 15 # shall be included in all copies or substantial portions of 16 # the Software. 17 # 18 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY 19 # KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE 20 # WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR 21 # PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS 22 # OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR 23 # OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR 24 # OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE 25 # SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 26 27 """Copy theme assets into output.""" 28 29 30 import io 31 import os 32 33 from nikola.plugin_categories import Task 34 from nikola import utils 35 36 37 class CopyAssets(Task): 38 """Copy theme assets into output.""" 39 40 name = "copy_assets" 41 42 def gen_tasks(self): 43 """Create tasks to copy the assets of the whole theme chain. 44 45 If a file is present on two themes, use the version 46 from the "youngest" theme. 
47 """ 48 kw = { 49 "themes": self.site.THEMES, 50 "translations": self.site.translations, 51 "files_folders": self.site.config['FILES_FOLDERS'], 52 "output_folder": self.site.config['OUTPUT_FOLDER'], 53 "filters": self.site.config['FILTERS'], 54 "code_color_scheme": self.site.config['CODE_COLOR_SCHEME'], 55 "code.css_selectors": ['pre.code', '.highlight pre'], 56 "code.css_head": '/* code.css file generated by Nikola */\n', 57 "code.css_close": "\ntable.codetable { width: 100%;} td.linenos {text-align: right; width: 4em;}\n", 58 } 59 tasks = {} 60 code_css_path = os.path.join(kw['output_folder'], 'assets', 'css', 'code.css') 61 code_css_input = utils.get_asset_path('assets/css/code.css', 62 themes=kw['themes'], 63 files_folders=kw['files_folders'], output_dir=None) 64 yield self.group_task() 65 66 main_theme = utils.get_theme_path(kw['themes'][0]) 67 theme_ini = utils.parse_theme_meta(main_theme) 68 if theme_ini: 69 ignored_assets = theme_ini.get("Nikola", "ignored_assets", fallback='').split(',') 70 else: 71 ignored_assets = [] 72 73 for theme_name in kw['themes']: 74 src = os.path.join(utils.get_theme_path(theme_name), 'assets') 75 dst = os.path.join(kw['output_folder'], 'assets') 76 for task in utils.copy_tree(src, dst): 77 asset_name = os.path.relpath(task['name'], dst) 78 if task['name'] in tasks or asset_name in ignored_assets: 79 continue 80 tasks[task['name']] = task 81 task['uptodate'] = [utils.config_changed(kw, 'nikola.plugins.task.copy_assets')] 82 task['basename'] = self.name 83 if code_css_input: 84 if 'file_dep' not in task: 85 task['file_dep'] = [] 86 task['file_dep'].append(code_css_input) 87 yield utils.apply_filters(task, kw['filters']) 88 89 # Check whether or not there is a code.css file around. 90 if not code_css_input and kw['code_color_scheme']: 91 def create_code_css(): 92 from pygments.formatters import get_formatter_by_name 93 formatter = get_formatter_by_name('html', style=kw["code_color_scheme"]) 94 utils.makedirs(os.path.dirname(code_css_path)) 95 with io.open(code_css_path, 'w+', encoding='utf8') as outf: 96 outf.write(kw["code.css_head"]) 97 outf.write(formatter.get_style_defs(kw["code.css_selectors"])) 98 outf.write(kw["code.css_close"]) 99 100 if os.path.exists(code_css_path): 101 with io.open(code_css_path, 'r', encoding='utf-8') as fh: 102 testcontents = fh.read(len(kw["code.css_head"])) == kw["code.css_head"] 103 else: 104 testcontents = False 105 106 task = { 107 'basename': self.name, 108 'name': code_css_path, 109 'targets': [code_css_path], 110 'uptodate': [utils.config_changed(kw, 'nikola.plugins.task.copy_assets'), testcontents], 111 'actions': [(create_code_css, [])], 112 'clean': True, 113 } 114 yield utils.apply_filters(task, kw['filters']) 115 [end of nikola/plugins/task/copy_assets.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/nikola/plugins/task/copy_assets.py b/nikola/plugins/task/copy_assets.py --- a/nikola/plugins/task/copy_assets.py +++ b/nikola/plugins/task/copy_assets.py @@ -67,6 +67,7 @@ theme_ini = utils.parse_theme_meta(main_theme) if theme_ini: ignored_assets = theme_ini.get("Nikola", "ignored_assets", fallback='').split(',') + ignored_assets = [asset_name.strip() for asset_name in ignored_assets] else: ignored_assets = []
{"golden_diff": "diff --git a/nikola/plugins/task/copy_assets.py b/nikola/plugins/task/copy_assets.py\n--- a/nikola/plugins/task/copy_assets.py\n+++ b/nikola/plugins/task/copy_assets.py\n@@ -67,6 +67,7 @@\n theme_ini = utils.parse_theme_meta(main_theme)\n if theme_ini:\n ignored_assets = theme_ini.get(\"Nikola\", \"ignored_assets\", fallback='').split(',')\n+ ignored_assets = [asset_name.strip() for asset_name in ignored_assets]\n else:\n ignored_assets = []\n", "issue": "Theme meta file ignored_assets key should ignore whitespace\n### Environment (if reporting a bug)\r\n\r\n**Python Version:**\r\n3.5.4\r\n**Nikola Version:**\r\n8.0.0dev HEAD\r\n**Operating System:**\r\ngentoo GNU/Linux\r\n### Description:\r\nCurrent behaviour: The ignored_assets key in theme meta files takes a comma-separated list of filenames, that must not contain leading or trailing whitespace because it's considered to be part of the filename.\r\n\r\nExpected behaviour: The comma-separated list may contain the usual amount of whitespace: foo, bar, baz, etc\r\n\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Copyright \u00a9 2012-2018 Roberto Alsina and others.\n\n# Permission is hereby granted, free of charge, to any\n# person obtaining a copy of this software and associated\n# documentation files (the \"Software\"), to deal in the\n# Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the\n# Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice\n# shall be included in all copies or substantial portions of\n# the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY\n# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE\n# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR\n# PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS\n# OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR\n# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR\n# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\n\"\"\"Copy theme assets into output.\"\"\"\n\n\nimport io\nimport os\n\nfrom nikola.plugin_categories import Task\nfrom nikola import utils\n\n\nclass CopyAssets(Task):\n \"\"\"Copy theme assets into output.\"\"\"\n\n name = \"copy_assets\"\n\n def gen_tasks(self):\n \"\"\"Create tasks to copy the assets of the whole theme chain.\n\n If a file is present on two themes, use the version\n from the \"youngest\" theme.\n \"\"\"\n kw = {\n \"themes\": self.site.THEMES,\n \"translations\": self.site.translations,\n \"files_folders\": self.site.config['FILES_FOLDERS'],\n \"output_folder\": self.site.config['OUTPUT_FOLDER'],\n \"filters\": self.site.config['FILTERS'],\n \"code_color_scheme\": self.site.config['CODE_COLOR_SCHEME'],\n \"code.css_selectors\": ['pre.code', '.highlight pre'],\n \"code.css_head\": '/* code.css file generated by Nikola */\\n',\n \"code.css_close\": \"\\ntable.codetable { width: 100%;} td.linenos {text-align: right; width: 4em;}\\n\",\n }\n tasks = {}\n code_css_path = os.path.join(kw['output_folder'], 'assets', 'css', 'code.css')\n code_css_input = utils.get_asset_path('assets/css/code.css',\n themes=kw['themes'],\n files_folders=kw['files_folders'], output_dir=None)\n yield self.group_task()\n\n main_theme = utils.get_theme_path(kw['themes'][0])\n theme_ini = utils.parse_theme_meta(main_theme)\n if theme_ini:\n ignored_assets = theme_ini.get(\"Nikola\", \"ignored_assets\", fallback='').split(',')\n else:\n ignored_assets = []\n\n for theme_name in kw['themes']:\n src = os.path.join(utils.get_theme_path(theme_name), 'assets')\n dst = os.path.join(kw['output_folder'], 'assets')\n for task in utils.copy_tree(src, dst):\n asset_name = os.path.relpath(task['name'], dst)\n if task['name'] in tasks or asset_name in ignored_assets:\n continue\n tasks[task['name']] = task\n task['uptodate'] = [utils.config_changed(kw, 'nikola.plugins.task.copy_assets')]\n task['basename'] = self.name\n if code_css_input:\n if 'file_dep' not in task:\n task['file_dep'] = []\n task['file_dep'].append(code_css_input)\n yield utils.apply_filters(task, kw['filters'])\n\n # Check whether or not there is a code.css file around.\n if not code_css_input and kw['code_color_scheme']:\n def create_code_css():\n from pygments.formatters import get_formatter_by_name\n formatter = get_formatter_by_name('html', style=kw[\"code_color_scheme\"])\n utils.makedirs(os.path.dirname(code_css_path))\n with io.open(code_css_path, 'w+', encoding='utf8') as outf:\n outf.write(kw[\"code.css_head\"])\n outf.write(formatter.get_style_defs(kw[\"code.css_selectors\"]))\n outf.write(kw[\"code.css_close\"])\n\n if os.path.exists(code_css_path):\n with io.open(code_css_path, 'r', encoding='utf-8') as fh:\n testcontents = fh.read(len(kw[\"code.css_head\"])) == kw[\"code.css_head\"]\n else:\n testcontents = False\n\n task = {\n 'basename': self.name,\n 'name': code_css_path,\n 'targets': [code_css_path],\n 'uptodate': [utils.config_changed(kw, 'nikola.plugins.task.copy_assets'), testcontents],\n 'actions': [(create_code_css, [])],\n 'clean': True,\n }\n yield utils.apply_filters(task, kw['filters'])\n", "path": "nikola/plugins/task/copy_assets.py"}]}
1976
122
gh_patches_debug_18140
rasdani/github-patches
git_diff
open-telemetry__opentelemetry-python-2408
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> OTLP Exporter: Add support for Gauge point types </issue> <code> [start of exporter/opentelemetry-exporter-otlp-proto-grpc/src/opentelemetry/exporter/otlp/proto/grpc/_metric_exporter/__init__.py] 1 # Copyright The OpenTelemetry Authors 2 # Licensed under the Apache License, Version 2.0 (the "License"); 3 # you may not use this file except in compliance with the License. 4 # You may obtain a copy of the License at 5 # 6 # http://www.apache.org/licenses/LICENSE-2.0 7 # 8 # Unless required by applicable law or agreed to in writing, software 9 # distributed under the License is distributed on an "AS IS" BASIS, 10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 11 # See the License for the specific language governing permissions and 12 # limitations under the License. 13 14 import logging 15 from os import environ 16 from typing import Optional, Sequence 17 from grpc import ChannelCredentials, Compression 18 from opentelemetry.exporter.otlp.proto.grpc.exporter import ( 19 OTLPExporterMixin, 20 get_resource_data, 21 ) 22 from opentelemetry.proto.collector.metrics.v1.metrics_service_pb2 import ( 23 ExportMetricsServiceRequest, 24 ) 25 from opentelemetry.proto.collector.metrics.v1.metrics_service_pb2_grpc import ( 26 MetricsServiceStub, 27 ) 28 from opentelemetry.proto.common.v1.common_pb2 import InstrumentationLibrary 29 from opentelemetry.proto.metrics.v1 import metrics_pb2 as pb2 30 from opentelemetry.sdk.environment_variables import ( 31 OTEL_EXPORTER_OTLP_METRICS_INSECURE, 32 ) 33 from opentelemetry.sdk._metrics.point import ( 34 Gauge, 35 Histogram, 36 Metric, 37 Sum, 38 ) 39 40 from opentelemetry.sdk._metrics.export import ( 41 MetricExporter, 42 MetricExportResult, 43 ) 44 45 logger = logging.getLogger(__name__) 46 47 48 class OTLPMetricExporter( 49 MetricExporter, 50 OTLPExporterMixin[Metric, ExportMetricsServiceRequest, MetricExportResult], 51 ): 52 _result = MetricExportResult 53 _stub = MetricsServiceStub 54 55 def __init__( 56 self, 57 endpoint: Optional[str] = None, 58 insecure: Optional[bool] = None, 59 credentials: Optional[ChannelCredentials] = None, 60 headers: Optional[Sequence] = None, 61 timeout: Optional[int] = None, 62 compression: Optional[Compression] = None, 63 ): 64 65 if insecure is None: 66 insecure = environ.get(OTEL_EXPORTER_OTLP_METRICS_INSECURE) 67 if insecure is not None: 68 insecure = insecure.lower() == "true" 69 70 super().__init__( 71 **{ 72 "endpoint": endpoint, 73 "insecure": insecure, 74 "credentials": credentials, 75 "headers": headers, 76 "timeout": timeout, 77 "compression": compression, 78 } 79 ) 80 81 def _translate_data( 82 self, data: Sequence[Metric] 83 ) -> ExportMetricsServiceRequest: 84 sdk_resource_instrumentation_library_metrics = {} 85 86 for metric in data: 87 resource = metric.resource 88 instrumentation_library_map = ( 89 sdk_resource_instrumentation_library_metrics.get(resource, {}) 90 ) 91 if not instrumentation_library_map: 92 sdk_resource_instrumentation_library_metrics[ 93 resource 94 ] = instrumentation_library_map 95 96 instrumentation_library_metrics = instrumentation_library_map.get( 97 metric.instrumentation_info 98 ) 99 100 if not instrumentation_library_metrics: 101 if metric.instrumentation_info is not None: 102 instrumentation_library_map[ 103 metric.instrumentation_info 104 ] = pb2.InstrumentationLibraryMetrics( 105 instrumentation_library=InstrumentationLibrary( 106 
name=metric.instrumentation_info.name, 107 version=metric.instrumentation_info.version, 108 ) 109 ) 110 else: 111 instrumentation_library_map[ 112 metric.instrumentation_info 113 ] = pb2.InstrumentationLibraryMetrics() 114 115 instrumentation_library_metrics = instrumentation_library_map.get( 116 metric.instrumentation_info 117 ) 118 119 pbmetric = pb2.Metric( 120 name=metric.name, 121 description=metric.description, 122 unit=metric.unit, 123 ) 124 if isinstance(metric.point, Gauge): 125 # TODO: implement gauge 126 pbmetric.gauge = pb2.Gauge( 127 data_points=[], 128 ) 129 elif isinstance(metric.point, Histogram): 130 # TODO: implement histogram 131 pbmetric.histogram = pb2.Histogram( 132 data_points=[], 133 ) 134 elif isinstance(metric.point, Sum): 135 pt = pb2.NumberDataPoint( 136 attributes=self._translate_attributes(metric.attributes), 137 start_time_unix_nano=metric.point.start_time_unix_nano, 138 time_unix_nano=metric.point.time_unix_nano, 139 ) 140 if isinstance(metric.point.value, int): 141 pt.as_int = metric.point.value 142 else: 143 pt.as_double = metric.point.value 144 # note that because sum is a message type, the fields must be 145 # set individually rather than instantiating a pb2.Sum and setting 146 # it once 147 pbmetric.sum.aggregation_temporality = ( 148 metric.point.aggregation_temporality 149 ) 150 pbmetric.sum.is_monotonic = metric.point.is_monotonic 151 pbmetric.sum.data_points.append(pt) 152 else: 153 logger.warn("unsupported datapoint type %s", metric.point) 154 continue 155 156 instrumentation_library_metrics.metrics.append( 157 pbmetric, 158 ) 159 return ExportMetricsServiceRequest( 160 resource_metrics=get_resource_data( 161 sdk_resource_instrumentation_library_metrics, 162 pb2.ResourceMetrics, 163 "metrics", 164 ) 165 ) 166 167 def export(self, metrics: Sequence[Metric]) -> MetricExportResult: 168 return self._export(metrics) 169 170 def shutdown(self): 171 pass 172 [end of exporter/opentelemetry-exporter-otlp-proto-grpc/src/opentelemetry/exporter/otlp/proto/grpc/_metric_exporter/__init__.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/exporter/opentelemetry-exporter-otlp-proto-grpc/src/opentelemetry/exporter/otlp/proto/grpc/_metric_exporter/__init__.py b/exporter/opentelemetry-exporter-otlp-proto-grpc/src/opentelemetry/exporter/otlp/proto/grpc/_metric_exporter/__init__.py --- a/exporter/opentelemetry-exporter-otlp-proto-grpc/src/opentelemetry/exporter/otlp/proto/grpc/_metric_exporter/__init__.py +++ b/exporter/opentelemetry-exporter-otlp-proto-grpc/src/opentelemetry/exporter/otlp/proto/grpc/_metric_exporter/__init__.py @@ -122,10 +122,15 @@ unit=metric.unit, ) if isinstance(metric.point, Gauge): - # TODO: implement gauge - pbmetric.gauge = pb2.Gauge( - data_points=[], + pt = pb2.NumberDataPoint( + attributes=self._translate_attributes(metric.attributes), + time_unix_nano=metric.point.time_unix_nano, ) + if isinstance(metric.point.value, int): + pt.as_int = metric.point.value + else: + pt.as_double = metric.point.value + pbmetric.gauge.data_points.append(pt) elif isinstance(metric.point, Histogram): # TODO: implement histogram pbmetric.histogram = pb2.Histogram(
{"golden_diff": "diff --git a/exporter/opentelemetry-exporter-otlp-proto-grpc/src/opentelemetry/exporter/otlp/proto/grpc/_metric_exporter/__init__.py b/exporter/opentelemetry-exporter-otlp-proto-grpc/src/opentelemetry/exporter/otlp/proto/grpc/_metric_exporter/__init__.py\n--- a/exporter/opentelemetry-exporter-otlp-proto-grpc/src/opentelemetry/exporter/otlp/proto/grpc/_metric_exporter/__init__.py\n+++ b/exporter/opentelemetry-exporter-otlp-proto-grpc/src/opentelemetry/exporter/otlp/proto/grpc/_metric_exporter/__init__.py\n@@ -122,10 +122,15 @@\n unit=metric.unit,\n )\n if isinstance(metric.point, Gauge):\n- # TODO: implement gauge\n- pbmetric.gauge = pb2.Gauge(\n- data_points=[],\n+ pt = pb2.NumberDataPoint(\n+ attributes=self._translate_attributes(metric.attributes),\n+ time_unix_nano=metric.point.time_unix_nano,\n )\n+ if isinstance(metric.point.value, int):\n+ pt.as_int = metric.point.value\n+ else:\n+ pt.as_double = metric.point.value\n+ pbmetric.gauge.data_points.append(pt)\n elif isinstance(metric.point, Histogram):\n # TODO: implement histogram\n pbmetric.histogram = pb2.Histogram(\n", "issue": "OTLP Exporter: Add support for Gauge point types\n\n", "before_files": [{"content": "# Copyright The OpenTelemetry Authors\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport logging\nfrom os import environ\nfrom typing import Optional, Sequence\nfrom grpc import ChannelCredentials, Compression\nfrom opentelemetry.exporter.otlp.proto.grpc.exporter import (\n OTLPExporterMixin,\n get_resource_data,\n)\nfrom opentelemetry.proto.collector.metrics.v1.metrics_service_pb2 import (\n ExportMetricsServiceRequest,\n)\nfrom opentelemetry.proto.collector.metrics.v1.metrics_service_pb2_grpc import (\n MetricsServiceStub,\n)\nfrom opentelemetry.proto.common.v1.common_pb2 import InstrumentationLibrary\nfrom opentelemetry.proto.metrics.v1 import metrics_pb2 as pb2\nfrom opentelemetry.sdk.environment_variables import (\n OTEL_EXPORTER_OTLP_METRICS_INSECURE,\n)\nfrom opentelemetry.sdk._metrics.point import (\n Gauge,\n Histogram,\n Metric,\n Sum,\n)\n\nfrom opentelemetry.sdk._metrics.export import (\n MetricExporter,\n MetricExportResult,\n)\n\nlogger = logging.getLogger(__name__)\n\n\nclass OTLPMetricExporter(\n MetricExporter,\n OTLPExporterMixin[Metric, ExportMetricsServiceRequest, MetricExportResult],\n):\n _result = MetricExportResult\n _stub = MetricsServiceStub\n\n def __init__(\n self,\n endpoint: Optional[str] = None,\n insecure: Optional[bool] = None,\n credentials: Optional[ChannelCredentials] = None,\n headers: Optional[Sequence] = None,\n timeout: Optional[int] = None,\n compression: Optional[Compression] = None,\n ):\n\n if insecure is None:\n insecure = environ.get(OTEL_EXPORTER_OTLP_METRICS_INSECURE)\n if insecure is not None:\n insecure = insecure.lower() == \"true\"\n\n super().__init__(\n **{\n \"endpoint\": endpoint,\n \"insecure\": insecure,\n \"credentials\": credentials,\n \"headers\": headers,\n \"timeout\": timeout,\n \"compression\": compression,\n }\n )\n\n def _translate_data(\n 
self, data: Sequence[Metric]\n ) -> ExportMetricsServiceRequest:\n sdk_resource_instrumentation_library_metrics = {}\n\n for metric in data:\n resource = metric.resource\n instrumentation_library_map = (\n sdk_resource_instrumentation_library_metrics.get(resource, {})\n )\n if not instrumentation_library_map:\n sdk_resource_instrumentation_library_metrics[\n resource\n ] = instrumentation_library_map\n\n instrumentation_library_metrics = instrumentation_library_map.get(\n metric.instrumentation_info\n )\n\n if not instrumentation_library_metrics:\n if metric.instrumentation_info is not None:\n instrumentation_library_map[\n metric.instrumentation_info\n ] = pb2.InstrumentationLibraryMetrics(\n instrumentation_library=InstrumentationLibrary(\n name=metric.instrumentation_info.name,\n version=metric.instrumentation_info.version,\n )\n )\n else:\n instrumentation_library_map[\n metric.instrumentation_info\n ] = pb2.InstrumentationLibraryMetrics()\n\n instrumentation_library_metrics = instrumentation_library_map.get(\n metric.instrumentation_info\n )\n\n pbmetric = pb2.Metric(\n name=metric.name,\n description=metric.description,\n unit=metric.unit,\n )\n if isinstance(metric.point, Gauge):\n # TODO: implement gauge\n pbmetric.gauge = pb2.Gauge(\n data_points=[],\n )\n elif isinstance(metric.point, Histogram):\n # TODO: implement histogram\n pbmetric.histogram = pb2.Histogram(\n data_points=[],\n )\n elif isinstance(metric.point, Sum):\n pt = pb2.NumberDataPoint(\n attributes=self._translate_attributes(metric.attributes),\n start_time_unix_nano=metric.point.start_time_unix_nano,\n time_unix_nano=metric.point.time_unix_nano,\n )\n if isinstance(metric.point.value, int):\n pt.as_int = metric.point.value\n else:\n pt.as_double = metric.point.value\n # note that because sum is a message type, the fields must be\n # set individually rather than instantiating a pb2.Sum and setting\n # it once\n pbmetric.sum.aggregation_temporality = (\n metric.point.aggregation_temporality\n )\n pbmetric.sum.is_monotonic = metric.point.is_monotonic\n pbmetric.sum.data_points.append(pt)\n else:\n logger.warn(\"unsupported datapoint type %s\", metric.point)\n continue\n\n instrumentation_library_metrics.metrics.append(\n pbmetric,\n )\n return ExportMetricsServiceRequest(\n resource_metrics=get_resource_data(\n sdk_resource_instrumentation_library_metrics,\n pb2.ResourceMetrics,\n \"metrics\",\n )\n )\n\n def export(self, metrics: Sequence[Metric]) -> MetricExportResult:\n return self._export(metrics)\n\n def shutdown(self):\n pass\n", "path": "exporter/opentelemetry-exporter-otlp-proto-grpc/src/opentelemetry/exporter/otlp/proto/grpc/_metric_exporter/__init__.py"}]}
2,143
308
gh_patches_debug_30133
rasdani/github-patches
git_diff
napari__napari-1788
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Text in welcome visual is too dark on non-macOS platforms ## 🐛 Bug The welcome text is almost unreadable on Windows or Linux: ![signal-2020-10-28-132757](https://user-images.githubusercontent.com/492549/97384445-d3080500-1923-11eb-99d3-9a3e495635c3.jpeg) ![IMG_5476](https://user-images.githubusercontent.com/492549/97384469-ddc29a00-1923-11eb-9038-81124f3dd874.jpg) It's fine on mac: <img width="1200" alt="Screen-Shot-2020-10-27-at-7 12 45-PM" src="https://user-images.githubusercontent.com/492549/97384494-e7e49880-1923-11eb-9286-90f141bd0827.png"> @sofroniewn noted that he had to make the text darker for it to appear normal on mac. It appears that this is due to some bug in macOS graphics — the workaround should be applied only on mac. </issue> <code> [start of napari/_vispy/vispy_welcome_visual.py] 1 from os.path import dirname, join 2 3 import numpy as np 4 import scipy.ndimage as ndi 5 from imageio import imread 6 from vispy.scene.visuals import Text 7 from vispy.visuals.transforms import STTransform 8 9 from ..utils.misc import str_to_rgb 10 from ..utils.theme import darken, lighten 11 from .image import Image as ImageNode 12 13 14 class VispyWelcomeVisual: 15 """Welcome to napari visual. 16 """ 17 18 def __init__(self, viewer, parent=None, order=0): 19 20 self._viewer = viewer 21 22 # Load logo and make grayscale 23 logopath = join(dirname(__file__), '..', 'resources', 'logo.png') 24 logo = imread(logopath) 25 self._logo_raw = logo 26 self._logo_border = np.all(logo[..., :3] == [38, 40, 61], axis=2) 27 self._logo = np.zeros(logo.shape) 28 29 self.node = ImageNode(parent=parent) 30 self.node.order = order 31 32 self.node.cmap = 'grays' 33 self.node.transform = STTransform() 34 35 self.text_node = Text( 36 pos=[0, 0], parent=parent, method='gpu', bold=False 37 ) 38 self.text_node.order = order 39 self.text_node.transform = STTransform() 40 self.text_node.anchors = ('left', 'center') 41 self.text_node.text = ( 42 'to add data:\n' 43 ' - drag and drop file(s) here\n' 44 ' - select File > Open from the menu\n' 45 ' - call a viewer.add_* method' 46 ) 47 self.text_node.color = np.divide( 48 str_to_rgb(darken(self._viewer.palette['foreground'], 30)), 255 49 ) 50 51 self._on_palette_change(None) 52 self._on_visible_change(None) 53 self._on_canvas_change(None) 54 55 def _on_palette_change(self, event): 56 """Change colors of the logo and text.""" 57 if ( 58 np.mean(str_to_rgb(self._viewer.palette['background'])[:3]) 59 < 255 / 2 60 ): 61 foreground_color = np.divide( 62 str_to_rgb(darken(self._viewer.palette['foreground'], 50)), 255 63 ) 64 background_color = np.divide( 65 str_to_rgb(darken(self._viewer.palette['background'], 70)), 255 66 ) 67 # Note this unsual scaling is done to preserve color balance on 68 # rendering by VisPy, which appears to be off when opacity < 1 69 text_color = np.multiply(foreground_color, [0.4, 0.65, 0.9]) 70 else: 71 foreground_color = np.divide( 72 str_to_rgb(lighten(self._viewer.palette['foreground'], 30)), 73 255, 74 ) 75 background_color = np.divide( 76 str_to_rgb(lighten(self._viewer.palette['background'], 30)), 77 255, 78 ) 79 text_color = np.divide( 80 str_to_rgb(darken(self._viewer.palette['background'], 60)), 255 81 ) 82 83 new_logo = np.zeros(self._logo_raw.shape) 84 new_logo[self._logo_border, :3] = foreground_color 85 new_logo[np.invert(self._logo_border), :3] = background_color 86 new_logo[..., -1] = self._logo_raw[..., -1] * 0.7 87 88 # Do a 
convolution to smooth any pixelation 89 kernel = np.array([[0, 0.5, 0], [0.5, 1, 0.5], [0, 0.5, 0]]) 90 kernel = np.expand_dims(kernel / np.sum(kernel), axis=2) 91 new_logo = ndi.convolve(new_logo, kernel) 92 93 self._logo = new_logo 94 self.node.set_data(self._logo) 95 # Having opacity < 1 improves blending but throws color balance 96 # off which needs to be adjusted if desired 97 self.text_node.color = list(text_color) + [0.7] 98 99 def _on_visible_change(self, event): 100 """Change visibiliy of axes.""" 101 visible = len(self._viewer.layers) == 0 102 self.node.visible = visible 103 self.text_node.visible = visible 104 105 def _on_canvas_change(self, event): 106 """Change visibiliy of axes.""" 107 if self.node.canvas is not None: 108 center = np.divide(self.node.canvas.size, 2) 109 else: 110 center = np.array([256, 256]) 111 112 # Calculate some good default positions for the logo and text 113 center_logo = [ 114 center[0] - center[1] / 2.4, 115 2 / 3 * center[1] - center[1] / 3, 116 ] 117 self.node.transform.translate = [center_logo[0], center_logo[1], 0, 0] 118 self.node.transform.scale = [ 119 center[1] / 1.2 / self._logo.shape[0], 120 center[1] / 1.2 / self._logo.shape[0], 121 0, 122 0, 123 ] 124 125 self.text_node.font_size = center[1] / 24 126 self.text_node.transform.translate = [ 127 center[0] - center[1] / 2.4, 128 1.45 * center[1], 129 0, 130 0, 131 ] 132 [end of napari/_vispy/vispy_welcome_visual.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/napari/_vispy/vispy_welcome_visual.py b/napari/_vispy/vispy_welcome_visual.py --- a/napari/_vispy/vispy_welcome_visual.py +++ b/napari/_vispy/vispy_welcome_visual.py @@ -1,3 +1,4 @@ +import sys from os.path import dirname, join import numpy as np @@ -64,9 +65,17 @@ background_color = np.divide( str_to_rgb(darken(self._viewer.palette['background'], 70)), 255 ) - # Note this unsual scaling is done to preserve color balance on - # rendering by VisPy, which appears to be off when opacity < 1 - text_color = np.multiply(foreground_color, [0.4, 0.65, 0.9]) + text_color = foreground_color + if sys.platform == 'darwin': + # Note this unsual scaling is done to preserve color balance on + # rendering by VisPy, which appears to be off when opacity < 1. + # It only needs to be done on a mac, where we need opacity < 1 + # to achieve good blending. + text_color = np.multiply(text_color, [0.4, 0.65, 0.9]) + text_color = list(text_color) + [0.7] + else: + text_color = list(text_color) + [1] + else: foreground_color = np.divide( str_to_rgb(lighten(self._viewer.palette['foreground'], 30)), @@ -92,9 +101,8 @@ self._logo = new_logo self.node.set_data(self._logo) - # Having opacity < 1 improves blending but throws color balance - # off which needs to be adjusted if desired - self.text_node.color = list(text_color) + [0.7] + + self.text_node.color = text_color def _on_visible_change(self, event): """Change visibiliy of axes."""
{"golden_diff": "diff --git a/napari/_vispy/vispy_welcome_visual.py b/napari/_vispy/vispy_welcome_visual.py\n--- a/napari/_vispy/vispy_welcome_visual.py\n+++ b/napari/_vispy/vispy_welcome_visual.py\n@@ -1,3 +1,4 @@\n+import sys\n from os.path import dirname, join\n \n import numpy as np\n@@ -64,9 +65,17 @@\n background_color = np.divide(\n str_to_rgb(darken(self._viewer.palette['background'], 70)), 255\n )\n- # Note this unsual scaling is done to preserve color balance on\n- # rendering by VisPy, which appears to be off when opacity < 1\n- text_color = np.multiply(foreground_color, [0.4, 0.65, 0.9])\n+ text_color = foreground_color\n+ if sys.platform == 'darwin':\n+ # Note this unsual scaling is done to preserve color balance on\n+ # rendering by VisPy, which appears to be off when opacity < 1.\n+ # It only needs to be done on a mac, where we need opacity < 1\n+ # to achieve good blending.\n+ text_color = np.multiply(text_color, [0.4, 0.65, 0.9])\n+ text_color = list(text_color) + [0.7]\n+ else:\n+ text_color = list(text_color) + [1]\n+\n else:\n foreground_color = np.divide(\n str_to_rgb(lighten(self._viewer.palette['foreground'], 30)),\n@@ -92,9 +101,8 @@\n \n self._logo = new_logo\n self.node.set_data(self._logo)\n- # Having opacity < 1 improves blending but throws color balance\n- # off which needs to be adjusted if desired\n- self.text_node.color = list(text_color) + [0.7]\n+\n+ self.text_node.color = text_color\n \n def _on_visible_change(self, event):\n \"\"\"Change visibiliy of axes.\"\"\"\n", "issue": "Text in welcome visual is too dark on non-macOS platforms\n## \ud83d\udc1b Bug\r\n\r\nThe welcome text is almost unreadable on Windows or Linux:\r\n\r\n![signal-2020-10-28-132757](https://user-images.githubusercontent.com/492549/97384445-d3080500-1923-11eb-99d3-9a3e495635c3.jpeg)\r\n\r\n![IMG_5476](https://user-images.githubusercontent.com/492549/97384469-ddc29a00-1923-11eb-9038-81124f3dd874.jpg)\r\n\r\nIt's fine on mac:\r\n\r\n<img width=\"1200\" alt=\"Screen-Shot-2020-10-27-at-7 12 45-PM\" src=\"https://user-images.githubusercontent.com/492549/97384494-e7e49880-1923-11eb-9286-90f141bd0827.png\">\r\n\r\n@sofroniewn noted that he had to make the text darker for it to appear normal on mac. 
It appears that this is due to some bug in macOS graphics \u2014 the workaround should be applied only on mac.\n", "before_files": [{"content": "from os.path import dirname, join\n\nimport numpy as np\nimport scipy.ndimage as ndi\nfrom imageio import imread\nfrom vispy.scene.visuals import Text\nfrom vispy.visuals.transforms import STTransform\n\nfrom ..utils.misc import str_to_rgb\nfrom ..utils.theme import darken, lighten\nfrom .image import Image as ImageNode\n\n\nclass VispyWelcomeVisual:\n \"\"\"Welcome to napari visual.\n \"\"\"\n\n def __init__(self, viewer, parent=None, order=0):\n\n self._viewer = viewer\n\n # Load logo and make grayscale\n logopath = join(dirname(__file__), '..', 'resources', 'logo.png')\n logo = imread(logopath)\n self._logo_raw = logo\n self._logo_border = np.all(logo[..., :3] == [38, 40, 61], axis=2)\n self._logo = np.zeros(logo.shape)\n\n self.node = ImageNode(parent=parent)\n self.node.order = order\n\n self.node.cmap = 'grays'\n self.node.transform = STTransform()\n\n self.text_node = Text(\n pos=[0, 0], parent=parent, method='gpu', bold=False\n )\n self.text_node.order = order\n self.text_node.transform = STTransform()\n self.text_node.anchors = ('left', 'center')\n self.text_node.text = (\n 'to add data:\\n'\n ' - drag and drop file(s) here\\n'\n ' - select File > Open from the menu\\n'\n ' - call a viewer.add_* method'\n )\n self.text_node.color = np.divide(\n str_to_rgb(darken(self._viewer.palette['foreground'], 30)), 255\n )\n\n self._on_palette_change(None)\n self._on_visible_change(None)\n self._on_canvas_change(None)\n\n def _on_palette_change(self, event):\n \"\"\"Change colors of the logo and text.\"\"\"\n if (\n np.mean(str_to_rgb(self._viewer.palette['background'])[:3])\n < 255 / 2\n ):\n foreground_color = np.divide(\n str_to_rgb(darken(self._viewer.palette['foreground'], 50)), 255\n )\n background_color = np.divide(\n str_to_rgb(darken(self._viewer.palette['background'], 70)), 255\n )\n # Note this unsual scaling is done to preserve color balance on\n # rendering by VisPy, which appears to be off when opacity < 1\n text_color = np.multiply(foreground_color, [0.4, 0.65, 0.9])\n else:\n foreground_color = np.divide(\n str_to_rgb(lighten(self._viewer.palette['foreground'], 30)),\n 255,\n )\n background_color = np.divide(\n str_to_rgb(lighten(self._viewer.palette['background'], 30)),\n 255,\n )\n text_color = np.divide(\n str_to_rgb(darken(self._viewer.palette['background'], 60)), 255\n )\n\n new_logo = np.zeros(self._logo_raw.shape)\n new_logo[self._logo_border, :3] = foreground_color\n new_logo[np.invert(self._logo_border), :3] = background_color\n new_logo[..., -1] = self._logo_raw[..., -1] * 0.7\n\n # Do a convolution to smooth any pixelation\n kernel = np.array([[0, 0.5, 0], [0.5, 1, 0.5], [0, 0.5, 0]])\n kernel = np.expand_dims(kernel / np.sum(kernel), axis=2)\n new_logo = ndi.convolve(new_logo, kernel)\n\n self._logo = new_logo\n self.node.set_data(self._logo)\n # Having opacity < 1 improves blending but throws color balance\n # off which needs to be adjusted if desired\n self.text_node.color = list(text_color) + [0.7]\n\n def _on_visible_change(self, event):\n \"\"\"Change visibiliy of axes.\"\"\"\n visible = len(self._viewer.layers) == 0\n self.node.visible = visible\n self.text_node.visible = visible\n\n def _on_canvas_change(self, event):\n \"\"\"Change visibiliy of axes.\"\"\"\n if self.node.canvas is not None:\n center = np.divide(self.node.canvas.size, 2)\n else:\n center = np.array([256, 256])\n\n # Calculate some good default 
positions for the logo and text\n center_logo = [\n center[0] - center[1] / 2.4,\n 2 / 3 * center[1] - center[1] / 3,\n ]\n self.node.transform.translate = [center_logo[0], center_logo[1], 0, 0]\n self.node.transform.scale = [\n center[1] / 1.2 / self._logo.shape[0],\n center[1] / 1.2 / self._logo.shape[0],\n 0,\n 0,\n ]\n\n self.text_node.font_size = center[1] / 24\n self.text_node.transform.translate = [\n center[0] - center[1] / 2.4,\n 1.45 * center[1],\n 0,\n 0,\n ]\n", "path": "napari/_vispy/vispy_welcome_visual.py"}]}
2,357
471
gh_patches_debug_45587
rasdani/github-patches
git_diff
saleor__saleor-11760
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> add API version to update_payment_method parameters In the `update_payment_method` function in `payment/gateways/stripe/stripe_api` there should be a `STRIPE_API_VERSION` passed in `stripe.PaymentMethod.modify` </issue> <code> [start of saleor/payment/gateways/stripe/stripe_api.py] 1 import logging 2 from contextlib import contextmanager 3 from decimal import Decimal 4 from typing import Dict, List, Optional, Tuple 5 from urllib.parse import urljoin 6 7 import stripe 8 from django.contrib.sites.models import Site 9 from django.urls import reverse 10 from stripe.error import AuthenticationError, InvalidRequestError, StripeError 11 from stripe.stripe_object import StripeObject 12 13 from ....core.tracing import opentracing_trace 14 from ....core.utils import build_absolute_uri 15 from ...interface import PaymentMethodInfo 16 from ...utils import price_to_minor_unit 17 from .consts import ( 18 AUTOMATIC_CAPTURE_METHOD, 19 MANUAL_CAPTURE_METHOD, 20 METADATA_IDENTIFIER, 21 PLUGIN_ID, 22 STRIPE_API_VERSION, 23 WEBHOOK_EVENTS, 24 WEBHOOK_PATH, 25 ) 26 27 logger = logging.getLogger(__name__) 28 29 30 @contextmanager 31 def stripe_opentracing_trace(span_name): 32 with opentracing_trace( 33 span_name=span_name, component_name="payment", service_name="stripe" 34 ): 35 yield 36 37 38 def is_secret_api_key_valid(api_key: str): 39 """Call api to check if api_key is a correct key.""" 40 try: 41 with stripe_opentracing_trace("stripe.WebhookEndpoint.list"): 42 stripe.WebhookEndpoint.list(api_key, stripe_version=STRIPE_API_VERSION) 43 return True 44 except AuthenticationError: 45 return False 46 47 48 def _extra_log_data(error: StripeError, payment_intent_id: Optional[str] = None): 49 data = { 50 "error_message": error.user_message, 51 "http_status": error.http_status, 52 "code": error.code, 53 } 54 if payment_intent_id is not None: 55 data["payment_intent_id"] = payment_intent_id 56 return data 57 58 59 def subscribe_webhook(api_key: str, channel_slug: str) -> Optional[StripeObject]: 60 domain = Site.objects.get_current().domain 61 api_path = reverse( 62 "plugins-per-channel", 63 kwargs={"plugin_id": PLUGIN_ID, "channel_slug": channel_slug}, 64 ) 65 66 base_url = build_absolute_uri(api_path) 67 webhook_url = urljoin(base_url, WEBHOOK_PATH) 68 69 with stripe_opentracing_trace("stripe.WebhookEndpoint.create"): 70 try: 71 return stripe.WebhookEndpoint.create( 72 api_key=api_key, 73 url=webhook_url, 74 enabled_events=WEBHOOK_EVENTS, 75 metadata={METADATA_IDENTIFIER: domain}, 76 stripe_version=STRIPE_API_VERSION, 77 ) 78 except StripeError as error: 79 logger.warning( 80 "Failed to create Stripe webhook", 81 extra=_extra_log_data(error), 82 ) 83 return None 84 85 86 def delete_webhook(api_key: str, webhook_id: str): 87 try: 88 with stripe_opentracing_trace("stripe.WebhookEndpoint.delete"): 89 stripe.WebhookEndpoint.delete( 90 webhook_id, 91 api_key=api_key, 92 stripe_version=STRIPE_API_VERSION, 93 ) 94 except InvalidRequestError: 95 # webhook doesn't exist 96 pass 97 98 99 def get_or_create_customer( 100 api_key: str, 101 customer_id: Optional[str] = None, 102 customer_email: Optional[str] = None, 103 ) -> Optional[StripeObject]: 104 try: 105 if customer_id: 106 with stripe_opentracing_trace("stripe.Customer.retrieve"): 107 return stripe.Customer.retrieve( 108 customer_id, 109 api_key=api_key, 110 stripe_version=STRIPE_API_VERSION, 111 ) 112 with stripe_opentracing_trace("stripe.Customer.create"): 113 
return stripe.Customer.create( 114 api_key=api_key, email=customer_email, stripe_version=STRIPE_API_VERSION 115 ) 116 except StripeError as error: 117 logger.warning( 118 "Failed to get/create Stripe customer", 119 extra=_extra_log_data(error), 120 ) 121 return None 122 123 124 def create_payment_intent( 125 api_key: str, 126 amount: Decimal, 127 currency: str, 128 auto_capture: bool = True, 129 customer: Optional[StripeObject] = None, 130 payment_method_id: Optional[str] = None, 131 metadata: Optional[dict] = None, 132 setup_future_usage: Optional[str] = None, 133 off_session: Optional[bool] = None, 134 payment_method_types: Optional[List[str]] = None, 135 customer_email: Optional[str] = None, 136 ) -> Tuple[Optional[StripeObject], Optional[StripeError]]: 137 138 capture_method = AUTOMATIC_CAPTURE_METHOD if auto_capture else MANUAL_CAPTURE_METHOD 139 additional_params = {} 140 141 if customer: 142 additional_params["customer"] = customer 143 144 if payment_method_id and customer: 145 additional_params["payment_method"] = payment_method_id 146 147 additional_params["off_session"] = off_session if off_session else False 148 if off_session: 149 additional_params["confirm"] = True 150 151 if setup_future_usage in ["on_session", "off_session"] and not payment_method_id: 152 additional_params["setup_future_usage"] = setup_future_usage 153 154 if metadata: 155 additional_params["metadata"] = metadata 156 157 if payment_method_types and isinstance(payment_method_types, list): 158 additional_params["payment_method_types"] = payment_method_types 159 160 if customer_email: 161 additional_params["receipt_email"] = customer_email 162 163 try: 164 with stripe_opentracing_trace("stripe.PaymentIntent.create"): 165 intent = stripe.PaymentIntent.create( 166 api_key=api_key, 167 amount=price_to_minor_unit(amount, currency), 168 currency=currency, 169 capture_method=capture_method, 170 stripe_version=STRIPE_API_VERSION, 171 **additional_params, 172 ) 173 return intent, None 174 except StripeError as error: 175 logger.warning( 176 "Failed to create Stripe payment intent", extra=_extra_log_data(error) 177 ) 178 return None, error 179 180 181 def update_payment_method( 182 api_key: str, 183 payment_method_id: str, 184 metadata: Dict[str, str], 185 ): 186 with stripe_opentracing_trace("stripe.PaymentMethod.modify"): 187 try: 188 stripe.PaymentMethod.modify( 189 payment_method_id, 190 api_key=api_key, 191 metadata=metadata, 192 ) 193 except StripeError as error: 194 logger.warning( 195 "Failed to assign channel slug to payment method", 196 extra=_extra_log_data(error), 197 ) 198 199 200 def list_customer_payment_methods( 201 api_key: str, customer_id: str 202 ) -> Tuple[Optional[StripeObject], Optional[StripeError]]: 203 try: 204 with stripe_opentracing_trace("stripe.PaymentMethod.list"): 205 payment_methods = stripe.PaymentMethod.list( 206 api_key=api_key, 207 customer=customer_id, 208 stripe_version=STRIPE_API_VERSION, 209 type="card", # we support only cards for now 210 ) 211 return payment_methods, None 212 except StripeError as error: 213 return None, error 214 215 216 def retrieve_payment_intent( 217 api_key: str, payment_intent_id: str 218 ) -> Tuple[Optional[StripeObject], Optional[StripeError]]: 219 try: 220 with stripe_opentracing_trace("stripe.PaymentIntent.retrieve"): 221 payment_intent = stripe.PaymentIntent.retrieve( 222 payment_intent_id, 223 api_key=api_key, 224 stripe_version=STRIPE_API_VERSION, 225 ) 226 return payment_intent, None 227 except StripeError as error: 228 logger.warning( 229 "Unable 
to retrieve a payment intent", 230 extra=_extra_log_data(error), 231 ) 232 return None, error 233 234 235 def capture_payment_intent( 236 api_key: str, payment_intent_id: str, amount_to_capture: int 237 ) -> Tuple[Optional[StripeObject], Optional[StripeError]]: 238 try: 239 with stripe_opentracing_trace("stripe.PaymentIntent.capture"): 240 payment_intent = stripe.PaymentIntent.capture( 241 payment_intent_id, 242 amount_to_capture=amount_to_capture, 243 api_key=api_key, 244 stripe_version=STRIPE_API_VERSION, 245 ) 246 return payment_intent, None 247 except StripeError as error: 248 logger.warning( 249 "Unable to capture a payment intent", 250 extra=_extra_log_data(error), 251 ) 252 return None, error 253 254 255 def refund_payment_intent( 256 api_key: str, payment_intent_id: str, amount_to_refund: int 257 ) -> Tuple[Optional[StripeObject], Optional[StripeError]]: 258 try: 259 with stripe_opentracing_trace("stripe.Refund.create"): 260 refund = stripe.Refund.create( 261 payment_intent=payment_intent_id, 262 amount=amount_to_refund, 263 api_key=api_key, 264 stripe_version=STRIPE_API_VERSION, 265 ) 266 return refund, None 267 except StripeError as error: 268 logger.warning( 269 "Unable to refund a payment intent", 270 extra=_extra_log_data(error), 271 ) 272 return None, error 273 274 275 def cancel_payment_intent( 276 api_key: str, payment_intent_id: str 277 ) -> Tuple[Optional[StripeObject], Optional[StripeError]]: 278 try: 279 with stripe_opentracing_trace("stripe.PaymentIntent.cancel"): 280 payment_intent = stripe.PaymentIntent.cancel( 281 payment_intent_id, 282 api_key=api_key, 283 stripe_version=STRIPE_API_VERSION, 284 ) 285 return payment_intent, None 286 except StripeError as error: 287 logger.warning( 288 "Unable to cancel a payment intent", 289 extra=_extra_log_data(error), 290 ) 291 292 return None, error 293 294 295 def construct_stripe_event( 296 api_key: str, payload: bytes, sig_header: str, endpoint_secret: str 297 ) -> StripeObject: 298 with stripe_opentracing_trace("stripe.Webhook.construct_event"): 299 return stripe.Webhook.construct_event( 300 payload, sig_header, endpoint_secret, api_key=api_key 301 ) 302 303 304 def get_payment_method_details( 305 payment_intent: StripeObject, 306 ) -> Optional[PaymentMethodInfo]: 307 charges = payment_intent.get("charges", None) 308 payment_method_info = None 309 if charges: 310 charges_data = charges.get("data", []) 311 if not charges_data: 312 return None 313 charge_data = charges_data[-1] 314 payment_method_details = charge_data.get("payment_method_details", {}) 315 316 if payment_method_details.get("type") == "card": 317 card_details = payment_method_details.get("card", {}) 318 exp_year = card_details.get("exp_year", "") 319 exp_year = int(exp_year) if exp_year else None 320 exp_month = card_details.get("exp_month", "") 321 exp_month = int(exp_month) if exp_month else None 322 payment_method_info = PaymentMethodInfo( 323 last_4=card_details.get("last4", ""), 324 exp_year=exp_year, 325 exp_month=exp_month, 326 brand=card_details.get("brand", ""), 327 type="card", 328 ) 329 return payment_method_info 330 [end of saleor/payment/gateways/stripe/stripe_api.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/saleor/payment/gateways/stripe/stripe_api.py b/saleor/payment/gateways/stripe/stripe_api.py --- a/saleor/payment/gateways/stripe/stripe_api.py +++ b/saleor/payment/gateways/stripe/stripe_api.py @@ -27,6 +27,9 @@ logger = logging.getLogger(__name__) +stripe.api_version = STRIPE_API_VERSION + + @contextmanager def stripe_opentracing_trace(span_name): with opentracing_trace( @@ -39,7 +42,7 @@ """Call api to check if api_key is a correct key.""" try: with stripe_opentracing_trace("stripe.WebhookEndpoint.list"): - stripe.WebhookEndpoint.list(api_key, stripe_version=STRIPE_API_VERSION) + stripe.WebhookEndpoint.list(api_key) return True except AuthenticationError: return False @@ -73,7 +76,6 @@ url=webhook_url, enabled_events=WEBHOOK_EVENTS, metadata={METADATA_IDENTIFIER: domain}, - stripe_version=STRIPE_API_VERSION, ) except StripeError as error: logger.warning( @@ -89,7 +91,6 @@ stripe.WebhookEndpoint.delete( webhook_id, api_key=api_key, - stripe_version=STRIPE_API_VERSION, ) except InvalidRequestError: # webhook doesn't exist @@ -107,11 +108,11 @@ return stripe.Customer.retrieve( customer_id, api_key=api_key, - stripe_version=STRIPE_API_VERSION, ) with stripe_opentracing_trace("stripe.Customer.create"): return stripe.Customer.create( - api_key=api_key, email=customer_email, stripe_version=STRIPE_API_VERSION + api_key=api_key, + email=customer_email, ) except StripeError as error: logger.warning( @@ -167,7 +168,6 @@ amount=price_to_minor_unit(amount, currency), currency=currency, capture_method=capture_method, - stripe_version=STRIPE_API_VERSION, **additional_params, ) return intent, None @@ -205,7 +205,6 @@ payment_methods = stripe.PaymentMethod.list( api_key=api_key, customer=customer_id, - stripe_version=STRIPE_API_VERSION, type="card", # we support only cards for now ) return payment_methods, None @@ -221,7 +220,6 @@ payment_intent = stripe.PaymentIntent.retrieve( payment_intent_id, api_key=api_key, - stripe_version=STRIPE_API_VERSION, ) return payment_intent, None except StripeError as error: @@ -241,7 +239,6 @@ payment_intent_id, amount_to_capture=amount_to_capture, api_key=api_key, - stripe_version=STRIPE_API_VERSION, ) return payment_intent, None except StripeError as error: @@ -261,7 +258,6 @@ payment_intent=payment_intent_id, amount=amount_to_refund, api_key=api_key, - stripe_version=STRIPE_API_VERSION, ) return refund, None except StripeError as error: @@ -280,7 +276,6 @@ payment_intent = stripe.PaymentIntent.cancel( payment_intent_id, api_key=api_key, - stripe_version=STRIPE_API_VERSION, ) return payment_intent, None except StripeError as error:
{"golden_diff": "diff --git a/saleor/payment/gateways/stripe/stripe_api.py b/saleor/payment/gateways/stripe/stripe_api.py\n--- a/saleor/payment/gateways/stripe/stripe_api.py\n+++ b/saleor/payment/gateways/stripe/stripe_api.py\n@@ -27,6 +27,9 @@\n logger = logging.getLogger(__name__)\n \n \n+stripe.api_version = STRIPE_API_VERSION\n+\n+\n @contextmanager\n def stripe_opentracing_trace(span_name):\n with opentracing_trace(\n@@ -39,7 +42,7 @@\n \"\"\"Call api to check if api_key is a correct key.\"\"\"\n try:\n with stripe_opentracing_trace(\"stripe.WebhookEndpoint.list\"):\n- stripe.WebhookEndpoint.list(api_key, stripe_version=STRIPE_API_VERSION)\n+ stripe.WebhookEndpoint.list(api_key)\n return True\n except AuthenticationError:\n return False\n@@ -73,7 +76,6 @@\n url=webhook_url,\n enabled_events=WEBHOOK_EVENTS,\n metadata={METADATA_IDENTIFIER: domain},\n- stripe_version=STRIPE_API_VERSION,\n )\n except StripeError as error:\n logger.warning(\n@@ -89,7 +91,6 @@\n stripe.WebhookEndpoint.delete(\n webhook_id,\n api_key=api_key,\n- stripe_version=STRIPE_API_VERSION,\n )\n except InvalidRequestError:\n # webhook doesn't exist\n@@ -107,11 +108,11 @@\n return stripe.Customer.retrieve(\n customer_id,\n api_key=api_key,\n- stripe_version=STRIPE_API_VERSION,\n )\n with stripe_opentracing_trace(\"stripe.Customer.create\"):\n return stripe.Customer.create(\n- api_key=api_key, email=customer_email, stripe_version=STRIPE_API_VERSION\n+ api_key=api_key,\n+ email=customer_email,\n )\n except StripeError as error:\n logger.warning(\n@@ -167,7 +168,6 @@\n amount=price_to_minor_unit(amount, currency),\n currency=currency,\n capture_method=capture_method,\n- stripe_version=STRIPE_API_VERSION,\n **additional_params,\n )\n return intent, None\n@@ -205,7 +205,6 @@\n payment_methods = stripe.PaymentMethod.list(\n api_key=api_key,\n customer=customer_id,\n- stripe_version=STRIPE_API_VERSION,\n type=\"card\", # we support only cards for now\n )\n return payment_methods, None\n@@ -221,7 +220,6 @@\n payment_intent = stripe.PaymentIntent.retrieve(\n payment_intent_id,\n api_key=api_key,\n- stripe_version=STRIPE_API_VERSION,\n )\n return payment_intent, None\n except StripeError as error:\n@@ -241,7 +239,6 @@\n payment_intent_id,\n amount_to_capture=amount_to_capture,\n api_key=api_key,\n- stripe_version=STRIPE_API_VERSION,\n )\n return payment_intent, None\n except StripeError as error:\n@@ -261,7 +258,6 @@\n payment_intent=payment_intent_id,\n amount=amount_to_refund,\n api_key=api_key,\n- stripe_version=STRIPE_API_VERSION,\n )\n return refund, None\n except StripeError as error:\n@@ -280,7 +276,6 @@\n payment_intent = stripe.PaymentIntent.cancel(\n payment_intent_id,\n api_key=api_key,\n- stripe_version=STRIPE_API_VERSION,\n )\n return payment_intent, None\n except StripeError as error:\n", "issue": "add API version to update_payment_method parameters\nIn the `update_payment_method` function in `payment/gateways/stripe/stripe_api` there should be a `STRIPE_API_VERSION` passed in `stripe.PaymentMethod.modify`\n", "before_files": [{"content": "import logging\nfrom contextlib import contextmanager\nfrom decimal import Decimal\nfrom typing import Dict, List, Optional, Tuple\nfrom urllib.parse import urljoin\n\nimport stripe\nfrom django.contrib.sites.models import Site\nfrom django.urls import reverse\nfrom stripe.error import AuthenticationError, InvalidRequestError, StripeError\nfrom stripe.stripe_object import StripeObject\n\nfrom ....core.tracing import opentracing_trace\nfrom ....core.utils import 
build_absolute_uri\nfrom ...interface import PaymentMethodInfo\nfrom ...utils import price_to_minor_unit\nfrom .consts import (\n AUTOMATIC_CAPTURE_METHOD,\n MANUAL_CAPTURE_METHOD,\n METADATA_IDENTIFIER,\n PLUGIN_ID,\n STRIPE_API_VERSION,\n WEBHOOK_EVENTS,\n WEBHOOK_PATH,\n)\n\nlogger = logging.getLogger(__name__)\n\n\n@contextmanager\ndef stripe_opentracing_trace(span_name):\n with opentracing_trace(\n span_name=span_name, component_name=\"payment\", service_name=\"stripe\"\n ):\n yield\n\n\ndef is_secret_api_key_valid(api_key: str):\n \"\"\"Call api to check if api_key is a correct key.\"\"\"\n try:\n with stripe_opentracing_trace(\"stripe.WebhookEndpoint.list\"):\n stripe.WebhookEndpoint.list(api_key, stripe_version=STRIPE_API_VERSION)\n return True\n except AuthenticationError:\n return False\n\n\ndef _extra_log_data(error: StripeError, payment_intent_id: Optional[str] = None):\n data = {\n \"error_message\": error.user_message,\n \"http_status\": error.http_status,\n \"code\": error.code,\n }\n if payment_intent_id is not None:\n data[\"payment_intent_id\"] = payment_intent_id\n return data\n\n\ndef subscribe_webhook(api_key: str, channel_slug: str) -> Optional[StripeObject]:\n domain = Site.objects.get_current().domain\n api_path = reverse(\n \"plugins-per-channel\",\n kwargs={\"plugin_id\": PLUGIN_ID, \"channel_slug\": channel_slug},\n )\n\n base_url = build_absolute_uri(api_path)\n webhook_url = urljoin(base_url, WEBHOOK_PATH)\n\n with stripe_opentracing_trace(\"stripe.WebhookEndpoint.create\"):\n try:\n return stripe.WebhookEndpoint.create(\n api_key=api_key,\n url=webhook_url,\n enabled_events=WEBHOOK_EVENTS,\n metadata={METADATA_IDENTIFIER: domain},\n stripe_version=STRIPE_API_VERSION,\n )\n except StripeError as error:\n logger.warning(\n \"Failed to create Stripe webhook\",\n extra=_extra_log_data(error),\n )\n return None\n\n\ndef delete_webhook(api_key: str, webhook_id: str):\n try:\n with stripe_opentracing_trace(\"stripe.WebhookEndpoint.delete\"):\n stripe.WebhookEndpoint.delete(\n webhook_id,\n api_key=api_key,\n stripe_version=STRIPE_API_VERSION,\n )\n except InvalidRequestError:\n # webhook doesn't exist\n pass\n\n\ndef get_or_create_customer(\n api_key: str,\n customer_id: Optional[str] = None,\n customer_email: Optional[str] = None,\n) -> Optional[StripeObject]:\n try:\n if customer_id:\n with stripe_opentracing_trace(\"stripe.Customer.retrieve\"):\n return stripe.Customer.retrieve(\n customer_id,\n api_key=api_key,\n stripe_version=STRIPE_API_VERSION,\n )\n with stripe_opentracing_trace(\"stripe.Customer.create\"):\n return stripe.Customer.create(\n api_key=api_key, email=customer_email, stripe_version=STRIPE_API_VERSION\n )\n except StripeError as error:\n logger.warning(\n \"Failed to get/create Stripe customer\",\n extra=_extra_log_data(error),\n )\n return None\n\n\ndef create_payment_intent(\n api_key: str,\n amount: Decimal,\n currency: str,\n auto_capture: bool = True,\n customer: Optional[StripeObject] = None,\n payment_method_id: Optional[str] = None,\n metadata: Optional[dict] = None,\n setup_future_usage: Optional[str] = None,\n off_session: Optional[bool] = None,\n payment_method_types: Optional[List[str]] = None,\n customer_email: Optional[str] = None,\n) -> Tuple[Optional[StripeObject], Optional[StripeError]]:\n\n capture_method = AUTOMATIC_CAPTURE_METHOD if auto_capture else MANUAL_CAPTURE_METHOD\n additional_params = {}\n\n if customer:\n additional_params[\"customer\"] = customer\n\n if payment_method_id and customer:\n 
additional_params[\"payment_method\"] = payment_method_id\n\n additional_params[\"off_session\"] = off_session if off_session else False\n if off_session:\n additional_params[\"confirm\"] = True\n\n if setup_future_usage in [\"on_session\", \"off_session\"] and not payment_method_id:\n additional_params[\"setup_future_usage\"] = setup_future_usage\n\n if metadata:\n additional_params[\"metadata\"] = metadata\n\n if payment_method_types and isinstance(payment_method_types, list):\n additional_params[\"payment_method_types\"] = payment_method_types\n\n if customer_email:\n additional_params[\"receipt_email\"] = customer_email\n\n try:\n with stripe_opentracing_trace(\"stripe.PaymentIntent.create\"):\n intent = stripe.PaymentIntent.create(\n api_key=api_key,\n amount=price_to_minor_unit(amount, currency),\n currency=currency,\n capture_method=capture_method,\n stripe_version=STRIPE_API_VERSION,\n **additional_params,\n )\n return intent, None\n except StripeError as error:\n logger.warning(\n \"Failed to create Stripe payment intent\", extra=_extra_log_data(error)\n )\n return None, error\n\n\ndef update_payment_method(\n api_key: str,\n payment_method_id: str,\n metadata: Dict[str, str],\n):\n with stripe_opentracing_trace(\"stripe.PaymentMethod.modify\"):\n try:\n stripe.PaymentMethod.modify(\n payment_method_id,\n api_key=api_key,\n metadata=metadata,\n )\n except StripeError as error:\n logger.warning(\n \"Failed to assign channel slug to payment method\",\n extra=_extra_log_data(error),\n )\n\n\ndef list_customer_payment_methods(\n api_key: str, customer_id: str\n) -> Tuple[Optional[StripeObject], Optional[StripeError]]:\n try:\n with stripe_opentracing_trace(\"stripe.PaymentMethod.list\"):\n payment_methods = stripe.PaymentMethod.list(\n api_key=api_key,\n customer=customer_id,\n stripe_version=STRIPE_API_VERSION,\n type=\"card\", # we support only cards for now\n )\n return payment_methods, None\n except StripeError as error:\n return None, error\n\n\ndef retrieve_payment_intent(\n api_key: str, payment_intent_id: str\n) -> Tuple[Optional[StripeObject], Optional[StripeError]]:\n try:\n with stripe_opentracing_trace(\"stripe.PaymentIntent.retrieve\"):\n payment_intent = stripe.PaymentIntent.retrieve(\n payment_intent_id,\n api_key=api_key,\n stripe_version=STRIPE_API_VERSION,\n )\n return payment_intent, None\n except StripeError as error:\n logger.warning(\n \"Unable to retrieve a payment intent\",\n extra=_extra_log_data(error),\n )\n return None, error\n\n\ndef capture_payment_intent(\n api_key: str, payment_intent_id: str, amount_to_capture: int\n) -> Tuple[Optional[StripeObject], Optional[StripeError]]:\n try:\n with stripe_opentracing_trace(\"stripe.PaymentIntent.capture\"):\n payment_intent = stripe.PaymentIntent.capture(\n payment_intent_id,\n amount_to_capture=amount_to_capture,\n api_key=api_key,\n stripe_version=STRIPE_API_VERSION,\n )\n return payment_intent, None\n except StripeError as error:\n logger.warning(\n \"Unable to capture a payment intent\",\n extra=_extra_log_data(error),\n )\n return None, error\n\n\ndef refund_payment_intent(\n api_key: str, payment_intent_id: str, amount_to_refund: int\n) -> Tuple[Optional[StripeObject], Optional[StripeError]]:\n try:\n with stripe_opentracing_trace(\"stripe.Refund.create\"):\n refund = stripe.Refund.create(\n payment_intent=payment_intent_id,\n amount=amount_to_refund,\n api_key=api_key,\n stripe_version=STRIPE_API_VERSION,\n )\n return refund, None\n except StripeError as error:\n logger.warning(\n \"Unable to refund a 
payment intent\",\n extra=_extra_log_data(error),\n )\n return None, error\n\n\ndef cancel_payment_intent(\n api_key: str, payment_intent_id: str\n) -> Tuple[Optional[StripeObject], Optional[StripeError]]:\n try:\n with stripe_opentracing_trace(\"stripe.PaymentIntent.cancel\"):\n payment_intent = stripe.PaymentIntent.cancel(\n payment_intent_id,\n api_key=api_key,\n stripe_version=STRIPE_API_VERSION,\n )\n return payment_intent, None\n except StripeError as error:\n logger.warning(\n \"Unable to cancel a payment intent\",\n extra=_extra_log_data(error),\n )\n\n return None, error\n\n\ndef construct_stripe_event(\n api_key: str, payload: bytes, sig_header: str, endpoint_secret: str\n) -> StripeObject:\n with stripe_opentracing_trace(\"stripe.Webhook.construct_event\"):\n return stripe.Webhook.construct_event(\n payload, sig_header, endpoint_secret, api_key=api_key\n )\n\n\ndef get_payment_method_details(\n payment_intent: StripeObject,\n) -> Optional[PaymentMethodInfo]:\n charges = payment_intent.get(\"charges\", None)\n payment_method_info = None\n if charges:\n charges_data = charges.get(\"data\", [])\n if not charges_data:\n return None\n charge_data = charges_data[-1]\n payment_method_details = charge_data.get(\"payment_method_details\", {})\n\n if payment_method_details.get(\"type\") == \"card\":\n card_details = payment_method_details.get(\"card\", {})\n exp_year = card_details.get(\"exp_year\", \"\")\n exp_year = int(exp_year) if exp_year else None\n exp_month = card_details.get(\"exp_month\", \"\")\n exp_month = int(exp_month) if exp_month else None\n payment_method_info = PaymentMethodInfo(\n last_4=card_details.get(\"last4\", \"\"),\n exp_year=exp_year,\n exp_month=exp_month,\n brand=card_details.get(\"brand\", \"\"),\n type=\"card\",\n )\n return payment_method_info\n", "path": "saleor/payment/gateways/stripe/stripe_api.py"}]}
3,731
812
gh_patches_debug_2760
rasdani/github-patches
git_diff
getredash__redash-464
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Error running query: datetime.time(13, 52, 27) is not JSON serializable My table schema: ``` sql CREATE TABLE F_entrances ( id SERIAL PRIMARY KEY, timeOfEntrance time, customerId int REFERENCES D_customers ); ``` (and yes, I committed the horrible sin of camel_case vs underScore. I'll be fixing that soonish) The query ``` sql SELECT timeofentrance FROM F_entrances ``` Gives me the error `Error running query: datetime.time(13, 52, 27) is not JSON serializable`. I worked around it with `to_char` but this seems to be a problem at the [Python layer](http://stackoverflow.com/a/11875813/1216976). </issue> <code> [start of redash/utils.py] 1 import cStringIO 2 import csv 3 import codecs 4 import decimal 5 import datetime 6 import json 7 import re 8 import hashlib 9 import sqlparse 10 import pytz 11 12 COMMENTS_REGEX = re.compile("/\*.*?\*/") 13 14 15 class SQLMetaData(object): 16 TABLE_SELECTION_KEYWORDS = ('FROM', 'JOIN', 'LEFT JOIN', 'FULL JOIN', 'RIGHT JOIN', 'CROSS JOIN', 'INNER JOIN', 17 'OUTER JOIN', 'LEFT OUTER JOIN', 'RIGHT OUTER JOIN', 'FULL OUTER JOIN') 18 19 def __init__(self, sql): 20 self.sql = sql 21 self.parsed_sql = sqlparse.parse(self.sql) 22 23 self.has_ddl_statements = self._find_ddl_statements() 24 self.has_non_select_dml_statements = self._find_dml_statements() 25 self.used_tables = self._find_tables() 26 27 def _find_ddl_statements(self): 28 for statement in self.parsed_sql: 29 if len([x for x in statement.flatten() if x.ttype == sqlparse.tokens.DDL]): 30 return True 31 32 return False 33 34 def _find_tables(self): 35 tables = set() 36 for statement in self.parsed_sql: 37 tables.update(self.extract_table_names(statement.tokens)) 38 39 return tables 40 41 def extract_table_names(self, tokens): 42 tables = set() 43 tokens = [t for t in tokens if t.ttype not in (sqlparse.tokens.Whitespace, sqlparse.tokens.Newline)] 44 45 for i in range(len(tokens)): 46 if tokens[i].is_group(): 47 tables.update(self.extract_table_names(tokens[i].tokens)) 48 else: 49 if tokens[i].ttype == sqlparse.tokens.Keyword and tokens[i].normalized in self.TABLE_SELECTION_KEYWORDS: 50 if isinstance(tokens[i + 1], sqlparse.sql.Identifier): 51 tables.add(tokens[i + 1].value) 52 53 if isinstance(tokens[i + 1], sqlparse.sql.IdentifierList): 54 tables.update(set([t.value for t in tokens[i+1].get_identifiers()])) 55 return tables 56 57 def _find_dml_statements(self): 58 for statement in self.parsed_sql: 59 for token in statement.flatten(): 60 if token.ttype == sqlparse.tokens.DML and token.normalized != 'SELECT': 61 return True 62 63 return False 64 65 66 def utcnow(): 67 """Return datetime.now value with timezone specified. 68 69 Without the timezone data, when the timestamp stored to the database it gets the current timezone of the server, 70 which leads to errors in calculations. 71 """ 72 return datetime.datetime.now(pytz.utc) 73 74 def slugify(s): 75 return re.sub('[^a-z0-9_\-]+', '-', s.lower()) 76 77 78 def gen_query_hash(sql): 79 """Returns hash of the given query after stripping all comments, line breaks and multiple 80 spaces, and lower casing all text. 81 82 TODO: possible issue - the following queries will get the same id: 83 1. SELECT 1 FROM table WHERE column='Value'; 84 2. 
SELECT 1 FROM table where column='value'; 85 """ 86 sql = COMMENTS_REGEX.sub("", sql) 87 sql = "".join(sql.split()).lower() 88 return hashlib.md5(sql.encode('utf-8')).hexdigest() 89 90 91 class JSONEncoder(json.JSONEncoder): 92 """Custom JSON encoding class, to handle Decimal and datetime.date instances. 93 """ 94 def default(self, o): 95 if isinstance(o, decimal.Decimal): 96 return float(o) 97 98 if isinstance(o, datetime.date): 99 return o.isoformat() 100 101 super(JSONEncoder, self).default(o) 102 103 104 def json_dumps(data): 105 return json.dumps(data, cls=JSONEncoder) 106 107 108 class UnicodeWriter: 109 """ 110 A CSV writer which will write rows to CSV file "f", 111 which is encoded in the given encoding. 112 """ 113 def __init__(self, f, dialect=csv.excel, encoding="utf-8", **kwds): 114 # Redirect output to a queue 115 self.queue = cStringIO.StringIO() 116 self.writer = csv.writer(self.queue, dialect=dialect, **kwds) 117 self.stream = f 118 self.encoder = codecs.getincrementalencoder(encoding)() 119 120 def _encode_utf8(self, val): 121 if isinstance(val, (unicode, str)): 122 return val.encode('utf-8') 123 124 return val 125 126 def writerow(self, row): 127 self.writer.writerow([self._encode_utf8(s) for s in row]) 128 # Fetch UTF-8 output from the queue ... 129 data = self.queue.getvalue() 130 data = data.decode("utf-8") 131 # ... and reencode it into the target encoding 132 data = self.encoder.encode(data) 133 # write to the target stream 134 self.stream.write(data) 135 # empty queue 136 self.queue.truncate(0) 137 138 def writerows(self, rows): 139 for row in rows: 140 self.writerow(row) 141 [end of redash/utils.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/redash/utils.py b/redash/utils.py --- a/redash/utils.py +++ b/redash/utils.py @@ -95,7 +95,7 @@ if isinstance(o, decimal.Decimal): return float(o) - if isinstance(o, datetime.date): + if isinstance(o, (datetime.date, datetime.time, datetime.timedelta)): return o.isoformat() super(JSONEncoder, self).default(o)
{"golden_diff": "diff --git a/redash/utils.py b/redash/utils.py\n--- a/redash/utils.py\n+++ b/redash/utils.py\n@@ -95,7 +95,7 @@\n if isinstance(o, decimal.Decimal):\n return float(o)\n \n- if isinstance(o, datetime.date):\n+ if isinstance(o, (datetime.date, datetime.time, datetime.timedelta)):\n return o.isoformat()\n \n super(JSONEncoder, self).default(o)\n", "issue": "Error running query: datetime.time(13, 52, 27) is not JSON serializable\nMy table schema:\n\n``` sql\nCREATE TABLE F_entrances (\n id SERIAL PRIMARY KEY,\n timeOfEntrance time,\n customerId int REFERENCES D_customers\n);\n```\n\n(and yes, I committed the horrible sin of camel_case vs underScore. I'll be fixing that soonish)\n\nThe query\n\n``` sql\nSELECT\ntimeofentrance\nFROM F_entrances\n```\n\nGives me the error `Error running query: datetime.time(13, 52, 27) is not JSON serializable`. I worked around it with `to_char` but this seems to be a problem at the [Python layer](http://stackoverflow.com/a/11875813/1216976).\n\n", "before_files": [{"content": "import cStringIO\nimport csv\nimport codecs\nimport decimal\nimport datetime\nimport json\nimport re\nimport hashlib\nimport sqlparse\nimport pytz\n\nCOMMENTS_REGEX = re.compile(\"/\\*.*?\\*/\")\n\n\nclass SQLMetaData(object):\n TABLE_SELECTION_KEYWORDS = ('FROM', 'JOIN', 'LEFT JOIN', 'FULL JOIN', 'RIGHT JOIN', 'CROSS JOIN', 'INNER JOIN',\n 'OUTER JOIN', 'LEFT OUTER JOIN', 'RIGHT OUTER JOIN', 'FULL OUTER JOIN')\n\n def __init__(self, sql):\n self.sql = sql\n self.parsed_sql = sqlparse.parse(self.sql)\n\n self.has_ddl_statements = self._find_ddl_statements()\n self.has_non_select_dml_statements = self._find_dml_statements()\n self.used_tables = self._find_tables()\n\n def _find_ddl_statements(self):\n for statement in self.parsed_sql:\n if len([x for x in statement.flatten() if x.ttype == sqlparse.tokens.DDL]):\n return True\n\n return False\n\n def _find_tables(self):\n tables = set()\n for statement in self.parsed_sql:\n tables.update(self.extract_table_names(statement.tokens))\n\n return tables\n\n def extract_table_names(self, tokens):\n tables = set()\n tokens = [t for t in tokens if t.ttype not in (sqlparse.tokens.Whitespace, sqlparse.tokens.Newline)]\n\n for i in range(len(tokens)):\n if tokens[i].is_group():\n tables.update(self.extract_table_names(tokens[i].tokens))\n else:\n if tokens[i].ttype == sqlparse.tokens.Keyword and tokens[i].normalized in self.TABLE_SELECTION_KEYWORDS:\n if isinstance(tokens[i + 1], sqlparse.sql.Identifier):\n tables.add(tokens[i + 1].value)\n\n if isinstance(tokens[i + 1], sqlparse.sql.IdentifierList):\n tables.update(set([t.value for t in tokens[i+1].get_identifiers()]))\n return tables\n\n def _find_dml_statements(self):\n for statement in self.parsed_sql:\n for token in statement.flatten():\n if token.ttype == sqlparse.tokens.DML and token.normalized != 'SELECT':\n return True\n\n return False\n\n\ndef utcnow():\n \"\"\"Return datetime.now value with timezone specified.\n\n Without the timezone data, when the timestamp stored to the database it gets the current timezone of the server,\n which leads to errors in calculations.\n \"\"\"\n return datetime.datetime.now(pytz.utc)\n\ndef slugify(s):\n return re.sub('[^a-z0-9_\\-]+', '-', s.lower())\n\n\ndef gen_query_hash(sql):\n \"\"\"Returns hash of the given query after stripping all comments, line breaks and multiple\n spaces, and lower casing all text.\n\n TODO: possible issue - the following queries will get the same id:\n 1. SELECT 1 FROM table WHERE column='Value';\n 2. 
SELECT 1 FROM table where column='value';\n \"\"\"\n sql = COMMENTS_REGEX.sub(\"\", sql)\n sql = \"\".join(sql.split()).lower()\n return hashlib.md5(sql.encode('utf-8')).hexdigest()\n\n\nclass JSONEncoder(json.JSONEncoder):\n \"\"\"Custom JSON encoding class, to handle Decimal and datetime.date instances.\n \"\"\"\n def default(self, o):\n if isinstance(o, decimal.Decimal):\n return float(o)\n\n if isinstance(o, datetime.date):\n return o.isoformat()\n \n super(JSONEncoder, self).default(o)\n\n\ndef json_dumps(data):\n return json.dumps(data, cls=JSONEncoder)\n\n\nclass UnicodeWriter:\n \"\"\"\n A CSV writer which will write rows to CSV file \"f\",\n which is encoded in the given encoding.\n \"\"\"\n def __init__(self, f, dialect=csv.excel, encoding=\"utf-8\", **kwds):\n # Redirect output to a queue\n self.queue = cStringIO.StringIO()\n self.writer = csv.writer(self.queue, dialect=dialect, **kwds)\n self.stream = f\n self.encoder = codecs.getincrementalencoder(encoding)()\n\n def _encode_utf8(self, val):\n if isinstance(val, (unicode, str)):\n return val.encode('utf-8')\n\n return val\n\n def writerow(self, row):\n self.writer.writerow([self._encode_utf8(s) for s in row])\n # Fetch UTF-8 output from the queue ...\n data = self.queue.getvalue()\n data = data.decode(\"utf-8\")\n # ... and reencode it into the target encoding\n data = self.encoder.encode(data)\n # write to the target stream\n self.stream.write(data)\n # empty queue\n self.queue.truncate(0)\n\n def writerows(self, rows):\n for row in rows:\n self.writerow(row)\n", "path": "redash/utils.py"}]}
2,074
96
gh_patches_debug_1663
rasdani/github-patches
git_diff
Textualize__rich-211
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> [BUG] Deprecation warning due to invalid escape sequences **Describe the bug** Deprecation warnings are raised due to invalid escape sequences. This can be fixed by using raw strings or escaping the literals. pyupgrade also helps in automatic conversion : https://github.com/asottile/pyupgrade/ **To Reproduce** ``` ./tests/test_markup.py:26: DeprecationWarning: invalid escape sequence \[ assert escape("foo[bar]") == "foo\[bar]" ./tests/test_markup.py:30: DeprecationWarning: invalid escape sequence \[ result = list(_parse("[foo]hello[/foo][bar]world[/]\[escaped]")) ./rich/markup.py:50: DeprecationWarning: invalid escape sequence \[ return markup.replace("[", "\[") ``` **Platform** What platform (Win/Linux/Mac) are you running on? What terminal software are you using. Which version of Rich? </issue> <code> [start of rich/markup.py] 1 import re 2 from typing import Iterable, List, NamedTuple, Optional, Tuple, Union 3 4 from .errors import MarkupError 5 from .style import Style 6 from .text import Span, Text 7 from ._emoji_replace import _emoji_replace 8 9 10 RE_TAGS = re.compile( 11 r""" 12 (\\\[)| 13 \[([a-z#\/].*?)\] 14 """, 15 re.VERBOSE, 16 ) 17 18 19 class Tag(NamedTuple): 20 """A tag in console markup.""" 21 22 name: str 23 """The tag name. e.g. 'bold'.""" 24 parameters: Optional[str] 25 """Any additional parameters after the name.""" 26 27 def __str__(self) -> str: 28 return ( 29 self.name if self.parameters is None else f"{self.name} {self.parameters}" 30 ) 31 32 @property 33 def markup(self) -> str: 34 return ( 35 f"[{self.name}]" 36 if self.parameters is None 37 else f"[{self.name}={self.parameters}]" 38 ) 39 40 41 def escape(markup: str) -> str: 42 """Escapes text so that it won't be interpreted as markup. 43 44 Args: 45 markup (str): Content to be inserted in to markup. 46 47 Returns: 48 str: Markup with square brackets escaped. 49 """ 50 return markup.replace("[", "\[") 51 52 53 def _parse(markup: str) -> Iterable[Tuple[int, Optional[str], Optional[Tag]]]: 54 """Parse markup in to an iterable of tuples of (position, text, tag). 55 56 Args: 57 markup (str): A string containing console markup 58 59 """ 60 position = 0 61 for match in RE_TAGS.finditer(markup): 62 (escape_open, tag_text) = match.groups() 63 start, end = match.span() 64 if start > position: 65 yield start, markup[position:start], None 66 if escape_open: 67 yield start, "[", None 68 else: 69 text, equals, parameters = tag_text.partition("=") 70 if equals: 71 yield start, None, Tag(text, parameters) 72 else: 73 yield start, None, Tag(tag_text.strip(), None) 74 position = end 75 if position < len(markup): 76 yield position, markup[position:], None 77 78 79 def render(markup: str, style: Union[str, Style] = "", emoji: bool = True) -> Text: 80 """Render console markup in to a Text instance. 81 82 Args: 83 markup (str): A string containing console markup. 84 emoji (bool, optional): Also render emoji code. Defaults to True. 85 86 Raises: 87 MarkupError: If there is a syntax error in the markup. 88 89 Returns: 90 Text: A test instance. 
91 """ 92 emoji_replace = _emoji_replace 93 if "[" not in markup: 94 return Text(emoji_replace(markup) if emoji else markup, style=style) 95 text = Text(style=style) 96 append = text.append 97 normalize = Style.normalize 98 99 style_stack: List[Tuple[int, Tag]] = [] 100 pop = style_stack.pop 101 102 spans: List[Span] = [] 103 append_span = spans.append 104 105 _Span = Span 106 107 def pop_style(style_name: str) -> Tuple[int, Tag]: 108 """Pop tag matching given style name.""" 109 for index, (_, tag) in enumerate(reversed(style_stack), 1): 110 if tag.name == style_name: 111 return pop(-index) 112 raise KeyError(style_name) 113 114 for position, plain_text, tag in _parse(markup): 115 if plain_text is not None: 116 append(emoji_replace(plain_text) if emoji else plain_text) 117 elif tag is not None: 118 if tag.name.startswith("/"): # Closing tag 119 style_name = tag.name[1:].strip() 120 if style_name: # explicit close 121 style_name = normalize(style_name) 122 try: 123 start, open_tag = pop_style(style_name) 124 except KeyError: 125 raise MarkupError( 126 f"closing tag '{tag.markup}' at position {position} doesn't match any open tag" 127 ) 128 else: # implicit close 129 try: 130 start, open_tag = pop() 131 except IndexError: 132 raise MarkupError( 133 f"closing tag '[/]' at position {position} has nothing to close" 134 ) 135 136 append_span(_Span(start, len(text), str(open_tag))) 137 else: # Opening tag 138 normalized_tag = Tag(normalize(tag.name), tag.parameters) 139 style_stack.append((len(text), normalized_tag)) 140 141 text_length = len(text) 142 while style_stack: 143 start, tag = style_stack.pop() 144 append_span(_Span(start, text_length, str(tag))) 145 146 text.spans = sorted(spans) 147 return text 148 149 150 if __name__ == "__main__": # pragma: no cover 151 # from rich import print 152 from rich.console import Console 153 from rich.text import Text 154 155 console = Console(highlight=False) 156 157 # t = Text.from_markup('Hello [link="https://www.willmcgugan.com"]W[b]o[/b]rld[/]!') 158 # print(repr(t._spans)) 159 160 console.print("Hello [1], [1,2,3] ['hello']") 161 console.print("foo") 162 console.print("Hello [link=https://www.willmcgugan.com]W[b]o[/b]rld[/]!") 163 164 # console.print("[bold]1 [not bold]2[/] 3[/]") 165 166 # console.print("[green]XXX[blue]XXX[/]XXX[/]") 167 [end of rich/markup.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/rich/markup.py b/rich/markup.py --- a/rich/markup.py +++ b/rich/markup.py @@ -47,7 +47,7 @@ Returns: str: Markup with square brackets escaped. """ - return markup.replace("[", "\[") + return markup.replace("[", r"\[") def _parse(markup: str) -> Iterable[Tuple[int, Optional[str], Optional[Tag]]]:
{"golden_diff": "diff --git a/rich/markup.py b/rich/markup.py\n--- a/rich/markup.py\n+++ b/rich/markup.py\n@@ -47,7 +47,7 @@\n Returns:\n str: Markup with square brackets escaped.\n \"\"\"\n- return markup.replace(\"[\", \"\\[\")\n+ return markup.replace(\"[\", r\"\\[\")\n \n \n def _parse(markup: str) -> Iterable[Tuple[int, Optional[str], Optional[Tag]]]:\n", "issue": "[BUG] Deprecation warning due to invalid escape sequences\n**Describe the bug**\r\n\r\nDeprecation warnings are raised due to invalid escape sequences. This can be fixed by using raw strings or escaping the literals. pyupgrade also helps in automatic conversion : https://github.com/asottile/pyupgrade/\r\n\r\n**To Reproduce**\r\n\r\n```\r\n./tests/test_markup.py:26: DeprecationWarning: invalid escape sequence \\[\r\n assert escape(\"foo[bar]\") == \"foo\\[bar]\"\r\n./tests/test_markup.py:30: DeprecationWarning: invalid escape sequence \\[\r\n result = list(_parse(\"[foo]hello[/foo][bar]world[/]\\[escaped]\"))\r\n./rich/markup.py:50: DeprecationWarning: invalid escape sequence \\[\r\n return markup.replace(\"[\", \"\\[\")\r\n```\r\n\r\n**Platform**\r\nWhat platform (Win/Linux/Mac) are you running on? What terminal software are you using. Which version of Rich?\r\n\n", "before_files": [{"content": "import re\nfrom typing import Iterable, List, NamedTuple, Optional, Tuple, Union\n\nfrom .errors import MarkupError\nfrom .style import Style\nfrom .text import Span, Text\nfrom ._emoji_replace import _emoji_replace\n\n\nRE_TAGS = re.compile(\n r\"\"\"\n(\\\\\\[)|\n\\[([a-z#\\/].*?)\\]\n\"\"\",\n re.VERBOSE,\n)\n\n\nclass Tag(NamedTuple):\n \"\"\"A tag in console markup.\"\"\"\n\n name: str\n \"\"\"The tag name. e.g. 'bold'.\"\"\"\n parameters: Optional[str]\n \"\"\"Any additional parameters after the name.\"\"\"\n\n def __str__(self) -> str:\n return (\n self.name if self.parameters is None else f\"{self.name} {self.parameters}\"\n )\n\n @property\n def markup(self) -> str:\n return (\n f\"[{self.name}]\"\n if self.parameters is None\n else f\"[{self.name}={self.parameters}]\"\n )\n\n\ndef escape(markup: str) -> str:\n \"\"\"Escapes text so that it won't be interpreted as markup. \n\n Args:\n markup (str): Content to be inserted in to markup.\n\n Returns:\n str: Markup with square brackets escaped.\n \"\"\"\n return markup.replace(\"[\", \"\\[\")\n\n\ndef _parse(markup: str) -> Iterable[Tuple[int, Optional[str], Optional[Tag]]]:\n \"\"\"Parse markup in to an iterable of tuples of (position, text, tag).\n \n Args:\n markup (str): A string containing console markup\n \n \"\"\"\n position = 0\n for match in RE_TAGS.finditer(markup):\n (escape_open, tag_text) = match.groups()\n start, end = match.span()\n if start > position:\n yield start, markup[position:start], None\n if escape_open:\n yield start, \"[\", None\n else:\n text, equals, parameters = tag_text.partition(\"=\")\n if equals:\n yield start, None, Tag(text, parameters)\n else:\n yield start, None, Tag(tag_text.strip(), None)\n position = end\n if position < len(markup):\n yield position, markup[position:], None\n\n\ndef render(markup: str, style: Union[str, Style] = \"\", emoji: bool = True) -> Text:\n \"\"\"Render console markup in to a Text instance.\n\n Args:\n markup (str): A string containing console markup.\n emoji (bool, optional): Also render emoji code. 
Defaults to True.\n \n Raises:\n MarkupError: If there is a syntax error in the markup.\n \n Returns:\n Text: A test instance.\n \"\"\"\n emoji_replace = _emoji_replace\n if \"[\" not in markup:\n return Text(emoji_replace(markup) if emoji else markup, style=style)\n text = Text(style=style)\n append = text.append\n normalize = Style.normalize\n\n style_stack: List[Tuple[int, Tag]] = []\n pop = style_stack.pop\n\n spans: List[Span] = []\n append_span = spans.append\n\n _Span = Span\n\n def pop_style(style_name: str) -> Tuple[int, Tag]:\n \"\"\"Pop tag matching given style name.\"\"\"\n for index, (_, tag) in enumerate(reversed(style_stack), 1):\n if tag.name == style_name:\n return pop(-index)\n raise KeyError(style_name)\n\n for position, plain_text, tag in _parse(markup):\n if plain_text is not None:\n append(emoji_replace(plain_text) if emoji else plain_text)\n elif tag is not None:\n if tag.name.startswith(\"/\"): # Closing tag\n style_name = tag.name[1:].strip()\n if style_name: # explicit close\n style_name = normalize(style_name)\n try:\n start, open_tag = pop_style(style_name)\n except KeyError:\n raise MarkupError(\n f\"closing tag '{tag.markup}' at position {position} doesn't match any open tag\"\n )\n else: # implicit close\n try:\n start, open_tag = pop()\n except IndexError:\n raise MarkupError(\n f\"closing tag '[/]' at position {position} has nothing to close\"\n )\n\n append_span(_Span(start, len(text), str(open_tag)))\n else: # Opening tag\n normalized_tag = Tag(normalize(tag.name), tag.parameters)\n style_stack.append((len(text), normalized_tag))\n\n text_length = len(text)\n while style_stack:\n start, tag = style_stack.pop()\n append_span(_Span(start, text_length, str(tag)))\n\n text.spans = sorted(spans)\n return text\n\n\nif __name__ == \"__main__\": # pragma: no cover\n # from rich import print\n from rich.console import Console\n from rich.text import Text\n\n console = Console(highlight=False)\n\n # t = Text.from_markup('Hello [link=\"https://www.willmcgugan.com\"]W[b]o[/b]rld[/]!')\n # print(repr(t._spans))\n\n console.print(\"Hello [1], [1,2,3] ['hello']\")\n console.print(\"foo\")\n console.print(\"Hello [link=https://www.willmcgugan.com]W[b]o[/b]rld[/]!\")\n\n # console.print(\"[bold]1 [not bold]2[/] 3[/]\")\n\n # console.print(\"[green]XXX[blue]XXX[/]XXX[/]\")\n", "path": "rich/markup.py"}]}
2,337
102
gh_patches_debug_7666
rasdani/github-patches
git_diff
tensorflow__addons-2204
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Drop Python3.5 Support Per SIG Build, python3.5 will not be supported in TF2.4 since it has reached end of life. We should align. </issue> <code> [start of setup.py] 1 # Copyright 2019 The TensorFlow Authors. All Rights Reserved. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 # ============================================================================== 15 """TensorFlow Addons. 16 17 TensorFlow Addons is a repository of contributions that conform to well- 18 established API patterns, but implement new functionality not available 19 in core TensorFlow. TensorFlow natively supports a large number of 20 operators, layers, metrics, losses, and optimizers. However, in a fast 21 moving field like ML, there are many interesting new developments that 22 cannot be integrated into core TensorFlow (because their broad 23 applicability is not yet clear, or it is mostly used by a smaller subset 24 of the community). 25 """ 26 27 import os 28 from pathlib import Path 29 import sys 30 31 from datetime import datetime 32 from setuptools import find_packages 33 from setuptools import setup 34 from setuptools.dist import Distribution 35 from setuptools import Extension 36 37 DOCLINES = __doc__.split("\n") 38 39 40 def get_last_commit_time() -> str: 41 string_time = os.getenv("NIGHTLY_TIME").replace('"', "") 42 return datetime.strptime(string_time, "%Y-%m-%dT%H:%M:%SZ").strftime("%Y%m%d%H%M%S") 43 44 45 def get_project_name_version(): 46 # Version 47 version = {} 48 base_dir = os.path.dirname(os.path.abspath(__file__)) 49 with open(os.path.join(base_dir, "tensorflow_addons", "version.py")) as fp: 50 exec(fp.read(), version) 51 52 project_name = "tensorflow-addons" 53 if "--nightly" in sys.argv: 54 project_name = "tfa-nightly" 55 version["__version__"] += get_last_commit_time() 56 sys.argv.remove("--nightly") 57 58 return project_name, version 59 60 61 def get_ext_modules(): 62 ext_modules = [] 63 if "--platlib-patch" in sys.argv: 64 if sys.platform.startswith("linux"): 65 # Manylinux2010 requires a patch for platlib 66 ext_modules = [Extension("_foo", ["stub.cc"])] 67 sys.argv.remove("--platlib-patch") 68 return ext_modules 69 70 71 class BinaryDistribution(Distribution): 72 """This class is needed in order to create OS specific wheels.""" 73 74 def has_ext_modules(self): 75 return True 76 77 78 project_name, version = get_project_name_version() 79 min_tf_version = version["MIN_TF_VERSION"] 80 max_tf_version = version["MAX_TF_VERSION"] 81 setup( 82 name=project_name, 83 version=version["__version__"], 84 description=DOCLINES[0], 85 long_description="\n".join(DOCLINES[2:]), 86 author="Google Inc.", 87 author_email="[email protected]", 88 packages=find_packages(), 89 ext_modules=get_ext_modules(), 90 install_requires=Path("requirements.txt").read_text().splitlines(), 91 extras_require={ 92 "tensorflow": ["tensorflow>={},<{}".format(min_tf_version, max_tf_version)], 93 
"tensorflow-gpu": [ 94 "tensorflow-gpu>={},<{}".format(min_tf_version, max_tf_version) 95 ], 96 "tensorflow-cpu": [ 97 "tensorflow-cpu>={},<{}".format(min_tf_version, max_tf_version) 98 ], 99 }, 100 include_package_data=True, 101 zip_safe=False, 102 distclass=BinaryDistribution, 103 classifiers=[ 104 "Development Status :: 4 - Beta", 105 "Intended Audience :: Developers", 106 "Intended Audience :: Education", 107 "Intended Audience :: Science/Research", 108 "License :: OSI Approved :: Apache Software License", 109 "Programming Language :: Python :: 3", 110 "Programming Language :: Python :: 3.5", 111 "Programming Language :: Python :: 3.6", 112 "Programming Language :: Python :: 3.7", 113 "Programming Language :: Python :: 3.8", 114 "Topic :: Scientific/Engineering :: Mathematics", 115 "Topic :: Software Development :: Libraries :: Python Modules", 116 "Topic :: Software Development :: Libraries", 117 ], 118 license="Apache 2.0", 119 keywords="tensorflow addons machine learning", 120 ) 121 [end of setup.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -107,7 +107,6 @@ "Intended Audience :: Science/Research", "License :: OSI Approved :: Apache Software License", "Programming Language :: Python :: 3", - "Programming Language :: Python :: 3.5", "Programming Language :: Python :: 3.6", "Programming Language :: Python :: 3.7", "Programming Language :: Python :: 3.8",
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -107,7 +107,6 @@\n \"Intended Audience :: Science/Research\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Programming Language :: Python :: 3\",\n- \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n", "issue": "Drop Python3.5 Support\nPer SIG Build, python3.5 will not be supported in TF2.4 since it has reached end of life. We should align.\n", "before_files": [{"content": "# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"TensorFlow Addons.\n\nTensorFlow Addons is a repository of contributions that conform to well-\nestablished API patterns, but implement new functionality not available\nin core TensorFlow. TensorFlow natively supports a large number of\noperators, layers, metrics, losses, and optimizers. However, in a fast\nmoving field like ML, there are many interesting new developments that\ncannot be integrated into core TensorFlow (because their broad\napplicability is not yet clear, or it is mostly used by a smaller subset\nof the community).\n\"\"\"\n\nimport os\nfrom pathlib import Path\nimport sys\n\nfrom datetime import datetime\nfrom setuptools import find_packages\nfrom setuptools import setup\nfrom setuptools.dist import Distribution\nfrom setuptools import Extension\n\nDOCLINES = __doc__.split(\"\\n\")\n\n\ndef get_last_commit_time() -> str:\n string_time = os.getenv(\"NIGHTLY_TIME\").replace('\"', \"\")\n return datetime.strptime(string_time, \"%Y-%m-%dT%H:%M:%SZ\").strftime(\"%Y%m%d%H%M%S\")\n\n\ndef get_project_name_version():\n # Version\n version = {}\n base_dir = os.path.dirname(os.path.abspath(__file__))\n with open(os.path.join(base_dir, \"tensorflow_addons\", \"version.py\")) as fp:\n exec(fp.read(), version)\n\n project_name = \"tensorflow-addons\"\n if \"--nightly\" in sys.argv:\n project_name = \"tfa-nightly\"\n version[\"__version__\"] += get_last_commit_time()\n sys.argv.remove(\"--nightly\")\n\n return project_name, version\n\n\ndef get_ext_modules():\n ext_modules = []\n if \"--platlib-patch\" in sys.argv:\n if sys.platform.startswith(\"linux\"):\n # Manylinux2010 requires a patch for platlib\n ext_modules = [Extension(\"_foo\", [\"stub.cc\"])]\n sys.argv.remove(\"--platlib-patch\")\n return ext_modules\n\n\nclass BinaryDistribution(Distribution):\n \"\"\"This class is needed in order to create OS specific wheels.\"\"\"\n\n def has_ext_modules(self):\n return True\n\n\nproject_name, version = get_project_name_version()\nmin_tf_version = version[\"MIN_TF_VERSION\"]\nmax_tf_version = version[\"MAX_TF_VERSION\"]\nsetup(\n name=project_name,\n version=version[\"__version__\"],\n description=DOCLINES[0],\n long_description=\"\\n\".join(DOCLINES[2:]),\n author=\"Google Inc.\",\n 
author_email=\"[email protected]\",\n packages=find_packages(),\n ext_modules=get_ext_modules(),\n install_requires=Path(\"requirements.txt\").read_text().splitlines(),\n extras_require={\n \"tensorflow\": [\"tensorflow>={},<{}\".format(min_tf_version, max_tf_version)],\n \"tensorflow-gpu\": [\n \"tensorflow-gpu>={},<{}\".format(min_tf_version, max_tf_version)\n ],\n \"tensorflow-cpu\": [\n \"tensorflow-cpu>={},<{}\".format(min_tf_version, max_tf_version)\n ],\n },\n include_package_data=True,\n zip_safe=False,\n distclass=BinaryDistribution,\n classifiers=[\n \"Development Status :: 4 - Beta\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: Education\",\n \"Intended Audience :: Science/Research\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Topic :: Scientific/Engineering :: Mathematics\",\n \"Topic :: Software Development :: Libraries :: Python Modules\",\n \"Topic :: Software Development :: Libraries\",\n ],\n license=\"Apache 2.0\",\n keywords=\"tensorflow addons machine learning\",\n)\n", "path": "setup.py"}]}
1,771
114
gh_patches_debug_3093
rasdani/github-patches
git_diff
googleapis__python-bigquery-135
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> A new release of rsa dependency breaks Python 2.7 tests Recent `rsa` releases are not compatible with Python 2.7 anymore, the last compatible version is 4.0. We need to bound its version in order to preserve Python 2.7 compatibility. > Major changes in 4.1 Version 4.0 was the last version to support Python 2 and 3.4. Version 4.1 is compatible with Python 3.5+ only. </issue> <code> [start of setup.py] 1 # Copyright 2018 Google LLC 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 import io 16 import os 17 18 import setuptools 19 20 21 # Package metadata. 22 23 name = "google-cloud-bigquery" 24 description = "Google BigQuery API client library" 25 version = "1.25.0" 26 # Should be one of: 27 # 'Development Status :: 3 - Alpha' 28 # 'Development Status :: 4 - Beta' 29 # 'Development Status :: 5 - Production/Stable' 30 release_status = "Development Status :: 5 - Production/Stable" 31 dependencies = [ 32 'enum34; python_version < "3.4"', 33 "google-auth >= 1.9.0, < 2.0dev", 34 "google-api-core >= 1.15.0, < 2.0dev", 35 "google-cloud-core >= 1.1.0, < 2.0dev", 36 "google-resumable-media >= 0.5.0, < 0.6dev", 37 "protobuf >= 3.6.0", 38 "six >=1.13.0,< 2.0.0dev", 39 ] 40 extras = { 41 "bqstorage": [ 42 "google-cloud-bigquery-storage >= 1.0.0, <2.0.0dev", 43 # Due to an issue in pip's dependency resolver, the `grpc` extra is not 44 # installed, even though `google-cloud-bigquery-storage` specifies it 45 # as `google-api-core[grpc]`. We thus need to explicitly specify it here. 46 # See: https://github.com/googleapis/python-bigquery/issues/83 47 "grpcio >= 1.8.2, < 2.0dev", 48 "pyarrow>=0.16.0, < 2.0dev", 49 ], 50 "pandas": ["pandas>=0.17.1"], 51 # Exclude PyArrow dependency from Windows Python 2.7. 52 'pyarrow: platform_system != "Windows" or python_version >= "3.4"': [ 53 # Bad Linux release for 0.14.0. 54 # https://issues.apache.org/jira/browse/ARROW-5868 55 "pyarrow>=0.4.1, != 0.14.0" 56 ], 57 "tqdm": ["tqdm >= 4.0.0, <5.0.0dev"], 58 "fastparquet": [ 59 "fastparquet", 60 "python-snappy", 61 # llvmlite >= 0.32.0 cannot be installed on Python 3.5 and below 62 # (building the wheel fails), thus needs to be restricted. 63 # See: https://github.com/googleapis/python-bigquery/issues/78 64 "llvmlite <= 0.31.0", 65 ], 66 } 67 68 all_extras = [] 69 70 for extra in extras: 71 if extra == "fastparquet": 72 # Skip fastparquet from "all" because it is redundant with pyarrow and 73 # creates a dependency on pre-release versions of numpy. See: 74 # https://github.com/googleapis/google-cloud-python/issues/8549 75 continue 76 all_extras.extend(extras[extra]) 77 78 extras["all"] = all_extras 79 80 # Setup boilerplate below this line. 
81 82 package_root = os.path.abspath(os.path.dirname(__file__)) 83 84 readme_filename = os.path.join(package_root, "README.rst") 85 with io.open(readme_filename, encoding="utf-8") as readme_file: 86 readme = readme_file.read() 87 88 # Only include packages under the 'google' namespace. Do not include tests, 89 # benchmarks, etc. 90 packages = [ 91 package for package in setuptools.find_packages() if package.startswith("google") 92 ] 93 94 # Determine which namespaces are needed. 95 namespaces = ["google"] 96 if "google.cloud" in packages: 97 namespaces.append("google.cloud") 98 99 100 setuptools.setup( 101 name=name, 102 version=version, 103 description=description, 104 long_description=readme, 105 author="Google LLC", 106 author_email="[email protected]", 107 license="Apache 2.0", 108 url="https://github.com/googleapis/python-bigquery", 109 classifiers=[ 110 release_status, 111 "Intended Audience :: Developers", 112 "License :: OSI Approved :: Apache Software License", 113 "Programming Language :: Python", 114 "Programming Language :: Python :: 2", 115 "Programming Language :: Python :: 2.7", 116 "Programming Language :: Python :: 3", 117 "Programming Language :: Python :: 3.5", 118 "Programming Language :: Python :: 3.6", 119 "Programming Language :: Python :: 3.7", 120 "Programming Language :: Python :: 3.8", 121 "Operating System :: OS Independent", 122 "Topic :: Internet", 123 ], 124 platforms="Posix; MacOS X; Windows", 125 packages=packages, 126 namespace_packages=namespaces, 127 install_requires=dependencies, 128 extras_require=extras, 129 python_requires=">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*", 130 include_package_data=True, 131 zip_safe=False, 132 ) 133 [end of setup.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -36,6 +36,10 @@ "google-resumable-media >= 0.5.0, < 0.6dev", "protobuf >= 3.6.0", "six >=1.13.0,< 2.0.0dev", + # rsa >= 4.1 is not compatible with Python 2 + # https://github.com/sybrenstuvel/python-rsa/issues/152 + 'rsa <4.1; python_version < "3"', + 'rsa >=3.1.4, <5; python_version >= "3"', ] extras = { "bqstorage": [
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -36,6 +36,10 @@\n \"google-resumable-media >= 0.5.0, < 0.6dev\",\n \"protobuf >= 3.6.0\",\n \"six >=1.13.0,< 2.0.0dev\",\n+ # rsa >= 4.1 is not compatible with Python 2\n+ # https://github.com/sybrenstuvel/python-rsa/issues/152\n+ 'rsa <4.1; python_version < \"3\"',\n+ 'rsa >=3.1.4, <5; python_version >= \"3\"',\n ]\n extras = {\n \"bqstorage\": [\n", "issue": "A new release of rsa dependency breaks Python 2.7 tests\nRecent `rsa` releases are not compatible with Python 2.7 anymore, the last compatible version is 4.0. We need to bound its version in order to preserve Python 2.7 compatibility.\r\n\r\n> Major changes in 4.1\r\nVersion 4.0 was the last version to support Python 2 and 3.4. Version 4.1 is compatible with Python 3.5+ only.\n", "before_files": [{"content": "# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport io\nimport os\n\nimport setuptools\n\n\n# Package metadata.\n\nname = \"google-cloud-bigquery\"\ndescription = \"Google BigQuery API client library\"\nversion = \"1.25.0\"\n# Should be one of:\n# 'Development Status :: 3 - Alpha'\n# 'Development Status :: 4 - Beta'\n# 'Development Status :: 5 - Production/Stable'\nrelease_status = \"Development Status :: 5 - Production/Stable\"\ndependencies = [\n 'enum34; python_version < \"3.4\"',\n \"google-auth >= 1.9.0, < 2.0dev\",\n \"google-api-core >= 1.15.0, < 2.0dev\",\n \"google-cloud-core >= 1.1.0, < 2.0dev\",\n \"google-resumable-media >= 0.5.0, < 0.6dev\",\n \"protobuf >= 3.6.0\",\n \"six >=1.13.0,< 2.0.0dev\",\n]\nextras = {\n \"bqstorage\": [\n \"google-cloud-bigquery-storage >= 1.0.0, <2.0.0dev\",\n # Due to an issue in pip's dependency resolver, the `grpc` extra is not\n # installed, even though `google-cloud-bigquery-storage` specifies it\n # as `google-api-core[grpc]`. We thus need to explicitly specify it here.\n # See: https://github.com/googleapis/python-bigquery/issues/83\n \"grpcio >= 1.8.2, < 2.0dev\",\n \"pyarrow>=0.16.0, < 2.0dev\",\n ],\n \"pandas\": [\"pandas>=0.17.1\"],\n # Exclude PyArrow dependency from Windows Python 2.7.\n 'pyarrow: platform_system != \"Windows\" or python_version >= \"3.4\"': [\n # Bad Linux release for 0.14.0.\n # https://issues.apache.org/jira/browse/ARROW-5868\n \"pyarrow>=0.4.1, != 0.14.0\"\n ],\n \"tqdm\": [\"tqdm >= 4.0.0, <5.0.0dev\"],\n \"fastparquet\": [\n \"fastparquet\",\n \"python-snappy\",\n # llvmlite >= 0.32.0 cannot be installed on Python 3.5 and below\n # (building the wheel fails), thus needs to be restricted.\n # See: https://github.com/googleapis/python-bigquery/issues/78\n \"llvmlite <= 0.31.0\",\n ],\n}\n\nall_extras = []\n\nfor extra in extras:\n if extra == \"fastparquet\":\n # Skip fastparquet from \"all\" because it is redundant with pyarrow and\n # creates a dependency on pre-release versions of numpy. 
See:\n # https://github.com/googleapis/google-cloud-python/issues/8549\n continue\n all_extras.extend(extras[extra])\n\nextras[\"all\"] = all_extras\n\n# Setup boilerplate below this line.\n\npackage_root = os.path.abspath(os.path.dirname(__file__))\n\nreadme_filename = os.path.join(package_root, \"README.rst\")\nwith io.open(readme_filename, encoding=\"utf-8\") as readme_file:\n readme = readme_file.read()\n\n# Only include packages under the 'google' namespace. Do not include tests,\n# benchmarks, etc.\npackages = [\n package for package in setuptools.find_packages() if package.startswith(\"google\")\n]\n\n# Determine which namespaces are needed.\nnamespaces = [\"google\"]\nif \"google.cloud\" in packages:\n namespaces.append(\"google.cloud\")\n\n\nsetuptools.setup(\n name=name,\n version=version,\n description=description,\n long_description=readme,\n author=\"Google LLC\",\n author_email=\"[email protected]\",\n license=\"Apache 2.0\",\n url=\"https://github.com/googleapis/python-bigquery\",\n classifiers=[\n release_status,\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Operating System :: OS Independent\",\n \"Topic :: Internet\",\n ],\n platforms=\"Posix; MacOS X; Windows\",\n packages=packages,\n namespace_packages=namespaces,\n install_requires=dependencies,\n extras_require=extras,\n python_requires=\">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*\",\n include_package_data=True,\n zip_safe=False,\n)\n", "path": "setup.py"}]}
2,150
169
gh_patches_debug_40943
rasdani/github-patches
git_diff
ARM-DOE__ACT-728
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Sunset Stamen maps in GeoDisplay and potentially replace Stamen is transitioning their maps to stadia at the end of October 2023. ACT will need to deprecate that feature in GeoDisplay and potentially look for replacements. https://github.com/SciTools/cartopy/pull/2266 </issue> <code> [start of act/plotting/geodisplay.py] 1 """ 2 Stores the class for GeographicPlotDisplay. 3 4 """ 5 6 import matplotlib 7 import matplotlib.pyplot as plt 8 import numpy as np 9 import pandas as pd 10 11 from .plot import Display 12 13 try: 14 import cartopy.crs as ccrs 15 import cartopy.feature as cfeature 16 from cartopy.io.img_tiles import Stamen 17 18 CARTOPY_AVAILABLE = True 19 except ImportError: 20 CARTOPY_AVAILABLE = False 21 22 23 class GeographicPlotDisplay(Display): 24 """ 25 A class for making geographic tracer plot of aircraft, ship or other moving 26 platform plot. 27 28 This is inherited from the :func:`act.plotting.Display` 29 class and has therefore has the same attributes as that class. 30 See :func:`act.plotting.Display` 31 for more information. There are no additional attributes or parameters 32 to this class. 33 34 In order to create geographic plots, ACT needs the Cartopy package to be 35 installed on your system. More information about 36 Cartopy go here:https://scitools.org.uk/cartopy/docs/latest/ . 37 38 """ 39 40 def __init__(self, ds, ds_name=None, **kwargs): 41 if not CARTOPY_AVAILABLE: 42 raise ImportError( 43 'Cartopy needs to be installed on your ' 'system to make geographic display plots.' 44 ) 45 super().__init__(ds, ds_name, **kwargs) 46 if self.fig is None: 47 self.fig = plt.figure(**kwargs) 48 49 def geoplot( 50 self, 51 data_field=None, 52 lat_field='lat', 53 lon_field='lon', 54 dsname=None, 55 cbar_label=None, 56 title=None, 57 projection=None, 58 plot_buffer=0.08, 59 stamen='terrain-background', 60 tile=8, 61 cartopy_feature=None, 62 cmap='rainbow', 63 text=None, 64 gridlines=True, 65 **kwargs, 66 ): 67 """ 68 Creates a latitude and longitude plot of a time series data set with 69 data values indicated by color and described with a colorbar. 70 Latitude values must be in degree north (-90 to 90) and 71 longitude must be in degree east (-180 to 180). 72 73 Parameters 74 ---------- 75 data_field : str 76 Name of data field in the dataset to plot. 77 lat_field : str 78 Name of latitude field in the dataset to use. 79 lon_field : str 80 Name of longitude field in the dataset to use. 81 dsname : str or None 82 The name of the datastream to plot. Set to None to make ACT 83 attempt to automatically determine this. 84 cbar_label : str 85 Label to use with colorbar. If set to None will attempt 86 to create label from long_name and units. 87 title : str 88 Plot title. 89 projection : cartopy.crs object 90 Project to use on plot. See 91 https://scitools.org.uk/cartopy/docs/latest/reference/projections.html?highlight=projections 92 plot_buffer : float 93 Buffer to add around data on plot in lat and lon dimension. 94 stamen : str 95 Dataset to use for background image. Set to None to not use 96 background image. 97 tile : int 98 Tile zoom to use with background image. Higer number indicates 99 more resolution. A value of 8 is typical for a normal sonde plot. 100 cartopy_feature : list of str or str 101 Cartopy feature to add to plot. 102 cmap : str 103 Color map to use for colorbar. 104 text : dictionary 105 Dictionary of {text:[lon,lat]} to add to plot. 
Can have more 106 than one set of text to add. 107 gridlines : boolean 108 Use latitude and longitude gridlines. 109 **kwargs : keyword arguments 110 Any other keyword arguments that will be passed 111 into :func:`matplotlib.pyplot.scatter` when the figure 112 is made. See the matplotlib documentation for further details 113 on what keyword arguments are available. 114 115 """ 116 if dsname is None and len(self._ds.keys()) > 1: 117 raise ValueError( 118 'You must choose a datastream when there are 2 ' 119 'or more datasets in the GeographicPlotDisplay ' 120 'object.' 121 ) 122 elif dsname is None: 123 dsname = list(self._ds.keys())[0] 124 125 if data_field is None: 126 raise ValueError('You must enter the name of the data ' 'to be plotted.') 127 128 if projection is None: 129 if CARTOPY_AVAILABLE: 130 projection = ccrs.PlateCarree() 131 132 # Extract data from the dataset 133 try: 134 lat = self._ds[dsname][lat_field].values 135 except KeyError: 136 raise ValueError( 137 ( 138 'You will need to provide the name of the ' 139 "field if not '{}' to use for latitude " 140 'data.' 141 ).format(lat_field) 142 ) 143 try: 144 lon = self._ds[dsname][lon_field].values 145 except KeyError: 146 raise ValueError( 147 ( 148 'You will need to provide the name of the ' 149 "field if not '{}' to use for longitude " 150 'data.' 151 ).format(lon_field) 152 ) 153 154 # Set up metadata information for display on plot 155 if cbar_label is None: 156 try: 157 cbar_label = ( 158 self._ds[dsname][data_field].attrs['long_name'] 159 + ' (' 160 + self._ds[dsname][data_field].attrs['units'] 161 + ')' 162 ) 163 except KeyError: 164 cbar_label = data_field 165 166 lat_limits = [np.nanmin(lat), np.nanmax(lat)] 167 lon_limits = [np.nanmin(lon), np.nanmax(lon)] 168 box_size = np.max([np.abs(np.diff(lat_limits)), np.abs(np.diff(lon_limits))]) 169 bx_buf = box_size * plot_buffer 170 171 lat_center = np.sum(lat_limits) / 2.0 172 lon_center = np.sum(lon_limits) / 2.0 173 174 lat_limits = [ 175 lat_center - box_size / 2.0 - bx_buf, 176 lat_center + box_size / 2.0 + bx_buf, 177 ] 178 lon_limits = [ 179 lon_center - box_size / 2.0 - bx_buf, 180 lon_center + box_size / 2.0 + bx_buf, 181 ] 182 183 data = self._ds[dsname][data_field].values 184 185 # Create base plot projection 186 ax = plt.axes(projection=projection) 187 plt.subplots_adjust(left=0.01, right=0.99, bottom=0.05, top=0.93) 188 ax.set_extent([lon_limits[0], lon_limits[1], lat_limits[0], lat_limits[1]], crs=projection) 189 190 if title is None: 191 try: 192 dim = list(self._ds[dsname][data_field].dims) 193 ts = pd.to_datetime(str(self._ds[dsname][dim[0]].values[0])) 194 date = ts.strftime('%Y-%m-%d') 195 time_str = ts.strftime('%H:%M:%S') 196 plt.title(' '.join([dsname, 'at', date, time_str])) 197 except NameError: 198 plt.title(dsname) 199 else: 200 plt.title(title) 201 202 if stamen: 203 tiler = Stamen(stamen) 204 ax.add_image(tiler, tile) 205 206 colorbar_map = None 207 if cmap is not None: 208 colorbar_map = matplotlib.colormaps.get_cmap(cmap) 209 sc = ax.scatter(lon, lat, c=data, cmap=colorbar_map, **kwargs) 210 cbar = plt.colorbar(sc) 211 cbar.ax.set_ylabel(cbar_label) 212 if cartopy_feature is not None: 213 if isinstance(cartopy_feature, str): 214 cartopy_feature = [cartopy_feature] 215 cartopy_feature = [ii.upper() for ii in cartopy_feature] 216 if 'STATES' in cartopy_feature: 217 ax.add_feature(cfeature.STATES.with_scale('10m')) 218 if 'LAND' in cartopy_feature: 219 ax.add_feature(cfeature.LAND) 220 if 'OCEAN' in cartopy_feature: 221 
ax.add_feature(cfeature.OCEAN) 222 if 'COASTLINE' in cartopy_feature: 223 ax.add_feature(cfeature.COASTLINE) 224 if 'BORDERS' in cartopy_feature: 225 ax.add_feature(cfeature.BORDERS, linestyle=':') 226 if 'LAKES' in cartopy_feature: 227 ax.add_feature(cfeature.LAKES, alpha=0.5) 228 if 'RIVERS' in cartopy_feature: 229 ax.add_feature(cfeature.RIVERS) 230 if text is not None: 231 for label, location in text.items(): 232 ax.plot(location[0], location[1], marker='*', color='black') 233 ax.text(location[0], location[1], label, color='black') 234 235 if gridlines: 236 if projection == ccrs.PlateCarree() or projection == ccrs.Mercator: 237 gl = ax.gridlines( 238 crs=projection, 239 draw_labels=True, 240 linewidth=1, 241 color='gray', 242 alpha=0.5, 243 linestyle='--', 244 ) 245 gl.top_labels = False 246 gl.left_labels = True 247 gl.bottom_labels = True 248 gl.right_labels = False 249 gl.xlabel_style = {'size': 6, 'color': 'gray'} 250 gl.ylabel_style = {'size': 6, 'color': 'gray'} 251 else: 252 # Labels are only currently supported for PlateCarree and Mercator 253 gl = ax.gridlines( 254 draw_labels=False, 255 linewidth=1, 256 color='gray', 257 alpha=0.5, 258 linestyle='--', 259 ) 260 261 return ax 262 [end of act/plotting/geodisplay.py] [start of examples/plotting/plot_aaf_track.py] 1 """ 2 Plot ARM AAF Flight Path 3 -------------------------------- 4 5 Plot the ARM AAF flight path using the GeographicPlotDisplay 6 7 Author: Joe O'Brien 8 9 """ 10 import matplotlib.pyplot as plt 11 12 import act 13 from act.io.icartt import read_icartt 14 15 # Call the read_icartt function, which supports input 16 # for ICARTT (v2.0) formatted files. 17 # Example file is ARM Aerial Facility Navigation Data 18 ds = read_icartt(act.tests.EXAMPLE_AAF_ICARTT) 19 20 # Use GeographicPlotDisplay for referencing. 21 # NOTE: Cartopy is needed! 22 display = act.plotting.GeographicPlotDisplay(ds, figsize=(12, 10)) 23 24 # Plot the ARM AAF flight track with respect to Pressure Altitude 25 display.geoplot('press_alt', lat_field='lat', lon_field='lon', stamen=None) 26 27 # Display the plot 28 plt.show() 29 [end of examples/plotting/plot_aaf_track.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/act/plotting/geodisplay.py b/act/plotting/geodisplay.py --- a/act/plotting/geodisplay.py +++ b/act/plotting/geodisplay.py @@ -3,6 +3,8 @@ """ +import warnings + import matplotlib import matplotlib.pyplot as plt import numpy as np @@ -13,7 +15,7 @@ try: import cartopy.crs as ccrs import cartopy.feature as cfeature - from cartopy.io.img_tiles import Stamen + from cartopy.io import img_tiles CARTOPY_AVAILABLE = True except ImportError: @@ -56,8 +58,10 @@ title=None, projection=None, plot_buffer=0.08, - stamen='terrain-background', + img_tile=None, + img_tile_args={}, tile=8, + stamen='terrain-background', cartopy_feature=None, cmap='rainbow', text=None, @@ -91,11 +95,18 @@ https://scitools.org.uk/cartopy/docs/latest/reference/projections.html?highlight=projections plot_buffer : float Buffer to add around data on plot in lat and lon dimension. - stamen : str - Dataset to use for background image. Set to None to not use - background image. + img_tile : str + Image to use for the plot background. Set to None to not use + background image. For all image background types, see: + https://scitools.org.uk/cartopy/docs/v0.16/cartopy/io/img_tiles.html + Default is None. + img_tile_args : dict + Keyword arguments for the chosen img_tile. These arguments can be + found for the corresponding img_tile here: + https://scitools.org.uk/cartopy/docs/v0.16/cartopy/io/img_tiles.html + Default is an empty dictionary. tile : int - Tile zoom to use with background image. Higer number indicates + Tile zoom to use with background image. Higher number indicates more resolution. A value of 8 is typical for a normal sonde plot. cartopy_feature : list of str or str Cartopy feature to add to plot. @@ -199,9 +210,16 @@ else: plt.title(title) - if stamen: - tiler = Stamen(stamen) + if stamen and img_tile is None: + tiler = img_tiles.Stamen(stamen) ax.add_image(tiler, tile) + warnings.warn( + "Stamen is deprecated in Cartopy and in future versions of ACT, " + "please use img_tile to specify the image background. ") + else: + if img_tile is not None: + tiler = getattr(img_tiles, img_tile)(**img_tile_args) + ax.add_image(tiler, tile) colorbar_map = None if cmap is not None: diff --git a/examples/plotting/plot_aaf_track.py b/examples/plotting/plot_aaf_track.py --- a/examples/plotting/plot_aaf_track.py +++ b/examples/plotting/plot_aaf_track.py @@ -22,7 +22,7 @@ display = act.plotting.GeographicPlotDisplay(ds, figsize=(12, 10)) # Plot the ARM AAF flight track with respect to Pressure Altitude -display.geoplot('press_alt', lat_field='lat', lon_field='lon', stamen=None) +display.geoplot('press_alt', lat_field='lat', lon_field='lon') # Display the plot plt.show()
{"golden_diff": "diff --git a/act/plotting/geodisplay.py b/act/plotting/geodisplay.py\n--- a/act/plotting/geodisplay.py\n+++ b/act/plotting/geodisplay.py\n@@ -3,6 +3,8 @@\n \n \"\"\"\n \n+import warnings\n+\n import matplotlib\n import matplotlib.pyplot as plt\n import numpy as np\n@@ -13,7 +15,7 @@\n try:\n import cartopy.crs as ccrs\n import cartopy.feature as cfeature\n- from cartopy.io.img_tiles import Stamen\n+ from cartopy.io import img_tiles\n \n CARTOPY_AVAILABLE = True\n except ImportError:\n@@ -56,8 +58,10 @@\n title=None,\n projection=None,\n plot_buffer=0.08,\n- stamen='terrain-background',\n+ img_tile=None,\n+ img_tile_args={},\n tile=8,\n+ stamen='terrain-background',\n cartopy_feature=None,\n cmap='rainbow',\n text=None,\n@@ -91,11 +95,18 @@\n https://scitools.org.uk/cartopy/docs/latest/reference/projections.html?highlight=projections\n plot_buffer : float\n Buffer to add around data on plot in lat and lon dimension.\n- stamen : str\n- Dataset to use for background image. Set to None to not use\n- background image.\n+ img_tile : str\n+ Image to use for the plot background. Set to None to not use\n+ background image. For all image background types, see:\n+ https://scitools.org.uk/cartopy/docs/v0.16/cartopy/io/img_tiles.html\n+ Default is None.\n+ img_tile_args : dict\n+ Keyword arguments for the chosen img_tile. These arguments can be\n+ found for the corresponding img_tile here:\n+ https://scitools.org.uk/cartopy/docs/v0.16/cartopy/io/img_tiles.html\n+ Default is an empty dictionary.\n tile : int\n- Tile zoom to use with background image. Higer number indicates\n+ Tile zoom to use with background image. Higher number indicates\n more resolution. A value of 8 is typical for a normal sonde plot.\n cartopy_feature : list of str or str\n Cartopy feature to add to plot.\n@@ -199,9 +210,16 @@\n else:\n plt.title(title)\n \n- if stamen:\n- tiler = Stamen(stamen)\n+ if stamen and img_tile is None:\n+ tiler = img_tiles.Stamen(stamen)\n ax.add_image(tiler, tile)\n+ warnings.warn(\n+ \"Stamen is deprecated in Cartopy and in future versions of ACT, \"\n+ \"please use img_tile to specify the image background. \")\n+ else:\n+ if img_tile is not None:\n+ tiler = getattr(img_tiles, img_tile)(**img_tile_args)\n+ ax.add_image(tiler, tile)\n \n colorbar_map = None\n if cmap is not None:\ndiff --git a/examples/plotting/plot_aaf_track.py b/examples/plotting/plot_aaf_track.py\n--- a/examples/plotting/plot_aaf_track.py\n+++ b/examples/plotting/plot_aaf_track.py\n@@ -22,7 +22,7 @@\n display = act.plotting.GeographicPlotDisplay(ds, figsize=(12, 10))\n \n # Plot the ARM AAF flight track with respect to Pressure Altitude\n-display.geoplot('press_alt', lat_field='lat', lon_field='lon', stamen=None)\n+display.geoplot('press_alt', lat_field='lat', lon_field='lon')\n \n # Display the plot\n plt.show()\n", "issue": "Sunset Stamen maps in GeoDisplay and potentially replace\nStamen is transitioning their maps to stadia at the end of October 2023. 
ACT will need to deprecate that feature in GeoDisplay and potentially look for replacements.\r\n\r\nhttps://github.com/SciTools/cartopy/pull/2266 \n", "before_files": [{"content": "\"\"\"\nStores the class for GeographicPlotDisplay.\n\n\"\"\"\n\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\n\nfrom .plot import Display\n\ntry:\n import cartopy.crs as ccrs\n import cartopy.feature as cfeature\n from cartopy.io.img_tiles import Stamen\n\n CARTOPY_AVAILABLE = True\nexcept ImportError:\n CARTOPY_AVAILABLE = False\n\n\nclass GeographicPlotDisplay(Display):\n \"\"\"\n A class for making geographic tracer plot of aircraft, ship or other moving\n platform plot.\n\n This is inherited from the :func:`act.plotting.Display`\n class and has therefore has the same attributes as that class.\n See :func:`act.plotting.Display`\n for more information. There are no additional attributes or parameters\n to this class.\n\n In order to create geographic plots, ACT needs the Cartopy package to be\n installed on your system. More information about\n Cartopy go here:https://scitools.org.uk/cartopy/docs/latest/ .\n\n \"\"\"\n\n def __init__(self, ds, ds_name=None, **kwargs):\n if not CARTOPY_AVAILABLE:\n raise ImportError(\n 'Cartopy needs to be installed on your ' 'system to make geographic display plots.'\n )\n super().__init__(ds, ds_name, **kwargs)\n if self.fig is None:\n self.fig = plt.figure(**kwargs)\n\n def geoplot(\n self,\n data_field=None,\n lat_field='lat',\n lon_field='lon',\n dsname=None,\n cbar_label=None,\n title=None,\n projection=None,\n plot_buffer=0.08,\n stamen='terrain-background',\n tile=8,\n cartopy_feature=None,\n cmap='rainbow',\n text=None,\n gridlines=True,\n **kwargs,\n ):\n \"\"\"\n Creates a latitude and longitude plot of a time series data set with\n data values indicated by color and described with a colorbar.\n Latitude values must be in degree north (-90 to 90) and\n longitude must be in degree east (-180 to 180).\n\n Parameters\n ----------\n data_field : str\n Name of data field in the dataset to plot.\n lat_field : str\n Name of latitude field in the dataset to use.\n lon_field : str\n Name of longitude field in the dataset to use.\n dsname : str or None\n The name of the datastream to plot. Set to None to make ACT\n attempt to automatically determine this.\n cbar_label : str\n Label to use with colorbar. If set to None will attempt\n to create label from long_name and units.\n title : str\n Plot title.\n projection : cartopy.crs object\n Project to use on plot. See\n https://scitools.org.uk/cartopy/docs/latest/reference/projections.html?highlight=projections\n plot_buffer : float\n Buffer to add around data on plot in lat and lon dimension.\n stamen : str\n Dataset to use for background image. Set to None to not use\n background image.\n tile : int\n Tile zoom to use with background image. Higer number indicates\n more resolution. A value of 8 is typical for a normal sonde plot.\n cartopy_feature : list of str or str\n Cartopy feature to add to plot.\n cmap : str\n Color map to use for colorbar.\n text : dictionary\n Dictionary of {text:[lon,lat]} to add to plot. Can have more\n than one set of text to add.\n gridlines : boolean\n Use latitude and longitude gridlines.\n **kwargs : keyword arguments\n Any other keyword arguments that will be passed\n into :func:`matplotlib.pyplot.scatter` when the figure\n is made. 
See the matplotlib documentation for further details\n on what keyword arguments are available.\n\n \"\"\"\n if dsname is None and len(self._ds.keys()) > 1:\n raise ValueError(\n 'You must choose a datastream when there are 2 '\n 'or more datasets in the GeographicPlotDisplay '\n 'object.'\n )\n elif dsname is None:\n dsname = list(self._ds.keys())[0]\n\n if data_field is None:\n raise ValueError('You must enter the name of the data ' 'to be plotted.')\n\n if projection is None:\n if CARTOPY_AVAILABLE:\n projection = ccrs.PlateCarree()\n\n # Extract data from the dataset\n try:\n lat = self._ds[dsname][lat_field].values\n except KeyError:\n raise ValueError(\n (\n 'You will need to provide the name of the '\n \"field if not '{}' to use for latitude \"\n 'data.'\n ).format(lat_field)\n )\n try:\n lon = self._ds[dsname][lon_field].values\n except KeyError:\n raise ValueError(\n (\n 'You will need to provide the name of the '\n \"field if not '{}' to use for longitude \"\n 'data.'\n ).format(lon_field)\n )\n\n # Set up metadata information for display on plot\n if cbar_label is None:\n try:\n cbar_label = (\n self._ds[dsname][data_field].attrs['long_name']\n + ' ('\n + self._ds[dsname][data_field].attrs['units']\n + ')'\n )\n except KeyError:\n cbar_label = data_field\n\n lat_limits = [np.nanmin(lat), np.nanmax(lat)]\n lon_limits = [np.nanmin(lon), np.nanmax(lon)]\n box_size = np.max([np.abs(np.diff(lat_limits)), np.abs(np.diff(lon_limits))])\n bx_buf = box_size * plot_buffer\n\n lat_center = np.sum(lat_limits) / 2.0\n lon_center = np.sum(lon_limits) / 2.0\n\n lat_limits = [\n lat_center - box_size / 2.0 - bx_buf,\n lat_center + box_size / 2.0 + bx_buf,\n ]\n lon_limits = [\n lon_center - box_size / 2.0 - bx_buf,\n lon_center + box_size / 2.0 + bx_buf,\n ]\n\n data = self._ds[dsname][data_field].values\n\n # Create base plot projection\n ax = plt.axes(projection=projection)\n plt.subplots_adjust(left=0.01, right=0.99, bottom=0.05, top=0.93)\n ax.set_extent([lon_limits[0], lon_limits[1], lat_limits[0], lat_limits[1]], crs=projection)\n\n if title is None:\n try:\n dim = list(self._ds[dsname][data_field].dims)\n ts = pd.to_datetime(str(self._ds[dsname][dim[0]].values[0]))\n date = ts.strftime('%Y-%m-%d')\n time_str = ts.strftime('%H:%M:%S')\n plt.title(' '.join([dsname, 'at', date, time_str]))\n except NameError:\n plt.title(dsname)\n else:\n plt.title(title)\n\n if stamen:\n tiler = Stamen(stamen)\n ax.add_image(tiler, tile)\n\n colorbar_map = None\n if cmap is not None:\n colorbar_map = matplotlib.colormaps.get_cmap(cmap)\n sc = ax.scatter(lon, lat, c=data, cmap=colorbar_map, **kwargs)\n cbar = plt.colorbar(sc)\n cbar.ax.set_ylabel(cbar_label)\n if cartopy_feature is not None:\n if isinstance(cartopy_feature, str):\n cartopy_feature = [cartopy_feature]\n cartopy_feature = [ii.upper() for ii in cartopy_feature]\n if 'STATES' in cartopy_feature:\n ax.add_feature(cfeature.STATES.with_scale('10m'))\n if 'LAND' in cartopy_feature:\n ax.add_feature(cfeature.LAND)\n if 'OCEAN' in cartopy_feature:\n ax.add_feature(cfeature.OCEAN)\n if 'COASTLINE' in cartopy_feature:\n ax.add_feature(cfeature.COASTLINE)\n if 'BORDERS' in cartopy_feature:\n ax.add_feature(cfeature.BORDERS, linestyle=':')\n if 'LAKES' in cartopy_feature:\n ax.add_feature(cfeature.LAKES, alpha=0.5)\n if 'RIVERS' in cartopy_feature:\n ax.add_feature(cfeature.RIVERS)\n if text is not None:\n for label, location in text.items():\n ax.plot(location[0], location[1], marker='*', color='black')\n ax.text(location[0], location[1], label, 
color='black')\n\n if gridlines:\n if projection == ccrs.PlateCarree() or projection == ccrs.Mercator:\n gl = ax.gridlines(\n crs=projection,\n draw_labels=True,\n linewidth=1,\n color='gray',\n alpha=0.5,\n linestyle='--',\n )\n gl.top_labels = False\n gl.left_labels = True\n gl.bottom_labels = True\n gl.right_labels = False\n gl.xlabel_style = {'size': 6, 'color': 'gray'}\n gl.ylabel_style = {'size': 6, 'color': 'gray'}\n else:\n # Labels are only currently supported for PlateCarree and Mercator\n gl = ax.gridlines(\n draw_labels=False,\n linewidth=1,\n color='gray',\n alpha=0.5,\n linestyle='--',\n )\n\n return ax\n", "path": "act/plotting/geodisplay.py"}, {"content": "\"\"\"\nPlot ARM AAF Flight Path\n--------------------------------\n\nPlot the ARM AAF flight path using the GeographicPlotDisplay\n\nAuthor: Joe O'Brien\n\n\"\"\"\nimport matplotlib.pyplot as plt\n\nimport act\nfrom act.io.icartt import read_icartt\n\n# Call the read_icartt function, which supports input\n# for ICARTT (v2.0) formatted files.\n# Example file is ARM Aerial Facility Navigation Data\nds = read_icartt(act.tests.EXAMPLE_AAF_ICARTT)\n\n# Use GeographicPlotDisplay for referencing.\n# NOTE: Cartopy is needed!\ndisplay = act.plotting.GeographicPlotDisplay(ds, figsize=(12, 10))\n\n# Plot the ARM AAF flight track with respect to Pressure Altitude\ndisplay.geoplot('press_alt', lat_field='lat', lon_field='lon', stamen=None)\n\n# Display the plot\nplt.show()\n", "path": "examples/plotting/plot_aaf_track.py"}]}
3,660
828
gh_patches_debug_54533
rasdani/github-patches
git_diff
dbt-labs__dbt-core-7566
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> [CT-2552] Pin protobuf to greater than 4.0 Some users were seeing an error: > ImportError: cannot import name 'builder' from 'google.protobuf.internal'. The generated types_pb2.py file was built with protobuf 4 and requires protobuf 4 in order to import "builder". </issue> <code> [start of core/setup.py] 1 #!/usr/bin/env python 2 import os 3 import sys 4 5 if sys.version_info < (3, 7, 2): 6 print("Error: dbt does not support this version of Python.") 7 print("Please upgrade to Python 3.7.2 or higher.") 8 sys.exit(1) 9 10 11 from setuptools import setup 12 13 try: 14 from setuptools import find_namespace_packages 15 except ImportError: 16 # the user has a downlevel version of setuptools. 17 print("Error: dbt requires setuptools v40.1.0 or higher.") 18 print('Please upgrade setuptools with "pip install --upgrade setuptools" ' "and try again") 19 sys.exit(1) 20 21 22 this_directory = os.path.abspath(os.path.dirname(__file__)) 23 with open(os.path.join(this_directory, "README.md")) as f: 24 long_description = f.read() 25 26 27 package_name = "dbt-core" 28 package_version = "1.6.0a1" 29 description = """With dbt, data analysts and engineers can build analytics \ 30 the way engineers build applications.""" 31 32 33 setup( 34 name=package_name, 35 version=package_version, 36 description=description, 37 long_description=long_description, 38 long_description_content_type="text/markdown", 39 author="dbt Labs", 40 author_email="[email protected]", 41 url="https://github.com/dbt-labs/dbt-core", 42 packages=find_namespace_packages(include=["dbt", "dbt.*"]), 43 include_package_data=True, 44 test_suite="test", 45 entry_points={ 46 "console_scripts": ["dbt = dbt.cli.main:cli"], 47 }, 48 install_requires=[ 49 "Jinja2==3.1.2", 50 "agate>=1.6,<1.7.1", 51 "click>=7.0,<9", 52 "colorama>=0.3.9,<0.4.7", 53 "hologram>=0.0.14,<=0.0.16", 54 "isodate>=0.6,<0.7", 55 "logbook>=1.5,<1.6", 56 "mashumaro[msgpack]==3.6", 57 "minimal-snowplow-tracker==0.0.2", 58 "networkx>=2.3,<2.8.1;python_version<'3.8'", 59 "networkx>=2.3,<3;python_version>='3.8'", 60 "packaging>20.9", 61 "sqlparse>=0.2.3,<0.4.4", 62 "dbt-extractor~=0.4.1", 63 "typing-extensions>=3.7.4", 64 "werkzeug>=1,<3", 65 "pathspec>=0.9,<0.12", 66 "protobuf>=3.18.3", 67 "pytz>=2015.7", 68 # the following are all to match snowflake-connector-python 69 "requests<3.0.0", 70 "idna>=2.5,<4", 71 "cffi>=1.9,<2.0.0", 72 "pyyaml>=6.0", 73 ], 74 zip_safe=False, 75 classifiers=[ 76 "Development Status :: 5 - Production/Stable", 77 "License :: OSI Approved :: Apache Software License", 78 "Operating System :: Microsoft :: Windows", 79 "Operating System :: MacOS :: MacOS X", 80 "Operating System :: POSIX :: Linux", 81 "Programming Language :: Python :: 3.7", 82 "Programming Language :: Python :: 3.8", 83 "Programming Language :: Python :: 3.9", 84 "Programming Language :: Python :: 3.10", 85 "Programming Language :: Python :: 3.11", 86 ], 87 python_requires=">=3.7.2", 88 ) 89 [end of core/setup.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/core/setup.py b/core/setup.py --- a/core/setup.py +++ b/core/setup.py @@ -63,7 +63,7 @@ "typing-extensions>=3.7.4", "werkzeug>=1,<3", "pathspec>=0.9,<0.12", - "protobuf>=3.18.3", + "protobuf>=4.0.0", "pytz>=2015.7", # the following are all to match snowflake-connector-python "requests<3.0.0",
{"golden_diff": "diff --git a/core/setup.py b/core/setup.py\n--- a/core/setup.py\n+++ b/core/setup.py\n@@ -63,7 +63,7 @@\n \"typing-extensions>=3.7.4\",\n \"werkzeug>=1,<3\",\n \"pathspec>=0.9,<0.12\",\n- \"protobuf>=3.18.3\",\n+ \"protobuf>=4.0.0\",\n \"pytz>=2015.7\",\n # the following are all to match snowflake-connector-python\n \"requests<3.0.0\",\n", "issue": "[CT-2552] Pin protobuf to greater than 4.0\nSome users were seeing an error:\r\n\r\n> ImportError: cannot import name 'builder' from 'google.protobuf.internal'.\r\nThe generated types_pb2.py file was built with protobuf 4 and requires protobuf 4 in order to import \"builder\".\n", "before_files": [{"content": "#!/usr/bin/env python\nimport os\nimport sys\n\nif sys.version_info < (3, 7, 2):\n print(\"Error: dbt does not support this version of Python.\")\n print(\"Please upgrade to Python 3.7.2 or higher.\")\n sys.exit(1)\n\n\nfrom setuptools import setup\n\ntry:\n from setuptools import find_namespace_packages\nexcept ImportError:\n # the user has a downlevel version of setuptools.\n print(\"Error: dbt requires setuptools v40.1.0 or higher.\")\n print('Please upgrade setuptools with \"pip install --upgrade setuptools\" ' \"and try again\")\n sys.exit(1)\n\n\nthis_directory = os.path.abspath(os.path.dirname(__file__))\nwith open(os.path.join(this_directory, \"README.md\")) as f:\n long_description = f.read()\n\n\npackage_name = \"dbt-core\"\npackage_version = \"1.6.0a1\"\ndescription = \"\"\"With dbt, data analysts and engineers can build analytics \\\nthe way engineers build applications.\"\"\"\n\n\nsetup(\n name=package_name,\n version=package_version,\n description=description,\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n author=\"dbt Labs\",\n author_email=\"[email protected]\",\n url=\"https://github.com/dbt-labs/dbt-core\",\n packages=find_namespace_packages(include=[\"dbt\", \"dbt.*\"]),\n include_package_data=True,\n test_suite=\"test\",\n entry_points={\n \"console_scripts\": [\"dbt = dbt.cli.main:cli\"],\n },\n install_requires=[\n \"Jinja2==3.1.2\",\n \"agate>=1.6,<1.7.1\",\n \"click>=7.0,<9\",\n \"colorama>=0.3.9,<0.4.7\",\n \"hologram>=0.0.14,<=0.0.16\",\n \"isodate>=0.6,<0.7\",\n \"logbook>=1.5,<1.6\",\n \"mashumaro[msgpack]==3.6\",\n \"minimal-snowplow-tracker==0.0.2\",\n \"networkx>=2.3,<2.8.1;python_version<'3.8'\",\n \"networkx>=2.3,<3;python_version>='3.8'\",\n \"packaging>20.9\",\n \"sqlparse>=0.2.3,<0.4.4\",\n \"dbt-extractor~=0.4.1\",\n \"typing-extensions>=3.7.4\",\n \"werkzeug>=1,<3\",\n \"pathspec>=0.9,<0.12\",\n \"protobuf>=3.18.3\",\n \"pytz>=2015.7\",\n # the following are all to match snowflake-connector-python\n \"requests<3.0.0\",\n \"idna>=2.5,<4\",\n \"cffi>=1.9,<2.0.0\",\n \"pyyaml>=6.0\",\n ],\n zip_safe=False,\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Operating System :: Microsoft :: Windows\",\n \"Operating System :: MacOS :: MacOS X\",\n \"Operating System :: POSIX :: Linux\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Programming Language :: Python :: 3.11\",\n ],\n python_requires=\">=3.7.2\",\n)\n", "path": "core/setup.py"}]}
1,576
130
gh_patches_debug_4049
rasdani/github-patches
git_diff
strawberry-graphql__strawberry-1811
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Decimal scalars aren't serialized as strings when passed as numbers When using the `Decimal` scalar, if the value passed in is a floating point number, the value isn't serialized as a string, but as if it were still the floating point number which causes precision issues as can be seen in the working example below. [Working example](https://la4de.github.io/strawberry-playground/#UEsDBAoAAAAAAFaAjFQgaUU4EgAAABIAAAAQAAAAcmVxdWlyZW1lbnRzLnR4dHN0cmF3YmVycnktZ3JhcGhxbFBLAwQKAAAAAABWgIxUj3TlatIAAADSAAAACQAAAHNjaGVtYS5weWltcG9ydCBzdHJhd2JlcnJ5CmZyb20gZGVjaW1hbCBpbXBvcnQgRGVjaW1hbAoKQHN0cmF3YmVycnkudHlwZQpjbGFzcyBRdWVyeToKICAgIEBzdHJhd2JlcnJ5LmZpZWxkCiAgICBkZWYgZGVjaW1hbChzZWxmLCB2YWx1ZTogRGVjaW1hbCkgLT4gRGVjaW1hbDoKICAgICAgICByZXR1cm4gdmFsdWUKCnNjaGVtYSA9IHN0cmF3YmVycnkuU2NoZW1hKHF1ZXJ5PVF1ZXJ5KVBLAwQKAAAAAABWgIxUERrh0UMAAABDAAAACQAAAHF1ZXJ5LmdxbHsKICBkZWNpbWFsKHZhbHVlOiAzLjE0KQogIGFub3RoZXJEZWNpbWFsOiBkZWNpbWFsKHZhbHVlOiAiMy4xNCIpCn1QSwMECgAAAAAAVoCMVKi7vnMDAAAAAwAAAA4AAAB2YXJpYWJsZXMuanNvbnsKfVBLAQIUAAoAAAAAAFaAjFQgaUU4EgAAABIAAAAQAAAAAAAAAAAAAAAAAAAAAAByZXF1aXJlbWVudHMudHh0UEsBAhQACgAAAAAAVoCMVI905WrSAAAA0gAAAAkAAAAAAAAAAAAAAAAAQAAAAHNjaGVtYS5weVBLAQIUAAoAAAAAAFaAjFQRGuHRQwAAAEMAAAAJAAAAAAAAAAAAAAAAADkBAABxdWVyeS5ncWxQSwECFAAKAAAAAABWgIxUqLu+cwMAAAADAAAADgAAAAAAAAAAAAAAAACjAQAAdmFyaWFibGVzLmpzb25QSwUGAAAAAAQABADoAAAA0gEAAAAA) Actual output - `"3.140000000000000124344978758017532527446746826171875"` Expected output - `"3.14"` As text: ``` @strawberry.type class DecimalResponse: value: Decimal @strawberry.field def decimals( dec_value: Decimal, ) -> DecimalResponse: return DecimalResponse(value=dec_value) ``` ``` mutation decimals($dec_value: Decimal!) { decimals( decValue: $dec_value ) { ... on DecimalResponse { value } } } { "dec_value": 3.14 } ``` </issue> <code> [start of strawberry/schema/types/base_scalars.py] 1 import datetime 2 import decimal 3 import uuid 4 from operator import methodcaller 5 from typing import Callable 6 7 import dateutil.parser 8 9 from graphql import GraphQLError 10 11 from strawberry.custom_scalar import scalar 12 13 14 def wrap_parser(parser: Callable, type_: str) -> Callable: 15 def inner(value: str): 16 try: 17 return parser(value) 18 except ValueError as e: 19 raise GraphQLError(f'Value cannot represent a {type_}: "{value}". 
{e}') 20 21 return inner 22 23 24 def parse_decimal(value: str) -> decimal.Decimal: 25 try: 26 return decimal.Decimal(value) 27 except decimal.DecimalException: 28 raise GraphQLError(f'Value cannot represent a Decimal: "{value}".') 29 30 31 isoformat = methodcaller("isoformat") 32 33 34 Date = scalar( 35 datetime.date, 36 name="Date", 37 description="Date (isoformat)", 38 serialize=isoformat, 39 parse_value=wrap_parser(datetime.date.fromisoformat, "Date"), 40 ) 41 DateTime = scalar( 42 datetime.datetime, 43 name="DateTime", 44 description="Date with time (isoformat)", 45 serialize=isoformat, 46 parse_value=wrap_parser(dateutil.parser.isoparse, "DateTime"), 47 ) 48 Time = scalar( 49 datetime.time, 50 name="Time", 51 description="Time (isoformat)", 52 serialize=isoformat, 53 parse_value=wrap_parser(datetime.time.fromisoformat, "Time"), 54 ) 55 56 Decimal = scalar( 57 decimal.Decimal, 58 name="Decimal", 59 description="Decimal (fixed-point)", 60 serialize=str, 61 parse_value=parse_decimal, 62 ) 63 64 UUID = scalar( 65 uuid.UUID, 66 name="UUID", 67 serialize=str, 68 parse_value=wrap_parser(uuid.UUID, "UUID"), 69 ) 70 71 72 def _verify_void(x) -> None: 73 if x is not None: 74 raise ValueError(f"Expected 'None', got '{x}'") 75 76 77 Void = scalar( 78 type(None), 79 name="Void", 80 serialize=_verify_void, 81 parse_value=_verify_void, 82 description="Represents NULL values", 83 ) 84 [end of strawberry/schema/types/base_scalars.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/strawberry/schema/types/base_scalars.py b/strawberry/schema/types/base_scalars.py --- a/strawberry/schema/types/base_scalars.py +++ b/strawberry/schema/types/base_scalars.py @@ -21,9 +21,9 @@ return inner -def parse_decimal(value: str) -> decimal.Decimal: +def parse_decimal(value: object) -> decimal.Decimal: try: - return decimal.Decimal(value) + return decimal.Decimal(str(value)) except decimal.DecimalException: raise GraphQLError(f'Value cannot represent a Decimal: "{value}".')
{"golden_diff": "diff --git a/strawberry/schema/types/base_scalars.py b/strawberry/schema/types/base_scalars.py\n--- a/strawberry/schema/types/base_scalars.py\n+++ b/strawberry/schema/types/base_scalars.py\n@@ -21,9 +21,9 @@\n return inner\n \n \n-def parse_decimal(value: str) -> decimal.Decimal:\n+def parse_decimal(value: object) -> decimal.Decimal:\n try:\n- return decimal.Decimal(value)\n+ return decimal.Decimal(str(value))\n except decimal.DecimalException:\n raise GraphQLError(f'Value cannot represent a Decimal: \"{value}\".')\n", "issue": "Decimal scalars aren't serialized as strings when passed as numbers\nWhen using the `Decimal` scalar, if the value passed in is a floating point number, the value isn't serialized as a string, but as if it were still the floating point number which causes precision issues as can be seen in the working example below.\r\n\r\n\r\n[Working example](https://la4de.github.io/strawberry-playground/#UEsDBAoAAAAAAFaAjFQgaUU4EgAAABIAAAAQAAAAcmVxdWlyZW1lbnRzLnR4dHN0cmF3YmVycnktZ3JhcGhxbFBLAwQKAAAAAABWgIxUj3TlatIAAADSAAAACQAAAHNjaGVtYS5weWltcG9ydCBzdHJhd2JlcnJ5CmZyb20gZGVjaW1hbCBpbXBvcnQgRGVjaW1hbAoKQHN0cmF3YmVycnkudHlwZQpjbGFzcyBRdWVyeToKICAgIEBzdHJhd2JlcnJ5LmZpZWxkCiAgICBkZWYgZGVjaW1hbChzZWxmLCB2YWx1ZTogRGVjaW1hbCkgLT4gRGVjaW1hbDoKICAgICAgICByZXR1cm4gdmFsdWUKCnNjaGVtYSA9IHN0cmF3YmVycnkuU2NoZW1hKHF1ZXJ5PVF1ZXJ5KVBLAwQKAAAAAABWgIxUERrh0UMAAABDAAAACQAAAHF1ZXJ5LmdxbHsKICBkZWNpbWFsKHZhbHVlOiAzLjE0KQogIGFub3RoZXJEZWNpbWFsOiBkZWNpbWFsKHZhbHVlOiAiMy4xNCIpCn1QSwMECgAAAAAAVoCMVKi7vnMDAAAAAwAAAA4AAAB2YXJpYWJsZXMuanNvbnsKfVBLAQIUAAoAAAAAAFaAjFQgaUU4EgAAABIAAAAQAAAAAAAAAAAAAAAAAAAAAAByZXF1aXJlbWVudHMudHh0UEsBAhQACgAAAAAAVoCMVI905WrSAAAA0gAAAAkAAAAAAAAAAAAAAAAAQAAAAHNjaGVtYS5weVBLAQIUAAoAAAAAAFaAjFQRGuHRQwAAAEMAAAAJAAAAAAAAAAAAAAAAADkBAABxdWVyeS5ncWxQSwECFAAKAAAAAABWgIxUqLu+cwMAAAADAAAADgAAAAAAAAAAAAAAAACjAQAAdmFyaWFibGVzLmpzb25QSwUGAAAAAAQABADoAAAA0gEAAAAA)\r\n\r\nActual output - `\"3.140000000000000124344978758017532527446746826171875\"`\r\nExpected output - `\"3.14\"`\r\n\r\nAs text:\r\n\r\n```\r\[email protected]\r\nclass DecimalResponse:\r\n value: Decimal\r\n\r\n\r\[email protected]\r\ndef decimals(\r\n dec_value: Decimal,\r\n) -> DecimalResponse:\r\n return DecimalResponse(value=dec_value)\r\n ```\r\n```\r\nmutation decimals($dec_value: Decimal!) {\r\n decimals(\r\n decValue: $dec_value\r\n ) {\r\n ... on DecimalResponse {\r\n value\r\n }\r\n }\r\n}\r\n{\r\n \"dec_value\": 3.14\r\n}\r\n```\r\n\n", "before_files": [{"content": "import datetime\nimport decimal\nimport uuid\nfrom operator import methodcaller\nfrom typing import Callable\n\nimport dateutil.parser\n\nfrom graphql import GraphQLError\n\nfrom strawberry.custom_scalar import scalar\n\n\ndef wrap_parser(parser: Callable, type_: str) -> Callable:\n def inner(value: str):\n try:\n return parser(value)\n except ValueError as e:\n raise GraphQLError(f'Value cannot represent a {type_}: \"{value}\". 
{e}')\n\n return inner\n\n\ndef parse_decimal(value: str) -> decimal.Decimal:\n try:\n return decimal.Decimal(value)\n except decimal.DecimalException:\n raise GraphQLError(f'Value cannot represent a Decimal: \"{value}\".')\n\n\nisoformat = methodcaller(\"isoformat\")\n\n\nDate = scalar(\n datetime.date,\n name=\"Date\",\n description=\"Date (isoformat)\",\n serialize=isoformat,\n parse_value=wrap_parser(datetime.date.fromisoformat, \"Date\"),\n)\nDateTime = scalar(\n datetime.datetime,\n name=\"DateTime\",\n description=\"Date with time (isoformat)\",\n serialize=isoformat,\n parse_value=wrap_parser(dateutil.parser.isoparse, \"DateTime\"),\n)\nTime = scalar(\n datetime.time,\n name=\"Time\",\n description=\"Time (isoformat)\",\n serialize=isoformat,\n parse_value=wrap_parser(datetime.time.fromisoformat, \"Time\"),\n)\n\nDecimal = scalar(\n decimal.Decimal,\n name=\"Decimal\",\n description=\"Decimal (fixed-point)\",\n serialize=str,\n parse_value=parse_decimal,\n)\n\nUUID = scalar(\n uuid.UUID,\n name=\"UUID\",\n serialize=str,\n parse_value=wrap_parser(uuid.UUID, \"UUID\"),\n)\n\n\ndef _verify_void(x) -> None:\n if x is not None:\n raise ValueError(f\"Expected 'None', got '{x}'\")\n\n\nVoid = scalar(\n type(None),\n name=\"Void\",\n serialize=_verify_void,\n parse_value=_verify_void,\n description=\"Represents NULL values\",\n)\n", "path": "strawberry/schema/types/base_scalars.py"}]}
1,962
133
gh_patches_debug_6817
rasdani/github-patches
git_diff
SeldonIO__MLServer-866
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Add more debugging information I am running a custom Python model in MLServer, and it fails with an error. The error itself is not interesting, what's important is that MLServer gives very little helpful information to debug it. After sending a request with grpcurl here is the entire response: ```` ERROR: Code: Unknown Message: Unexpected <class 'TypeError'>: list indices must be integers or slices, not str ```` No stack trace, no line numbers or class names. The only thing that appears in the logs is > scv2-mlserver-1 | 2022-10-19 09:10:44,218 [mlserver.grpc] INFO - /inference.GRPCInferenceService/ModelInfer Would be great if MLServer propagated more debugging info to the response message and to the log. </issue> <code> [start of mlserver/grpc/utils.py] 1 import grpc 2 3 from typing import Callable, Dict, Tuple 4 from fastapi import status 5 6 from grpc import ServicerContext 7 8 from .logging import logger 9 from ..errors import MLServerError 10 11 12 STATUS_CODE_MAPPING = { 13 status.HTTP_400_BAD_REQUEST: grpc.StatusCode.INVALID_ARGUMENT, 14 status.HTTP_404_NOT_FOUND: grpc.StatusCode.NOT_FOUND, 15 status.HTTP_422_UNPROCESSABLE_ENTITY: grpc.StatusCode.FAILED_PRECONDITION, 16 status.HTTP_500_INTERNAL_SERVER_ERROR: grpc.StatusCode.INTERNAL, 17 } 18 19 20 def to_headers(context: ServicerContext) -> Dict[str, str]: 21 metadata = context.invocation_metadata() 22 if hasattr(context, "trailing_metadata"): 23 # NOTE: Older versions of `grpcio` (e.g. `grpcio==1.34.0`) don't expose 24 # access to the trailing metadata on the service side 25 metadata += context.trailing_metadata() 26 headers = {} 27 for metadatum in metadata: 28 headers[metadatum.key] = metadatum.value 29 30 return headers 31 32 33 def to_metadata(headers: Dict[str, str]) -> Tuple[Tuple[str, str], ...]: 34 return tuple((key.lower(), value) for key, value in headers.items()) 35 36 37 def _grpc_status_code(err: MLServerError): 38 return STATUS_CODE_MAPPING.get(err.status_code, grpc.StatusCode.UNKNOWN) 39 40 41 def handle_mlserver_error(f: Callable): 42 async def _inner(self, request, context): 43 try: 44 return await f(self, request, context) 45 except MLServerError as err: 46 logger.error(err) 47 await context.abort(code=_grpc_status_code(err), details=str(err)) 48 49 return _inner 50 [end of mlserver/grpc/utils.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/mlserver/grpc/utils.py b/mlserver/grpc/utils.py --- a/mlserver/grpc/utils.py +++ b/mlserver/grpc/utils.py @@ -43,7 +43,10 @@ try: return await f(self, request, context) except MLServerError as err: - logger.error(err) + logger.exception(err) await context.abort(code=_grpc_status_code(err), details=str(err)) + except Exception as err: + logger.exception(err) + await context.abort(code=grpc.StatusCode.INTERNAL, details=str(err)) return _inner
{"golden_diff": "diff --git a/mlserver/grpc/utils.py b/mlserver/grpc/utils.py\n--- a/mlserver/grpc/utils.py\n+++ b/mlserver/grpc/utils.py\n@@ -43,7 +43,10 @@\n try:\n return await f(self, request, context)\n except MLServerError as err:\n- logger.error(err)\n+ logger.exception(err)\n await context.abort(code=_grpc_status_code(err), details=str(err))\n+ except Exception as err:\n+ logger.exception(err)\n+ await context.abort(code=grpc.StatusCode.INTERNAL, details=str(err))\n \n return _inner\n", "issue": "Add more debugging information\nI am running a custom Python model in MLServer, and it fails with an error. The error itself is not interesting, what's important is that MLServer gives very little helpful information to debug it.\r\n\r\nAfter sending a request with grpcurl here is the entire response:\r\n````\r\nERROR:\r\n Code: Unknown\r\n Message: Unexpected <class 'TypeError'>: list indices must be integers or slices, not str\r\n````\r\nNo stack trace, no line numbers or class names. The only thing that appears in the logs is\r\n> scv2-mlserver-1 | 2022-10-19 09:10:44,218 [mlserver.grpc] INFO - /inference.GRPCInferenceService/ModelInfer\r\n\r\nWould be great if MLServer propagated more debugging info to the response message and to the log.\n", "before_files": [{"content": "import grpc\n\nfrom typing import Callable, Dict, Tuple\nfrom fastapi import status\n\nfrom grpc import ServicerContext\n\nfrom .logging import logger\nfrom ..errors import MLServerError\n\n\nSTATUS_CODE_MAPPING = {\n status.HTTP_400_BAD_REQUEST: grpc.StatusCode.INVALID_ARGUMENT,\n status.HTTP_404_NOT_FOUND: grpc.StatusCode.NOT_FOUND,\n status.HTTP_422_UNPROCESSABLE_ENTITY: grpc.StatusCode.FAILED_PRECONDITION,\n status.HTTP_500_INTERNAL_SERVER_ERROR: grpc.StatusCode.INTERNAL,\n}\n\n\ndef to_headers(context: ServicerContext) -> Dict[str, str]:\n metadata = context.invocation_metadata()\n if hasattr(context, \"trailing_metadata\"):\n # NOTE: Older versions of `grpcio` (e.g. `grpcio==1.34.0`) don't expose\n # access to the trailing metadata on the service side\n metadata += context.trailing_metadata()\n headers = {}\n for metadatum in metadata:\n headers[metadatum.key] = metadatum.value\n\n return headers\n\n\ndef to_metadata(headers: Dict[str, str]) -> Tuple[Tuple[str, str], ...]:\n return tuple((key.lower(), value) for key, value in headers.items())\n\n\ndef _grpc_status_code(err: MLServerError):\n return STATUS_CODE_MAPPING.get(err.status_code, grpc.StatusCode.UNKNOWN)\n\n\ndef handle_mlserver_error(f: Callable):\n async def _inner(self, request, context):\n try:\n return await f(self, request, context)\n except MLServerError as err:\n logger.error(err)\n await context.abort(code=_grpc_status_code(err), details=str(err))\n\n return _inner\n", "path": "mlserver/grpc/utils.py"}]}
1,176
126
gh_patches_debug_39788
rasdani/github-patches
git_diff
cupy__cupy-2145
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Use `ReductionKernel` in k-means example The k-mean example uses `ElementwiseKernel` as an example of a custom kernel, but its algorithm is essentially reduction rather than mapping. It would be better to use `ReductionKernel` in the k-means example and to provide another example that uses `ElementwiseKernel`. </issue> <code> [start of examples/kmeans/kmeans.py] 1 import argparse 2 import contextlib 3 import time 4 5 import matplotlib.pyplot as plt 6 import numpy as np 7 import six 8 9 import cupy 10 11 12 @contextlib.contextmanager 13 def timer(message): 14 cupy.cuda.Stream.null.synchronize() 15 start = time.time() 16 yield 17 cupy.cuda.Stream.null.synchronize() 18 end = time.time() 19 print('%s: %f sec' % (message, end - start)) 20 21 22 _fit_calc_distances = cupy.ElementwiseKernel( 23 'S data, raw S centers, int32 n_clusters, int32 dim', 'raw S dist', 24 ''' 25 for (int j = 0; j < n_clusters; j++){ 26 int cent_ind[] = {j, i % dim}; 27 int dist_ind[] = {i / dim, j}; 28 double diff = centers[cent_ind] - data; 29 atomicAdd(&dist[dist_ind], diff * diff); 30 } 31 ''', 32 'calc_distances' 33 ) 34 _fit_calc_center = cupy.ElementwiseKernel( 35 'S data, T label, int32 dim', 'raw S centers, raw S group', 36 ''' 37 int cent_ind[] = {label, i % dim}; 38 atomicAdd(&centers[cent_ind], data); 39 atomicAdd(&group[label], 1); 40 ''', 41 'calc_center' 42 ) 43 44 45 def fit(X, n_clusters, max_iter, use_custom_kernel): 46 assert X.ndim == 2 47 xp = cupy.get_array_module(X) 48 pred = xp.zeros(len(X), dtype=np.int32) 49 initial_indexes = np.random.choice(len(X), n_clusters, 50 replace=False).astype(np.int32) 51 centers = X[initial_indexes] 52 data_num = X.shape[0] 53 data_dim = X.shape[1] 54 55 for _ in six.moves.range(max_iter): 56 # calculate distances and label 57 if not use_custom_kernel or xp == np: 58 distances = xp.linalg.norm(X[:, None, :] - centers[None, :, :], 59 axis=2) 60 else: 61 distances = xp.zeros((data_num, n_clusters), dtype=np.float32) 62 _fit_calc_distances(X, centers, n_clusters, data_dim, distances) 63 64 new_pred = xp.argmin(distances, axis=1).astype(np.int32) 65 if xp.all(new_pred == pred): 66 break 67 pred = new_pred 68 69 # calculate centers 70 if not use_custom_kernel or xp == np: 71 centers = xp.stack([X[pred == i].mean(axis=0) 72 for i in six.moves.range(n_clusters)]) 73 else: 74 centers = xp.zeros((n_clusters, data_dim), 75 dtype=np.float32) 76 group = xp.zeros(n_clusters, dtype=np.float32) 77 label = pred[:, None] 78 _fit_calc_center(X, label, data_dim, centers, group) 79 group /= data_dim 80 centers /= group[:, None] 81 82 return centers, pred 83 84 85 def draw(X, n_clusters, centers, pred, output): 86 xp = cupy.get_array_module(X) 87 for i in six.moves.range(n_clusters): 88 labels = X[pred == i] 89 if xp == cupy: 90 labels = labels.get() 91 plt.scatter(labels[:, 0], labels[:, 1], c=np.random.rand(3)) 92 if xp == cupy: 93 centers = centers.get() 94 plt.scatter(centers[:, 0], centers[:, 1], s=120, marker='s', 95 facecolors='y', edgecolors='k') 96 plt.savefig(output) 97 98 99 def run(gpuid, n_clusters, num, max_iter, use_custom_kernel, output): 100 samples = np.random.randn(num, 2).astype(np.float32) 101 X_train = np.r_[samples + 1, samples - 1] 102 repeat = 1 103 104 with timer(' CPU '): 105 for i in range(repeat): 106 centers, pred = fit(X_train, n_clusters, max_iter, 107 use_custom_kernel) 108 109 with cupy.cuda.Device(gpuid): 110 X_train = cupy.asarray(X_train) 111 
with timer(' GPU '): 112 for i in range(repeat): 113 centers, pred = fit(X_train, n_clusters, max_iter, 114 use_custom_kernel) 115 if output is not None: 116 index = np.random.choice(10000000, 300, replace=False) 117 draw(X_train[index], n_clusters, centers, pred[index], output) 118 119 120 if __name__ == '__main__': 121 parser = argparse.ArgumentParser() 122 parser.add_argument('--gpu-id', '-g', default=0, type=int, 123 help='ID of GPU.') 124 parser.add_argument('--n-clusters', '-n', default=2, type=int, 125 help='number of clusters') 126 parser.add_argument('--num', default=5000000, type=int, 127 help='number of samples') 128 parser.add_argument('--max-iter', '-m', default=10, type=int, 129 help='number of iterations') 130 parser.add_argument('--use-custom-kernel', action='store_true', 131 default=False, help='use Elementwise kernel') 132 parser.add_argument('--output-image', '-o', default=None, type=str, 133 help='output image file name') 134 args = parser.parse_args() 135 run(args.gpu_id, args.n_clusters, args.num, args.max_iter, 136 args.use_custom_kernel, args.output_image) 137 [end of examples/kmeans/kmeans.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/examples/kmeans/kmeans.py b/examples/kmeans/kmeans.py --- a/examples/kmeans/kmeans.py +++ b/examples/kmeans/kmeans.py @@ -19,26 +19,22 @@ print('%s: %f sec' % (message, end - start)) -_fit_calc_distances = cupy.ElementwiseKernel( - 'S data, raw S centers, int32 n_clusters, int32 dim', 'raw S dist', - ''' - for (int j = 0; j < n_clusters; j++){ - int cent_ind[] = {j, i % dim}; - int dist_ind[] = {i / dim, j}; - double diff = centers[cent_ind] - data; - atomicAdd(&dist[dist_ind], diff * diff); - } - ''', - 'calc_distances' +var_kernel = cupy.ElementwiseKernel( + 'T x0, T x1, T c0, T c1', 'T out', + 'out = (x0 - c0) * (x0 - c0) + (x1 - c1) * (x1 - c1)', + 'var_kernel' ) -_fit_calc_center = cupy.ElementwiseKernel( - 'S data, T label, int32 dim', 'raw S centers, raw S group', - ''' - int cent_ind[] = {label, i % dim}; - atomicAdd(&centers[cent_ind], data); - atomicAdd(&group[label], 1); - ''', - 'calc_center' +sum_kernel = cupy.ReductionKernel( + 'T x, S mask', 'T out', + 'mask ? x : 0', + 'a + b', 'out = a', '0', + 'sum_kernel' +) +count_kernel = cupy.ReductionKernel( + 'T mask', 'float32 out', + 'mask ? 1.0 : 0.0', + 'a + b', 'out = a', '0.0', + 'count_kernel' ) @@ -49,8 +45,6 @@ initial_indexes = np.random.choice(len(X), n_clusters, replace=False).astype(np.int32) centers = X[initial_indexes] - data_num = X.shape[0] - data_dim = X.shape[1] for _ in six.moves.range(max_iter): # calculate distances and label @@ -58,8 +52,8 @@ distances = xp.linalg.norm(X[:, None, :] - centers[None, :, :], axis=2) else: - distances = xp.zeros((data_num, n_clusters), dtype=np.float32) - _fit_calc_distances(X, centers, n_clusters, data_dim, distances) + distances = var_kernel(X[:, None, 0], X[:, None, 1], + centers[None, :, 1], centers[None, :, 0]) new_pred = xp.argmin(distances, axis=1).astype(np.int32) if xp.all(new_pred == pred): @@ -67,17 +61,16 @@ pred = new_pred # calculate centers + i = xp.arange(n_clusters) + mask = pred == i[:, None] if not use_custom_kernel or xp == np: - centers = xp.stack([X[pred == i].mean(axis=0) - for i in six.moves.range(n_clusters)]) + sums = xp.where(mask[:, :, None], X, 0).sum(axis=1) + counts = xp.count_nonzero(mask, axis=1) + centers = sums / counts else: - centers = xp.zeros((n_clusters, data_dim), - dtype=np.float32) - group = xp.zeros(n_clusters, dtype=np.float32) - label = pred[:, None] - _fit_calc_center(X, label, data_dim, centers, group) - group /= data_dim - centers /= group[:, None] + sums = sum_kernel(X, mask[:, :, None], axis=1) + counts = count_kernel(mask, axis=1) + centers = sums / counts return centers, pred
{"golden_diff": "diff --git a/examples/kmeans/kmeans.py b/examples/kmeans/kmeans.py\n--- a/examples/kmeans/kmeans.py\n+++ b/examples/kmeans/kmeans.py\n@@ -19,26 +19,22 @@\n print('%s: %f sec' % (message, end - start))\n \n \n-_fit_calc_distances = cupy.ElementwiseKernel(\n- 'S data, raw S centers, int32 n_clusters, int32 dim', 'raw S dist',\n- '''\n- for (int j = 0; j < n_clusters; j++){\n- int cent_ind[] = {j, i % dim};\n- int dist_ind[] = {i / dim, j};\n- double diff = centers[cent_ind] - data;\n- atomicAdd(&dist[dist_ind], diff * diff);\n- }\n- ''',\n- 'calc_distances'\n+var_kernel = cupy.ElementwiseKernel(\n+ 'T x0, T x1, T c0, T c1', 'T out',\n+ 'out = (x0 - c0) * (x0 - c0) + (x1 - c1) * (x1 - c1)',\n+ 'var_kernel'\n )\n-_fit_calc_center = cupy.ElementwiseKernel(\n- 'S data, T label, int32 dim', 'raw S centers, raw S group',\n- '''\n- int cent_ind[] = {label, i % dim};\n- atomicAdd(&centers[cent_ind], data);\n- atomicAdd(&group[label], 1);\n- ''',\n- 'calc_center'\n+sum_kernel = cupy.ReductionKernel(\n+ 'T x, S mask', 'T out',\n+ 'mask ? x : 0',\n+ 'a + b', 'out = a', '0',\n+ 'sum_kernel'\n+)\n+count_kernel = cupy.ReductionKernel(\n+ 'T mask', 'float32 out',\n+ 'mask ? 1.0 : 0.0',\n+ 'a + b', 'out = a', '0.0',\n+ 'count_kernel'\n )\n \n \n@@ -49,8 +45,6 @@\n initial_indexes = np.random.choice(len(X), n_clusters,\n replace=False).astype(np.int32)\n centers = X[initial_indexes]\n- data_num = X.shape[0]\n- data_dim = X.shape[1]\n \n for _ in six.moves.range(max_iter):\n # calculate distances and label\n@@ -58,8 +52,8 @@\n distances = xp.linalg.norm(X[:, None, :] - centers[None, :, :],\n axis=2)\n else:\n- distances = xp.zeros((data_num, n_clusters), dtype=np.float32)\n- _fit_calc_distances(X, centers, n_clusters, data_dim, distances)\n+ distances = var_kernel(X[:, None, 0], X[:, None, 1],\n+ centers[None, :, 1], centers[None, :, 0])\n \n new_pred = xp.argmin(distances, axis=1).astype(np.int32)\n if xp.all(new_pred == pred):\n@@ -67,17 +61,16 @@\n pred = new_pred\n \n # calculate centers\n+ i = xp.arange(n_clusters)\n+ mask = pred == i[:, None]\n if not use_custom_kernel or xp == np:\n- centers = xp.stack([X[pred == i].mean(axis=0)\n- for i in six.moves.range(n_clusters)])\n+ sums = xp.where(mask[:, :, None], X, 0).sum(axis=1)\n+ counts = xp.count_nonzero(mask, axis=1)\n+ centers = sums / counts\n else:\n- centers = xp.zeros((n_clusters, data_dim),\n- dtype=np.float32)\n- group = xp.zeros(n_clusters, dtype=np.float32)\n- label = pred[:, None]\n- _fit_calc_center(X, label, data_dim, centers, group)\n- group /= data_dim\n- centers /= group[:, None]\n+ sums = sum_kernel(X, mask[:, :, None], axis=1)\n+ counts = count_kernel(mask, axis=1)\n+ centers = sums / counts\n \n return centers, pred\n", "issue": "Use `ReductionKernel` in k-means example\nThe k-mean example uses `ElementwiseKernel` as an example of a custom kernel, but its algorithm is essentially reduction rather than mapping. 
It would be better to use `ReductionKernel` in the k-means example and to provide another example that uses `ElementwiseKernel`.\n", "before_files": [{"content": "import argparse\nimport contextlib\nimport time\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport six\n\nimport cupy\n\n\[email protected]\ndef timer(message):\n cupy.cuda.Stream.null.synchronize()\n start = time.time()\n yield\n cupy.cuda.Stream.null.synchronize()\n end = time.time()\n print('%s: %f sec' % (message, end - start))\n\n\n_fit_calc_distances = cupy.ElementwiseKernel(\n 'S data, raw S centers, int32 n_clusters, int32 dim', 'raw S dist',\n '''\n for (int j = 0; j < n_clusters; j++){\n int cent_ind[] = {j, i % dim};\n int dist_ind[] = {i / dim, j};\n double diff = centers[cent_ind] - data;\n atomicAdd(&dist[dist_ind], diff * diff);\n }\n ''',\n 'calc_distances'\n)\n_fit_calc_center = cupy.ElementwiseKernel(\n 'S data, T label, int32 dim', 'raw S centers, raw S group',\n '''\n int cent_ind[] = {label, i % dim};\n atomicAdd(&centers[cent_ind], data);\n atomicAdd(&group[label], 1);\n ''',\n 'calc_center'\n)\n\n\ndef fit(X, n_clusters, max_iter, use_custom_kernel):\n assert X.ndim == 2\n xp = cupy.get_array_module(X)\n pred = xp.zeros(len(X), dtype=np.int32)\n initial_indexes = np.random.choice(len(X), n_clusters,\n replace=False).astype(np.int32)\n centers = X[initial_indexes]\n data_num = X.shape[0]\n data_dim = X.shape[1]\n\n for _ in six.moves.range(max_iter):\n # calculate distances and label\n if not use_custom_kernel or xp == np:\n distances = xp.linalg.norm(X[:, None, :] - centers[None, :, :],\n axis=2)\n else:\n distances = xp.zeros((data_num, n_clusters), dtype=np.float32)\n _fit_calc_distances(X, centers, n_clusters, data_dim, distances)\n\n new_pred = xp.argmin(distances, axis=1).astype(np.int32)\n if xp.all(new_pred == pred):\n break\n pred = new_pred\n\n # calculate centers\n if not use_custom_kernel or xp == np:\n centers = xp.stack([X[pred == i].mean(axis=0)\n for i in six.moves.range(n_clusters)])\n else:\n centers = xp.zeros((n_clusters, data_dim),\n dtype=np.float32)\n group = xp.zeros(n_clusters, dtype=np.float32)\n label = pred[:, None]\n _fit_calc_center(X, label, data_dim, centers, group)\n group /= data_dim\n centers /= group[:, None]\n\n return centers, pred\n\n\ndef draw(X, n_clusters, centers, pred, output):\n xp = cupy.get_array_module(X)\n for i in six.moves.range(n_clusters):\n labels = X[pred == i]\n if xp == cupy:\n labels = labels.get()\n plt.scatter(labels[:, 0], labels[:, 1], c=np.random.rand(3))\n if xp == cupy:\n centers = centers.get()\n plt.scatter(centers[:, 0], centers[:, 1], s=120, marker='s',\n facecolors='y', edgecolors='k')\n plt.savefig(output)\n\n\ndef run(gpuid, n_clusters, num, max_iter, use_custom_kernel, output):\n samples = np.random.randn(num, 2).astype(np.float32)\n X_train = np.r_[samples + 1, samples - 1]\n repeat = 1\n\n with timer(' CPU '):\n for i in range(repeat):\n centers, pred = fit(X_train, n_clusters, max_iter,\n use_custom_kernel)\n\n with cupy.cuda.Device(gpuid):\n X_train = cupy.asarray(X_train)\n with timer(' GPU '):\n for i in range(repeat):\n centers, pred = fit(X_train, n_clusters, max_iter,\n use_custom_kernel)\n if output is not None:\n index = np.random.choice(10000000, 300, replace=False)\n draw(X_train[index], n_clusters, centers, pred[index], output)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--gpu-id', '-g', default=0, type=int,\n help='ID of GPU.')\n 
parser.add_argument('--n-clusters', '-n', default=2, type=int,\n help='number of clusters')\n parser.add_argument('--num', default=5000000, type=int,\n help='number of samples')\n parser.add_argument('--max-iter', '-m', default=10, type=int,\n help='number of iterations')\n parser.add_argument('--use-custom-kernel', action='store_true',\n default=False, help='use Elementwise kernel')\n parser.add_argument('--output-image', '-o', default=None, type=str,\n help='output image file name')\n args = parser.parse_args()\n run(args.gpu_id, args.n_clusters, args.num, args.max_iter,\n args.use_custom_kernel, args.output_image)\n", "path": "examples/kmeans/kmeans.py"}]}
2,083
946
gh_patches_debug_38523
rasdani/github-patches
git_diff
ietf-tools__datatracker-5167
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> /sync/discrepancies is missing `.distinct()` At the moment in the second table: ![image](https://user-images.githubusercontent.com/10996692/218182843-40d6daeb-ea79-4177-ac46-52d0ede1f032.png) Likely this will not show the same when viewed shortly after now, as I expect the state to be reconciled quickly. </issue> <code> [start of ietf/sync/discrepancies.py] 1 from django.db import models 2 from ietf.doc.models import Document, State 3 4 def find_discrepancies(): 5 res = [] 6 7 title = "Internet-Drafts that have been sent to the RFC Editor but do not have an RFC Editor state" 8 9 docs = Document.objects.filter(states__in=list(State.objects.filter(used=True, type="draft-iesg", slug__in=("ann", "rfcqueue")))).exclude(states__in=list(State.objects.filter(used=True, type="draft-rfceditor"))) 10 11 res.append((title, docs)) 12 13 title = "Internet-Drafts that have the IANA Action state \"In Progress\" but do not have a \"IANA\" RFC-Editor state/tag" 14 15 docs = Document.objects.filter(states__in=list(State.objects.filter(used=True, type="draft-iana-action", slug__in=("inprog",)))).exclude(tags="iana").exclude(states__in=list(State.objects.filter(used=True, type="draft-rfceditor", slug="iana"))) 16 17 res.append((title, docs)) 18 19 title = "Internet-Drafts that have the IANA Action state \"Waiting on RFC Editor\" or \"RFC-Ed-Ack\" but are in the RFC Editor state \"IANA\"/tagged with \"IANA\"" 20 21 docs = Document.objects.filter(states__in=list(State.objects.filter(used=True, type="draft-iana-action", slug__in=("waitrfc", "rfcedack")))).filter(models.Q(tags="iana") | models.Q(states__in=list(State.objects.filter(used=True, type="draft-rfceditor", slug="iana")))) 22 23 res.append((title, docs)) 24 25 title = "Internet-Drafts that have a state other than \"RFC Ed Queue\", \"RFC Published\" or \"Sent to the RFC Editor\" and have an RFC Editor or IANA Action state" 26 27 docs = Document.objects.exclude(states__in=list(State.objects.filter(used=True, type="draft-iesg", slug__in=("rfcqueue", "pub"))) + list(State.objects.filter(used=True, type__in=("draft-stream-iab", "draft-stream-ise", "draft-stream-irtf"), slug="rfc-edit"))).filter(states__in=list(State.objects.filter(used=True, type__in=("draft-iana-action", "draft-rfceditor")))) 28 29 res.append((title, docs)) 30 31 for _, docs in res: 32 for d in docs: 33 d.iesg_state = d.get_state("draft-iesg") 34 d.rfc_state = d.get_state("draft-rfceditor") 35 d.iana_action_state = d.get_state("draft-iana-action") 36 37 return res 38 39 [end of ietf/sync/discrepancies.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/ietf/sync/discrepancies.py b/ietf/sync/discrepancies.py --- a/ietf/sync/discrepancies.py +++ b/ietf/sync/discrepancies.py @@ -6,25 +6,25 @@ title = "Internet-Drafts that have been sent to the RFC Editor but do not have an RFC Editor state" - docs = Document.objects.filter(states__in=list(State.objects.filter(used=True, type="draft-iesg", slug__in=("ann", "rfcqueue")))).exclude(states__in=list(State.objects.filter(used=True, type="draft-rfceditor"))) + docs = Document.objects.filter(states__in=list(State.objects.filter(used=True, type="draft-iesg", slug__in=("ann", "rfcqueue")))).exclude(states__in=list(State.objects.filter(used=True, type="draft-rfceditor"))).distinct() res.append((title, docs)) title = "Internet-Drafts that have the IANA Action state \"In Progress\" but do not have a \"IANA\" RFC-Editor state/tag" - docs = Document.objects.filter(states__in=list(State.objects.filter(used=True, type="draft-iana-action", slug__in=("inprog",)))).exclude(tags="iana").exclude(states__in=list(State.objects.filter(used=True, type="draft-rfceditor", slug="iana"))) + docs = Document.objects.filter(states__in=list(State.objects.filter(used=True, type="draft-iana-action", slug__in=("inprog",)))).exclude(tags="iana").exclude(states__in=list(State.objects.filter(used=True, type="draft-rfceditor", slug="iana"))).distinct() res.append((title, docs)) title = "Internet-Drafts that have the IANA Action state \"Waiting on RFC Editor\" or \"RFC-Ed-Ack\" but are in the RFC Editor state \"IANA\"/tagged with \"IANA\"" - docs = Document.objects.filter(states__in=list(State.objects.filter(used=True, type="draft-iana-action", slug__in=("waitrfc", "rfcedack")))).filter(models.Q(tags="iana") | models.Q(states__in=list(State.objects.filter(used=True, type="draft-rfceditor", slug="iana")))) + docs = Document.objects.filter(states__in=list(State.objects.filter(used=True, type="draft-iana-action", slug__in=("waitrfc", "rfcedack")))).filter(models.Q(tags="iana") | models.Q(states__in=list(State.objects.filter(used=True, type="draft-rfceditor", slug="iana")))).distinct() res.append((title, docs)) title = "Internet-Drafts that have a state other than \"RFC Ed Queue\", \"RFC Published\" or \"Sent to the RFC Editor\" and have an RFC Editor or IANA Action state" - docs = Document.objects.exclude(states__in=list(State.objects.filter(used=True, type="draft-iesg", slug__in=("rfcqueue", "pub"))) + list(State.objects.filter(used=True, type__in=("draft-stream-iab", "draft-stream-ise", "draft-stream-irtf"), slug="rfc-edit"))).filter(states__in=list(State.objects.filter(used=True, type__in=("draft-iana-action", "draft-rfceditor")))) + docs = Document.objects.exclude(states__in=list(State.objects.filter(used=True, type="draft-iesg", slug__in=("rfcqueue", "pub"))) + list(State.objects.filter(used=True, type__in=("draft-stream-iab", "draft-stream-ise", "draft-stream-irtf"), slug="rfc-edit"))).filter(states__in=list(State.objects.filter(used=True, type__in=("draft-iana-action", "draft-rfceditor")))).distinct() res.append((title, docs))
{"golden_diff": "diff --git a/ietf/sync/discrepancies.py b/ietf/sync/discrepancies.py\n--- a/ietf/sync/discrepancies.py\n+++ b/ietf/sync/discrepancies.py\n@@ -6,25 +6,25 @@\n \n title = \"Internet-Drafts that have been sent to the RFC Editor but do not have an RFC Editor state\"\n \n- docs = Document.objects.filter(states__in=list(State.objects.filter(used=True, type=\"draft-iesg\", slug__in=(\"ann\", \"rfcqueue\")))).exclude(states__in=list(State.objects.filter(used=True, type=\"draft-rfceditor\")))\n+ docs = Document.objects.filter(states__in=list(State.objects.filter(used=True, type=\"draft-iesg\", slug__in=(\"ann\", \"rfcqueue\")))).exclude(states__in=list(State.objects.filter(used=True, type=\"draft-rfceditor\"))).distinct()\n \n res.append((title, docs))\n \n title = \"Internet-Drafts that have the IANA Action state \\\"In Progress\\\" but do not have a \\\"IANA\\\" RFC-Editor state/tag\"\n \n- docs = Document.objects.filter(states__in=list(State.objects.filter(used=True, type=\"draft-iana-action\", slug__in=(\"inprog\",)))).exclude(tags=\"iana\").exclude(states__in=list(State.objects.filter(used=True, type=\"draft-rfceditor\", slug=\"iana\")))\n+ docs = Document.objects.filter(states__in=list(State.objects.filter(used=True, type=\"draft-iana-action\", slug__in=(\"inprog\",)))).exclude(tags=\"iana\").exclude(states__in=list(State.objects.filter(used=True, type=\"draft-rfceditor\", slug=\"iana\"))).distinct()\n \n res.append((title, docs))\n \n title = \"Internet-Drafts that have the IANA Action state \\\"Waiting on RFC Editor\\\" or \\\"RFC-Ed-Ack\\\" but are in the RFC Editor state \\\"IANA\\\"/tagged with \\\"IANA\\\"\"\n \n- docs = Document.objects.filter(states__in=list(State.objects.filter(used=True, type=\"draft-iana-action\", slug__in=(\"waitrfc\", \"rfcedack\")))).filter(models.Q(tags=\"iana\") | models.Q(states__in=list(State.objects.filter(used=True, type=\"draft-rfceditor\", slug=\"iana\"))))\n+ docs = Document.objects.filter(states__in=list(State.objects.filter(used=True, type=\"draft-iana-action\", slug__in=(\"waitrfc\", \"rfcedack\")))).filter(models.Q(tags=\"iana\") | models.Q(states__in=list(State.objects.filter(used=True, type=\"draft-rfceditor\", slug=\"iana\")))).distinct()\n \n res.append((title, docs))\n \n title = \"Internet-Drafts that have a state other than \\\"RFC Ed Queue\\\", \\\"RFC Published\\\" or \\\"Sent to the RFC Editor\\\" and have an RFC Editor or IANA Action state\"\n \n- docs = Document.objects.exclude(states__in=list(State.objects.filter(used=True, type=\"draft-iesg\", slug__in=(\"rfcqueue\", \"pub\"))) + list(State.objects.filter(used=True, type__in=(\"draft-stream-iab\", \"draft-stream-ise\", \"draft-stream-irtf\"), slug=\"rfc-edit\"))).filter(states__in=list(State.objects.filter(used=True, type__in=(\"draft-iana-action\", \"draft-rfceditor\"))))\n+ docs = Document.objects.exclude(states__in=list(State.objects.filter(used=True, type=\"draft-iesg\", slug__in=(\"rfcqueue\", \"pub\"))) + list(State.objects.filter(used=True, type__in=(\"draft-stream-iab\", \"draft-stream-ise\", \"draft-stream-irtf\"), slug=\"rfc-edit\"))).filter(states__in=list(State.objects.filter(used=True, type__in=(\"draft-iana-action\", \"draft-rfceditor\")))).distinct()\n \n res.append((title, docs))\n", "issue": "/sync/discrepancies is missing `.distinct()`\nAt the moment in the second table:\r\n![image](https://user-images.githubusercontent.com/10996692/218182843-40d6daeb-ea79-4177-ac46-52d0ede1f032.png)\r\nLikely this will not show the same when viewed shortly after now, 
as I expect the state to be reconciled quickly.\n", "before_files": [{"content": "from django.db import models\nfrom ietf.doc.models import Document, State\n\ndef find_discrepancies():\n res = []\n\n title = \"Internet-Drafts that have been sent to the RFC Editor but do not have an RFC Editor state\"\n\n docs = Document.objects.filter(states__in=list(State.objects.filter(used=True, type=\"draft-iesg\", slug__in=(\"ann\", \"rfcqueue\")))).exclude(states__in=list(State.objects.filter(used=True, type=\"draft-rfceditor\")))\n\n res.append((title, docs))\n\n title = \"Internet-Drafts that have the IANA Action state \\\"In Progress\\\" but do not have a \\\"IANA\\\" RFC-Editor state/tag\"\n\n docs = Document.objects.filter(states__in=list(State.objects.filter(used=True, type=\"draft-iana-action\", slug__in=(\"inprog\",)))).exclude(tags=\"iana\").exclude(states__in=list(State.objects.filter(used=True, type=\"draft-rfceditor\", slug=\"iana\")))\n\n res.append((title, docs))\n\n title = \"Internet-Drafts that have the IANA Action state \\\"Waiting on RFC Editor\\\" or \\\"RFC-Ed-Ack\\\" but are in the RFC Editor state \\\"IANA\\\"/tagged with \\\"IANA\\\"\"\n\n docs = Document.objects.filter(states__in=list(State.objects.filter(used=True, type=\"draft-iana-action\", slug__in=(\"waitrfc\", \"rfcedack\")))).filter(models.Q(tags=\"iana\") | models.Q(states__in=list(State.objects.filter(used=True, type=\"draft-rfceditor\", slug=\"iana\"))))\n\n res.append((title, docs))\n\n title = \"Internet-Drafts that have a state other than \\\"RFC Ed Queue\\\", \\\"RFC Published\\\" or \\\"Sent to the RFC Editor\\\" and have an RFC Editor or IANA Action state\"\n\n docs = Document.objects.exclude(states__in=list(State.objects.filter(used=True, type=\"draft-iesg\", slug__in=(\"rfcqueue\", \"pub\"))) + list(State.objects.filter(used=True, type__in=(\"draft-stream-iab\", \"draft-stream-ise\", \"draft-stream-irtf\"), slug=\"rfc-edit\"))).filter(states__in=list(State.objects.filter(used=True, type__in=(\"draft-iana-action\", \"draft-rfceditor\"))))\n\n res.append((title, docs))\n\n for _, docs in res:\n for d in docs:\n d.iesg_state = d.get_state(\"draft-iesg\")\n d.rfc_state = d.get_state(\"draft-rfceditor\")\n d.iana_action_state = d.get_state(\"draft-iana-action\")\n\n return res\n\n", "path": "ietf/sync/discrepancies.py"}]}
1300
854
gh_patches_debug_9564
rasdani/github-patches
git_diff
watchdogpolska__small_eod-494
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> udostępnienie id w API dla tags i documentType Id dla tags i documetType, jest niezbędne dla zbudowania listy tych elementów w reakcie. </issue> <code> [start of backend-project/small_eod/letters/serializers.py] 1 from uuid import uuid4 2 from django.conf import settings 3 from rest_framework import serializers 4 from .models import Letter, DocumentType 5 from ..generic.serializers import UserLogModelSerializer 6 from ..cases.models import Case 7 from ..institutions.models import Institution 8 from ..channels.models import Channel 9 from ..files.apps import minio_app 10 from ..files.serializers import FileSerializer 11 12 13 class DocumentTypeSerializer(serializers.ModelSerializer): 14 class Meta: 15 model = DocumentType 16 fields = ["name"] 17 18 19 class LetterSerializer(UserLogModelSerializer): 20 document_type = serializers.PrimaryKeyRelatedField( 21 many=False, default=None, queryset=DocumentType.objects.all() 22 ) 23 case = serializers.PrimaryKeyRelatedField( 24 many=False, default=None, queryset=Case.objects.all() 25 ) 26 institution = serializers.PrimaryKeyRelatedField( 27 many=False, default=None, queryset=Institution.objects.all() 28 ) 29 channel = serializers.PrimaryKeyRelatedField( 30 many=False, default=None, queryset=Channel.objects.all() 31 ) 32 attachments = FileSerializer(many=True, read_only=True) 33 34 class Meta: 35 model = Letter 36 fields = [ 37 "id", 38 "direction", 39 "channel", 40 "final", 41 "date", 42 "reference_number", 43 "institution", 44 "case", 45 "attachments", 46 "ordering", 47 "comment", 48 "excerpt", 49 "document_type", 50 "created_on", 51 "created_by", 52 "modified_on", 53 "modified_by", 54 ] 55 56 def create(self, validated_data): 57 channel = validated_data.pop("channel") 58 document_type = validated_data.pop("document_type") 59 institution = validated_data.pop("institution") 60 case = validated_data.pop("case") 61 62 letter = super().create(validated_data) 63 letter.channel = channel 64 letter.document_type = document_type 65 letter.institution = institution 66 letter.case = case 67 letter.save() 68 return letter 69 70 def update(self, instance, validated_data): 71 """ 72 nested - variable storing representations of the nested objects 73 of LetterSerializer (Channel, Address and DocumentType). 74 Iterating over those 3 and updating fields of the related objects, 75 using key-value pairs from PATCH request. 
76 """ 77 nested = [] 78 for nested_object in nested: 79 for attr, value in nested_object["data"].items(): 80 setattr(nested_object["instance"], attr, value) 81 nested_object["instance"].save() 82 return super().update(instance, validated_data) 83 84 85 class SignRequestSerializer(serializers.Serializer): 86 name = serializers.CharField(max_length=200) 87 method = serializers.CharField(read_only=True) 88 url = serializers.CharField(read_only=True) 89 formData = serializers.DictField(read_only=True, child=serializers.CharField()) 90 path = serializers.CharField(read_only=True) 91 92 def create(self, validated_data): 93 path = f'{uuid4()}/{validated_data["name"]}' 94 url, form_data = minio_app.presigned_post_form_data(settings.MINIO_BUCKET, path) 95 return { 96 "name": validated_data["name"], 97 "method": "POST", 98 "url": url, 99 "formData": form_data, 100 "path": path, 101 } 102 [end of backend-project/small_eod/letters/serializers.py] [start of backend-project/small_eod/tags/serializers.py] 1 from rest_framework import serializers 2 from .models import Tag 3 4 5 class TagSerializer(serializers.ModelSerializer): 6 class Meta: 7 model = Tag 8 fields = [ 9 "name", 10 ] 11 [end of backend-project/small_eod/tags/serializers.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/backend-project/small_eod/letters/serializers.py b/backend-project/small_eod/letters/serializers.py --- a/backend-project/small_eod/letters/serializers.py +++ b/backend-project/small_eod/letters/serializers.py @@ -13,7 +13,7 @@ class DocumentTypeSerializer(serializers.ModelSerializer): class Meta: model = DocumentType - fields = ["name"] + fields = ["id", "name"] class LetterSerializer(UserLogModelSerializer): diff --git a/backend-project/small_eod/tags/serializers.py b/backend-project/small_eod/tags/serializers.py --- a/backend-project/small_eod/tags/serializers.py +++ b/backend-project/small_eod/tags/serializers.py @@ -6,5 +6,6 @@ class Meta: model = Tag fields = [ + "id", "name", ]
{"golden_diff": "diff --git a/backend-project/small_eod/letters/serializers.py b/backend-project/small_eod/letters/serializers.py\n--- a/backend-project/small_eod/letters/serializers.py\n+++ b/backend-project/small_eod/letters/serializers.py\n@@ -13,7 +13,7 @@\n class DocumentTypeSerializer(serializers.ModelSerializer):\n class Meta:\n model = DocumentType\n- fields = [\"name\"]\n+ fields = [\"id\", \"name\"]\n \n \n class LetterSerializer(UserLogModelSerializer):\ndiff --git a/backend-project/small_eod/tags/serializers.py b/backend-project/small_eod/tags/serializers.py\n--- a/backend-project/small_eod/tags/serializers.py\n+++ b/backend-project/small_eod/tags/serializers.py\n@@ -6,5 +6,6 @@\n class Meta:\n model = Tag\n fields = [\n+ \"id\",\n \"name\",\n ]\n", "issue": "udost\u0119pnienie id w API dla tags i documentType \nId dla tags i documetType, jest niezb\u0119dne dla zbudowania listy tych element\u00f3w w reakcie. \n", "before_files": [{"content": "from uuid import uuid4\nfrom django.conf import settings\nfrom rest_framework import serializers\nfrom .models import Letter, DocumentType\nfrom ..generic.serializers import UserLogModelSerializer\nfrom ..cases.models import Case\nfrom ..institutions.models import Institution\nfrom ..channels.models import Channel\nfrom ..files.apps import minio_app\nfrom ..files.serializers import FileSerializer\n\n\nclass DocumentTypeSerializer(serializers.ModelSerializer):\n class Meta:\n model = DocumentType\n fields = [\"name\"]\n\n\nclass LetterSerializer(UserLogModelSerializer):\n document_type = serializers.PrimaryKeyRelatedField(\n many=False, default=None, queryset=DocumentType.objects.all()\n )\n case = serializers.PrimaryKeyRelatedField(\n many=False, default=None, queryset=Case.objects.all()\n )\n institution = serializers.PrimaryKeyRelatedField(\n many=False, default=None, queryset=Institution.objects.all()\n )\n channel = serializers.PrimaryKeyRelatedField(\n many=False, default=None, queryset=Channel.objects.all()\n )\n attachments = FileSerializer(many=True, read_only=True)\n\n class Meta:\n model = Letter\n fields = [\n \"id\",\n \"direction\",\n \"channel\",\n \"final\",\n \"date\",\n \"reference_number\",\n \"institution\",\n \"case\",\n \"attachments\",\n \"ordering\",\n \"comment\",\n \"excerpt\",\n \"document_type\",\n \"created_on\",\n \"created_by\",\n \"modified_on\",\n \"modified_by\",\n ]\n\n def create(self, validated_data):\n channel = validated_data.pop(\"channel\")\n document_type = validated_data.pop(\"document_type\")\n institution = validated_data.pop(\"institution\")\n case = validated_data.pop(\"case\")\n\n letter = super().create(validated_data)\n letter.channel = channel\n letter.document_type = document_type\n letter.institution = institution\n letter.case = case\n letter.save()\n return letter\n\n def update(self, instance, validated_data):\n \"\"\"\n nested - variable storing representations of the nested objects\n of LetterSerializer (Channel, Address and DocumentType).\n Iterating over those 3 and updating fields of the related objects,\n using key-value pairs from PATCH request.\n \"\"\"\n nested = []\n for nested_object in nested:\n for attr, value in nested_object[\"data\"].items():\n setattr(nested_object[\"instance\"], attr, value)\n nested_object[\"instance\"].save()\n return super().update(instance, validated_data)\n\n\nclass SignRequestSerializer(serializers.Serializer):\n name = serializers.CharField(max_length=200)\n method = serializers.CharField(read_only=True)\n url = 
serializers.CharField(read_only=True)\n formData = serializers.DictField(read_only=True, child=serializers.CharField())\n path = serializers.CharField(read_only=True)\n\n def create(self, validated_data):\n path = f'{uuid4()}/{validated_data[\"name\"]}'\n url, form_data = minio_app.presigned_post_form_data(settings.MINIO_BUCKET, path)\n return {\n \"name\": validated_data[\"name\"],\n \"method\": \"POST\",\n \"url\": url,\n \"formData\": form_data,\n \"path\": path,\n }\n", "path": "backend-project/small_eod/letters/serializers.py"}, {"content": "from rest_framework import serializers\nfrom .models import Tag\n\n\nclass TagSerializer(serializers.ModelSerializer):\n class Meta:\n model = Tag\n fields = [\n \"name\",\n ]\n", "path": "backend-project/small_eod/tags/serializers.py"}]}
1536
197
gh_patches_debug_19197
rasdani/github-patches
git_diff
enthought__chaco-717
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Remove "PopupablePlot" `PopupablePlot` was introduced in the commit https://github.com/enthought/chaco/commit/4af154cec6f678862ba7e695ae62d681312b50e8#diff-c93657842da48caff25acdb9de9f919f9c04b5301d0fafc28598c9fdeee829f8 but it is not clear why it exists or who uses it. </issue> <code> [start of chaco/ui/popupable_plot.py] 1 # Enthought library imports 2 from traits.api import List 3 from chaco.plot import Plot 4 from chaco.plot_containers import VPlotContainer 5 from chaco.tools.pan_tool import PanTool 6 from chaco.tools.zoom_tool import ZoomTool 7 from chaco.ui.plot_window import PlotWindow 8 9 from traitsui.wx.constants import WindowColor 10 11 12 class PopupablePlot(Plot): 13 """A Plot class that pops up in a new window on double click""" 14 15 # FIXME: It would be nice to queue up other types of commands and settings 16 command_queue = List() 17 18 def normal_left_dclick(self, event): 19 plot = Plot(self.data) 20 for data, kw in self.command_queue: 21 plot.plot(data, **kw) 22 plot.title = self.title 23 24 plot.title = self.title 25 container = VPlotContainer(bgcolor=WindowColor) 26 container.add(plot) 27 plot.tools.append(PanTool(plot)) 28 plot.overlays.append(ZoomTool(plot)) 29 window = PlotWindow(plot=container) 30 window.edit_traits(kind="live", parent=event.window.control) 31 32 def plot(self, data, **kw): 33 """Queue up the plot commands""" 34 self.command_queue.append((data, kw)) 35 super().plot(data, **kw) 36 [end of chaco/ui/popupable_plot.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/chaco/ui/popupable_plot.py b/chaco/ui/popupable_plot.py deleted file mode 100644 --- a/chaco/ui/popupable_plot.py +++ /dev/null @@ -1,35 +0,0 @@ -# Enthought library imports -from traits.api import List -from chaco.plot import Plot -from chaco.plot_containers import VPlotContainer -from chaco.tools.pan_tool import PanTool -from chaco.tools.zoom_tool import ZoomTool -from chaco.ui.plot_window import PlotWindow - -from traitsui.wx.constants import WindowColor - - -class PopupablePlot(Plot): - """A Plot class that pops up in a new window on double click""" - - # FIXME: It would be nice to queue up other types of commands and settings - command_queue = List() - - def normal_left_dclick(self, event): - plot = Plot(self.data) - for data, kw in self.command_queue: - plot.plot(data, **kw) - plot.title = self.title - - plot.title = self.title - container = VPlotContainer(bgcolor=WindowColor) - container.add(plot) - plot.tools.append(PanTool(plot)) - plot.overlays.append(ZoomTool(plot)) - window = PlotWindow(plot=container) - window.edit_traits(kind="live", parent=event.window.control) - - def plot(self, data, **kw): - """Queue up the plot commands""" - self.command_queue.append((data, kw)) - super().plot(data, **kw)
{"golden_diff": "diff --git a/chaco/ui/popupable_plot.py b/chaco/ui/popupable_plot.py\ndeleted file mode 100644\n--- a/chaco/ui/popupable_plot.py\n+++ /dev/null\n@@ -1,35 +0,0 @@\n-# Enthought library imports\n-from traits.api import List\n-from chaco.plot import Plot\n-from chaco.plot_containers import VPlotContainer\n-from chaco.tools.pan_tool import PanTool\n-from chaco.tools.zoom_tool import ZoomTool\n-from chaco.ui.plot_window import PlotWindow\n-\n-from traitsui.wx.constants import WindowColor\n-\n-\n-class PopupablePlot(Plot):\n- \"\"\"A Plot class that pops up in a new window on double click\"\"\"\n-\n- # FIXME: It would be nice to queue up other types of commands and settings\n- command_queue = List()\n-\n- def normal_left_dclick(self, event):\n- plot = Plot(self.data)\n- for data, kw in self.command_queue:\n- plot.plot(data, **kw)\n- plot.title = self.title\n-\n- plot.title = self.title\n- container = VPlotContainer(bgcolor=WindowColor)\n- container.add(plot)\n- plot.tools.append(PanTool(plot))\n- plot.overlays.append(ZoomTool(plot))\n- window = PlotWindow(plot=container)\n- window.edit_traits(kind=\"live\", parent=event.window.control)\n-\n- def plot(self, data, **kw):\n- \"\"\"Queue up the plot commands\"\"\"\n- self.command_queue.append((data, kw))\n- super().plot(data, **kw)\n", "issue": "Remove \"PopupablePlot\"\n`PopupablePlot` was introduced in the commit https://github.com/enthought/chaco/commit/4af154cec6f678862ba7e695ae62d681312b50e8#diff-c93657842da48caff25acdb9de9f919f9c04b5301d0fafc28598c9fdeee829f8 but it is not clear why it exists or who uses it.\n", "before_files": [{"content": "# Enthought library imports\nfrom traits.api import List\nfrom chaco.plot import Plot\nfrom chaco.plot_containers import VPlotContainer\nfrom chaco.tools.pan_tool import PanTool\nfrom chaco.tools.zoom_tool import ZoomTool\nfrom chaco.ui.plot_window import PlotWindow\n\nfrom traitsui.wx.constants import WindowColor\n\n\nclass PopupablePlot(Plot):\n \"\"\"A Plot class that pops up in a new window on double click\"\"\"\n\n # FIXME: It would be nice to queue up other types of commands and settings\n command_queue = List()\n\n def normal_left_dclick(self, event):\n plot = Plot(self.data)\n for data, kw in self.command_queue:\n plot.plot(data, **kw)\n plot.title = self.title\n\n plot.title = self.title\n container = VPlotContainer(bgcolor=WindowColor)\n container.add(plot)\n plot.tools.append(PanTool(plot))\n plot.overlays.append(ZoomTool(plot))\n window = PlotWindow(plot=container)\n window.edit_traits(kind=\"live\", parent=event.window.control)\n\n def plot(self, data, **kw):\n \"\"\"Queue up the plot commands\"\"\"\n self.command_queue.append((data, kw))\n super().plot(data, **kw)\n", "path": "chaco/ui/popupable_plot.py"}]}
1002
347
gh_patches_debug_31794
rasdani/github-patches
git_diff
bridgecrewio__checkov-2552
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> image scanning does not cleanup the twistcli binary ``` ~ checkov --bc-api-key REDACTED --dockerfile-path Dockerfile --docker-image checkov_scan_me --repo-id tkishel/example <SNIP> More details: https://www.bridgecrew.cloud/projects?repository=tkishel73_tkishel/example&branch=bc-bdfab20_master&runId=latest ``` ``` ~ git status On branch main Your branch is up to date with 'origin/main'. Untracked files: (use "git add <file>..." to include in what will be committed) twistcli no changes added to commit (use "git add" and/or "git commit -a") ``` Some usage of `twistcli` includes a `cleanup_scan()` or (misspelled) `cleanup_twictcli()` method, but they do not appear to be called: https://github.com/bridgecrewio/checkov/blob/master/checkov/sca_image/runner.py https://github.com/bridgecrewio/checkov/blob/master/checkov/sca_package/scanner.py https://github.com/bridgecrewio/checkov/blob/master/checkov/common/bridgecrew/vulnerability_scanning/image_scanner.py So, it may be valuable to add something like this, after all possible uses of `twistcli` have executed ... ``` def __exit__(self, t, v, tr): self.cleanup_scan() ```` ... especially to execute that cleanup after an exception exit. An argument could also be made to instead download and cache the `twistcli` binary outside of the current directory. And, it may be valuable to consolidate the various duplicate methods that interact with the `twistcli` binary in the above files. </issue> <code> [start of checkov/sca_package/runner.py] 1 import logging 2 import os 3 from pathlib import Path 4 from typing import Optional, List, Tuple, Set, Union, Sequence, Dict, Any 5 6 from checkov.common.bridgecrew.platform_integration import bc_integration 7 from checkov.common.models.enums import CheckResult 8 from checkov.common.output.report import Report, CheckType 9 from checkov.common.runners.base_runner import BaseRunner, ignored_directories, strtobool 10 from checkov.runner_filter import RunnerFilter 11 from checkov.sca_package.output import create_report_record 12 from checkov.sca_package.scanner import Scanner 13 14 SUPPORTED_PACKAGE_FILES = { 15 "bower.json", 16 "build.gradle", 17 "build.gradle.kts", 18 "go.sum", 19 "gradle.properties", 20 "METADATA", 21 "npm-shrinkwrap.json", 22 "package.json", 23 "package-lock.json", 24 "pom.xml", 25 "requirements.txt", 26 } 27 28 class Runner(BaseRunner): 29 check_type = CheckType.SCA_PACKAGE 30 31 def __init__(self): 32 self._check_class: Optional[str] = None 33 self._code_repo_path: Optional[Path] = None 34 35 def prepare_and_scan( 36 self, 37 root_folder: Union[str, Path], 38 files: Optional[List[str]] = None, 39 runner_filter: RunnerFilter = RunnerFilter(), 40 exclude_package_json: bool = True, 41 cleanup_twictcli: bool = True, 42 ) -> "Optional[Sequence[Dict[str, Any]]]": 43 44 if not strtobool(os.getenv("ENABLE_SCA_PACKAGE_SCAN", "False")): 45 return None 46 47 # skip complete run, if flag '--check' was used without a CVE check ID 48 if runner_filter.checks and all(not check.startswith("CKV_CVE") for check in runner_filter.checks): 49 return None 50 51 if not bc_integration.bc_api_key: 52 logging.info("The --bc-api-key flag needs to be set to run SCA package scanning") 53 return None 54 55 logging.info("SCA package scanning searching for scannable files") 56 57 self._code_repo_path = Path(root_folder) 58 59 excluded_paths = {*ignored_directories} 60 if runner_filter.excluded_paths: 61 
excluded_paths.update(runner_filter.excluded_paths) 62 63 input_output_paths = self.find_scannable_files( 64 root_path=self._code_repo_path, 65 files=files, 66 excluded_paths=excluded_paths, 67 exclude_package_json=exclude_package_json 68 ) 69 if not input_output_paths: 70 # no packages found 71 return None 72 73 logging.info(f"SCA package scanning will scan {len(input_output_paths)} files") 74 75 scanner = Scanner() 76 self._check_class = f"{scanner.__module__}.{scanner.__class__.__qualname__}" 77 scan_results = scanner.scan(input_output_paths, cleanup_twictcli) 78 79 logging.info(f"SCA package scanning successfully scanned {len(scan_results)} files") 80 return scan_results 81 82 def run( 83 self, 84 root_folder: Union[str, Path], 85 external_checks_dir: Optional[List[str]] = None, 86 files: Optional[List[str]] = None, 87 runner_filter: RunnerFilter = RunnerFilter(), 88 collect_skip_comments: bool = True, 89 ) -> Report: 90 report = Report(self.check_type) 91 92 scan_results = self.prepare_and_scan(root_folder, files, runner_filter) 93 if scan_results is None: 94 return report 95 96 for result in scan_results: 97 package_file_path = Path(result["repository"]) 98 try: 99 package_file_path = package_file_path.relative_to(self._code_repo_path) 100 except ValueError: 101 # Path.is_relative_to() was implemented in Python 3.9 102 pass 103 104 vulnerabilities = result.get("vulnerabilities") or [] 105 106 rootless_file_path = str(package_file_path).replace(package_file_path.anchor, "", 1) 107 self.parse_vulns_to_records(report, result, rootless_file_path, runner_filter, vulnerabilities) 108 109 return report 110 111 def parse_vulns_to_records(self, report, result, rootless_file_path, runner_filter, vulnerabilities, 112 file_abs_path=''): 113 for vulnerability in vulnerabilities: 114 record = create_report_record( 115 rootless_file_path=rootless_file_path, 116 file_abs_path=file_abs_path or result.get("repository"), 117 check_class=self._check_class, 118 vulnerability_details=vulnerability, 119 runner_filter=runner_filter 120 ) 121 if not runner_filter.should_run_check(check_id=record.check_id, bc_check_id=record.bc_check_id, 122 severity=record.severity): 123 if runner_filter.checks: 124 continue 125 else: 126 record.check_result = { 127 "result": CheckResult.SKIPPED, 128 "suppress_comment": f"{vulnerability['id']} is skipped" 129 } 130 131 report.add_resource(record.resource) 132 report.add_record(record) 133 134 def find_scannable_files( 135 self, root_path: Path, files: Optional[List[str]], excluded_paths: Set[str], exclude_package_json: bool = True 136 ) -> Set[Tuple[Path, Path]]: 137 input_paths = { 138 file_path 139 for file_path in root_path.glob("**/*") 140 if file_path.name in SUPPORTED_PACKAGE_FILES and not any(p in file_path.parts for p in excluded_paths) 141 } 142 143 package_lock_parent_paths = set() 144 if exclude_package_json: 145 # filter out package.json, if package-lock.json exists 146 package_lock_parent_paths = { 147 file_path.parent for file_path in input_paths if file_path.name == "package-lock.json" 148 } 149 150 input_output_paths = { 151 (file_path, file_path.parent / f"{file_path.stem}_result.json") 152 for file_path in input_paths 153 if file_path.name != "package.json" or file_path.parent not in package_lock_parent_paths 154 } 155 156 for file in files or []: 157 file_path = Path(file) 158 if not file_path.exists(): 159 logging.warning(f"File {file_path} doesn't exist") 160 continue 161 162 input_output_paths.add((file_path, file_path.parent / 
f"{file_path.stem}_result.json")) 163 164 return input_output_paths 165 [end of checkov/sca_package/runner.py] [start of checkov/sca_image/runner.py] 1 import asyncio 2 import json 3 import logging 4 import os.path 5 from pathlib import Path 6 from typing import Optional, List, Union, Dict, Any 7 8 from checkov.common.bridgecrew.platform_integration import bc_integration 9 from checkov.common.bridgecrew.vulnerability_scanning.image_scanner import image_scanner, TWISTCLI_FILE_NAME 10 from checkov.common.bridgecrew.vulnerability_scanning.integrations.docker_image_scanning import \ 11 docker_image_scanning_integration 12 from checkov.common.output.report import Report, CheckType 13 from checkov.runner_filter import RunnerFilter 14 from checkov.sca_package.runner import Runner as PackageRunner 15 16 17 class Runner(PackageRunner): 18 check_type = CheckType.SCA_IMAGE 19 20 def __init__(self) -> None: 21 self._check_class: Optional[str] = None 22 self._code_repo_path: Optional[Path] = None 23 self._check_class = f"{image_scanner.__module__}.{image_scanner.__class__.__qualname__}" 24 self.raw_report: Optional[Dict[str, Any]] = None 25 26 def scan( 27 self, 28 image_id: str, 29 dockerfile_path: str, 30 runner_filter: RunnerFilter = RunnerFilter(), 31 ) -> Optional[Dict[str, Any]]: 32 33 # skip complete run, if flag '--check' was used without a CVE check ID 34 if runner_filter.checks and all(not check.startswith("CKV_CVE") for check in runner_filter.checks): 35 return None 36 37 if not bc_integration.bc_api_key: 38 logging.info("The --bc-api-key flag needs to be set to run SCA package scanning") 39 return None 40 41 logging.info(f"SCA image scanning is scanning the image {image_id}") 42 image_scanner.setup_scan(image_id, dockerfile_path, skip_extract_image_name=False) 43 scan_result = asyncio.run(self.execute_scan(image_id, Path('results.json'))) 44 logging.info(f"SCA image scanning successfully scanned the image {image_id}") 45 return scan_result 46 47 @staticmethod 48 async def execute_scan( 49 image_id: str, 50 output_path: Path, 51 ) -> Dict[str, Any]: 52 command = f"./{TWISTCLI_FILE_NAME} images scan --address {docker_image_scanning_integration.get_proxy_address()} --token {docker_image_scanning_integration.get_bc_api_key()} --details --output-file \"{output_path}\" {image_id}" 53 process = await asyncio.create_subprocess_shell( 54 command, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE 55 ) 56 57 stdout, stderr = await process.communicate() 58 59 # log output for debugging 60 logging.debug(stdout.decode()) 61 62 exit_code = await process.wait() 63 64 if exit_code: 65 logging.error(stderr.decode()) 66 return {} 67 68 # read and delete the report file 69 scan_result: Dict[str, Any] = json.loads(output_path.read_text()) 70 output_path.unlink() 71 72 return scan_result 73 74 def run( 75 self, 76 root_folder: Union[str, Path], 77 external_checks_dir: Optional[List[str]] = None, 78 files: Optional[List[str]] = None, 79 runner_filter: RunnerFilter = RunnerFilter(), 80 collect_skip_comments: bool = True, 81 **kwargs: str 82 ) -> Report: 83 report = Report(self.check_type) 84 85 dockerfile_path = kwargs['dockerfile_path'] 86 image_id = kwargs['image_id'] 87 scan_result = self.scan(image_id, dockerfile_path, runner_filter) 88 if scan_result is None: 89 return report 90 self.raw_report = scan_result 91 result = scan_result.get('results', [{}])[0] 92 93 vulnerabilities = result.get("vulnerabilities") or [] 94 self.parse_vulns_to_records(report, result, f"{dockerfile_path} ({image_id})", 
runner_filter, vulnerabilities, 95 file_abs_path=os.path.abspath(dockerfile_path)) 96 97 return report 98 [end of checkov/sca_image/runner.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/checkov/sca_image/runner.py b/checkov/sca_image/runner.py --- a/checkov/sca_image/runner.py +++ b/checkov/sca_image/runner.py @@ -40,9 +40,14 @@ logging.info(f"SCA image scanning is scanning the image {image_id}") image_scanner.setup_scan(image_id, dockerfile_path, skip_extract_image_name=False) - scan_result = asyncio.run(self.execute_scan(image_id, Path('results.json'))) - logging.info(f"SCA image scanning successfully scanned the image {image_id}") - return scan_result + try: + scan_result = asyncio.run(self.execute_scan(image_id, Path('results.json'))) + logging.info(f"SCA image scanning successfully scanned the image {image_id}") + image_scanner.cleanup_scan() + return scan_result + except Exception as err: + image_scanner.cleanup_scan() + raise err @staticmethod async def execute_scan( diff --git a/checkov/sca_package/runner.py b/checkov/sca_package/runner.py --- a/checkov/sca_package/runner.py +++ b/checkov/sca_package/runner.py @@ -38,7 +38,7 @@ files: Optional[List[str]] = None, runner_filter: RunnerFilter = RunnerFilter(), exclude_package_json: bool = True, - cleanup_twictcli: bool = True, + cleanup_twistcli: bool = True, ) -> "Optional[Sequence[Dict[str, Any]]]": if not strtobool(os.getenv("ENABLE_SCA_PACKAGE_SCAN", "False")): @@ -74,7 +74,7 @@ scanner = Scanner() self._check_class = f"{scanner.__module__}.{scanner.__class__.__qualname__}" - scan_results = scanner.scan(input_output_paths, cleanup_twictcli) + scan_results = scanner.scan(input_output_paths, cleanup_twistcli) logging.info(f"SCA package scanning successfully scanned {len(scan_results)} files") return scan_results
{"golden_diff": "diff --git a/checkov/sca_image/runner.py b/checkov/sca_image/runner.py\n--- a/checkov/sca_image/runner.py\n+++ b/checkov/sca_image/runner.py\n@@ -40,9 +40,14 @@\n \n logging.info(f\"SCA image scanning is scanning the image {image_id}\")\n image_scanner.setup_scan(image_id, dockerfile_path, skip_extract_image_name=False)\n- scan_result = asyncio.run(self.execute_scan(image_id, Path('results.json')))\n- logging.info(f\"SCA image scanning successfully scanned the image {image_id}\")\n- return scan_result\n+ try:\n+ scan_result = asyncio.run(self.execute_scan(image_id, Path('results.json')))\n+ logging.info(f\"SCA image scanning successfully scanned the image {image_id}\")\n+ image_scanner.cleanup_scan()\n+ return scan_result\n+ except Exception as err:\n+ image_scanner.cleanup_scan()\n+ raise err\n \n @staticmethod\n async def execute_scan(\ndiff --git a/checkov/sca_package/runner.py b/checkov/sca_package/runner.py\n--- a/checkov/sca_package/runner.py\n+++ b/checkov/sca_package/runner.py\n@@ -38,7 +38,7 @@\n files: Optional[List[str]] = None,\n runner_filter: RunnerFilter = RunnerFilter(),\n exclude_package_json: bool = True,\n- cleanup_twictcli: bool = True,\n+ cleanup_twistcli: bool = True,\n ) -> \"Optional[Sequence[Dict[str, Any]]]\":\n \n if not strtobool(os.getenv(\"ENABLE_SCA_PACKAGE_SCAN\", \"False\")):\n@@ -74,7 +74,7 @@\n \n scanner = Scanner()\n self._check_class = f\"{scanner.__module__}.{scanner.__class__.__qualname__}\"\n- scan_results = scanner.scan(input_output_paths, cleanup_twictcli)\n+ scan_results = scanner.scan(input_output_paths, cleanup_twistcli)\n \n logging.info(f\"SCA package scanning successfully scanned {len(scan_results)} files\")\n return scan_results\n", "issue": "image scanning does not cleanup the twistcli binary\n```\r\n~ checkov --bc-api-key REDACTED --dockerfile-path Dockerfile --docker-image checkov_scan_me --repo-id tkishel/example\r\n\r\n<SNIP>\r\n\r\nMore details: https://www.bridgecrew.cloud/projects?repository=tkishel73_tkishel/example&branch=bc-bdfab20_master&runId=latest\r\n```\r\n\r\n```\r\n~ git status\r\nOn branch main\r\nYour branch is up to date with 'origin/main'.\r\n\r\nUntracked files:\r\n (use \"git add <file>...\" to include in what will be committed)\r\n\ttwistcli\r\n\r\nno changes added to commit (use \"git add\" and/or \"git commit -a\")\r\n```\r\n\r\nSome usage of `twistcli` includes a `cleanup_scan()` or (misspelled) `cleanup_twictcli()` method, but they do not appear to be called:\r\n\r\nhttps://github.com/bridgecrewio/checkov/blob/master/checkov/sca_image/runner.py\r\n\r\nhttps://github.com/bridgecrewio/checkov/blob/master/checkov/sca_package/scanner.py\r\n\r\nhttps://github.com/bridgecrewio/checkov/blob/master/checkov/common/bridgecrew/vulnerability_scanning/image_scanner.py\r\n\r\nSo, it may be valuable to add something like this, after all possible uses of `twistcli` have executed ...\r\n\r\n```\r\ndef __exit__(self, t, v, tr):\r\n self.cleanup_scan()\r\n````\r\n\r\n... 
especially to execute that cleanup after an exception exit.\r\n\r\nAn argument could also be made to instead download and cache the `twistcli` binary outside of the current directory.\r\n\r\nAnd, it may be valuable to consolidate the various duplicate methods that interact with the `twistcli` binary in the above files.\n", "before_files": [{"content": "import logging\nimport os\nfrom pathlib import Path\nfrom typing import Optional, List, Tuple, Set, Union, Sequence, Dict, Any\n\nfrom checkov.common.bridgecrew.platform_integration import bc_integration\nfrom checkov.common.models.enums import CheckResult\nfrom checkov.common.output.report import Report, CheckType\nfrom checkov.common.runners.base_runner import BaseRunner, ignored_directories, strtobool\nfrom checkov.runner_filter import RunnerFilter\nfrom checkov.sca_package.output import create_report_record\nfrom checkov.sca_package.scanner import Scanner\n\nSUPPORTED_PACKAGE_FILES = {\n \"bower.json\",\n \"build.gradle\",\n \"build.gradle.kts\",\n \"go.sum\",\n \"gradle.properties\",\n \"METADATA\",\n \"npm-shrinkwrap.json\",\n \"package.json\",\n \"package-lock.json\",\n \"pom.xml\",\n \"requirements.txt\",\n}\n\nclass Runner(BaseRunner):\n check_type = CheckType.SCA_PACKAGE\n\n def __init__(self):\n self._check_class: Optional[str] = None\n self._code_repo_path: Optional[Path] = None\n\n def prepare_and_scan(\n self,\n root_folder: Union[str, Path],\n files: Optional[List[str]] = None,\n runner_filter: RunnerFilter = RunnerFilter(),\n exclude_package_json: bool = True,\n cleanup_twictcli: bool = True,\n ) -> \"Optional[Sequence[Dict[str, Any]]]\":\n\n if not strtobool(os.getenv(\"ENABLE_SCA_PACKAGE_SCAN\", \"False\")):\n return None\n\n # skip complete run, if flag '--check' was used without a CVE check ID\n if runner_filter.checks and all(not check.startswith(\"CKV_CVE\") for check in runner_filter.checks):\n return None\n\n if not bc_integration.bc_api_key:\n logging.info(\"The --bc-api-key flag needs to be set to run SCA package scanning\")\n return None\n\n logging.info(\"SCA package scanning searching for scannable files\")\n\n self._code_repo_path = Path(root_folder)\n\n excluded_paths = {*ignored_directories}\n if runner_filter.excluded_paths:\n excluded_paths.update(runner_filter.excluded_paths)\n\n input_output_paths = self.find_scannable_files(\n root_path=self._code_repo_path,\n files=files,\n excluded_paths=excluded_paths,\n exclude_package_json=exclude_package_json\n )\n if not input_output_paths:\n # no packages found\n return None\n\n logging.info(f\"SCA package scanning will scan {len(input_output_paths)} files\")\n\n scanner = Scanner()\n self._check_class = f\"{scanner.__module__}.{scanner.__class__.__qualname__}\"\n scan_results = scanner.scan(input_output_paths, cleanup_twictcli)\n\n logging.info(f\"SCA package scanning successfully scanned {len(scan_results)} files\")\n return scan_results\n\n def run(\n self,\n root_folder: Union[str, Path],\n external_checks_dir: Optional[List[str]] = None,\n files: Optional[List[str]] = None,\n runner_filter: RunnerFilter = RunnerFilter(),\n collect_skip_comments: bool = True,\n ) -> Report:\n report = Report(self.check_type)\n\n scan_results = self.prepare_and_scan(root_folder, files, runner_filter)\n if scan_results is None:\n return report\n\n for result in scan_results:\n package_file_path = Path(result[\"repository\"])\n try:\n package_file_path = package_file_path.relative_to(self._code_repo_path)\n except ValueError:\n # Path.is_relative_to() was implemented in Python 
3.9\n pass\n\n vulnerabilities = result.get(\"vulnerabilities\") or []\n\n rootless_file_path = str(package_file_path).replace(package_file_path.anchor, \"\", 1)\n self.parse_vulns_to_records(report, result, rootless_file_path, runner_filter, vulnerabilities)\n\n return report\n\n def parse_vulns_to_records(self, report, result, rootless_file_path, runner_filter, vulnerabilities,\n file_abs_path=''):\n for vulnerability in vulnerabilities:\n record = create_report_record(\n rootless_file_path=rootless_file_path,\n file_abs_path=file_abs_path or result.get(\"repository\"),\n check_class=self._check_class,\n vulnerability_details=vulnerability,\n runner_filter=runner_filter\n )\n if not runner_filter.should_run_check(check_id=record.check_id, bc_check_id=record.bc_check_id,\n severity=record.severity):\n if runner_filter.checks:\n continue\n else:\n record.check_result = {\n \"result\": CheckResult.SKIPPED,\n \"suppress_comment\": f\"{vulnerability['id']} is skipped\"\n }\n\n report.add_resource(record.resource)\n report.add_record(record)\n\n def find_scannable_files(\n self, root_path: Path, files: Optional[List[str]], excluded_paths: Set[str], exclude_package_json: bool = True\n ) -> Set[Tuple[Path, Path]]:\n input_paths = {\n file_path\n for file_path in root_path.glob(\"**/*\")\n if file_path.name in SUPPORTED_PACKAGE_FILES and not any(p in file_path.parts for p in excluded_paths)\n }\n\n package_lock_parent_paths = set()\n if exclude_package_json:\n # filter out package.json, if package-lock.json exists\n package_lock_parent_paths = {\n file_path.parent for file_path in input_paths if file_path.name == \"package-lock.json\"\n }\n\n input_output_paths = {\n (file_path, file_path.parent / f\"{file_path.stem}_result.json\")\n for file_path in input_paths\n if file_path.name != \"package.json\" or file_path.parent not in package_lock_parent_paths\n }\n\n for file in files or []:\n file_path = Path(file)\n if not file_path.exists():\n logging.warning(f\"File {file_path} doesn't exist\")\n continue\n\n input_output_paths.add((file_path, file_path.parent / f\"{file_path.stem}_result.json\"))\n\n return input_output_paths\n", "path": "checkov/sca_package/runner.py"}, {"content": "import asyncio\nimport json\nimport logging\nimport os.path\nfrom pathlib import Path\nfrom typing import Optional, List, Union, Dict, Any\n\nfrom checkov.common.bridgecrew.platform_integration import bc_integration\nfrom checkov.common.bridgecrew.vulnerability_scanning.image_scanner import image_scanner, TWISTCLI_FILE_NAME\nfrom checkov.common.bridgecrew.vulnerability_scanning.integrations.docker_image_scanning import \\\n docker_image_scanning_integration\nfrom checkov.common.output.report import Report, CheckType\nfrom checkov.runner_filter import RunnerFilter\nfrom checkov.sca_package.runner import Runner as PackageRunner\n\n\nclass Runner(PackageRunner):\n check_type = CheckType.SCA_IMAGE\n\n def __init__(self) -> None:\n self._check_class: Optional[str] = None\n self._code_repo_path: Optional[Path] = None\n self._check_class = f\"{image_scanner.__module__}.{image_scanner.__class__.__qualname__}\"\n self.raw_report: Optional[Dict[str, Any]] = None\n\n def scan(\n self,\n image_id: str,\n dockerfile_path: str,\n runner_filter: RunnerFilter = RunnerFilter(),\n ) -> Optional[Dict[str, Any]]:\n\n # skip complete run, if flag '--check' was used without a CVE check ID\n if runner_filter.checks and all(not check.startswith(\"CKV_CVE\") for check in runner_filter.checks):\n return None\n\n if not 
bc_integration.bc_api_key:\n logging.info(\"The --bc-api-key flag needs to be set to run SCA package scanning\")\n return None\n\n logging.info(f\"SCA image scanning is scanning the image {image_id}\")\n image_scanner.setup_scan(image_id, dockerfile_path, skip_extract_image_name=False)\n scan_result = asyncio.run(self.execute_scan(image_id, Path('results.json')))\n logging.info(f\"SCA image scanning successfully scanned the image {image_id}\")\n return scan_result\n\n @staticmethod\n async def execute_scan(\n image_id: str,\n output_path: Path,\n ) -> Dict[str, Any]:\n command = f\"./{TWISTCLI_FILE_NAME} images scan --address {docker_image_scanning_integration.get_proxy_address()} --token {docker_image_scanning_integration.get_bc_api_key()} --details --output-file \\\"{output_path}\\\" {image_id}\"\n process = await asyncio.create_subprocess_shell(\n command, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE\n )\n\n stdout, stderr = await process.communicate()\n\n # log output for debugging\n logging.debug(stdout.decode())\n\n exit_code = await process.wait()\n\n if exit_code:\n logging.error(stderr.decode())\n return {}\n\n # read and delete the report file\n scan_result: Dict[str, Any] = json.loads(output_path.read_text())\n output_path.unlink()\n\n return scan_result\n\n def run(\n self,\n root_folder: Union[str, Path],\n external_checks_dir: Optional[List[str]] = None,\n files: Optional[List[str]] = None,\n runner_filter: RunnerFilter = RunnerFilter(),\n collect_skip_comments: bool = True,\n **kwargs: str\n ) -> Report:\n report = Report(self.check_type)\n\n dockerfile_path = kwargs['dockerfile_path']\n image_id = kwargs['image_id']\n scan_result = self.scan(image_id, dockerfile_path, runner_filter)\n if scan_result is None:\n return report\n self.raw_report = scan_result\n result = scan_result.get('results', [{}])[0]\n\n vulnerabilities = result.get(\"vulnerabilities\") or []\n self.parse_vulns_to_records(report, result, f\"{dockerfile_path} ({image_id})\", runner_filter, vulnerabilities,\n file_abs_path=os.path.abspath(dockerfile_path))\n\n return report\n", "path": "checkov/sca_image/runner.py"}]}
3645
457
gh_patches_debug_3353
rasdani/github-patches
git_diff
TabbycatDebate__tabbycat-1092
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Team partial check-in indicator doesn't work with 2+ speaker-teams The team's partial check-in indicator (where the box on the 'check-ins' page has green and grey stripes) displays when only 1 speaker in a team is checked-in, but then turns grey again when more than 1 speaker is checked-in but the entire team is not checked-in. </issue> <code> [start of tabbycat/checkins/views.py] 1 import json 2 3 from asgiref.sync import async_to_sync 4 from channels.layers import get_channel_layer 5 from django.contrib import messages 6 from django.core.exceptions import ObjectDoesNotExist 7 from django.views.generic.base import TemplateView 8 from django.template.response import TemplateResponse 9 from django.utils.translation import gettext as _ 10 11 from actionlog.mixins import LogActionMixin 12 from actionlog.models import ActionLogEntry 13 from options.utils import use_team_code_names 14 from participants.models import Person, Speaker 15 from utils.misc import reverse_tournament 16 from utils.mixins import AdministratorMixin, AssistantMixin 17 from utils.views import PostOnlyRedirectView 18 from tournaments.mixins import PublicTournamentPageMixin, TournamentMixin 19 20 from .consumers import CheckInEventConsumer 21 from .models import PersonIdentifier, VenueIdentifier 22 from .utils import create_identifiers, get_unexpired_checkins 23 24 25 class CheckInPreScanView(TournamentMixin, TemplateView): 26 template_name = 'checkin_scan.html' 27 page_title = _('Scan Identifiers') 28 page_emoji = '📷' 29 30 def get_context_data(self, **kwargs): 31 kwargs["scan_url"] = self.tournament.slug + '/checkins/' 32 return super().get_context_data(**kwargs) 33 34 35 class AdminCheckInPreScanView(AdministratorMixin, CheckInPreScanView): 36 scan_view = 'admin-checkin-scan' 37 38 39 class AssistantCheckInPreScanView(AssistantMixin, CheckInPreScanView): 40 scan_view = 'assistant-checkin-scan' 41 42 43 class BaseCheckInStatusView(TournamentMixin, TemplateView): 44 template_name = 'checkin_status.html' 45 scan_view = False 46 for_admin = True 47 48 def get_context_data(self, **kwargs): 49 events = get_unexpired_checkins(self.tournament, self.window_preference) 50 kwargs["events"] = json.dumps([e.serialize() for e in events]) 51 if self.scan_view: 52 kwargs["scan_url"] = self.tournament.slug + '/checkins/' 53 kwargs["for_admin"] = self.for_admin 54 return super().get_context_data(**kwargs) 55 56 57 class CheckInPeopleStatusView(BaseCheckInStatusView): 58 page_emoji = '⌚️' 59 page_title = _("People's Check-In Statuses") 60 window_preference = 'checkin_window_people' 61 62 def get_context_data(self, **kwargs): 63 64 team_codes = use_team_code_names(self.tournament, admin=self.for_admin) 65 kwargs["team_codes"] = json.dumps(team_codes) 66 67 adjudicators = [] 68 for adj in self.tournament.relevant_adjudicators.all().select_related('institution', 'checkin_identifier'): 69 try: 70 code = adj.checkin_identifier.barcode 71 except ObjectDoesNotExist: 72 code = None 73 74 adjudicators.append({ 75 'id': adj.id, 'name': adj.name, 'type': 'Adjudicator', 76 'identifier': [code], 'locked': False, 'independent': adj.independent, 77 'institution': adj.institution.serialize if adj.institution else None, 78 }) 79 kwargs["adjudicators"] = json.dumps(adjudicators) 80 81 speakers = [] 82 for speaker in Speaker.objects.filter(team__tournament=self.tournament).select_related('team', 'team__institution', 'checkin_identifier'): 83 try: 84 
code = speaker.checkin_identifier.barcode 85 except ObjectDoesNotExist: 86 code = None 87 88 speakers.append({ 89 'id': speaker.id, 'name': speaker.name, 'type': 'Speaker', 90 'identifier': [code], 'locked': False, 91 'team': speaker.team.code_name if team_codes else speaker.team.short_name, 92 'institution': speaker.team.institution.serialize if speaker.team.institution else None, 93 }) 94 kwargs["speakers"] = json.dumps(speakers) 95 96 return super().get_context_data(**kwargs) 97 98 99 class AdminCheckInPeopleStatusView(AdministratorMixin, CheckInPeopleStatusView): 100 scan_view = 'admin-checkin-scan' 101 102 103 class AssistantCheckInPeopleStatusView(AssistantMixin, CheckInPeopleStatusView): 104 scan_view = 'assistant-checkin-scan' 105 106 107 class PublicCheckInPeopleStatusView(PublicTournamentPageMixin, CheckInPeopleStatusView): 108 for_admin = False 109 public_page_preference = 'public_checkins' 110 111 112 class CheckInVenuesStatusView(BaseCheckInStatusView): 113 page_emoji = '👜' 114 page_title = _("Venue's Check-In Statuses") 115 window_preference = 'checkin_window_venues' 116 117 def get_context_data(self, **kwargs): 118 venues = [] 119 for venue in self.tournament.relevant_venues.select_related('checkin_identifier').prefetch_related('venuecategory_set').all(): 120 item = venue.serialize() 121 item['locked'] = False 122 try: 123 item['identifier'] = [venue.checkin_identifier.barcode] 124 except ObjectDoesNotExist: 125 item['identifier'] = [None] 126 venues.append(item) 127 kwargs["venues"] = json.dumps(venues) 128 kwargs["team_codes"] = json.dumps(False) 129 130 return super().get_context_data(**kwargs) 131 132 133 class AdminCheckInVenuesStatusView(AdministratorMixin, CheckInVenuesStatusView): 134 scan_view = 'admin-checkin-scan' 135 136 137 class AssistantCheckInVenuesStatusView(AssistantMixin, CheckInVenuesStatusView): 138 scan_view = 'assistant-checkin-scan' 139 140 141 class SegregatedCheckinsMixin(TournamentMixin): 142 143 def t_speakers(self): 144 return Speaker.objects.filter( 145 team__tournament=self.tournament).values_list( 146 'person_ptr_id', flat=True) 147 148 def speakers_with_barcodes(self): 149 identifiers = PersonIdentifier.objects.all() 150 return identifiers.filter(person_id__in=self.t_speakers()) 151 152 def t_adjs(self): 153 return self.tournament.adjudicator_set.values_list( 154 'person_ptr_id', flat=True) 155 156 def adjs_with_barcodes(self): 157 identifiers = PersonIdentifier.objects.all() 158 return identifiers.filter(person_id__in=self.t_adjs()) 159 160 161 class CheckInIdentifiersView(SegregatedCheckinsMixin, TemplateView): 162 template_name = 'checkin_ids.html' 163 page_title = _('Make Identifiers') 164 page_emoji = '📛' 165 166 def get_context_data(self, **kwargs): 167 t = self.tournament 168 kwargs["check_in_info"] = { 169 "speakers": { 170 "title": _("Speakers"), 171 "total": self.t_speakers().count(), 172 "in": self.speakers_with_barcodes().count() 173 }, 174 "adjudicators": { 175 "title": _("Adjudicators"), 176 "total": self.t_adjs().count(), 177 "in": self.adjs_with_barcodes().count() 178 }, 179 "venues": { 180 "title": _("Venues"), 181 "total": t.venue_set.count(), 182 "in": VenueIdentifier.objects.filter(venue__tournament=t).count(), 183 } 184 } 185 return super().get_context_data(**kwargs) 186 187 188 class AdminCheckInIdentifiersView(AdministratorMixin, CheckInIdentifiersView): 189 pass 190 191 192 class AssistantCheckInIdentifiersView(AssistantMixin, CheckInIdentifiersView): 193 pass 194 195 196 class 
AdminCheckInGenerateView(AdministratorMixin, LogActionMixin, 197 TournamentMixin, PostOnlyRedirectView): 198 199 def get_action_log_type(self): 200 if self.kwargs["kind"] == "speakers": 201 return ActionLogEntry.ACTION_TYPE_CHECKIN_SPEAK_GENERATE 202 elif self.kwargs["kind"] == "adjudicators": 203 return ActionLogEntry.ACTION_TYPE_CHECKIN_ADJ_GENERATE 204 elif self.kwargs["kind"] == "venues": 205 return ActionLogEntry.ACTION_TYPE_CHECKIN_VENUES_GENERATE 206 207 # Providing tournament_slug_url_kwarg isn't working for some reason; so use: 208 def get_redirect_url(self, *args, **kwargs): 209 return reverse_tournament('admin-checkin-identifiers', self.tournament) 210 211 def post(self, request, *args, **kwargs): 212 t = self.tournament 213 214 if self.kwargs["kind"] == "speakers": 215 create_identifiers(PersonIdentifier, Speaker.objects.filter(team__tournament=t)) 216 elif self.kwargs["kind"] == "adjudicators": 217 create_identifiers(PersonIdentifier, t.adjudicator_set.all()) 218 elif self.kwargs["kind"] == "venues": 219 create_identifiers(VenueIdentifier, t.venue_set.all()) 220 221 messages.success(request, _("Generated identifiers for %s" % self.kwargs["kind"])) 222 self.log_action() # Need to call explicitly 223 return super().post(request, *args, **kwargs) 224 225 226 class CheckInPrintablesView(SegregatedCheckinsMixin, TemplateView): 227 template_name = 'checkin_printables.html' 228 page_title = _('Identifiers') 229 page_emoji = '📛' 230 231 def get_context_data(self, **kwargs): 232 if self.kwargs["kind"] == "speakers": 233 kwargs["identifiers"] = self.speakers_with_barcodes().order_by('person__name') 234 elif self.kwargs["kind"] == "adjudicators": 235 kwargs["identifiers"] = self.adjs_with_barcodes().order_by('person__name') 236 elif self.kwargs["kind"] == "venues": 237 venues = self.tournament.relevant_venues 238 kwargs["identifiers"] = VenueIdentifier.objects.filter(venue__in=venues) 239 240 return super().get_context_data(**kwargs) 241 242 243 class AdminCheckInPrintablesView(AdministratorMixin, CheckInPrintablesView): 244 pass 245 246 247 class AssistantCheckInPrintablesView(AssistantMixin, CheckInPrintablesView): 248 pass 249 250 251 class ParticipantCheckinView(PublicTournamentPageMixin, PostOnlyRedirectView): 252 253 public_page_preference = 'public_checkins_submit' 254 255 def post(self, request, *args, **kwargs): 256 t = self.tournament 257 258 action = request.POST['action'] 259 260 try: 261 person = Person.objects.get(url_key=kwargs['url_key']) 262 identifier = PersonIdentifier.objects.get(person=person) 263 except ObjectDoesNotExist: 264 messages.error(self.request, _("Could not check you in as you do not have an identifying code — your tab director may need to make you an identifier.")) 265 return super().post(request, *args, **kwargs) 266 267 checkins = get_unexpired_checkins(t, 'checkin_window_people') 268 existing_checkin = checkins.filter(identifier=identifier) 269 if action == 'revoke': 270 if existing_checkin.exists(): 271 messages.success(self.request, _("You have revoked your check-in.")) 272 else: 273 messages.error(self.request, _("Whoops! Looks like your check-in was already revoked.")) 274 elif action == 'checkin': 275 if existing_checkin.exists(): 276 messages.error(self.request, _("Whoops! 
Looks like you're already checked in.")) 277 else: 278 messages.success(self.request, _("You are now checked in.")) 279 else: 280 return TemplateResponse(request=self.request, template='400.html', status=400) 281 282 group_name = CheckInEventConsumer.group_prefix + "_" + t.slug 283 284 # Override permissions check - no user but authenticated through URL 285 async_to_sync(get_channel_layer().group_send)( 286 group_name, { 287 'type': 'broadcast_checkin', 288 'content': { 289 'barcodes': [identifier.barcode], 290 'status': action == 'checkin', 291 'type': 'people', 292 'component_id': None 293 } 294 } 295 ) 296 297 return super().post(request, *args, **kwargs) 298 299 def get_redirect_url(self, *args, **kwargs): 300 return reverse_tournament('privateurls-person-index', self.tournament, kwargs={'url_key': kwargs['url_key']}) 301 [end of tabbycat/checkins/views.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/tabbycat/checkins/views.py b/tabbycat/checkins/views.py --- a/tabbycat/checkins/views.py +++ b/tabbycat/checkins/views.py @@ -51,6 +51,7 @@ if self.scan_view: kwargs["scan_url"] = self.tournament.slug + '/checkins/' kwargs["for_admin"] = self.for_admin + kwargs["team_size"] = self.tournament.pref('substantive_speakers') return super().get_context_data(**kwargs)
{"golden_diff": "diff --git a/tabbycat/checkins/views.py b/tabbycat/checkins/views.py\n--- a/tabbycat/checkins/views.py\n+++ b/tabbycat/checkins/views.py\n@@ -51,6 +51,7 @@\n if self.scan_view:\n kwargs[\"scan_url\"] = self.tournament.slug + '/checkins/'\n kwargs[\"for_admin\"] = self.for_admin\n+ kwargs[\"team_size\"] = self.tournament.pref('substantive_speakers')\n return super().get_context_data(**kwargs)\n", "issue": "Team partial check-in indicator doesn't work with 2+ speaker-teams\nThe team's partial check-in indicator (where the box on the 'check-ins' page has green and grey stripes) displays when only 1 speaker in a team is checked-in, but then turns grey again when more than 1 speaker is checked-in but the entire team is not checked-in.\n", "before_files": [{"content": "import json\n\nfrom asgiref.sync import async_to_sync\nfrom channels.layers import get_channel_layer\nfrom django.contrib import messages\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.views.generic.base import TemplateView\nfrom django.template.response import TemplateResponse\nfrom django.utils.translation import gettext as _\n\nfrom actionlog.mixins import LogActionMixin\nfrom actionlog.models import ActionLogEntry\nfrom options.utils import use_team_code_names\nfrom participants.models import Person, Speaker\nfrom utils.misc import reverse_tournament\nfrom utils.mixins import AdministratorMixin, AssistantMixin\nfrom utils.views import PostOnlyRedirectView\nfrom tournaments.mixins import PublicTournamentPageMixin, TournamentMixin\n\nfrom .consumers import CheckInEventConsumer\nfrom .models import PersonIdentifier, VenueIdentifier\nfrom .utils import create_identifiers, get_unexpired_checkins\n\n\nclass CheckInPreScanView(TournamentMixin, TemplateView):\n template_name = 'checkin_scan.html'\n page_title = _('Scan Identifiers')\n page_emoji = '\ud83d\udcf7'\n\n def get_context_data(self, **kwargs):\n kwargs[\"scan_url\"] = self.tournament.slug + '/checkins/'\n return super().get_context_data(**kwargs)\n\n\nclass AdminCheckInPreScanView(AdministratorMixin, CheckInPreScanView):\n scan_view = 'admin-checkin-scan'\n\n\nclass AssistantCheckInPreScanView(AssistantMixin, CheckInPreScanView):\n scan_view = 'assistant-checkin-scan'\n\n\nclass BaseCheckInStatusView(TournamentMixin, TemplateView):\n template_name = 'checkin_status.html'\n scan_view = False\n for_admin = True\n\n def get_context_data(self, **kwargs):\n events = get_unexpired_checkins(self.tournament, self.window_preference)\n kwargs[\"events\"] = json.dumps([e.serialize() for e in events])\n if self.scan_view:\n kwargs[\"scan_url\"] = self.tournament.slug + '/checkins/'\n kwargs[\"for_admin\"] = self.for_admin\n return super().get_context_data(**kwargs)\n\n\nclass CheckInPeopleStatusView(BaseCheckInStatusView):\n page_emoji = '\u231a\ufe0f'\n page_title = _(\"People's Check-In Statuses\")\n window_preference = 'checkin_window_people'\n\n def get_context_data(self, **kwargs):\n\n team_codes = use_team_code_names(self.tournament, admin=self.for_admin)\n kwargs[\"team_codes\"] = json.dumps(team_codes)\n\n adjudicators = []\n for adj in self.tournament.relevant_adjudicators.all().select_related('institution', 'checkin_identifier'):\n try:\n code = adj.checkin_identifier.barcode\n except ObjectDoesNotExist:\n code = None\n\n adjudicators.append({\n 'id': adj.id, 'name': adj.name, 'type': 'Adjudicator',\n 'identifier': [code], 'locked': False, 'independent': adj.independent,\n 'institution': adj.institution.serialize if adj.institution else 
None,\n })\n kwargs[\"adjudicators\"] = json.dumps(adjudicators)\n\n speakers = []\n for speaker in Speaker.objects.filter(team__tournament=self.tournament).select_related('team', 'team__institution', 'checkin_identifier'):\n try:\n code = speaker.checkin_identifier.barcode\n except ObjectDoesNotExist:\n code = None\n\n speakers.append({\n 'id': speaker.id, 'name': speaker.name, 'type': 'Speaker',\n 'identifier': [code], 'locked': False,\n 'team': speaker.team.code_name if team_codes else speaker.team.short_name,\n 'institution': speaker.team.institution.serialize if speaker.team.institution else None,\n })\n kwargs[\"speakers\"] = json.dumps(speakers)\n\n return super().get_context_data(**kwargs)\n\n\nclass AdminCheckInPeopleStatusView(AdministratorMixin, CheckInPeopleStatusView):\n scan_view = 'admin-checkin-scan'\n\n\nclass AssistantCheckInPeopleStatusView(AssistantMixin, CheckInPeopleStatusView):\n scan_view = 'assistant-checkin-scan'\n\n\nclass PublicCheckInPeopleStatusView(PublicTournamentPageMixin, CheckInPeopleStatusView):\n for_admin = False\n public_page_preference = 'public_checkins'\n\n\nclass CheckInVenuesStatusView(BaseCheckInStatusView):\n page_emoji = '\ud83d\udc5c'\n page_title = _(\"Venue's Check-In Statuses\")\n window_preference = 'checkin_window_venues'\n\n def get_context_data(self, **kwargs):\n venues = []\n for venue in self.tournament.relevant_venues.select_related('checkin_identifier').prefetch_related('venuecategory_set').all():\n item = venue.serialize()\n item['locked'] = False\n try:\n item['identifier'] = [venue.checkin_identifier.barcode]\n except ObjectDoesNotExist:\n item['identifier'] = [None]\n venues.append(item)\n kwargs[\"venues\"] = json.dumps(venues)\n kwargs[\"team_codes\"] = json.dumps(False)\n\n return super().get_context_data(**kwargs)\n\n\nclass AdminCheckInVenuesStatusView(AdministratorMixin, CheckInVenuesStatusView):\n scan_view = 'admin-checkin-scan'\n\n\nclass AssistantCheckInVenuesStatusView(AssistantMixin, CheckInVenuesStatusView):\n scan_view = 'assistant-checkin-scan'\n\n\nclass SegregatedCheckinsMixin(TournamentMixin):\n\n def t_speakers(self):\n return Speaker.objects.filter(\n team__tournament=self.tournament).values_list(\n 'person_ptr_id', flat=True)\n\n def speakers_with_barcodes(self):\n identifiers = PersonIdentifier.objects.all()\n return identifiers.filter(person_id__in=self.t_speakers())\n\n def t_adjs(self):\n return self.tournament.adjudicator_set.values_list(\n 'person_ptr_id', flat=True)\n\n def adjs_with_barcodes(self):\n identifiers = PersonIdentifier.objects.all()\n return identifiers.filter(person_id__in=self.t_adjs())\n\n\nclass CheckInIdentifiersView(SegregatedCheckinsMixin, TemplateView):\n template_name = 'checkin_ids.html'\n page_title = _('Make Identifiers')\n page_emoji = '\ud83d\udcdb'\n\n def get_context_data(self, **kwargs):\n t = self.tournament\n kwargs[\"check_in_info\"] = {\n \"speakers\": {\n \"title\": _(\"Speakers\"),\n \"total\": self.t_speakers().count(),\n \"in\": self.speakers_with_barcodes().count()\n },\n \"adjudicators\": {\n \"title\": _(\"Adjudicators\"),\n \"total\": self.t_adjs().count(),\n \"in\": self.adjs_with_barcodes().count()\n },\n \"venues\": {\n \"title\": _(\"Venues\"),\n \"total\": t.venue_set.count(),\n \"in\": VenueIdentifier.objects.filter(venue__tournament=t).count(),\n }\n }\n return super().get_context_data(**kwargs)\n\n\nclass AdminCheckInIdentifiersView(AdministratorMixin, CheckInIdentifiersView):\n pass\n\n\nclass AssistantCheckInIdentifiersView(AssistantMixin, 
CheckInIdentifiersView):\n pass\n\n\nclass AdminCheckInGenerateView(AdministratorMixin, LogActionMixin,\n TournamentMixin, PostOnlyRedirectView):\n\n def get_action_log_type(self):\n if self.kwargs[\"kind\"] == \"speakers\":\n return ActionLogEntry.ACTION_TYPE_CHECKIN_SPEAK_GENERATE\n elif self.kwargs[\"kind\"] == \"adjudicators\":\n return ActionLogEntry.ACTION_TYPE_CHECKIN_ADJ_GENERATE\n elif self.kwargs[\"kind\"] == \"venues\":\n return ActionLogEntry.ACTION_TYPE_CHECKIN_VENUES_GENERATE\n\n # Providing tournament_slug_url_kwarg isn't working for some reason; so use:\n def get_redirect_url(self, *args, **kwargs):\n return reverse_tournament('admin-checkin-identifiers', self.tournament)\n\n def post(self, request, *args, **kwargs):\n t = self.tournament\n\n if self.kwargs[\"kind\"] == \"speakers\":\n create_identifiers(PersonIdentifier, Speaker.objects.filter(team__tournament=t))\n elif self.kwargs[\"kind\"] == \"adjudicators\":\n create_identifiers(PersonIdentifier, t.adjudicator_set.all())\n elif self.kwargs[\"kind\"] == \"venues\":\n create_identifiers(VenueIdentifier, t.venue_set.all())\n\n messages.success(request, _(\"Generated identifiers for %s\" % self.kwargs[\"kind\"]))\n self.log_action() # Need to call explicitly\n return super().post(request, *args, **kwargs)\n\n\nclass CheckInPrintablesView(SegregatedCheckinsMixin, TemplateView):\n template_name = 'checkin_printables.html'\n page_title = _('Identifiers')\n page_emoji = '\ud83d\udcdb'\n\n def get_context_data(self, **kwargs):\n if self.kwargs[\"kind\"] == \"speakers\":\n kwargs[\"identifiers\"] = self.speakers_with_barcodes().order_by('person__name')\n elif self.kwargs[\"kind\"] == \"adjudicators\":\n kwargs[\"identifiers\"] = self.adjs_with_barcodes().order_by('person__name')\n elif self.kwargs[\"kind\"] == \"venues\":\n venues = self.tournament.relevant_venues\n kwargs[\"identifiers\"] = VenueIdentifier.objects.filter(venue__in=venues)\n\n return super().get_context_data(**kwargs)\n\n\nclass AdminCheckInPrintablesView(AdministratorMixin, CheckInPrintablesView):\n pass\n\n\nclass AssistantCheckInPrintablesView(AssistantMixin, CheckInPrintablesView):\n pass\n\n\nclass ParticipantCheckinView(PublicTournamentPageMixin, PostOnlyRedirectView):\n\n public_page_preference = 'public_checkins_submit'\n\n def post(self, request, *args, **kwargs):\n t = self.tournament\n\n action = request.POST['action']\n\n try:\n person = Person.objects.get(url_key=kwargs['url_key'])\n identifier = PersonIdentifier.objects.get(person=person)\n except ObjectDoesNotExist:\n messages.error(self.request, _(\"Could not check you in as you do not have an identifying code \u2014 your tab director may need to make you an identifier.\"))\n return super().post(request, *args, **kwargs)\n\n checkins = get_unexpired_checkins(t, 'checkin_window_people')\n existing_checkin = checkins.filter(identifier=identifier)\n if action == 'revoke':\n if existing_checkin.exists():\n messages.success(self.request, _(\"You have revoked your check-in.\"))\n else:\n messages.error(self.request, _(\"Whoops! Looks like your check-in was already revoked.\"))\n elif action == 'checkin':\n if existing_checkin.exists():\n messages.error(self.request, _(\"Whoops! 
Looks like you're already checked in.\"))\n else:\n messages.success(self.request, _(\"You are now checked in.\"))\n else:\n return TemplateResponse(request=self.request, template='400.html', status=400)\n\n group_name = CheckInEventConsumer.group_prefix + \"_\" + t.slug\n\n # Override permissions check - no user but authenticated through URL\n async_to_sync(get_channel_layer().group_send)(\n group_name, {\n 'type': 'broadcast_checkin',\n 'content': {\n 'barcodes': [identifier.barcode],\n 'status': action == 'checkin',\n 'type': 'people',\n 'component_id': None\n }\n }\n )\n\n return super().post(request, *args, **kwargs)\n\n def get_redirect_url(self, *args, **kwargs):\n return reverse_tournament('privateurls-person-index', self.tournament, kwargs={'url_key': kwargs['url_key']})\n", "path": "tabbycat/checkins/views.py"}]}
num_tokens_prompt: 3,947
num_tokens_diff: 115