problem_id (stringlengths 18-22) | source (stringclasses 1 value) | task_type (stringclasses 1 value) | in_source_id (stringlengths 13-58) | prompt (stringlengths 1.1k-25.4k) | golden_diff (stringlengths 145-5.13k) | verification_info (stringlengths 582-39.1k) | num_tokens (int64 271-4.1k) | num_tokens_diff (int64 47-1.02k) |
---|---|---|---|---|---|---|---|---|
gh_patches_debug_39906 | rasdani/github-patches | git_diff | PaddlePaddle__PaddleSpeech-51 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Correct the error rate's computation for multiple sentences
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `tools/tune.py`
Content:
```
1 """Beam search parameters tuning for DeepSpeech2 model."""
2 from __future__ import absolute_import
3 from __future__ import division
4 from __future__ import print_function
5
6 import sys
7 import os
8 import numpy as np
9 import argparse
10 import functools
11 import gzip
12 import logging
13 import paddle.v2 as paddle
14 import _init_paths
15 from data_utils.data import DataGenerator
16 from decoders.swig_wrapper import Scorer
17 from decoders.swig_wrapper import ctc_beam_search_decoder_batch
18 from model_utils.model import deep_speech_v2_network
19 from utils.error_rate import wer, cer
20 from utils.utility import add_arguments, print_arguments
21
22 parser = argparse.ArgumentParser(description=__doc__)
23 add_arg = functools.partial(add_arguments, argparser=parser)
24 # yapf: disable
25 add_arg('num_batches', int, -1, "# of batches tuning on. "
26 "Default -1, on whole dev set.")
27 add_arg('batch_size', int, 256, "# of samples per batch.")
28 add_arg('trainer_count', int, 8, "# of Trainers (CPUs or GPUs).")
29 add_arg('beam_size', int, 500, "Beam search width.")
30 add_arg('num_proc_bsearch', int, 8, "# of CPUs for beam search.")
31 add_arg('num_proc_data', int, 8, "# of CPUs for data preprocessing.")
32 add_arg('num_conv_layers', int, 2, "# of convolution layers.")
33 add_arg('num_rnn_layers', int, 3, "# of recurrent layers.")
34 add_arg('rnn_layer_size', int, 2048, "# of recurrent cells per layer.")
35 add_arg('num_alphas', int, 45, "# of alpha candidates for tuning.")
36 add_arg('num_betas', int, 8, "# of beta candidates for tuning.")
37 add_arg('alpha_from', float, 1.0, "Where alpha starts tuning from.")
38 add_arg('alpha_to', float, 3.2, "Where alpha ends tuning with.")
39 add_arg('beta_from', float, 0.1, "Where beta starts tuning from.")
40 add_arg('beta_to', float, 0.45, "Where beta ends tuning with.")
41 add_arg('cutoff_prob', float, 1.0, "Cutoff probability for pruning.")
42 add_arg('cutoff_top_n', int, 40, "Cutoff number for pruning.")
43 add_arg('use_gru', bool, False, "Use GRUs instead of simple RNNs.")
44 add_arg('use_gpu', bool, True, "Use GPU or not.")
45 add_arg('share_rnn_weights',bool, True, "Share input-hidden weights across "
46 "bi-directional RNNs. Not for GRU.")
47 add_arg('tune_manifest', str,
48 'data/librispeech/manifest.dev-clean',
49 "Filepath of manifest to tune.")
50 add_arg('mean_std_path', str,
51 'data/librispeech/mean_std.npz',
52 "Filepath of normalizer's mean & std.")
53 add_arg('vocab_path', str,
54 'data/librispeech/vocab.txt',
55 "Filepath of vocabulary.")
56 add_arg('lang_model_path', str,
57 'models/lm/common_crawl_00.prune01111.trie.klm',
58 "Filepath for language model.")
59 add_arg('model_path', str,
60 './checkpoints/libri/params.latest.tar.gz',
61 "If None, the training starts from scratch, "
62 "otherwise, it resumes from the pre-trained model.")
63 add_arg('error_rate_type', str,
64 'wer',
65 "Error rate type for evaluation.",
66 choices=['wer', 'cer'])
67 add_arg('specgram_type', str,
68 'linear',
69 "Audio feature type. Options: linear, mfcc.",
70 choices=['linear', 'mfcc'])
71 # yapf: disable
72 args = parser.parse_args()
73
74
75 logging.basicConfig(
76 format='[%(levelname)s %(asctime)s %(filename)s:%(lineno)d] %(message)s')
77
78 def tune():
79 """Tune parameters alpha and beta incrementally."""
80 if not args.num_alphas >= 0:
81 raise ValueError("num_alphas must be non-negative!")
82 if not args.num_betas >= 0:
83 raise ValueError("num_betas must be non-negative!")
84
85 data_generator = DataGenerator(
86 vocab_filepath=args.vocab_path,
87 mean_std_filepath=args.mean_std_path,
88 augmentation_config='{}',
89 specgram_type=args.specgram_type,
90 num_threads=args.num_proc_data,
91 keep_transcription_text=True,
92 num_conv_layers=args.num_conv_layers)
93
94 audio_data = paddle.layer.data(
95 name="audio_spectrogram",
96 type=paddle.data_type.dense_array(161 * 161))
97 text_data = paddle.layer.data(
98 name="transcript_text",
99 type=paddle.data_type.integer_value_sequence(data_generator.vocab_size))
100 seq_offset_data = paddle.layer.data(
101 name='sequence_offset',
102 type=paddle.data_type.integer_value_sequence(1))
103 seq_len_data = paddle.layer.data(
104 name='sequence_length',
105 type=paddle.data_type.integer_value_sequence(1))
106 index_range_datas = []
107 for i in xrange(args.num_rnn_layers):
108 index_range_datas.append(
109 paddle.layer.data(
110 name='conv%d_index_range' % i,
111 type=paddle.data_type.dense_vector(6)))
112
113 output_probs, _ = deep_speech_v2_network(
114 audio_data=audio_data,
115 text_data=text_data,
116 seq_offset_data=seq_offset_data,
117 seq_len_data=seq_len_data,
118 index_range_datas=index_range_datas,
119 dict_size=data_generator.vocab_size,
120 num_conv_layers=args.num_conv_layers,
121 num_rnn_layers=args.num_rnn_layers,
122 rnn_size=args.rnn_layer_size,
123 use_gru=args.use_gru,
124 share_rnn_weights=args.share_rnn_weights)
125
126 batch_reader = data_generator.batch_reader_creator(
127 manifest_path=args.tune_manifest,
128 batch_size=args.batch_size,
129 sortagrad=False,
130 shuffle_method=None)
131
132 # load parameters
133 if not os.path.isfile(args.model_path):
134 raise IOError("Invaid model path: %s" % args.model_path)
135 parameters = paddle.parameters.Parameters.from_tar(
136 gzip.open(args.model_path))
137
138 inferer = paddle.inference.Inference(
139 output_layer=output_probs, parameters=parameters)
140 # decoders only accept string encoded in utf-8
141 vocab_list = [chars.encode("utf-8") for chars in data_generator.vocab_list]
142
143 # init logger
144 logger = logging.getLogger("")
145 logger.setLevel(level=logging.INFO)
146 # init external scorer
147 logger.info("begin to initialize the external scorer for tuning")
148 if not os.path.isfile(args.lang_model_path):
149 raise IOError("Invaid language model path: %s" % args.lang_model_path)
150 ext_scorer = Scorer(
151 alpha=args.alpha_from,
152 beta=args.beta_from,
153 model_path=args.lang_model_path,
154 vocabulary=vocab_list)
155 logger.info("language model: "
156 "is_character_based = %d," % ext_scorer.is_character_based() +
157 " max_order = %d," % ext_scorer.get_max_order() +
158 " dict_size = %d" % ext_scorer.get_dict_size())
159 logger.info("end initializing scorer. Start tuning ...")
160
161 error_rate_func = cer if args.error_rate_type == 'cer' else wer
162 # create grid for search
163 cand_alphas = np.linspace(args.alpha_from, args.alpha_to, args.num_alphas)
164 cand_betas = np.linspace(args.beta_from, args.beta_to, args.num_betas)
165 params_grid = [(alpha, beta) for alpha in cand_alphas
166 for beta in cand_betas]
167
168 err_sum = [0.0 for i in xrange(len(params_grid))]
169 err_ave = [0.0 for i in xrange(len(params_grid))]
170 num_ins, cur_batch = 0, 0
171 ## incremental tuning parameters over multiple batches
172 for infer_data in batch_reader():
173 if (args.num_batches >= 0) and (cur_batch >= args.num_batches):
174 break
175 infer_results = inferer.infer(input=infer_data,
176 feeding=data_generator.feeding)
177 start_pos = [0] * (len(infer_data) + 1)
178 for i in xrange(len(infer_data)):
179 start_pos[i + 1] = start_pos[i] + infer_data[i][3][0]
180 probs_split = [
181 infer_results[start_pos[i]:start_pos[i + 1]]
182 for i in xrange(0, len(infer_data))
183 ]
184
185 target_transcripts = [ data[1] for data in infer_data ]
186
187 num_ins += len(target_transcripts)
188 # grid search
189 for index, (alpha, beta) in enumerate(params_grid):
190 # reset alpha & beta
191 ext_scorer.reset_params(alpha, beta)
192 beam_search_results = ctc_beam_search_decoder_batch(
193 probs_split=probs_split,
194 vocabulary=vocab_list,
195 beam_size=args.beam_size,
196 num_processes=args.num_proc_bsearch,
197 cutoff_prob=args.cutoff_prob,
198 cutoff_top_n=args.cutoff_top_n,
199 ext_scoring_func=ext_scorer, )
200
201 result_transcripts = [res[0][1] for res in beam_search_results]
202 for target, result in zip(target_transcripts, result_transcripts):
203 err_sum[index] += error_rate_func(target, result)
204 err_ave[index] = err_sum[index] / num_ins
205 if index % 2 == 0:
206 sys.stdout.write('.')
207 sys.stdout.flush()
208
209 # output on-line tuning result at the end of current batch
210 err_ave_min = min(err_ave)
211 min_index = err_ave.index(err_ave_min)
212 print("\nBatch %d [%d/?], current opt (alpha, beta) = (%s, %s), "
213 " min [%s] = %f" %(cur_batch, num_ins,
214 "%.3f" % params_grid[min_index][0],
215 "%.3f" % params_grid[min_index][1],
216 args.error_rate_type, err_ave_min))
217 cur_batch += 1
218
219 # output WER/CER at every (alpha, beta)
220 print("\nFinal %s:\n" % args.error_rate_type)
221 for index in xrange(len(params_grid)):
222 print("(alpha, beta) = (%s, %s), [%s] = %f"
223 % ("%.3f" % params_grid[index][0], "%.3f" % params_grid[index][1],
224 args.error_rate_type, err_ave[index]))
225
226 err_ave_min = min(err_ave)
227 min_index = err_ave.index(err_ave_min)
228 print("\nFinish tuning on %d batches, final opt (alpha, beta) = (%s, %s)"
229 % (args.num_batches, "%.3f" % params_grid[min_index][0],
230 "%.3f" % params_grid[min_index][1]))
231
232 logger.info("finish tuning")
233
234
235 def main():
236 print_arguments(args)
237 paddle.init(use_gpu=args.use_gpu,
238 rnn_use_batch=True,
239 trainer_count=args.trainer_count)
240 tune()
241
242
243 if __name__ == '__main__':
244 main()
245
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/tools/tune.py b/tools/tune.py
--- a/tools/tune.py
+++ b/tools/tune.py
@@ -16,7 +16,7 @@
from decoders.swig_wrapper import Scorer
from decoders.swig_wrapper import ctc_beam_search_decoder_batch
from model_utils.model import deep_speech_v2_network
-from utils.error_rate import wer, cer
+from utils.error_rate import char_errors, word_errors
from utils.utility import add_arguments, print_arguments
parser = argparse.ArgumentParser(description=__doc__)
@@ -158,7 +158,7 @@
" dict_size = %d" % ext_scorer.get_dict_size())
logger.info("end initializing scorer. Start tuning ...")
- error_rate_func = cer if args.error_rate_type == 'cer' else wer
+ errors_func = char_errors if args.error_rate_type == 'cer' else word_errors
# create grid for search
cand_alphas = np.linspace(args.alpha_from, args.alpha_to, args.num_alphas)
cand_betas = np.linspace(args.beta_from, args.beta_to, args.num_betas)
@@ -167,7 +167,7 @@
err_sum = [0.0 for i in xrange(len(params_grid))]
err_ave = [0.0 for i in xrange(len(params_grid))]
- num_ins, cur_batch = 0, 0
+ num_ins, len_refs, cur_batch = 0, 0, 0
## incremental tuning parameters over multiple batches
for infer_data in batch_reader():
if (args.num_batches >= 0) and (cur_batch >= args.num_batches):
@@ -200,8 +200,14 @@
result_transcripts = [res[0][1] for res in beam_search_results]
for target, result in zip(target_transcripts, result_transcripts):
- err_sum[index] += error_rate_func(target, result)
- err_ave[index] = err_sum[index] / num_ins
+ errors, len_ref = errors_func(target, result)
+ err_sum[index] += errors
+ # accumulate the length of references of every batch
+ # in the first iteration
+ if args.alpha_from == alpha and args.beta_from == beta:
+ len_refs += len_ref
+
+ err_ave[index] = err_sum[index] / len_refs
if index % 2 == 0:
sys.stdout.write('.')
sys.stdout.flush()
@@ -226,7 +232,7 @@
err_ave_min = min(err_ave)
min_index = err_ave.index(err_ave_min)
print("\nFinish tuning on %d batches, final opt (alpha, beta) = (%s, %s)"
- % (args.num_batches, "%.3f" % params_grid[min_index][0],
+ % (cur_batch, "%.3f" % params_grid[min_index][0],
"%.3f" % params_grid[min_index][1]))
logger.info("finish tuning")
| {"golden_diff": "diff --git a/tools/tune.py b/tools/tune.py\n--- a/tools/tune.py\n+++ b/tools/tune.py\n@@ -16,7 +16,7 @@\n from decoders.swig_wrapper import Scorer\n from decoders.swig_wrapper import ctc_beam_search_decoder_batch\n from model_utils.model import deep_speech_v2_network\n-from utils.error_rate import wer, cer\n+from utils.error_rate import char_errors, word_errors\n from utils.utility import add_arguments, print_arguments\n \n parser = argparse.ArgumentParser(description=__doc__)\n@@ -158,7 +158,7 @@\n \" dict_size = %d\" % ext_scorer.get_dict_size())\n logger.info(\"end initializing scorer. Start tuning ...\")\n \n- error_rate_func = cer if args.error_rate_type == 'cer' else wer\n+ errors_func = char_errors if args.error_rate_type == 'cer' else word_errors\n # create grid for search\n cand_alphas = np.linspace(args.alpha_from, args.alpha_to, args.num_alphas)\n cand_betas = np.linspace(args.beta_from, args.beta_to, args.num_betas)\n@@ -167,7 +167,7 @@\n \n err_sum = [0.0 for i in xrange(len(params_grid))]\n err_ave = [0.0 for i in xrange(len(params_grid))]\n- num_ins, cur_batch = 0, 0\n+ num_ins, len_refs, cur_batch = 0, 0, 0\n ## incremental tuning parameters over multiple batches\n for infer_data in batch_reader():\n if (args.num_batches >= 0) and (cur_batch >= args.num_batches):\n@@ -200,8 +200,14 @@\n \n result_transcripts = [res[0][1] for res in beam_search_results]\n for target, result in zip(target_transcripts, result_transcripts):\n- err_sum[index] += error_rate_func(target, result)\n- err_ave[index] = err_sum[index] / num_ins\n+ errors, len_ref = errors_func(target, result)\n+ err_sum[index] += errors\n+ # accumulate the length of references of every batch\n+ # in the first iteration\n+ if args.alpha_from == alpha and args.beta_from == beta:\n+ len_refs += len_ref\n+\n+ err_ave[index] = err_sum[index] / len_refs\n if index % 2 == 0:\n sys.stdout.write('.')\n sys.stdout.flush()\n@@ -226,7 +232,7 @@\n err_ave_min = min(err_ave)\n min_index = err_ave.index(err_ave_min)\n print(\"\\nFinish tuning on %d batches, final opt (alpha, beta) = (%s, %s)\"\n- % (args.num_batches, \"%.3f\" % params_grid[min_index][0],\n+ % (cur_batch, \"%.3f\" % params_grid[min_index][0],\n \"%.3f\" % params_grid[min_index][1]))\n \n logger.info(\"finish tuning\")\n", "issue": "Correct the error rate's computation for multiple sentences\n\n", "before_files": [{"content": "\"\"\"Beam search parameters tuning for DeepSpeech2 model.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport sys\nimport os\nimport numpy as np\nimport argparse\nimport functools\nimport gzip\nimport logging\nimport paddle.v2 as paddle\nimport _init_paths\nfrom data_utils.data import DataGenerator\nfrom decoders.swig_wrapper import Scorer\nfrom decoders.swig_wrapper import ctc_beam_search_decoder_batch\nfrom model_utils.model import deep_speech_v2_network\nfrom utils.error_rate import wer, cer\nfrom utils.utility import add_arguments, print_arguments\n\nparser = argparse.ArgumentParser(description=__doc__)\nadd_arg = functools.partial(add_arguments, argparser=parser)\n# yapf: disable\nadd_arg('num_batches', int, -1, \"# of batches tuning on. 
\"\n \"Default -1, on whole dev set.\")\nadd_arg('batch_size', int, 256, \"# of samples per batch.\")\nadd_arg('trainer_count', int, 8, \"# of Trainers (CPUs or GPUs).\")\nadd_arg('beam_size', int, 500, \"Beam search width.\")\nadd_arg('num_proc_bsearch', int, 8, \"# of CPUs for beam search.\")\nadd_arg('num_proc_data', int, 8, \"# of CPUs for data preprocessing.\")\nadd_arg('num_conv_layers', int, 2, \"# of convolution layers.\")\nadd_arg('num_rnn_layers', int, 3, \"# of recurrent layers.\")\nadd_arg('rnn_layer_size', int, 2048, \"# of recurrent cells per layer.\")\nadd_arg('num_alphas', int, 45, \"# of alpha candidates for tuning.\")\nadd_arg('num_betas', int, 8, \"# of beta candidates for tuning.\")\nadd_arg('alpha_from', float, 1.0, \"Where alpha starts tuning from.\")\nadd_arg('alpha_to', float, 3.2, \"Where alpha ends tuning with.\")\nadd_arg('beta_from', float, 0.1, \"Where beta starts tuning from.\")\nadd_arg('beta_to', float, 0.45, \"Where beta ends tuning with.\")\nadd_arg('cutoff_prob', float, 1.0, \"Cutoff probability for pruning.\")\nadd_arg('cutoff_top_n', int, 40, \"Cutoff number for pruning.\")\nadd_arg('use_gru', bool, False, \"Use GRUs instead of simple RNNs.\")\nadd_arg('use_gpu', bool, True, \"Use GPU or not.\")\nadd_arg('share_rnn_weights',bool, True, \"Share input-hidden weights across \"\n \"bi-directional RNNs. Not for GRU.\")\nadd_arg('tune_manifest', str,\n 'data/librispeech/manifest.dev-clean',\n \"Filepath of manifest to tune.\")\nadd_arg('mean_std_path', str,\n 'data/librispeech/mean_std.npz',\n \"Filepath of normalizer's mean & std.\")\nadd_arg('vocab_path', str,\n 'data/librispeech/vocab.txt',\n \"Filepath of vocabulary.\")\nadd_arg('lang_model_path', str,\n 'models/lm/common_crawl_00.prune01111.trie.klm',\n \"Filepath for language model.\")\nadd_arg('model_path', str,\n './checkpoints/libri/params.latest.tar.gz',\n \"If None, the training starts from scratch, \"\n \"otherwise, it resumes from the pre-trained model.\")\nadd_arg('error_rate_type', str,\n 'wer',\n \"Error rate type for evaluation.\",\n choices=['wer', 'cer'])\nadd_arg('specgram_type', str,\n 'linear',\n \"Audio feature type. 
Options: linear, mfcc.\",\n choices=['linear', 'mfcc'])\n# yapf: disable\nargs = parser.parse_args()\n\n\nlogging.basicConfig(\n format='[%(levelname)s %(asctime)s %(filename)s:%(lineno)d] %(message)s')\n\ndef tune():\n \"\"\"Tune parameters alpha and beta incrementally.\"\"\"\n if not args.num_alphas >= 0:\n raise ValueError(\"num_alphas must be non-negative!\")\n if not args.num_betas >= 0:\n raise ValueError(\"num_betas must be non-negative!\")\n\n data_generator = DataGenerator(\n vocab_filepath=args.vocab_path,\n mean_std_filepath=args.mean_std_path,\n augmentation_config='{}',\n specgram_type=args.specgram_type,\n num_threads=args.num_proc_data,\n keep_transcription_text=True,\n num_conv_layers=args.num_conv_layers)\n\n audio_data = paddle.layer.data(\n name=\"audio_spectrogram\",\n type=paddle.data_type.dense_array(161 * 161))\n text_data = paddle.layer.data(\n name=\"transcript_text\",\n type=paddle.data_type.integer_value_sequence(data_generator.vocab_size))\n seq_offset_data = paddle.layer.data(\n name='sequence_offset',\n type=paddle.data_type.integer_value_sequence(1))\n seq_len_data = paddle.layer.data(\n name='sequence_length',\n type=paddle.data_type.integer_value_sequence(1))\n index_range_datas = []\n for i in xrange(args.num_rnn_layers):\n index_range_datas.append(\n paddle.layer.data(\n name='conv%d_index_range' % i,\n type=paddle.data_type.dense_vector(6)))\n\n output_probs, _ = deep_speech_v2_network(\n audio_data=audio_data,\n text_data=text_data,\n seq_offset_data=seq_offset_data,\n seq_len_data=seq_len_data,\n index_range_datas=index_range_datas,\n dict_size=data_generator.vocab_size,\n num_conv_layers=args.num_conv_layers,\n num_rnn_layers=args.num_rnn_layers,\n rnn_size=args.rnn_layer_size,\n use_gru=args.use_gru,\n share_rnn_weights=args.share_rnn_weights)\n\n batch_reader = data_generator.batch_reader_creator(\n manifest_path=args.tune_manifest,\n batch_size=args.batch_size,\n sortagrad=False,\n shuffle_method=None)\n\n # load parameters\n if not os.path.isfile(args.model_path):\n raise IOError(\"Invaid model path: %s\" % args.model_path)\n parameters = paddle.parameters.Parameters.from_tar(\n gzip.open(args.model_path))\n\n inferer = paddle.inference.Inference(\n output_layer=output_probs, parameters=parameters)\n # decoders only accept string encoded in utf-8\n vocab_list = [chars.encode(\"utf-8\") for chars in data_generator.vocab_list]\n\n # init logger\n logger = logging.getLogger(\"\")\n logger.setLevel(level=logging.INFO)\n # init external scorer\n logger.info(\"begin to initialize the external scorer for tuning\")\n if not os.path.isfile(args.lang_model_path):\n raise IOError(\"Invaid language model path: %s\" % args.lang_model_path)\n ext_scorer = Scorer(\n alpha=args.alpha_from,\n beta=args.beta_from,\n model_path=args.lang_model_path,\n vocabulary=vocab_list)\n logger.info(\"language model: \"\n \"is_character_based = %d,\" % ext_scorer.is_character_based() +\n \" max_order = %d,\" % ext_scorer.get_max_order() +\n \" dict_size = %d\" % ext_scorer.get_dict_size())\n logger.info(\"end initializing scorer. 
Start tuning ...\")\n\n error_rate_func = cer if args.error_rate_type == 'cer' else wer\n # create grid for search\n cand_alphas = np.linspace(args.alpha_from, args.alpha_to, args.num_alphas)\n cand_betas = np.linspace(args.beta_from, args.beta_to, args.num_betas)\n params_grid = [(alpha, beta) for alpha in cand_alphas\n for beta in cand_betas]\n\n err_sum = [0.0 for i in xrange(len(params_grid))]\n err_ave = [0.0 for i in xrange(len(params_grid))]\n num_ins, cur_batch = 0, 0\n ## incremental tuning parameters over multiple batches\n for infer_data in batch_reader():\n if (args.num_batches >= 0) and (cur_batch >= args.num_batches):\n break\n infer_results = inferer.infer(input=infer_data,\n feeding=data_generator.feeding)\n start_pos = [0] * (len(infer_data) + 1)\n for i in xrange(len(infer_data)):\n start_pos[i + 1] = start_pos[i] + infer_data[i][3][0]\n probs_split = [\n infer_results[start_pos[i]:start_pos[i + 1]]\n for i in xrange(0, len(infer_data))\n ]\n\n target_transcripts = [ data[1] for data in infer_data ]\n\n num_ins += len(target_transcripts)\n # grid search\n for index, (alpha, beta) in enumerate(params_grid):\n # reset alpha & beta\n ext_scorer.reset_params(alpha, beta)\n beam_search_results = ctc_beam_search_decoder_batch(\n probs_split=probs_split,\n vocabulary=vocab_list,\n beam_size=args.beam_size,\n num_processes=args.num_proc_bsearch,\n cutoff_prob=args.cutoff_prob,\n cutoff_top_n=args.cutoff_top_n,\n ext_scoring_func=ext_scorer, )\n\n result_transcripts = [res[0][1] for res in beam_search_results]\n for target, result in zip(target_transcripts, result_transcripts):\n err_sum[index] += error_rate_func(target, result)\n err_ave[index] = err_sum[index] / num_ins\n if index % 2 == 0:\n sys.stdout.write('.')\n sys.stdout.flush()\n\n # output on-line tuning result at the end of current batch\n err_ave_min = min(err_ave)\n min_index = err_ave.index(err_ave_min)\n print(\"\\nBatch %d [%d/?], current opt (alpha, beta) = (%s, %s), \"\n \" min [%s] = %f\" %(cur_batch, num_ins,\n \"%.3f\" % params_grid[min_index][0],\n \"%.3f\" % params_grid[min_index][1],\n args.error_rate_type, err_ave_min))\n cur_batch += 1\n\n # output WER/CER at every (alpha, beta)\n print(\"\\nFinal %s:\\n\" % args.error_rate_type)\n for index in xrange(len(params_grid)):\n print(\"(alpha, beta) = (%s, %s), [%s] = %f\"\n % (\"%.3f\" % params_grid[index][0], \"%.3f\" % params_grid[index][1],\n args.error_rate_type, err_ave[index]))\n\n err_ave_min = min(err_ave)\n min_index = err_ave.index(err_ave_min)\n print(\"\\nFinish tuning on %d batches, final opt (alpha, beta) = (%s, %s)\"\n % (args.num_batches, \"%.3f\" % params_grid[min_index][0],\n \"%.3f\" % params_grid[min_index][1]))\n\n logger.info(\"finish tuning\")\n\n\ndef main():\n print_arguments(args)\n paddle.init(use_gpu=args.use_gpu,\n rnn_use_batch=True,\n trainer_count=args.trainer_count)\n tune()\n\n\nif __name__ == '__main__':\n main()\n", "path": "tools/tune.py"}], "after_files": [{"content": "\"\"\"Beam search parameters tuning for DeepSpeech2 model.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport sys\nimport os\nimport numpy as np\nimport argparse\nimport functools\nimport gzip\nimport logging\nimport paddle.v2 as paddle\nimport _init_paths\nfrom data_utils.data import DataGenerator\nfrom decoders.swig_wrapper import Scorer\nfrom decoders.swig_wrapper import ctc_beam_search_decoder_batch\nfrom model_utils.model import deep_speech_v2_network\nfrom utils.error_rate 
import char_errors, word_errors\nfrom utils.utility import add_arguments, print_arguments\n\nparser = argparse.ArgumentParser(description=__doc__)\nadd_arg = functools.partial(add_arguments, argparser=parser)\n# yapf: disable\nadd_arg('num_batches', int, -1, \"# of batches tuning on. \"\n \"Default -1, on whole dev set.\")\nadd_arg('batch_size', int, 256, \"# of samples per batch.\")\nadd_arg('trainer_count', int, 8, \"# of Trainers (CPUs or GPUs).\")\nadd_arg('beam_size', int, 500, \"Beam search width.\")\nadd_arg('num_proc_bsearch', int, 8, \"# of CPUs for beam search.\")\nadd_arg('num_proc_data', int, 8, \"# of CPUs for data preprocessing.\")\nadd_arg('num_conv_layers', int, 2, \"# of convolution layers.\")\nadd_arg('num_rnn_layers', int, 3, \"# of recurrent layers.\")\nadd_arg('rnn_layer_size', int, 2048, \"# of recurrent cells per layer.\")\nadd_arg('num_alphas', int, 45, \"# of alpha candidates for tuning.\")\nadd_arg('num_betas', int, 8, \"# of beta candidates for tuning.\")\nadd_arg('alpha_from', float, 1.0, \"Where alpha starts tuning from.\")\nadd_arg('alpha_to', float, 3.2, \"Where alpha ends tuning with.\")\nadd_arg('beta_from', float, 0.1, \"Where beta starts tuning from.\")\nadd_arg('beta_to', float, 0.45, \"Where beta ends tuning with.\")\nadd_arg('cutoff_prob', float, 1.0, \"Cutoff probability for pruning.\")\nadd_arg('cutoff_top_n', int, 40, \"Cutoff number for pruning.\")\nadd_arg('use_gru', bool, False, \"Use GRUs instead of simple RNNs.\")\nadd_arg('use_gpu', bool, True, \"Use GPU or not.\")\nadd_arg('share_rnn_weights',bool, True, \"Share input-hidden weights across \"\n \"bi-directional RNNs. Not for GRU.\")\nadd_arg('tune_manifest', str,\n 'data/librispeech/manifest.dev-clean',\n \"Filepath of manifest to tune.\")\nadd_arg('mean_std_path', str,\n 'data/librispeech/mean_std.npz',\n \"Filepath of normalizer's mean & std.\")\nadd_arg('vocab_path', str,\n 'data/librispeech/vocab.txt',\n \"Filepath of vocabulary.\")\nadd_arg('lang_model_path', str,\n 'models/lm/common_crawl_00.prune01111.trie.klm',\n \"Filepath for language model.\")\nadd_arg('model_path', str,\n './checkpoints/libri/params.latest.tar.gz',\n \"If None, the training starts from scratch, \"\n \"otherwise, it resumes from the pre-trained model.\")\nadd_arg('error_rate_type', str,\n 'wer',\n \"Error rate type for evaluation.\",\n choices=['wer', 'cer'])\nadd_arg('specgram_type', str,\n 'linear',\n \"Audio feature type. 
Options: linear, mfcc.\",\n choices=['linear', 'mfcc'])\n# yapf: disable\nargs = parser.parse_args()\n\n\nlogging.basicConfig(\n format='[%(levelname)s %(asctime)s %(filename)s:%(lineno)d] %(message)s')\n\ndef tune():\n \"\"\"Tune parameters alpha and beta incrementally.\"\"\"\n if not args.num_alphas >= 0:\n raise ValueError(\"num_alphas must be non-negative!\")\n if not args.num_betas >= 0:\n raise ValueError(\"num_betas must be non-negative!\")\n\n data_generator = DataGenerator(\n vocab_filepath=args.vocab_path,\n mean_std_filepath=args.mean_std_path,\n augmentation_config='{}',\n specgram_type=args.specgram_type,\n num_threads=args.num_proc_data,\n keep_transcription_text=True,\n num_conv_layers=args.num_conv_layers)\n\n audio_data = paddle.layer.data(\n name=\"audio_spectrogram\",\n type=paddle.data_type.dense_array(161 * 161))\n text_data = paddle.layer.data(\n name=\"transcript_text\",\n type=paddle.data_type.integer_value_sequence(data_generator.vocab_size))\n seq_offset_data = paddle.layer.data(\n name='sequence_offset',\n type=paddle.data_type.integer_value_sequence(1))\n seq_len_data = paddle.layer.data(\n name='sequence_length',\n type=paddle.data_type.integer_value_sequence(1))\n index_range_datas = []\n for i in xrange(args.num_rnn_layers):\n index_range_datas.append(\n paddle.layer.data(\n name='conv%d_index_range' % i,\n type=paddle.data_type.dense_vector(6)))\n\n output_probs, _ = deep_speech_v2_network(\n audio_data=audio_data,\n text_data=text_data,\n seq_offset_data=seq_offset_data,\n seq_len_data=seq_len_data,\n index_range_datas=index_range_datas,\n dict_size=data_generator.vocab_size,\n num_conv_layers=args.num_conv_layers,\n num_rnn_layers=args.num_rnn_layers,\n rnn_size=args.rnn_layer_size,\n use_gru=args.use_gru,\n share_rnn_weights=args.share_rnn_weights)\n\n batch_reader = data_generator.batch_reader_creator(\n manifest_path=args.tune_manifest,\n batch_size=args.batch_size,\n sortagrad=False,\n shuffle_method=None)\n\n # load parameters\n if not os.path.isfile(args.model_path):\n raise IOError(\"Invaid model path: %s\" % args.model_path)\n parameters = paddle.parameters.Parameters.from_tar(\n gzip.open(args.model_path))\n\n inferer = paddle.inference.Inference(\n output_layer=output_probs, parameters=parameters)\n # decoders only accept string encoded in utf-8\n vocab_list = [chars.encode(\"utf-8\") for chars in data_generator.vocab_list]\n\n # init logger\n logger = logging.getLogger(\"\")\n logger.setLevel(level=logging.INFO)\n # init external scorer\n logger.info(\"begin to initialize the external scorer for tuning\")\n if not os.path.isfile(args.lang_model_path):\n raise IOError(\"Invaid language model path: %s\" % args.lang_model_path)\n ext_scorer = Scorer(\n alpha=args.alpha_from,\n beta=args.beta_from,\n model_path=args.lang_model_path,\n vocabulary=vocab_list)\n logger.info(\"language model: \"\n \"is_character_based = %d,\" % ext_scorer.is_character_based() +\n \" max_order = %d,\" % ext_scorer.get_max_order() +\n \" dict_size = %d\" % ext_scorer.get_dict_size())\n logger.info(\"end initializing scorer. 
Start tuning ...\")\n\n errors_func = char_errors if args.error_rate_type == 'cer' else word_errors\n # create grid for search\n cand_alphas = np.linspace(args.alpha_from, args.alpha_to, args.num_alphas)\n cand_betas = np.linspace(args.beta_from, args.beta_to, args.num_betas)\n params_grid = [(alpha, beta) for alpha in cand_alphas\n for beta in cand_betas]\n\n err_sum = [0.0 for i in xrange(len(params_grid))]\n err_ave = [0.0 for i in xrange(len(params_grid))]\n num_ins, len_refs, cur_batch = 0, 0, 0\n ## incremental tuning parameters over multiple batches\n for infer_data in batch_reader():\n if (args.num_batches >= 0) and (cur_batch >= args.num_batches):\n break\n infer_results = inferer.infer(input=infer_data,\n feeding=data_generator.feeding)\n start_pos = [0] * (len(infer_data) + 1)\n for i in xrange(len(infer_data)):\n start_pos[i + 1] = start_pos[i] + infer_data[i][3][0]\n probs_split = [\n infer_results[start_pos[i]:start_pos[i + 1]]\n for i in xrange(0, len(infer_data))\n ]\n\n target_transcripts = [ data[1] for data in infer_data ]\n\n num_ins += len(target_transcripts)\n # grid search\n for index, (alpha, beta) in enumerate(params_grid):\n # reset alpha & beta\n ext_scorer.reset_params(alpha, beta)\n beam_search_results = ctc_beam_search_decoder_batch(\n probs_split=probs_split,\n vocabulary=vocab_list,\n beam_size=args.beam_size,\n num_processes=args.num_proc_bsearch,\n cutoff_prob=args.cutoff_prob,\n cutoff_top_n=args.cutoff_top_n,\n ext_scoring_func=ext_scorer, )\n\n result_transcripts = [res[0][1] for res in beam_search_results]\n for target, result in zip(target_transcripts, result_transcripts):\n errors, len_ref = errors_func(target, result)\n err_sum[index] += errors\n # accumulate the length of references of every batch\n # in the first iteration\n if args.alpha_from == alpha and args.beta_from == beta:\n len_refs += len_ref\n\n err_ave[index] = err_sum[index] / len_refs\n if index % 2 == 0:\n sys.stdout.write('.')\n sys.stdout.flush()\n\n # output on-line tuning result at the end of current batch\n err_ave_min = min(err_ave)\n min_index = err_ave.index(err_ave_min)\n print(\"\\nBatch %d [%d/?], current opt (alpha, beta) = (%s, %s), \"\n \" min [%s] = %f\" %(cur_batch, num_ins,\n \"%.3f\" % params_grid[min_index][0],\n \"%.3f\" % params_grid[min_index][1],\n args.error_rate_type, err_ave_min))\n cur_batch += 1\n\n # output WER/CER at every (alpha, beta)\n print(\"\\nFinal %s:\\n\" % args.error_rate_type)\n for index in xrange(len(params_grid)):\n print(\"(alpha, beta) = (%s, %s), [%s] = %f\"\n % (\"%.3f\" % params_grid[index][0], \"%.3f\" % params_grid[index][1],\n args.error_rate_type, err_ave[index]))\n\n err_ave_min = min(err_ave)\n min_index = err_ave.index(err_ave_min)\n print(\"\\nFinish tuning on %d batches, final opt (alpha, beta) = (%s, %s)\"\n % (cur_batch, \"%.3f\" % params_grid[min_index][0],\n \"%.3f\" % params_grid[min_index][1]))\n\n logger.info(\"finish tuning\")\n\n\ndef main():\n print_arguments(args)\n paddle.init(use_gpu=args.use_gpu,\n rnn_use_batch=True,\n trainer_count=args.trainer_count)\n tune()\n\n\nif __name__ == '__main__':\n main()\n", "path": "tools/tune.py"}]} | 3,386 | 668 |
gh_patches_debug_10806 | rasdani/github-patches | git_diff | Kinto__kinto-850 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Return 400 if a group contains system.Everyone or a group URL
Unless I'm mistaken:
- We don't support groups for anonymous requests
- We don't support recursivity in groups definitions
So we should reject with `400` if such groups definitons are created
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `kinto/views/groups.py`
Content:
```
1 import colander
2
3 from kinto.core import resource, utils
4 from kinto.core.events import ResourceChanged, ACTIONS
5 from pyramid.events import subscriber
6
7
8 class GroupSchema(resource.ResourceSchema):
9 members = colander.SchemaNode(colander.Sequence(),
10 colander.SchemaNode(colander.String()))
11
12
13 @resource.register(name='group',
14 collection_path='/buckets/{{bucket_id}}/groups',
15 record_path='/buckets/{{bucket_id}}/groups/{{id}}')
16 class Group(resource.ShareableResource):
17 mapping = GroupSchema()
18
19 def get_parent_id(self, request):
20 bucket_id = request.matchdict['bucket_id']
21 parent_id = utils.instance_uri(request, 'bucket', id=bucket_id)
22 return parent_id
23
24
25 @subscriber(ResourceChanged,
26 for_resources=('group',),
27 for_actions=(ACTIONS.DELETE,))
28 def on_groups_deleted(event):
29 """Some groups were deleted, remove them from users principals.
30 """
31 permission_backend = event.request.registry.permission
32
33 for change in event.impacted_records:
34 group = change['old']
35 bucket_id = event.payload['bucket_id']
36 group_uri = utils.instance_uri(event.request, 'group',
37 bucket_id=bucket_id,
38 id=group['id'])
39
40 permission_backend.remove_principal(group_uri)
41
42
43 @subscriber(ResourceChanged,
44 for_resources=('group',),
45 for_actions=(ACTIONS.CREATE, ACTIONS.UPDATE))
46 def on_groups_changed(event):
47 """Some groups were changed, update users principals.
48 """
49 permission_backend = event.request.registry.permission
50
51 for change in event.impacted_records:
52 if 'old' in change:
53 existing_record_members = set(change['old'].get('members', []))
54 else:
55 existing_record_members = set()
56
57 group = change['new']
58 group_uri = '/buckets/{bucket_id}/groups/{id}'.format(id=group['id'],
59 **event.payload)
60 new_record_members = set(group.get('members', []))
61 new_members = new_record_members - existing_record_members
62 removed_members = existing_record_members - new_record_members
63
64 for member in new_members:
65 # Add the group to the member principal.
66 permission_backend.add_user_principal(member, group_uri)
67
68 for member in removed_members:
69 # Remove the group from the member principal.
70 permission_backend.remove_user_principal(member, group_uri)
71
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/kinto/views/groups.py b/kinto/views/groups.py
--- a/kinto/views/groups.py
+++ b/kinto/views/groups.py
@@ -5,9 +5,15 @@
from pyramid.events import subscriber
+def validate_member(node, member):
+ if member.startswith('/buckets/') or member == 'system.Everyone':
+ raise colander.Invalid(node, "%r is not a valid user ID." % member)
+
+
class GroupSchema(resource.ResourceSchema):
members = colander.SchemaNode(colander.Sequence(),
- colander.SchemaNode(colander.String()))
+ colander.SchemaNode(colander.String(),
+ validator=validate_member))
@resource.register(name='group',
| {"golden_diff": "diff --git a/kinto/views/groups.py b/kinto/views/groups.py\n--- a/kinto/views/groups.py\n+++ b/kinto/views/groups.py\n@@ -5,9 +5,15 @@\n from pyramid.events import subscriber\n \n \n+def validate_member(node, member):\n+ if member.startswith('/buckets/') or member == 'system.Everyone':\n+ raise colander.Invalid(node, \"%r is not a valid user ID.\" % member)\n+\n+\n class GroupSchema(resource.ResourceSchema):\n members = colander.SchemaNode(colander.Sequence(),\n- colander.SchemaNode(colander.String()))\n+ colander.SchemaNode(colander.String(),\n+ validator=validate_member))\n \n \n @resource.register(name='group',\n", "issue": "Return 400 if a group contains system.Everyone or a group URL\nUnless I'm mistaken:\n- We don't support groups for anonymous requests\n- We don't support recursivity in groups definitions\n\nSo we should reject with `400` if such groups definitons are created\n\n", "before_files": [{"content": "import colander\n\nfrom kinto.core import resource, utils\nfrom kinto.core.events import ResourceChanged, ACTIONS\nfrom pyramid.events import subscriber\n\n\nclass GroupSchema(resource.ResourceSchema):\n members = colander.SchemaNode(colander.Sequence(),\n colander.SchemaNode(colander.String()))\n\n\[email protected](name='group',\n collection_path='/buckets/{{bucket_id}}/groups',\n record_path='/buckets/{{bucket_id}}/groups/{{id}}')\nclass Group(resource.ShareableResource):\n mapping = GroupSchema()\n\n def get_parent_id(self, request):\n bucket_id = request.matchdict['bucket_id']\n parent_id = utils.instance_uri(request, 'bucket', id=bucket_id)\n return parent_id\n\n\n@subscriber(ResourceChanged,\n for_resources=('group',),\n for_actions=(ACTIONS.DELETE,))\ndef on_groups_deleted(event):\n \"\"\"Some groups were deleted, remove them from users principals.\n \"\"\"\n permission_backend = event.request.registry.permission\n\n for change in event.impacted_records:\n group = change['old']\n bucket_id = event.payload['bucket_id']\n group_uri = utils.instance_uri(event.request, 'group',\n bucket_id=bucket_id,\n id=group['id'])\n\n permission_backend.remove_principal(group_uri)\n\n\n@subscriber(ResourceChanged,\n for_resources=('group',),\n for_actions=(ACTIONS.CREATE, ACTIONS.UPDATE))\ndef on_groups_changed(event):\n \"\"\"Some groups were changed, update users principals.\n \"\"\"\n permission_backend = event.request.registry.permission\n\n for change in event.impacted_records:\n if 'old' in change:\n existing_record_members = set(change['old'].get('members', []))\n else:\n existing_record_members = set()\n\n group = change['new']\n group_uri = '/buckets/{bucket_id}/groups/{id}'.format(id=group['id'],\n **event.payload)\n new_record_members = set(group.get('members', []))\n new_members = new_record_members - existing_record_members\n removed_members = existing_record_members - new_record_members\n\n for member in new_members:\n # Add the group to the member principal.\n permission_backend.add_user_principal(member, group_uri)\n\n for member in removed_members:\n # Remove the group from the member principal.\n permission_backend.remove_user_principal(member, group_uri)\n", "path": "kinto/views/groups.py"}], "after_files": [{"content": "import colander\n\nfrom kinto.core import resource, utils\nfrom kinto.core.events import ResourceChanged, ACTIONS\nfrom pyramid.events import subscriber\n\n\ndef validate_member(node, member):\n if member.startswith('/buckets/') or member == 'system.Everyone':\n raise colander.Invalid(node, \"%r is not a valid user ID.\" % 
member)\n\n\nclass GroupSchema(resource.ResourceSchema):\n members = colander.SchemaNode(colander.Sequence(),\n colander.SchemaNode(colander.String(),\n validator=validate_member))\n\n\[email protected](name='group',\n collection_path='/buckets/{{bucket_id}}/groups',\n record_path='/buckets/{{bucket_id}}/groups/{{id}}')\nclass Group(resource.ShareableResource):\n mapping = GroupSchema()\n\n def get_parent_id(self, request):\n bucket_id = request.matchdict['bucket_id']\n parent_id = utils.instance_uri(request, 'bucket', id=bucket_id)\n return parent_id\n\n\n@subscriber(ResourceChanged,\n for_resources=('group',),\n for_actions=(ACTIONS.DELETE,))\ndef on_groups_deleted(event):\n \"\"\"Some groups were deleted, remove them from users principals.\n \"\"\"\n permission_backend = event.request.registry.permission\n\n for change in event.impacted_records:\n group = change['old']\n bucket_id = event.payload['bucket_id']\n group_uri = utils.instance_uri(event.request, 'group',\n bucket_id=bucket_id,\n id=group['id'])\n\n permission_backend.remove_principal(group_uri)\n\n\n@subscriber(ResourceChanged,\n for_resources=('group',),\n for_actions=(ACTIONS.CREATE, ACTIONS.UPDATE))\ndef on_groups_changed(event):\n \"\"\"Some groups were changed, update users principals.\n \"\"\"\n permission_backend = event.request.registry.permission\n\n for change in event.impacted_records:\n if 'old' in change:\n existing_record_members = set(change['old'].get('members', []))\n else:\n existing_record_members = set()\n\n group = change['new']\n group_uri = '/buckets/{bucket_id}/groups/{id}'.format(id=group['id'],\n **event.payload)\n new_record_members = set(group.get('members', []))\n new_members = new_record_members - existing_record_members\n removed_members = existing_record_members - new_record_members\n\n for member in new_members:\n # Add the group to the member principal.\n permission_backend.add_user_principal(member, group_uri)\n\n for member in removed_members:\n # Remove the group from the member principal.\n permission_backend.remove_user_principal(member, group_uri)\n", "path": "kinto/views/groups.py"}]} | 948 | 150 |
gh_patches_debug_16119 | rasdani/github-patches | git_diff | conan-io__conan-center-index-549 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[package] make/4.2.1: not building for Linux Clang 9
The recipe for `make/4.2.1` does not build under Linux Clang 9.
After generating all the index packages for Clang 9, the ones for this library failed to compile. In this case it doesn't matter that much as this is packaging a tool to be used as a build requirement.
Related to #211
### Package and Environment Details (include every applicable attribute)
* Package Name/Version: **make/4.2.1**
* Operating System+version: **Linux Ubuntu 18.04**
* Compiler+version: **Clang 9**
* Conan version: **conan 1.21.0**
* Python version: **Python 3.7.4**
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `recipes/make/all/conanfile.py`
Content:
```
1 from conans import ConanFile, tools, AutoToolsBuildEnvironment
2 import os
3
4
5 class MakeConan(ConanFile):
6 name = "make"
7 description = "GNU Make is a tool which controls the generation of executables and other non-source files of a program from the program's source files"
8 topics = ("conan", "make", "build", "makefile")
9 url = "https://github.com/conan-io/conan-center-index"
10 homepage = "https://www.gnu.org/software/make/"
11 license = "GPL-3.0-or-later"
12 settings = "os_build", "arch_build", "compiler"
13 _source_subfolder = "source_subfolder"
14
15 def source(self):
16 tools.get(**self.conan_data["sources"][self.version])
17 extracted_dir = "make-" + self.version
18 os.rename(extracted_dir, self._source_subfolder)
19
20 def configure(self):
21 del self.settings.compiler.libcxx
22 del self.settings.compiler.cppstd
23
24 def build(self):
25 with tools.chdir(self._source_subfolder):
26 # README.W32
27 if self.settings.os_build == "Windows":
28 if self.settings.compiler == "Visual Studio":
29 command = "build_w32.bat --without-guile"
30 else:
31 command = "build_w32.bat --without-guile gcc"
32 else:
33 env_build = AutoToolsBuildEnvironment(self)
34 env_build.configure()
35 command = "./build.sh"
36 with tools.vcvars(self.settings) if self.settings.compiler == "Visual Studio" else tools.no_op():
37 self.run(command)
38
39 def package(self):
40 self.copy(pattern="COPYING", dst="licenses", src=self._source_subfolder)
41 self.copy(pattern="make", dst="bin", src=self._source_subfolder, keep_path=False)
42 self.copy(pattern="*gnumake.exe", dst="bin", src=self._source_subfolder, keep_path=False)
43
44 def package_info(self):
45 make = "gnumake.exe" if self.settings.os_build == "Windows" else "make"
46 make = os.path.join(self.package_folder, "bin", make)
47 self.output.info('Creating CONAN_MAKE_PROGRAM environment variable: %s' % make)
48 self.env_info.CONAN_MAKE_PROGRAM = make
49
50 def package_id(self):
51 del self.info.settings.compiler
52
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/recipes/make/all/conanfile.py b/recipes/make/all/conanfile.py
--- a/recipes/make/all/conanfile.py
+++ b/recipes/make/all/conanfile.py
@@ -10,6 +10,7 @@
homepage = "https://www.gnu.org/software/make/"
license = "GPL-3.0-or-later"
settings = "os_build", "arch_build", "compiler"
+ exports_sources = ["patches/*"]
_source_subfolder = "source_subfolder"
def source(self):
@@ -22,6 +23,9 @@
del self.settings.compiler.cppstd
def build(self):
+ for patch in self.conan_data["patches"][self.version]:
+ tools.patch(**patch)
+
with tools.chdir(self._source_subfolder):
# README.W32
if self.settings.os_build == "Windows":
| {"golden_diff": "diff --git a/recipes/make/all/conanfile.py b/recipes/make/all/conanfile.py\n--- a/recipes/make/all/conanfile.py\n+++ b/recipes/make/all/conanfile.py\n@@ -10,6 +10,7 @@\n homepage = \"https://www.gnu.org/software/make/\"\n license = \"GPL-3.0-or-later\"\n settings = \"os_build\", \"arch_build\", \"compiler\"\n+ exports_sources = [\"patches/*\"]\n _source_subfolder = \"source_subfolder\"\n \n def source(self):\n@@ -22,6 +23,9 @@\n del self.settings.compiler.cppstd\n \n def build(self):\n+ for patch in self.conan_data[\"patches\"][self.version]:\n+ tools.patch(**patch)\n+\n with tools.chdir(self._source_subfolder):\n # README.W32\n if self.settings.os_build == \"Windows\":\n", "issue": "[package] make/4.2.1: not building for Linux Clang 9\nThe recipe for `make/4.2.1` does not build under Linux Clang 9.\r\n\r\nAfter generating all the index packages for Clang 9, the ones for this library failed to compile. In this case it doesn't matter that much as this is packaging a tool to be used as a build requirement.\r\n\r\nRelated to #211 \r\n\r\n### Package and Environment Details (include every applicable attribute)\r\n * Package Name/Version: **make/4.2.1**\r\n * Operating System+version: **Linux Ubuntu 18.04**\r\n * Compiler+version: **Clang 9**\r\n * Conan version: **conan 1.21.0**\r\n * Python version: **Python 3.7.4**\n", "before_files": [{"content": "from conans import ConanFile, tools, AutoToolsBuildEnvironment\nimport os\n\n\nclass MakeConan(ConanFile):\n name = \"make\"\n description = \"GNU Make is a tool which controls the generation of executables and other non-source files of a program from the program's source files\"\n topics = (\"conan\", \"make\", \"build\", \"makefile\")\n url = \"https://github.com/conan-io/conan-center-index\"\n homepage = \"https://www.gnu.org/software/make/\"\n license = \"GPL-3.0-or-later\"\n settings = \"os_build\", \"arch_build\", \"compiler\"\n _source_subfolder = \"source_subfolder\"\n\n def source(self):\n tools.get(**self.conan_data[\"sources\"][self.version])\n extracted_dir = \"make-\" + self.version\n os.rename(extracted_dir, self._source_subfolder)\n\n def configure(self):\n del self.settings.compiler.libcxx\n del self.settings.compiler.cppstd\n\n def build(self):\n with tools.chdir(self._source_subfolder):\n # README.W32\n if self.settings.os_build == \"Windows\":\n if self.settings.compiler == \"Visual Studio\":\n command = \"build_w32.bat --without-guile\"\n else:\n command = \"build_w32.bat --without-guile gcc\"\n else:\n env_build = AutoToolsBuildEnvironment(self)\n env_build.configure()\n command = \"./build.sh\"\n with tools.vcvars(self.settings) if self.settings.compiler == \"Visual Studio\" else tools.no_op():\n self.run(command)\n\n def package(self):\n self.copy(pattern=\"COPYING\", dst=\"licenses\", src=self._source_subfolder)\n self.copy(pattern=\"make\", dst=\"bin\", src=self._source_subfolder, keep_path=False)\n self.copy(pattern=\"*gnumake.exe\", dst=\"bin\", src=self._source_subfolder, keep_path=False)\n\n def package_info(self):\n make = \"gnumake.exe\" if self.settings.os_build == \"Windows\" else \"make\"\n make = os.path.join(self.package_folder, \"bin\", make)\n self.output.info('Creating CONAN_MAKE_PROGRAM environment variable: %s' % make)\n self.env_info.CONAN_MAKE_PROGRAM = make\n\n def package_id(self):\n del self.info.settings.compiler\n", "path": "recipes/make/all/conanfile.py"}], "after_files": [{"content": "from conans import ConanFile, tools, AutoToolsBuildEnvironment\nimport os\n\n\nclass 
MakeConan(ConanFile):\n name = \"make\"\n description = \"GNU Make is a tool which controls the generation of executables and other non-source files of a program from the program's source files\"\n topics = (\"conan\", \"make\", \"build\", \"makefile\")\n url = \"https://github.com/conan-io/conan-center-index\"\n homepage = \"https://www.gnu.org/software/make/\"\n license = \"GPL-3.0-or-later\"\n settings = \"os_build\", \"arch_build\", \"compiler\"\n exports_sources = [\"patches/*\"]\n _source_subfolder = \"source_subfolder\"\n\n def source(self):\n tools.get(**self.conan_data[\"sources\"][self.version])\n extracted_dir = \"make-\" + self.version\n os.rename(extracted_dir, self._source_subfolder)\n\n def configure(self):\n del self.settings.compiler.libcxx\n del self.settings.compiler.cppstd\n\n def build(self):\n for patch in self.conan_data[\"patches\"][self.version]:\n tools.patch(**patch)\n\n with tools.chdir(self._source_subfolder):\n # README.W32\n if self.settings.os_build == \"Windows\":\n if self.settings.compiler == \"Visual Studio\":\n command = \"build_w32.bat --without-guile\"\n else:\n command = \"build_w32.bat --without-guile gcc\"\n else:\n env_build = AutoToolsBuildEnvironment(self)\n env_build.configure()\n command = \"./build.sh\"\n with tools.vcvars(self.settings) if self.settings.compiler == \"Visual Studio\" else tools.no_op():\n self.run(command)\n\n def package(self):\n self.copy(pattern=\"COPYING\", dst=\"licenses\", src=self._source_subfolder)\n self.copy(pattern=\"make\", dst=\"bin\", src=self._source_subfolder, keep_path=False)\n self.copy(pattern=\"*gnumake.exe\", dst=\"bin\", src=self._source_subfolder, keep_path=False)\n\n def package_info(self):\n make = \"gnumake.exe\" if self.settings.os_build == \"Windows\" else \"make\"\n make = os.path.join(self.package_folder, \"bin\", make)\n self.output.info('Creating CONAN_MAKE_PROGRAM environment variable: %s' % make)\n self.env_info.CONAN_MAKE_PROGRAM = make\n\n def package_id(self):\n del self.info.settings.compiler\n", "path": "recipes/make/all/conanfile.py"}]} | 1,042 | 201 |
gh_patches_debug_25779 | rasdani/github-patches | git_diff | weecology__retriever-1004 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Update FIA links
It's that time of year again where FIA puts out a new release and moves things around. See https://github.com/weecology/retriever/issues/865#issuecomment-325588307
We need to track down the most recent links and update the script.
Thanks to @nestorperez for reporting this.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `scripts/forest_inventory_analysis.py`
Content:
```
1 #retriever
2 """Retriever script for Forest Inventory and Analysis
3
4 """
5 from __future__ import print_function
6 from future import standard_library
7 standard_library.install_aliases()
8
9 import os
10
11 from retriever.lib.templates import Script
12 from retriever.lib.models import Table
13 from retriever import open_fr, open_fw, VERSION
14 from pkg_resources import parse_version
15
16
17 class main(Script):
18 def __init__(self, **kwargs):
19 Script.__init__(self, **kwargs)
20 self.title = "Forest Inventory and Analysis"
21 self.name = "forest-inventory-analysis"
22 self.retriever_minimum_version = '2.0.dev'
23 self.version = '1.4.0'
24 self.ref = "http://fia.fs.fed.us/"
25 self.urls = {"main": "https://apps.fs.usda.gov/fiadb-downloads/CSV/",
26 'species': 'https://apps.fs.usda.gov/fiadb-downloads/CSV/REF_SPECIES.csv'}
27 self.keywords = ["plants", "continental-scale", "observational"]
28 self.citation = "DATEOFDOWNLOAD. Forest Inventory and Analysis Database, St. Paul, MN: U.S. Department of Agriculture, Forest Service, Northern Research Station. [Available only on internet: http://apps.fs.fed.us/fiadb-downloads/datamart.html]"
29 self.description = """WARNING: This dataset requires downloading many large files and will probably take several hours to finish installing."""
30 self.addendum = """This dataset requires downloading many large files - please be patient."""
31
32 if parse_version(VERSION) <= parse_version("2.0.0"):
33 self.shortname = self.name
34 self.name = self.title
35 self.tags = self.keywords
36
37 def download(self, engine=None, debug=False):
38 Script.download(self, engine, debug)
39 engine = self.engine
40
41 # download and create species table
42 table = Table('species')
43 self.engine.auto_create_table(table, url=self.urls['species'])
44 self.engine.insert_data_from_url(self.urls['species'])
45
46 # State abbreviations with the year annual inventory began for that state
47 stateslist = [('AL', 2001), ('AK', 2004), ('AZ', 2001), ('AR', 2000),
48 ('CA', 2001), ('CO', 2002), ('CT', 2003), ('DE', 2004),
49 ('FL', 2003), ('GA', 1998), ('ID', 2004), ('IL', 2001),
50 ('IN', 1999), ('IA', 1999), ('KS', 2001), ('KY', 1999),
51 ('LA', 2001), ('ME', 1999), ('MD', 2004), ('MA', 2003),
52 ('MI', 2000), ('MN', 1999), ('MO', 1999), ('MS', 2006),
53 ('MT', 2003), ('NE', 2001), ('NV', 2004), ('NH', 2002),
54 ('NJ', 2004), ('NM', 1999), ('NY', 2002), ('NC', 2003),
55 ('ND', 2001), ('OH', 2001), ('OK', 2008), ('OR', 2001),
56 ('PA', 2000), ('RI', 2003), ('SC', 1999), ('SD', 2001),
57 ('TN', 2000), ('TX', 2001), ('UT', 2000), ('VT', 2003),
58 ('VA', 1998), ('WA', 2002), ('WV', 2004), ('WI', 2000),
59 ('WY', 2000), ('PR', 2001)]
60
61 tablelist = ["SURVEY", "PLOT", "COND", "SUBPLOT", "SUBP_COND", "TREE", "SEEDLING"]
62
63 for table in tablelist:
64 for state, year in stateslist:
65 engine.download_files_from_archive(self.urls["main"] + state + "_" + table + ".ZIP",
66 [state + "_" + table + ".csv"])
67
68 for table in tablelist:
69 print("Scanning data for table %s..." % table)
70 prep_file_name = "%s.csv" % table
71 prep_file = open_fw(engine.format_filename(prep_file_name))
72 this_file = open_fr(engine.format_filename(stateslist[0][0] + "_" + table + ".csv"))
73 col_names = this_file.readline()
74 prep_file.write(col_names)
75 column_names = [col.strip('"') for col in col_names.split(',')]
76 year_column = column_names.index("INVYR")
77 this_file.close()
78
79 for state, year in stateslist:
80 this_file = open_fr(engine.format_filename(state + "_" + table + ".csv"))
81 this_file.readline()
82 for line in this_file:
83 values = line.split(',')
84 this_year = values[year_column]
85 if int(this_year) >= year:
86 prep_file.write(line)
87 prep_file.close()
88 engine.auto_create_table(Table(table), filename=prep_file_name)
89
90 engine.insert_data_from_file(engine.format_filename(prep_file_name))
91
92 try:
93 os.remove(engine.format_filename(prep_file_name))
94 except:
95 pass
96
97 return engine
98
99
100 SCRIPT = main()
101
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/scripts/forest_inventory_analysis.py b/scripts/forest_inventory_analysis.py
--- a/scripts/forest_inventory_analysis.py
+++ b/scripts/forest_inventory_analysis.py
@@ -20,10 +20,10 @@
self.title = "Forest Inventory and Analysis"
self.name = "forest-inventory-analysis"
self.retriever_minimum_version = '2.0.dev'
- self.version = '1.4.0'
+ self.version = '1.4.1'
self.ref = "http://fia.fs.fed.us/"
- self.urls = {"main": "https://apps.fs.usda.gov/fiadb-downloads/CSV/",
- 'species': 'https://apps.fs.usda.gov/fiadb-downloads/CSV/REF_SPECIES.csv'}
+ self.urls = {"main": "https://apps.fs.usda.gov/fia/datamart/CSV/",
+ 'species': 'https://apps.fs.usda.gov/fia/datamart/CSV/REF_SPECIES.csv'}
self.keywords = ["plants", "continental-scale", "observational"]
self.citation = "DATEOFDOWNLOAD. Forest Inventory and Analysis Database, St. Paul, MN: U.S. Department of Agriculture, Forest Service, Northern Research Station. [Available only on internet: http://apps.fs.fed.us/fiadb-downloads/datamart.html]"
self.description = """WARNING: This dataset requires downloading many large files and will probably take several hours to finish installing."""
| {"golden_diff": "diff --git a/scripts/forest_inventory_analysis.py b/scripts/forest_inventory_analysis.py\n--- a/scripts/forest_inventory_analysis.py\n+++ b/scripts/forest_inventory_analysis.py\n@@ -20,10 +20,10 @@\n self.title = \"Forest Inventory and Analysis\"\n self.name = \"forest-inventory-analysis\"\n self.retriever_minimum_version = '2.0.dev'\n- self.version = '1.4.0'\n+ self.version = '1.4.1'\n self.ref = \"http://fia.fs.fed.us/\"\n- self.urls = {\"main\": \"https://apps.fs.usda.gov/fiadb-downloads/CSV/\",\n- 'species': 'https://apps.fs.usda.gov/fiadb-downloads/CSV/REF_SPECIES.csv'}\n+ self.urls = {\"main\": \"https://apps.fs.usda.gov/fia/datamart/CSV/\",\n+ 'species': 'https://apps.fs.usda.gov/fia/datamart/CSV/REF_SPECIES.csv'}\n self.keywords = [\"plants\", \"continental-scale\", \"observational\"]\n self.citation = \"DATEOFDOWNLOAD. Forest Inventory and Analysis Database, St. Paul, MN: U.S. Department of Agriculture, Forest Service, Northern Research Station. [Available only on internet: http://apps.fs.fed.us/fiadb-downloads/datamart.html]\"\n self.description = \"\"\"WARNING: This dataset requires downloading many large files and will probably take several hours to finish installing.\"\"\"\n", "issue": "Update FIA links\nIt's that time of year again where FIA puts out a new release and moves things around. See https://github.com/weecology/retriever/issues/865#issuecomment-325588307\r\n\r\nWe need to track down the most recent links and update the script.\r\n\r\nThanks to @nestorperez for reporting this.\n", "before_files": [{"content": "#retriever\n\"\"\"Retriever script for Forest Inventory and Analysis\n\n\"\"\"\nfrom __future__ import print_function\nfrom future import standard_library\nstandard_library.install_aliases()\n\nimport os\n\nfrom retriever.lib.templates import Script\nfrom retriever.lib.models import Table\nfrom retriever import open_fr, open_fw, VERSION\nfrom pkg_resources import parse_version\n\n\nclass main(Script):\n def __init__(self, **kwargs):\n Script.__init__(self, **kwargs)\n self.title = \"Forest Inventory and Analysis\"\n self.name = \"forest-inventory-analysis\"\n self.retriever_minimum_version = '2.0.dev'\n self.version = '1.4.0'\n self.ref = \"http://fia.fs.fed.us/\"\n self.urls = {\"main\": \"https://apps.fs.usda.gov/fiadb-downloads/CSV/\",\n 'species': 'https://apps.fs.usda.gov/fiadb-downloads/CSV/REF_SPECIES.csv'}\n self.keywords = [\"plants\", \"continental-scale\", \"observational\"]\n self.citation = \"DATEOFDOWNLOAD. Forest Inventory and Analysis Database, St. Paul, MN: U.S. Department of Agriculture, Forest Service, Northern Research Station. 
[Available only on internet: http://apps.fs.fed.us/fiadb-downloads/datamart.html]\"\n self.description = \"\"\"WARNING: This dataset requires downloading many large files and will probably take several hours to finish installing.\"\"\"\n self.addendum = \"\"\"This dataset requires downloading many large files - please be patient.\"\"\"\n \n if parse_version(VERSION) <= parse_version(\"2.0.0\"):\n self.shortname = self.name\n self.name = self.title\n self.tags = self.keywords\n\n def download(self, engine=None, debug=False):\n Script.download(self, engine, debug)\n engine = self.engine\n\n # download and create species table\n table = Table('species')\n self.engine.auto_create_table(table, url=self.urls['species'])\n self.engine.insert_data_from_url(self.urls['species'])\n\n # State abbreviations with the year annual inventory began for that state\n stateslist = [('AL', 2001), ('AK', 2004), ('AZ', 2001), ('AR', 2000),\n ('CA', 2001), ('CO', 2002), ('CT', 2003), ('DE', 2004),\n ('FL', 2003), ('GA', 1998), ('ID', 2004), ('IL', 2001),\n ('IN', 1999), ('IA', 1999), ('KS', 2001), ('KY', 1999),\n ('LA', 2001), ('ME', 1999), ('MD', 2004), ('MA', 2003),\n ('MI', 2000), ('MN', 1999), ('MO', 1999), ('MS', 2006),\n ('MT', 2003), ('NE', 2001), ('NV', 2004), ('NH', 2002),\n ('NJ', 2004), ('NM', 1999), ('NY', 2002), ('NC', 2003),\n ('ND', 2001), ('OH', 2001), ('OK', 2008), ('OR', 2001),\n ('PA', 2000), ('RI', 2003), ('SC', 1999), ('SD', 2001),\n ('TN', 2000), ('TX', 2001), ('UT', 2000), ('VT', 2003),\n ('VA', 1998), ('WA', 2002), ('WV', 2004), ('WI', 2000),\n ('WY', 2000), ('PR', 2001)]\n\n tablelist = [\"SURVEY\", \"PLOT\", \"COND\", \"SUBPLOT\", \"SUBP_COND\", \"TREE\", \"SEEDLING\"]\n\n for table in tablelist:\n for state, year in stateslist:\n engine.download_files_from_archive(self.urls[\"main\"] + state + \"_\" + table + \".ZIP\",\n [state + \"_\" + table + \".csv\"])\n\n for table in tablelist:\n print(\"Scanning data for table %s...\" % table)\n prep_file_name = \"%s.csv\" % table\n prep_file = open_fw(engine.format_filename(prep_file_name))\n this_file = open_fr(engine.format_filename(stateslist[0][0] + \"_\" + table + \".csv\"))\n col_names = this_file.readline()\n prep_file.write(col_names)\n column_names = [col.strip('\"') for col in col_names.split(',')]\n year_column = column_names.index(\"INVYR\")\n this_file.close()\n\n for state, year in stateslist:\n this_file = open_fr(engine.format_filename(state + \"_\" + table + \".csv\"))\n this_file.readline()\n for line in this_file:\n values = line.split(',')\n this_year = values[year_column]\n if int(this_year) >= year:\n prep_file.write(line)\n prep_file.close()\n engine.auto_create_table(Table(table), filename=prep_file_name)\n\n engine.insert_data_from_file(engine.format_filename(prep_file_name))\n\n try:\n os.remove(engine.format_filename(prep_file_name))\n except:\n pass\n\n return engine\n\n\nSCRIPT = main()\n", "path": "scripts/forest_inventory_analysis.py"}], "after_files": [{"content": "#retriever\n\"\"\"Retriever script for Forest Inventory and Analysis\n\n\"\"\"\nfrom __future__ import print_function\nfrom future import standard_library\nstandard_library.install_aliases()\n\nimport os\n\nfrom retriever.lib.templates import Script\nfrom retriever.lib.models import Table\nfrom retriever import open_fr, open_fw, VERSION\nfrom pkg_resources import parse_version\n\n\nclass main(Script):\n def __init__(self, **kwargs):\n Script.__init__(self, **kwargs)\n self.title = \"Forest Inventory and Analysis\"\n self.name = 
\"forest-inventory-analysis\"\n self.retriever_minimum_version = '2.0.dev'\n self.version = '1.4.1'\n self.ref = \"http://fia.fs.fed.us/\"\n self.urls = {\"main\": \"https://apps.fs.usda.gov/fia/datamart/CSV/\",\n 'species': 'https://apps.fs.usda.gov/fia/datamart/CSV/REF_SPECIES.csv'}\n self.keywords = [\"plants\", \"continental-scale\", \"observational\"]\n self.citation = \"DATEOFDOWNLOAD. Forest Inventory and Analysis Database, St. Paul, MN: U.S. Department of Agriculture, Forest Service, Northern Research Station. [Available only on internet: http://apps.fs.fed.us/fiadb-downloads/datamart.html]\"\n self.description = \"\"\"WARNING: This dataset requires downloading many large files and will probably take several hours to finish installing.\"\"\"\n self.addendum = \"\"\"This dataset requires downloading many large files - please be patient.\"\"\"\n \n if parse_version(VERSION) <= parse_version(\"2.0.0\"):\n self.shortname = self.name\n self.name = self.title\n self.tags = self.keywords\n\n def download(self, engine=None, debug=False):\n Script.download(self, engine, debug)\n engine = self.engine\n\n # download and create species table\n table = Table('species')\n self.engine.auto_create_table(table, url=self.urls['species'])\n self.engine.insert_data_from_url(self.urls['species'])\n\n # State abbreviations with the year annual inventory began for that state\n stateslist = [('AL', 2001), ('AK', 2004), ('AZ', 2001), ('AR', 2000),\n ('CA', 2001), ('CO', 2002), ('CT', 2003), ('DE', 2004),\n ('FL', 2003), ('GA', 1998), ('ID', 2004), ('IL', 2001),\n ('IN', 1999), ('IA', 1999), ('KS', 2001), ('KY', 1999),\n ('LA', 2001), ('ME', 1999), ('MD', 2004), ('MA', 2003),\n ('MI', 2000), ('MN', 1999), ('MO', 1999), ('MS', 2006),\n ('MT', 2003), ('NE', 2001), ('NV', 2004), ('NH', 2002),\n ('NJ', 2004), ('NM', 1999), ('NY', 2002), ('NC', 2003),\n ('ND', 2001), ('OH', 2001), ('OK', 2008), ('OR', 2001),\n ('PA', 2000), ('RI', 2003), ('SC', 1999), ('SD', 2001),\n ('TN', 2000), ('TX', 2001), ('UT', 2000), ('VT', 2003),\n ('VA', 1998), ('WA', 2002), ('WV', 2004), ('WI', 2000),\n ('WY', 2000), ('PR', 2001)]\n\n tablelist = [\"SURVEY\", \"PLOT\", \"COND\", \"SUBPLOT\", \"SUBP_COND\", \"TREE\", \"SEEDLING\"]\n\n for table in tablelist:\n for state, year in stateslist:\n engine.download_files_from_archive(self.urls[\"main\"] + state + \"_\" + table + \".ZIP\",\n [state + \"_\" + table + \".csv\"])\n\n for table in tablelist:\n print(\"Scanning data for table %s...\" % table)\n prep_file_name = \"%s.csv\" % table\n prep_file = open_fw(engine.format_filename(prep_file_name))\n this_file = open_fr(engine.format_filename(stateslist[0][0] + \"_\" + table + \".csv\"))\n col_names = this_file.readline()\n prep_file.write(col_names)\n column_names = [col.strip('\"') for col in col_names.split(',')]\n year_column = column_names.index(\"INVYR\")\n this_file.close()\n\n for state, year in stateslist:\n this_file = open_fr(engine.format_filename(state + \"_\" + table + \".csv\"))\n this_file.readline()\n for line in this_file:\n values = line.split(',')\n this_year = values[year_column]\n if int(this_year) >= year:\n prep_file.write(line)\n prep_file.close()\n engine.auto_create_table(Table(table), filename=prep_file_name)\n\n engine.insert_data_from_file(engine.format_filename(prep_file_name))\n\n try:\n os.remove(engine.format_filename(prep_file_name))\n except:\n pass\n\n return engine\n\n\nSCRIPT = main()\n", "path": "scripts/forest_inventory_analysis.py"}]} | 1,795 | 320 |
gh_patches_debug_1351 | rasdani/github-patches | git_diff | ibis-project__ibis-2249 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
BUG: Multiple aliases on the same column not behaving as expected
``` python
column = table.some_column
table.projection(
[
column.name("alias1"),
column.name("alias2"),
column.name("alias3"),
]
)
```
I think the expected behavior would be a table expression with:
```
Selection[table]
table:
Table: ref_0
selections:
alias1 = Column[float64*] 'some_column' from table
ref_0
alias2 = Column[float64*] 'some_column' from table
ref_0
alias3 = Column[float64*] 'some_column' from table
ref_0
```
However, the result I'm getting is:
```
Selection[table]
table:
Table: ref_0
selections:
alias1 = Column[float64*] 'some_column' from table
ref_0
alias1 = Column[float64*] 'some_column' from table
ref_0
alias1 = Column[float64*] 'some_column' from table
ref_0
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ibis/expr/format.py`
Content:
```
1 import ibis.expr.operations as ops
2 import ibis.expr.types as ir
3 import ibis.util as util
4
5
6 class FormatMemo:
7 # A little sanity hack to simplify the below
8
9 def __init__(self):
10 from collections import defaultdict
11
12 self.formatted = {}
13 self.aliases = {}
14 self.ops = {}
15 self.counts = defaultdict(int)
16 self._repr_memo = {}
17 self.subexprs = {}
18 self.visit_memo = set()
19
20 def __contains__(self, obj):
21 return self._key(obj) in self.formatted
22
23 def _key(self, expr):
24 memo = self._repr_memo
25 try:
26 result = memo[expr]
27 except KeyError:
28 result = memo[expr] = self._format(expr)
29 return result
30
31 def _format(self, expr):
32 return expr.op()._repr(memo=self)
33
34 def observe(self, expr, formatter=None):
35 if formatter is None:
36 formatter = self._format
37 key = self._key(expr)
38 if key not in self.formatted:
39 self.aliases[key] = 'ref_{:d}'.format(len(self.formatted))
40 self.formatted[key] = formatter(expr)
41 self.ops[key] = expr.op()
42
43 self.counts[key] += 1
44
45 def count(self, expr):
46 return self.counts[self._key(expr)]
47
48 def get_alias(self, expr):
49 return self.aliases[self._key(expr)]
50
51 def get_formatted(self, expr):
52 return self.formatted[self._key(expr)]
53
54
55 class ExprFormatter:
56 """For creating a nice tree-like representation of an expression graph.
57
58 Notes
59 -----
60 TODO: detect reused DAG nodes and do not display redundant information
61
62 """
63
64 def __init__(
65 self, expr, indent_size=2, base_level=0, memo=None, memoize=True
66 ):
67 self.expr = expr
68 self.indent_size = indent_size
69 self.base_level = base_level
70
71 self.memoize = memoize
72
73 # For tracking "extracted" objects, like tables, that we don't want to
74 # print out more than once, and simply alias in the expression tree
75 if memo is None:
76 memo = FormatMemo()
77
78 self.memo = memo
79
80 def get_result(self):
81 what = self.expr.op()
82
83 if self.memoize:
84 self._memoize_tables()
85
86 if isinstance(what, ops.TableNode) and what.has_schema():
87 # This should also catch aggregations
88 if not self.memoize and self.expr in self.memo:
89 text = 'Table: %s' % self.memo.get_alias(self.expr)
90 elif isinstance(what, ops.PhysicalTable):
91 text = self._format_table(self.expr)
92 else:
93 # Any other node type
94 text = self._format_node(self.expr)
95 elif isinstance(what, ops.TableColumn):
96 text = self._format_column(self.expr)
97 elif isinstance(what, ops.Literal):
98 text = 'Literal[{}]\n {}'.format(
99 self._get_type_display(), str(what.value)
100 )
101 elif isinstance(what, ops.ScalarParameter):
102 text = 'ScalarParameter[{}]'.format(self._get_type_display())
103 elif isinstance(what, ops.Node):
104 text = self._format_node(self.expr)
105
106 if isinstance(self.expr, ir.ValueExpr) and self.expr._name is not None:
107 text = '{} = {}'.format(self.expr.get_name(), text)
108
109 if self.memoize:
110 alias_to_text = [
111 (
112 self.memo.aliases[x],
113 self.memo.formatted[x],
114 self.memo.ops[x],
115 )
116 for x in self.memo.formatted
117 ]
118 alias_to_text.sort()
119
120 # A hack to suppress printing out of a ref that is the result of
121 # the top level expression
122 refs = [
123 x + '\n' + y
124 for x, y, op in alias_to_text
125 if not op.equals(what)
126 ]
127
128 text = '\n\n'.join(refs + [text])
129
130 return self._indent(text, self.base_level)
131
132 def _memoize_tables(self):
133 table_memo_ops = (ops.Aggregation, ops.Selection, ops.SelfReference)
134 expr = self.expr
135 if expr.op() in self.memo.visit_memo:
136 return
137
138 stack = [expr]
139 seen = set()
140 memo = self.memo
141
142 while stack:
143 e = stack.pop()
144 op = e.op()
145
146 if op not in seen:
147 seen.add(op)
148
149 if isinstance(op, ops.PhysicalTable):
150 memo.observe(e, self._format_table)
151 elif isinstance(op, ops.Node):
152 stack.extend(
153 arg
154 for arg in reversed(op.args)
155 if isinstance(arg, ir.Expr)
156 )
157 if isinstance(op, table_memo_ops):
158 memo.observe(e, self._format_node)
159 elif isinstance(op, ops.TableNode) and op.has_schema():
160 memo.observe(e, self._format_table)
161 memo.visit_memo.add(op)
162
163 def _indent(self, text, indents=1):
164 return util.indent(text, self.indent_size * indents)
165
166 def _format_table(self, expr):
167 table = expr.op()
168 # format the schema
169 rows = ['name: {}\nschema:'.format(table.name)]
170 rows.extend(
171 map(' {} : {}'.format, table.schema.names, table.schema.types)
172 )
173 opname = type(table).__name__
174 type_display = self._get_type_display(expr)
175 opline = '{}[{}]'.format(opname, type_display)
176 return '{}\n{}'.format(opline, self._indent('\n'.join(rows)))
177
178 def _format_column(self, expr):
179 # HACK: if column is pulled from a Filter of another table, this parent
180 # will not be found in the memo
181 col = expr.op()
182 parent = col.parent()
183
184 if parent not in self.memo:
185 self.memo.observe(parent, formatter=self._format_node)
186
187 table_formatted = self.memo.get_alias(parent)
188 table_formatted = self._indent(table_formatted)
189
190 type_display = self._get_type_display(self.expr)
191 return "Column[{0}] '{1}' from table\n{2}".format(
192 type_display, col.name, table_formatted
193 )
194
195 def _format_node(self, expr):
196 op = expr.op()
197 formatted_args = []
198
199 def visit(what, extra_indents=0):
200 if isinstance(what, ir.Expr):
201 result = self._format_subexpr(what)
202 else:
203 result = self._indent(str(what))
204
205 if extra_indents > 0:
206 result = util.indent(result, self.indent_size)
207
208 formatted_args.append(result)
209
210 arg_names = getattr(op, 'display_argnames', op.argnames)
211
212 if not arg_names:
213 for arg in op.flat_args():
214 visit(arg)
215 else:
216 signature = op.signature
217 arg_name_pairs = (
218 (arg, name)
219 for arg, name in zip(op.args, arg_names)
220 if signature[name].show
221 )
222 for arg, name in arg_name_pairs:
223 if name == 'arg' and isinstance(op, ops.ValueOp):
224 # don't display first argument's name in repr
225 name = None
226 if name is not None:
227 name = self._indent('{}:'.format(name))
228 if util.is_iterable(arg):
229 if name is not None and len(arg) > 0:
230 formatted_args.append(name)
231 indents = 1
232 else:
233 indents = 0
234 for x in arg:
235 visit(x, extra_indents=indents)
236 else:
237 if name is not None:
238 formatted_args.append(name)
239 indents = 1
240 else:
241 indents = 0
242 visit(arg, extra_indents=indents)
243
244 opname = type(op).__name__
245 type_display = self._get_type_display(expr)
246 opline = '{}[{}]'.format(opname, type_display)
247 return '\n'.join([opline] + formatted_args)
248
249 def _format_subexpr(self, expr):
250 subexprs = self.memo.subexprs
251 key = expr.op()
252 try:
253 result = subexprs[key]
254 except KeyError:
255 formatter = ExprFormatter(expr, memo=self.memo, memoize=False)
256 result = subexprs[key] = self._indent(formatter.get_result(), 1)
257 return result
258
259 def _get_type_display(self, expr=None):
260 if expr is None:
261 expr = self.expr
262 return expr._type_display()
263
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/ibis/expr/format.py b/ibis/expr/format.py
--- a/ibis/expr/format.py
+++ b/ibis/expr/format.py
@@ -248,7 +248,7 @@
def _format_subexpr(self, expr):
subexprs = self.memo.subexprs
- key = expr.op()
+ key = expr._key
try:
result = subexprs[key]
except KeyError:
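
The one-character change works because all three aliased projections of `some_column` wrap the same underlying `TableColumn` op: keyed by `expr.op()`, the `subexprs` memo hands the entry formatted for `alias1` back to `alias2` and `alias3`, which is exactly the repeated output shown in the issue. Keying by `expr._key` — which presumably distinguishes expressions that share an op but carry different assigned names — keeps one memo entry per alias. A simplified illustration of the caching difference, using stand-in classes rather than ibis's real ones:

```python
# Stand-ins for the formatter's memo behaviour; these are NOT ibis's actual classes.
class Op:
    """One underlying column operation, shared by every alias."""


class NamedExpr:
    """Several expressions can wrap the same op under different names."""

    def __init__(self, op, name):
        self.op, self.name = op, name


col_op = Op()
exprs = [NamedExpr(col_op, n) for n in ("alias1", "alias2", "alias3")]

memo_by_op = {}
for e in exprs:  # keying by the shared op: the first formatted entry wins
    memo_by_op.setdefault(e.op, f"{e.name} = Column[float64*] 'some_column'")
print(len(memo_by_op))   # 1 -> every alias renders as "alias1 = ..."

memo_by_key = {}
for e in exprs:  # keying by (op, name): one formatted entry per alias
    memo_by_key.setdefault((e.op, e.name), f"{e.name} = Column[float64*] 'some_column'")
print(len(memo_by_key))  # 3
```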
| {"golden_diff": "diff --git a/ibis/expr/format.py b/ibis/expr/format.py\n--- a/ibis/expr/format.py\n+++ b/ibis/expr/format.py\n@@ -248,7 +248,7 @@\n \n def _format_subexpr(self, expr):\n subexprs = self.memo.subexprs\n- key = expr.op()\n+ key = expr._key\n try:\n result = subexprs[key]\n except KeyError:\n", "issue": "BUG: Multiple aliases on the same column not behaving as expected\n\r\n``` python\r\ncolumn = table.some_column\r\ntable.projection(\r\n [\r\n column.name(\"alias1\"),\r\n column.name(\"alias2\"),\r\n column.name(\"alias3\"),\r\n ]\r\n )\r\n```\r\n\r\nI think the expected behavior would be a table expression with:\r\n```\r\nSelection[table]\r\n table:\r\n Table: ref_0\r\n selections:\r\n alias1 = Column[float64*] 'some_column' from table\r\n ref_0\r\n alias2 = Column[float64*] 'some_column' from table\r\n ref_0\r\n alias3 = Column[float64*] 'some_column' from table\r\n ref_0\r\n```\r\n\r\nHowever, the result I'm getting is:\r\n```\r\nSelection[table]\r\n table:\r\n Table: ref_0\r\n selections:\r\n alias1 = Column[float64*] 'some_column' from table\r\n ref_0\r\n alias1 = Column[float64*] 'some_column' from table\r\n ref_0\r\n alias1 = Column[float64*] 'some_column' from table\r\n ref_0\r\n```\n", "before_files": [{"content": "import ibis.expr.operations as ops\nimport ibis.expr.types as ir\nimport ibis.util as util\n\n\nclass FormatMemo:\n # A little sanity hack to simplify the below\n\n def __init__(self):\n from collections import defaultdict\n\n self.formatted = {}\n self.aliases = {}\n self.ops = {}\n self.counts = defaultdict(int)\n self._repr_memo = {}\n self.subexprs = {}\n self.visit_memo = set()\n\n def __contains__(self, obj):\n return self._key(obj) in self.formatted\n\n def _key(self, expr):\n memo = self._repr_memo\n try:\n result = memo[expr]\n except KeyError:\n result = memo[expr] = self._format(expr)\n return result\n\n def _format(self, expr):\n return expr.op()._repr(memo=self)\n\n def observe(self, expr, formatter=None):\n if formatter is None:\n formatter = self._format\n key = self._key(expr)\n if key not in self.formatted:\n self.aliases[key] = 'ref_{:d}'.format(len(self.formatted))\n self.formatted[key] = formatter(expr)\n self.ops[key] = expr.op()\n\n self.counts[key] += 1\n\n def count(self, expr):\n return self.counts[self._key(expr)]\n\n def get_alias(self, expr):\n return self.aliases[self._key(expr)]\n\n def get_formatted(self, expr):\n return self.formatted[self._key(expr)]\n\n\nclass ExprFormatter:\n \"\"\"For creating a nice tree-like representation of an expression graph.\n\n Notes\n -----\n TODO: detect reused DAG nodes and do not display redundant information\n\n \"\"\"\n\n def __init__(\n self, expr, indent_size=2, base_level=0, memo=None, memoize=True\n ):\n self.expr = expr\n self.indent_size = indent_size\n self.base_level = base_level\n\n self.memoize = memoize\n\n # For tracking \"extracted\" objects, like tables, that we don't want to\n # print out more than once, and simply alias in the expression tree\n if memo is None:\n memo = FormatMemo()\n\n self.memo = memo\n\n def get_result(self):\n what = self.expr.op()\n\n if self.memoize:\n self._memoize_tables()\n\n if isinstance(what, ops.TableNode) and what.has_schema():\n # This should also catch aggregations\n if not self.memoize and self.expr in self.memo:\n text = 'Table: %s' % self.memo.get_alias(self.expr)\n elif isinstance(what, ops.PhysicalTable):\n text = self._format_table(self.expr)\n else:\n # Any other node type\n text = self._format_node(self.expr)\n elif 
isinstance(what, ops.TableColumn):\n text = self._format_column(self.expr)\n elif isinstance(what, ops.Literal):\n text = 'Literal[{}]\\n {}'.format(\n self._get_type_display(), str(what.value)\n )\n elif isinstance(what, ops.ScalarParameter):\n text = 'ScalarParameter[{}]'.format(self._get_type_display())\n elif isinstance(what, ops.Node):\n text = self._format_node(self.expr)\n\n if isinstance(self.expr, ir.ValueExpr) and self.expr._name is not None:\n text = '{} = {}'.format(self.expr.get_name(), text)\n\n if self.memoize:\n alias_to_text = [\n (\n self.memo.aliases[x],\n self.memo.formatted[x],\n self.memo.ops[x],\n )\n for x in self.memo.formatted\n ]\n alias_to_text.sort()\n\n # A hack to suppress printing out of a ref that is the result of\n # the top level expression\n refs = [\n x + '\\n' + y\n for x, y, op in alias_to_text\n if not op.equals(what)\n ]\n\n text = '\\n\\n'.join(refs + [text])\n\n return self._indent(text, self.base_level)\n\n def _memoize_tables(self):\n table_memo_ops = (ops.Aggregation, ops.Selection, ops.SelfReference)\n expr = self.expr\n if expr.op() in self.memo.visit_memo:\n return\n\n stack = [expr]\n seen = set()\n memo = self.memo\n\n while stack:\n e = stack.pop()\n op = e.op()\n\n if op not in seen:\n seen.add(op)\n\n if isinstance(op, ops.PhysicalTable):\n memo.observe(e, self._format_table)\n elif isinstance(op, ops.Node):\n stack.extend(\n arg\n for arg in reversed(op.args)\n if isinstance(arg, ir.Expr)\n )\n if isinstance(op, table_memo_ops):\n memo.observe(e, self._format_node)\n elif isinstance(op, ops.TableNode) and op.has_schema():\n memo.observe(e, self._format_table)\n memo.visit_memo.add(op)\n\n def _indent(self, text, indents=1):\n return util.indent(text, self.indent_size * indents)\n\n def _format_table(self, expr):\n table = expr.op()\n # format the schema\n rows = ['name: {}\\nschema:'.format(table.name)]\n rows.extend(\n map(' {} : {}'.format, table.schema.names, table.schema.types)\n )\n opname = type(table).__name__\n type_display = self._get_type_display(expr)\n opline = '{}[{}]'.format(opname, type_display)\n return '{}\\n{}'.format(opline, self._indent('\\n'.join(rows)))\n\n def _format_column(self, expr):\n # HACK: if column is pulled from a Filter of another table, this parent\n # will not be found in the memo\n col = expr.op()\n parent = col.parent()\n\n if parent not in self.memo:\n self.memo.observe(parent, formatter=self._format_node)\n\n table_formatted = self.memo.get_alias(parent)\n table_formatted = self._indent(table_formatted)\n\n type_display = self._get_type_display(self.expr)\n return \"Column[{0}] '{1}' from table\\n{2}\".format(\n type_display, col.name, table_formatted\n )\n\n def _format_node(self, expr):\n op = expr.op()\n formatted_args = []\n\n def visit(what, extra_indents=0):\n if isinstance(what, ir.Expr):\n result = self._format_subexpr(what)\n else:\n result = self._indent(str(what))\n\n if extra_indents > 0:\n result = util.indent(result, self.indent_size)\n\n formatted_args.append(result)\n\n arg_names = getattr(op, 'display_argnames', op.argnames)\n\n if not arg_names:\n for arg in op.flat_args():\n visit(arg)\n else:\n signature = op.signature\n arg_name_pairs = (\n (arg, name)\n for arg, name in zip(op.args, arg_names)\n if signature[name].show\n )\n for arg, name in arg_name_pairs:\n if name == 'arg' and isinstance(op, ops.ValueOp):\n # don't display first argument's name in repr\n name = None\n if name is not None:\n name = self._indent('{}:'.format(name))\n if util.is_iterable(arg):\n if name is 
not None and len(arg) > 0:\n formatted_args.append(name)\n indents = 1\n else:\n indents = 0\n for x in arg:\n visit(x, extra_indents=indents)\n else:\n if name is not None:\n formatted_args.append(name)\n indents = 1\n else:\n indents = 0\n visit(arg, extra_indents=indents)\n\n opname = type(op).__name__\n type_display = self._get_type_display(expr)\n opline = '{}[{}]'.format(opname, type_display)\n return '\\n'.join([opline] + formatted_args)\n\n def _format_subexpr(self, expr):\n subexprs = self.memo.subexprs\n key = expr.op()\n try:\n result = subexprs[key]\n except KeyError:\n formatter = ExprFormatter(expr, memo=self.memo, memoize=False)\n result = subexprs[key] = self._indent(formatter.get_result(), 1)\n return result\n\n def _get_type_display(self, expr=None):\n if expr is None:\n expr = self.expr\n return expr._type_display()\n", "path": "ibis/expr/format.py"}], "after_files": [{"content": "import ibis.expr.operations as ops\nimport ibis.expr.types as ir\nimport ibis.util as util\n\n\nclass FormatMemo:\n # A little sanity hack to simplify the below\n\n def __init__(self):\n from collections import defaultdict\n\n self.formatted = {}\n self.aliases = {}\n self.ops = {}\n self.counts = defaultdict(int)\n self._repr_memo = {}\n self.subexprs = {}\n self.visit_memo = set()\n\n def __contains__(self, obj):\n return self._key(obj) in self.formatted\n\n def _key(self, expr):\n memo = self._repr_memo\n try:\n result = memo[expr]\n except KeyError:\n result = memo[expr] = self._format(expr)\n return result\n\n def _format(self, expr):\n return expr.op()._repr(memo=self)\n\n def observe(self, expr, formatter=None):\n if formatter is None:\n formatter = self._format\n key = self._key(expr)\n if key not in self.formatted:\n self.aliases[key] = 'ref_{:d}'.format(len(self.formatted))\n self.formatted[key] = formatter(expr)\n self.ops[key] = expr.op()\n\n self.counts[key] += 1\n\n def count(self, expr):\n return self.counts[self._key(expr)]\n\n def get_alias(self, expr):\n return self.aliases[self._key(expr)]\n\n def get_formatted(self, expr):\n return self.formatted[self._key(expr)]\n\n\nclass ExprFormatter:\n \"\"\"For creating a nice tree-like representation of an expression graph.\n\n Notes\n -----\n TODO: detect reused DAG nodes and do not display redundant information\n\n \"\"\"\n\n def __init__(\n self, expr, indent_size=2, base_level=0, memo=None, memoize=True\n ):\n self.expr = expr\n self.indent_size = indent_size\n self.base_level = base_level\n\n self.memoize = memoize\n\n # For tracking \"extracted\" objects, like tables, that we don't want to\n # print out more than once, and simply alias in the expression tree\n if memo is None:\n memo = FormatMemo()\n\n self.memo = memo\n\n def get_result(self):\n what = self.expr.op()\n\n if self.memoize:\n self._memoize_tables()\n\n if isinstance(what, ops.TableNode) and what.has_schema():\n # This should also catch aggregations\n if not self.memoize and self.expr in self.memo:\n text = 'Table: %s' % self.memo.get_alias(self.expr)\n elif isinstance(what, ops.PhysicalTable):\n text = self._format_table(self.expr)\n else:\n # Any other node type\n text = self._format_node(self.expr)\n elif isinstance(what, ops.TableColumn):\n text = self._format_column(self.expr)\n elif isinstance(what, ops.Literal):\n text = 'Literal[{}]\\n {}'.format(\n self._get_type_display(), str(what.value)\n )\n elif isinstance(what, ops.ScalarParameter):\n text = 'ScalarParameter[{}]'.format(self._get_type_display())\n elif isinstance(what, ops.Node):\n text = 
self._format_node(self.expr)\n\n if isinstance(self.expr, ir.ValueExpr) and self.expr._name is not None:\n text = '{} = {}'.format(self.expr.get_name(), text)\n\n if self.memoize:\n alias_to_text = [\n (\n self.memo.aliases[x],\n self.memo.formatted[x],\n self.memo.ops[x],\n )\n for x in self.memo.formatted\n ]\n alias_to_text.sort()\n\n # A hack to suppress printing out of a ref that is the result of\n # the top level expression\n refs = [\n x + '\\n' + y\n for x, y, op in alias_to_text\n if not op.equals(what)\n ]\n\n text = '\\n\\n'.join(refs + [text])\n\n return self._indent(text, self.base_level)\n\n def _memoize_tables(self):\n table_memo_ops = (ops.Aggregation, ops.Selection, ops.SelfReference)\n expr = self.expr\n if expr.op() in self.memo.visit_memo:\n return\n\n stack = [expr]\n seen = set()\n memo = self.memo\n\n while stack:\n e = stack.pop()\n op = e.op()\n\n if op not in seen:\n seen.add(op)\n\n if isinstance(op, ops.PhysicalTable):\n memo.observe(e, self._format_table)\n elif isinstance(op, ops.Node):\n stack.extend(\n arg\n for arg in reversed(op.args)\n if isinstance(arg, ir.Expr)\n )\n if isinstance(op, table_memo_ops):\n memo.observe(e, self._format_node)\n elif isinstance(op, ops.TableNode) and op.has_schema():\n memo.observe(e, self._format_table)\n memo.visit_memo.add(op)\n\n def _indent(self, text, indents=1):\n return util.indent(text, self.indent_size * indents)\n\n def _format_table(self, expr):\n table = expr.op()\n # format the schema\n rows = ['name: {}\\nschema:'.format(table.name)]\n rows.extend(\n map(' {} : {}'.format, table.schema.names, table.schema.types)\n )\n opname = type(table).__name__\n type_display = self._get_type_display(expr)\n opline = '{}[{}]'.format(opname, type_display)\n return '{}\\n{}'.format(opline, self._indent('\\n'.join(rows)))\n\n def _format_column(self, expr):\n # HACK: if column is pulled from a Filter of another table, this parent\n # will not be found in the memo\n col = expr.op()\n parent = col.parent()\n\n if parent not in self.memo:\n self.memo.observe(parent, formatter=self._format_node)\n\n table_formatted = self.memo.get_alias(parent)\n table_formatted = self._indent(table_formatted)\n\n type_display = self._get_type_display(self.expr)\n return \"Column[{0}] '{1}' from table\\n{2}\".format(\n type_display, col.name, table_formatted\n )\n\n def _format_node(self, expr):\n op = expr.op()\n formatted_args = []\n\n def visit(what, extra_indents=0):\n if isinstance(what, ir.Expr):\n result = self._format_subexpr(what)\n else:\n result = self._indent(str(what))\n\n if extra_indents > 0:\n result = util.indent(result, self.indent_size)\n\n formatted_args.append(result)\n\n arg_names = getattr(op, 'display_argnames', op.argnames)\n\n if not arg_names:\n for arg in op.flat_args():\n visit(arg)\n else:\n signature = op.signature\n arg_name_pairs = (\n (arg, name)\n for arg, name in zip(op.args, arg_names)\n if signature[name].show\n )\n for arg, name in arg_name_pairs:\n if name == 'arg' and isinstance(op, ops.ValueOp):\n # don't display first argument's name in repr\n name = None\n if name is not None:\n name = self._indent('{}:'.format(name))\n if util.is_iterable(arg):\n if name is not None and len(arg) > 0:\n formatted_args.append(name)\n indents = 1\n else:\n indents = 0\n for x in arg:\n visit(x, extra_indents=indents)\n else:\n if name is not None:\n formatted_args.append(name)\n indents = 1\n else:\n indents = 0\n visit(arg, extra_indents=indents)\n\n opname = type(op).__name__\n type_display = 
self._get_type_display(expr)\n opline = '{}[{}]'.format(opname, type_display)\n return '\\n'.join([opline] + formatted_args)\n\n def _format_subexpr(self, expr):\n subexprs = self.memo.subexprs\n key = expr._key\n try:\n result = subexprs[key]\n except KeyError:\n formatter = ExprFormatter(expr, memo=self.memo, memoize=False)\n result = subexprs[key] = self._indent(formatter.get_result(), 1)\n return result\n\n def _get_type_display(self, expr=None):\n if expr is None:\n expr = self.expr\n return expr._type_display()\n", "path": "ibis/expr/format.py"}]} | 3,073 | 106 |
gh_patches_debug_23792 | rasdani/github-patches | git_diff | svthalia__concrexit-2717 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Summary download not working
### Describe the bug
Some summaries (for example Data Mining 2019-2020 Practice Exam Midterms) are not downloaded when clicked. They also do not work when trying to view them through the Site Administration. Pressing these buttons only leads to a new empty tab.
Some summaries still work however.
### How to reproduce
Steps to reproduce the behaviour:
1. Go to 'education'
2. Scroll down to 'data mining'
3. Click on 'data mining'
4. Click on '2019-2020'
5. Click on 'Practice Exam Midterm'
### Expected behaviour
Clicking this button should download the associated file.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `website/education/views.py`
Content:
```
1 """Views provided by the education package."""
2 import os
3 from datetime import date, datetime
4
5 from django.contrib.auth.decorators import login_required
6 from django.contrib.messages.views import SuccessMessageMixin
7 from django.core.exceptions import PermissionDenied
8 from django.db.models import Count
9 from django.http import HttpResponse
10 from django.shortcuts import redirect
11 from django.urls import reverse_lazy
12 from django.utils import timezone
13 from django.utils.decorators import method_decorator
14 from django.utils.translation import gettext_lazy as _
15 from django.views.generic import CreateView, DetailView, ListView, TemplateView
16
17 from members.decorators import membership_required
18 from utils.media.services import get_media_url
19
20 from . import emails
21 from .forms import AddExamForm, AddSummaryForm
22 from .models import Category, Course, Exam, Summary
23
24
25 class CourseIndexView(ListView):
26 """Render an overview of the courses."""
27
28 queryset = (
29 Course.objects.filter(until=None)
30 .prefetch_related("categories", "old_courses")
31 .annotate(summary_count=Count("summary"))
32 .annotate(exam_count=Count("exam"))
33 )
34 template_name = "education/courses.html"
35
36 def get_ordering(self) -> str:
37 return "name"
38
39 def get_context_data(self, **kwargs) -> dict:
40 context = super().get_context_data(**kwargs)
41 context.update(
42 {
43 "courses": (
44 {
45 "course_code": x.course_code,
46 "name": x.name,
47 "categories": x.categories.all(),
48 "document_count": sum(
49 [
50 x.summary_count,
51 x.exam_count,
52 ]
53 + [
54 c.summary_set.filter(accepted=True).count()
55 + c.exam_set.filter(accepted=True).count()
56 for c in x.old_courses.all()
57 ]
58 ),
59 "url": x.get_absolute_url(),
60 }
61 for x in context["object_list"]
62 ),
63 "categories": Category.objects.all(),
64 }
65 )
66 return context
67
68
69 class CourseDetailView(DetailView):
70 """Render the detail page of one specific course."""
71
72 model = Course
73 context_object_name = "course"
74 template_name = "education/course.html"
75
76 def get_context_data(self, **kwargs) -> dict:
77 context = super().get_context_data(**kwargs)
78 obj = context["course"]
79 courses = list(obj.old_courses.all())
80 courses.append(obj)
81 items = {}
82 for course in courses:
83 for summary in course.summary_set.filter(accepted=True):
84 if summary.year not in items:
85 items[summary.year] = {
86 "summaries": [],
87 "exams": [],
88 "legacy": course if course.pk != obj.pk else None,
89 }
90 items[summary.year]["summaries"].append(
91 {
92 "year": summary.year,
93 "name": summary.name,
94 "language": summary.language,
95 "id": summary.id,
96 }
97 )
98 for exam in course.exam_set.filter(accepted=True):
99 if exam.year not in items:
100 items[exam.year] = {
101 "summaries": [],
102 "exams": [],
103 "legacy": course if course.pk != obj.pk else None,
104 }
105 items[exam.year]["exams"].append(
106 {
107 "type": "exam",
108 "year": exam.year,
109 "name": f"{exam.get_type_display()} {exam.name}",
110 "language": exam.language,
111 "id": exam.id,
112 }
113 )
114 context.update({"items": sorted(items.items(), key=lambda x: x[0])})
115 return context
116
117
118 @method_decorator(login_required, "dispatch")
119 @method_decorator(membership_required, "dispatch")
120 class ExamDetailView(DetailView):
121 """Fetch and output the specified exam."""
122
123 model = Exam
124
125 def get(self, request, *args, **kwargs) -> HttpResponse:
126 response = super().get(request, *args, **kwargs)
127 exam = response.context_data["object"]
128 exam.download_count += 1
129 exam.save()
130
131 ext = os.path.splitext(exam.file.name)[1]
132 filename = f"{exam.course.name}-exam{exam.year}{ext}"
133 return redirect(get_media_url(exam.file, filename))
134
135
136 @method_decorator(login_required, "dispatch")
137 @method_decorator(membership_required, "dispatch")
138 class SummaryDetailView(DetailView):
139 """Fetch and output the specified summary."""
140
141 model = Summary
142
143 def get(self, request, *args, **kwargs) -> HttpResponse:
144 response = super().get(request, *args, **kwargs)
145 obj = response.context_data["object"]
146 obj.download_count += 1
147 obj.save()
148
149 ext = os.path.splitext(obj.file.name)[1]
150 filename = f"{obj.course.name}-summary{obj.year}{ext}"
151 return redirect(get_media_url(obj.file, filename))
152
153
154 @method_decorator(login_required, "dispatch")
155 @method_decorator(membership_required, "dispatch")
156 class ExamCreateView(SuccessMessageMixin, CreateView):
157 """Render the form to submit a new exam."""
158
159 model = Exam
160 form_class = AddExamForm
161 template_name = "education/add_exam.html"
162 success_url = reverse_lazy("education:submit-exam")
163 success_message = _("Exam submitted successfully.")
164
165 def get_initial(self) -> dict:
166 initial = super().get_initial()
167 initial["exam_date"] = date.today()
168 initial["course"] = self.kwargs.get("pk", None)
169 return initial
170
171 def form_valid(self, form) -> HttpResponse:
172 self.object = form.save(commit=False)
173 self.object.uploader = self.request.member
174 self.object.uploader_date = datetime.now()
175 self.object.save()
176 emails.send_document_notification(self.object)
177 return super().form_valid(form)
178
179
180 @method_decorator(login_required, "dispatch")
181 @method_decorator(membership_required, "dispatch")
182 class SummaryCreateView(SuccessMessageMixin, CreateView):
183 """Render the form to submit a new summary."""
184
185 model = Summary
186 form_class = AddSummaryForm
187 template_name = "education/add_summary.html"
188 success_url = reverse_lazy("education:submit-summary")
189 success_message = _("Summary submitted successfully.")
190
191 def get_initial(self):
192 initial = super().get_initial()
193 initial["author"] = self.request.member.get_full_name()
194 initial["course"] = self.kwargs.get("pk", None)
195 return initial
196
197 def form_valid(self, form) -> HttpResponse:
198 self.object = form.save(commit=False)
199 self.object.uploader = self.request.member
200 self.object.uploader_date = datetime.now()
201 self.object.save()
202 emails.send_document_notification(self.object)
203 return super().form_valid(form)
204
205
206 @method_decorator(login_required, "dispatch")
207 class BookInfoView(TemplateView):
208 """Render a page with information about book sale.
209
210 Only available to members and to-be members
211 """
212
213 template_name = "education/books.html"
214
215 def dispatch(self, request, *args, **kwargs) -> HttpResponse:
216 if request.member.has_active_membership() or (
217 request.member.earliest_membership
218 and request.member.earliest_membership.since > timezone.now().date()
219 ):
220 return super().dispatch(request, *args, **kwargs)
221 raise PermissionDenied
222
```
Path: `website/utils/media/services.py`
Content:
```
1 import io
2 import os
3
4 from django.conf import settings
5 from django.core import signing
6 from django.core.files.base import ContentFile
7 from django.core.files.storage import DefaultStorage, get_storage_class
8 from django.core.files.uploadedfile import InMemoryUploadedFile
9 from django.db.models.fields.files import FieldFile, ImageFieldFile
10 from django.urls import reverse
11
12
13 def save_image(storage, image, path, format):
14 buffer = io.BytesIO()
15 image.convert("RGB" if format == "JPEG" else "RGBA").save(fp=buffer, format=format)
16 buff_val = buffer.getvalue()
17 content = ContentFile(buff_val)
18 file = InMemoryUploadedFile(
19 content,
20 None,
21 f"foo.{format.lower()}",
22 f"image/{format.lower()}",
23 content.tell,
24 None,
25 )
26 return storage.save(path, file)
27
28
29 def get_media_url(file, attachment=False):
30 """Get the url of the provided media file to serve in a browser.
31
32 If the file is private a signature will be added.
33 Do NOT use this with user input
34 :param file: the file field
35 :param attachment: True if the file is a forced download
36 :return: the url of the media
37 """
38 storage = DefaultStorage()
39 file_name = file
40 if isinstance(file, (ImageFieldFile, FieldFile)):
41 storage = file.storage
42 file_name = file.name
43
44 return f"{storage.url(file_name, attachment)}"
45
46
47 def get_thumbnail_url(file, size, fit=True):
48 """Get the thumbnail url of a media file, NEVER use this with user input.
49
50 If the thumbnail exists this function will return the url of the
51 media file, with signature if necessary. Does it not yet exist a route
52 that executes the :func:`utils.media.views.generate_thumbnail`
53 will be the output.
54 :param file: the file field
55 :param size: size of the image
56 :param fit: False to keep the aspect ratio, True to crop
57 :return: get-thumbnail path
58 """
59 storage = DefaultStorage()
60 name = file
61
62 if isinstance(file, (ImageFieldFile, FieldFile)):
63 storage = file.storage
64 name = file.name
65
66 is_public = isinstance(storage, get_storage_class(settings.PUBLIC_FILE_STORAGE))
67 size_fit = f"{size}_{int(fit)}"
68
69 if name.endswith(".svg") and is_public:
70 return storage.url(name)
71
72 sig_info = {
73 "size": size,
74 "fit": int(fit),
75 "name": name,
76 "thumb_path": f"thumbnails/{size_fit}/{name}",
77 "serve_path": f"thumbnails/{size_fit}/{name}",
78 "storage": f"{storage.__class__.__module__}.{storage.__class__.__name__}",
79 }
80
81 # We provide a URL instead of calling it as a function, so that using
82 # it means kicking off a new GET request. If we would need to check all files for the
83 # thumbnails inline, loading an album overview would have high latency.
84 return (
85 reverse("get-thumbnail", args=[os.path.join(size_fit, sig_info["name"])])
86 + f"?sig={signing.dumps(sig_info)}"
87 )
88
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/website/education/views.py b/website/education/views.py
--- a/website/education/views.py
+++ b/website/education/views.py
@@ -124,13 +124,13 @@
def get(self, request, *args, **kwargs) -> HttpResponse:
response = super().get(request, *args, **kwargs)
- exam = response.context_data["object"]
- exam.download_count += 1
- exam.save()
+ obj = response.context_data["object"]
+ obj.download_count += 1
+ obj.save()
- ext = os.path.splitext(exam.file.name)[1]
- filename = f"{exam.course.name}-exam{exam.year}{ext}"
- return redirect(get_media_url(exam.file, filename))
+ ext = os.path.splitext(obj.file.name)[1]
+ filename = f"{obj.course.name}-summary{obj.year}{ext}"
+ return redirect(get_media_url(obj.file, filename))
@method_decorator(login_required, "dispatch")
diff --git a/website/utils/media/services.py b/website/utils/media/services.py
--- a/website/utils/media/services.py
+++ b/website/utils/media/services.py
@@ -32,7 +32,7 @@
If the file is private a signature will be added.
Do NOT use this with user input
:param file: the file field
- :param attachment: True if the file is a forced download
+ :param attachment: filename to use for the attachment or False to not download as attachment
:return: the url of the media
"""
storage = DefaultStorage()
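
The `services.py` hunk lines up the helper's documentation with how the education views above actually call it: `attachment` is not a boolean flag but the filename the browser should save the file under, with `False` (the default) meaning the media is served inline. A usage sketch under that reading — the helper and models are the ones shown above, everything else is illustrative:

```python
# Sketch only: mirrors the detail views above rather than adding new behaviour.
import os

from utils.media.services import get_media_url


def education_document_url(obj, kind, force_download=True):
    """Build an inline or forced-download URL for an Exam/Summary-like object."""
    if not force_download:
        return get_media_url(obj.file)  # attachment left as False -> serve inline

    ext = os.path.splitext(obj.file.name)[1]
    filename = f"{obj.course.name}-{kind}{obj.year}{ext}"
    return get_media_url(obj.file, filename)  # attachment=<filename> -> forced download
```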
| {"golden_diff": "diff --git a/website/education/views.py b/website/education/views.py\n--- a/website/education/views.py\n+++ b/website/education/views.py\n@@ -124,13 +124,13 @@\n \n def get(self, request, *args, **kwargs) -> HttpResponse:\n response = super().get(request, *args, **kwargs)\n- exam = response.context_data[\"object\"]\n- exam.download_count += 1\n- exam.save()\n+ obj = response.context_data[\"object\"]\n+ obj.download_count += 1\n+ obj.save()\n \n- ext = os.path.splitext(exam.file.name)[1]\n- filename = f\"{exam.course.name}-exam{exam.year}{ext}\"\n- return redirect(get_media_url(exam.file, filename))\n+ ext = os.path.splitext(obj.file.name)[1]\n+ filename = f\"{obj.course.name}-summary{obj.year}{ext}\"\n+ return redirect(get_media_url(obj.file, filename))\n \n \n @method_decorator(login_required, \"dispatch\")\ndiff --git a/website/utils/media/services.py b/website/utils/media/services.py\n--- a/website/utils/media/services.py\n+++ b/website/utils/media/services.py\n@@ -32,7 +32,7 @@\n If the file is private a signature will be added.\n Do NOT use this with user input\n :param file: the file field\n- :param attachment: True if the file is a forced download\n+ :param attachment: filename to use for the attachment or False to not download as attachment\n :return: the url of the media\n \"\"\"\n storage = DefaultStorage()\n", "issue": "Summary download not working\n### Describe the bug\r\nSome summaries (for example Data Mining 2019-2020 Practice Exam Midterms) are not downloaded when clicked. They also do not work when trying to view them through the Site Administration. Pressing these buttons only leads to a new empty tab. \r\n\r\nSome summaries still work however.\r\n\r\n### How to reproduce\r\nSteps to reproduce the behaviour:\r\n1. Go to 'education'\r\n2. Scroll down to 'data mining'\r\n3. Click on 'data mining'\r\n4. Click on '2019-2020'\r\n5. Click on 'Practice Exam Midterm'\r\n\r\n### Expected behaviour\r\nClicking this button should download the associated file.\r\n\n", "before_files": [{"content": "\"\"\"Views provided by the education package.\"\"\"\nimport os\nfrom datetime import date, datetime\n\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.messages.views import SuccessMessageMixin\nfrom django.core.exceptions import PermissionDenied\nfrom django.db.models import Count\nfrom django.http import HttpResponse\nfrom django.shortcuts import redirect\nfrom django.urls import reverse_lazy\nfrom django.utils import timezone\nfrom django.utils.decorators import method_decorator\nfrom django.utils.translation import gettext_lazy as _\nfrom django.views.generic import CreateView, DetailView, ListView, TemplateView\n\nfrom members.decorators import membership_required\nfrom utils.media.services import get_media_url\n\nfrom . 
import emails\nfrom .forms import AddExamForm, AddSummaryForm\nfrom .models import Category, Course, Exam, Summary\n\n\nclass CourseIndexView(ListView):\n \"\"\"Render an overview of the courses.\"\"\"\n\n queryset = (\n Course.objects.filter(until=None)\n .prefetch_related(\"categories\", \"old_courses\")\n .annotate(summary_count=Count(\"summary\"))\n .annotate(exam_count=Count(\"exam\"))\n )\n template_name = \"education/courses.html\"\n\n def get_ordering(self) -> str:\n return \"name\"\n\n def get_context_data(self, **kwargs) -> dict:\n context = super().get_context_data(**kwargs)\n context.update(\n {\n \"courses\": (\n {\n \"course_code\": x.course_code,\n \"name\": x.name,\n \"categories\": x.categories.all(),\n \"document_count\": sum(\n [\n x.summary_count,\n x.exam_count,\n ]\n + [\n c.summary_set.filter(accepted=True).count()\n + c.exam_set.filter(accepted=True).count()\n for c in x.old_courses.all()\n ]\n ),\n \"url\": x.get_absolute_url(),\n }\n for x in context[\"object_list\"]\n ),\n \"categories\": Category.objects.all(),\n }\n )\n return context\n\n\nclass CourseDetailView(DetailView):\n \"\"\"Render the detail page of one specific course.\"\"\"\n\n model = Course\n context_object_name = \"course\"\n template_name = \"education/course.html\"\n\n def get_context_data(self, **kwargs) -> dict:\n context = super().get_context_data(**kwargs)\n obj = context[\"course\"]\n courses = list(obj.old_courses.all())\n courses.append(obj)\n items = {}\n for course in courses:\n for summary in course.summary_set.filter(accepted=True):\n if summary.year not in items:\n items[summary.year] = {\n \"summaries\": [],\n \"exams\": [],\n \"legacy\": course if course.pk != obj.pk else None,\n }\n items[summary.year][\"summaries\"].append(\n {\n \"year\": summary.year,\n \"name\": summary.name,\n \"language\": summary.language,\n \"id\": summary.id,\n }\n )\n for exam in course.exam_set.filter(accepted=True):\n if exam.year not in items:\n items[exam.year] = {\n \"summaries\": [],\n \"exams\": [],\n \"legacy\": course if course.pk != obj.pk else None,\n }\n items[exam.year][\"exams\"].append(\n {\n \"type\": \"exam\",\n \"year\": exam.year,\n \"name\": f\"{exam.get_type_display()} {exam.name}\",\n \"language\": exam.language,\n \"id\": exam.id,\n }\n )\n context.update({\"items\": sorted(items.items(), key=lambda x: x[0])})\n return context\n\n\n@method_decorator(login_required, \"dispatch\")\n@method_decorator(membership_required, \"dispatch\")\nclass ExamDetailView(DetailView):\n \"\"\"Fetch and output the specified exam.\"\"\"\n\n model = Exam\n\n def get(self, request, *args, **kwargs) -> HttpResponse:\n response = super().get(request, *args, **kwargs)\n exam = response.context_data[\"object\"]\n exam.download_count += 1\n exam.save()\n\n ext = os.path.splitext(exam.file.name)[1]\n filename = f\"{exam.course.name}-exam{exam.year}{ext}\"\n return redirect(get_media_url(exam.file, filename))\n\n\n@method_decorator(login_required, \"dispatch\")\n@method_decorator(membership_required, \"dispatch\")\nclass SummaryDetailView(DetailView):\n \"\"\"Fetch and output the specified summary.\"\"\"\n\n model = Summary\n\n def get(self, request, *args, **kwargs) -> HttpResponse:\n response = super().get(request, *args, **kwargs)\n obj = response.context_data[\"object\"]\n obj.download_count += 1\n obj.save()\n\n ext = os.path.splitext(obj.file.name)[1]\n filename = f\"{obj.course.name}-summary{obj.year}{ext}\"\n return redirect(get_media_url(obj.file, filename))\n\n\n@method_decorator(login_required, 
\"dispatch\")\n@method_decorator(membership_required, \"dispatch\")\nclass ExamCreateView(SuccessMessageMixin, CreateView):\n \"\"\"Render the form to submit a new exam.\"\"\"\n\n model = Exam\n form_class = AddExamForm\n template_name = \"education/add_exam.html\"\n success_url = reverse_lazy(\"education:submit-exam\")\n success_message = _(\"Exam submitted successfully.\")\n\n def get_initial(self) -> dict:\n initial = super().get_initial()\n initial[\"exam_date\"] = date.today()\n initial[\"course\"] = self.kwargs.get(\"pk\", None)\n return initial\n\n def form_valid(self, form) -> HttpResponse:\n self.object = form.save(commit=False)\n self.object.uploader = self.request.member\n self.object.uploader_date = datetime.now()\n self.object.save()\n emails.send_document_notification(self.object)\n return super().form_valid(form)\n\n\n@method_decorator(login_required, \"dispatch\")\n@method_decorator(membership_required, \"dispatch\")\nclass SummaryCreateView(SuccessMessageMixin, CreateView):\n \"\"\"Render the form to submit a new summary.\"\"\"\n\n model = Summary\n form_class = AddSummaryForm\n template_name = \"education/add_summary.html\"\n success_url = reverse_lazy(\"education:submit-summary\")\n success_message = _(\"Summary submitted successfully.\")\n\n def get_initial(self):\n initial = super().get_initial()\n initial[\"author\"] = self.request.member.get_full_name()\n initial[\"course\"] = self.kwargs.get(\"pk\", None)\n return initial\n\n def form_valid(self, form) -> HttpResponse:\n self.object = form.save(commit=False)\n self.object.uploader = self.request.member\n self.object.uploader_date = datetime.now()\n self.object.save()\n emails.send_document_notification(self.object)\n return super().form_valid(form)\n\n\n@method_decorator(login_required, \"dispatch\")\nclass BookInfoView(TemplateView):\n \"\"\"Render a page with information about book sale.\n\n Only available to members and to-be members\n \"\"\"\n\n template_name = \"education/books.html\"\n\n def dispatch(self, request, *args, **kwargs) -> HttpResponse:\n if request.member.has_active_membership() or (\n request.member.earliest_membership\n and request.member.earliest_membership.since > timezone.now().date()\n ):\n return super().dispatch(request, *args, **kwargs)\n raise PermissionDenied\n", "path": "website/education/views.py"}, {"content": "import io\nimport os\n\nfrom django.conf import settings\nfrom django.core import signing\nfrom django.core.files.base import ContentFile\nfrom django.core.files.storage import DefaultStorage, get_storage_class\nfrom django.core.files.uploadedfile import InMemoryUploadedFile\nfrom django.db.models.fields.files import FieldFile, ImageFieldFile\nfrom django.urls import reverse\n\n\ndef save_image(storage, image, path, format):\n buffer = io.BytesIO()\n image.convert(\"RGB\" if format == \"JPEG\" else \"RGBA\").save(fp=buffer, format=format)\n buff_val = buffer.getvalue()\n content = ContentFile(buff_val)\n file = InMemoryUploadedFile(\n content,\n None,\n f\"foo.{format.lower()}\",\n f\"image/{format.lower()}\",\n content.tell,\n None,\n )\n return storage.save(path, file)\n\n\ndef get_media_url(file, attachment=False):\n \"\"\"Get the url of the provided media file to serve in a browser.\n\n If the file is private a signature will be added.\n Do NOT use this with user input\n :param file: the file field\n :param attachment: True if the file is a forced download\n :return: the url of the media\n \"\"\"\n storage = DefaultStorage()\n file_name = file\n if isinstance(file, 
(ImageFieldFile, FieldFile)):\n storage = file.storage\n file_name = file.name\n\n return f\"{storage.url(file_name, attachment)}\"\n\n\ndef get_thumbnail_url(file, size, fit=True):\n \"\"\"Get the thumbnail url of a media file, NEVER use this with user input.\n\n If the thumbnail exists this function will return the url of the\n media file, with signature if necessary. Does it not yet exist a route\n that executes the :func:`utils.media.views.generate_thumbnail`\n will be the output.\n :param file: the file field\n :param size: size of the image\n :param fit: False to keep the aspect ratio, True to crop\n :return: get-thumbnail path\n \"\"\"\n storage = DefaultStorage()\n name = file\n\n if isinstance(file, (ImageFieldFile, FieldFile)):\n storage = file.storage\n name = file.name\n\n is_public = isinstance(storage, get_storage_class(settings.PUBLIC_FILE_STORAGE))\n size_fit = f\"{size}_{int(fit)}\"\n\n if name.endswith(\".svg\") and is_public:\n return storage.url(name)\n\n sig_info = {\n \"size\": size,\n \"fit\": int(fit),\n \"name\": name,\n \"thumb_path\": f\"thumbnails/{size_fit}/{name}\",\n \"serve_path\": f\"thumbnails/{size_fit}/{name}\",\n \"storage\": f\"{storage.__class__.__module__}.{storage.__class__.__name__}\",\n }\n\n # We provide a URL instead of calling it as a function, so that using\n # it means kicking off a new GET request. If we would need to check all files for the\n # thumbnails inline, loading an album overview would have high latency.\n return (\n reverse(\"get-thumbnail\", args=[os.path.join(size_fit, sig_info[\"name\"])])\n + f\"?sig={signing.dumps(sig_info)}\"\n )\n", "path": "website/utils/media/services.py"}], "after_files": [{"content": "\"\"\"Views provided by the education package.\"\"\"\nimport os\nfrom datetime import date, datetime\n\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.messages.views import SuccessMessageMixin\nfrom django.core.exceptions import PermissionDenied\nfrom django.db.models import Count\nfrom django.http import HttpResponse\nfrom django.shortcuts import redirect\nfrom django.urls import reverse_lazy\nfrom django.utils import timezone\nfrom django.utils.decorators import method_decorator\nfrom django.utils.translation import gettext_lazy as _\nfrom django.views.generic import CreateView, DetailView, ListView, TemplateView\n\nfrom members.decorators import membership_required\nfrom utils.media.services import get_media_url\n\nfrom . 
import emails\nfrom .forms import AddExamForm, AddSummaryForm\nfrom .models import Category, Course, Exam, Summary\n\n\nclass CourseIndexView(ListView):\n \"\"\"Render an overview of the courses.\"\"\"\n\n queryset = (\n Course.objects.filter(until=None)\n .prefetch_related(\"categories\", \"old_courses\")\n .annotate(summary_count=Count(\"summary\"))\n .annotate(exam_count=Count(\"exam\"))\n )\n template_name = \"education/courses.html\"\n\n def get_ordering(self) -> str:\n return \"name\"\n\n def get_context_data(self, **kwargs) -> dict:\n context = super().get_context_data(**kwargs)\n context.update(\n {\n \"courses\": (\n {\n \"course_code\": x.course_code,\n \"name\": x.name,\n \"categories\": x.categories.all(),\n \"document_count\": sum(\n [\n x.summary_count,\n x.exam_count,\n ]\n + [\n c.summary_set.filter(accepted=True).count()\n + c.exam_set.filter(accepted=True).count()\n for c in x.old_courses.all()\n ]\n ),\n \"url\": x.get_absolute_url(),\n }\n for x in context[\"object_list\"]\n ),\n \"categories\": Category.objects.all(),\n }\n )\n return context\n\n\nclass CourseDetailView(DetailView):\n \"\"\"Render the detail page of one specific course.\"\"\"\n\n model = Course\n context_object_name = \"course\"\n template_name = \"education/course.html\"\n\n def get_context_data(self, **kwargs) -> dict:\n context = super().get_context_data(**kwargs)\n obj = context[\"course\"]\n courses = list(obj.old_courses.all())\n courses.append(obj)\n items = {}\n for course in courses:\n for summary in course.summary_set.filter(accepted=True):\n if summary.year not in items:\n items[summary.year] = {\n \"summaries\": [],\n \"exams\": [],\n \"legacy\": course if course.pk != obj.pk else None,\n }\n items[summary.year][\"summaries\"].append(\n {\n \"year\": summary.year,\n \"name\": summary.name,\n \"language\": summary.language,\n \"id\": summary.id,\n }\n )\n for exam in course.exam_set.filter(accepted=True):\n if exam.year not in items:\n items[exam.year] = {\n \"summaries\": [],\n \"exams\": [],\n \"legacy\": course if course.pk != obj.pk else None,\n }\n items[exam.year][\"exams\"].append(\n {\n \"type\": \"exam\",\n \"year\": exam.year,\n \"name\": f\"{exam.get_type_display()} {exam.name}\",\n \"language\": exam.language,\n \"id\": exam.id,\n }\n )\n context.update({\"items\": sorted(items.items(), key=lambda x: x[0])})\n return context\n\n\n@method_decorator(login_required, \"dispatch\")\n@method_decorator(membership_required, \"dispatch\")\nclass ExamDetailView(DetailView):\n \"\"\"Fetch and output the specified exam.\"\"\"\n\n model = Exam\n\n def get(self, request, *args, **kwargs) -> HttpResponse:\n response = super().get(request, *args, **kwargs)\n obj = response.context_data[\"object\"]\n obj.download_count += 1\n obj.save()\n\n ext = os.path.splitext(obj.file.name)[1]\n filename = f\"{obj.course.name}-summary{obj.year}{ext}\"\n return redirect(get_media_url(obj.file, filename))\n\n\n@method_decorator(login_required, \"dispatch\")\n@method_decorator(membership_required, \"dispatch\")\nclass SummaryDetailView(DetailView):\n \"\"\"Fetch and output the specified summary.\"\"\"\n\n model = Summary\n\n def get(self, request, *args, **kwargs) -> HttpResponse:\n response = super().get(request, *args, **kwargs)\n obj = response.context_data[\"object\"]\n obj.download_count += 1\n obj.save()\n\n ext = os.path.splitext(obj.file.name)[1]\n filename = f\"{obj.course.name}-summary{obj.year}{ext}\"\n return redirect(get_media_url(obj.file, filename))\n\n\n@method_decorator(login_required, 
\"dispatch\")\n@method_decorator(membership_required, \"dispatch\")\nclass ExamCreateView(SuccessMessageMixin, CreateView):\n \"\"\"Render the form to submit a new exam.\"\"\"\n\n model = Exam\n form_class = AddExamForm\n template_name = \"education/add_exam.html\"\n success_url = reverse_lazy(\"education:submit-exam\")\n success_message = _(\"Exam submitted successfully.\")\n\n def get_initial(self) -> dict:\n initial = super().get_initial()\n initial[\"exam_date\"] = date.today()\n initial[\"course\"] = self.kwargs.get(\"pk\", None)\n return initial\n\n def form_valid(self, form) -> HttpResponse:\n self.object = form.save(commit=False)\n self.object.uploader = self.request.member\n self.object.uploader_date = datetime.now()\n self.object.save()\n emails.send_document_notification(self.object)\n return super().form_valid(form)\n\n\n@method_decorator(login_required, \"dispatch\")\n@method_decorator(membership_required, \"dispatch\")\nclass SummaryCreateView(SuccessMessageMixin, CreateView):\n \"\"\"Render the form to submit a new summary.\"\"\"\n\n model = Summary\n form_class = AddSummaryForm\n template_name = \"education/add_summary.html\"\n success_url = reverse_lazy(\"education:submit-summary\")\n success_message = _(\"Summary submitted successfully.\")\n\n def get_initial(self):\n initial = super().get_initial()\n initial[\"author\"] = self.request.member.get_full_name()\n initial[\"course\"] = self.kwargs.get(\"pk\", None)\n return initial\n\n def form_valid(self, form) -> HttpResponse:\n self.object = form.save(commit=False)\n self.object.uploader = self.request.member\n self.object.uploader_date = datetime.now()\n self.object.save()\n emails.send_document_notification(self.object)\n return super().form_valid(form)\n\n\n@method_decorator(login_required, \"dispatch\")\nclass BookInfoView(TemplateView):\n \"\"\"Render a page with information about book sale.\n\n Only available to members and to-be members\n \"\"\"\n\n template_name = \"education/books.html\"\n\n def dispatch(self, request, *args, **kwargs) -> HttpResponse:\n if request.member.has_active_membership() or (\n request.member.earliest_membership\n and request.member.earliest_membership.since > timezone.now().date()\n ):\n return super().dispatch(request, *args, **kwargs)\n raise PermissionDenied\n", "path": "website/education/views.py"}, {"content": "import io\nimport os\n\nfrom django.conf import settings\nfrom django.core import signing\nfrom django.core.files.base import ContentFile\nfrom django.core.files.storage import DefaultStorage, get_storage_class\nfrom django.core.files.uploadedfile import InMemoryUploadedFile\nfrom django.db.models.fields.files import FieldFile, ImageFieldFile\nfrom django.urls import reverse\n\n\ndef save_image(storage, image, path, format):\n buffer = io.BytesIO()\n image.convert(\"RGB\" if format == \"JPEG\" else \"RGBA\").save(fp=buffer, format=format)\n buff_val = buffer.getvalue()\n content = ContentFile(buff_val)\n file = InMemoryUploadedFile(\n content,\n None,\n f\"foo.{format.lower()}\",\n f\"image/{format.lower()}\",\n content.tell,\n None,\n )\n return storage.save(path, file)\n\n\ndef get_media_url(file, attachment=False):\n \"\"\"Get the url of the provided media file to serve in a browser.\n\n If the file is private a signature will be added.\n Do NOT use this with user input\n :param file: the file field\n :param attachment: filename to use for the attachment or False to not download as attachment\n :return: the url of the media\n \"\"\"\n storage = DefaultStorage()\n file_name 
= file\n if isinstance(file, (ImageFieldFile, FieldFile)):\n storage = file.storage\n file_name = file.name\n\n return f\"{storage.url(file_name, attachment)}\"\n\n\ndef get_thumbnail_url(file, size, fit=True):\n \"\"\"Get the thumbnail url of a media file, NEVER use this with user input.\n\n If the thumbnail exists this function will return the url of the\n media file, with signature if necessary. Does it not yet exist a route\n that executes the :func:`utils.media.views.generate_thumbnail`\n will be the output.\n :param file: the file field\n :param size: size of the image\n :param fit: False to keep the aspect ratio, True to crop\n :return: get-thumbnail path\n \"\"\"\n storage = DefaultStorage()\n name = file\n\n if isinstance(file, (ImageFieldFile, FieldFile)):\n storage = file.storage\n name = file.name\n\n is_public = isinstance(storage, get_storage_class(settings.PUBLIC_FILE_STORAGE))\n size_fit = f\"{size}_{int(fit)}\"\n\n if name.endswith(\".svg\") and is_public:\n return storage.url(name)\n\n sig_info = {\n \"size\": size,\n \"fit\": int(fit),\n \"name\": name,\n \"thumb_path\": f\"thumbnails/{size_fit}/{name}\",\n \"serve_path\": f\"thumbnails/{size_fit}/{name}\",\n \"storage\": f\"{storage.__class__.__module__}.{storage.__class__.__name__}\",\n }\n\n # We provide a URL instead of calling it as a function, so that using\n # it means kicking off a new GET request. If we would need to check all files for the\n # thumbnails inline, loading an album overview would have high latency.\n return (\n reverse(\"get-thumbnail\", args=[os.path.join(size_fit, sig_info[\"name\"])])\n + f\"?sig={signing.dumps(sig_info)}\"\n )\n", "path": "website/utils/media/services.py"}]} | 3,387 | 359 |
gh_patches_debug_3844 | rasdani/github-patches | git_diff | python-telegram-bot__python-telegram-bot-1968 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[FEATURE] User.send_poll shortcut
We have `Chat.send_poll` as a shortcut, but `User` was neglected in #1418
--- END ISSUE ---
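For illustration only, here is a minimal sketch of the kind of shortcut the issue asks for, mirroring the other `send_*` shortcuts on `User`; this is an assumed sketch, not code taken from the library:

```python
# Hypothetical sketch of the requested shortcut: it simply forwards to the bot,
# using the user's id as the chat id, exactly like the existing send_voice one.
def send_poll(self, *args, **kwargs):
    """Shortcut for ``bot.send_poll(User.id, *args, **kwargs)``."""
    return self.bot.send_poll(self.id, *args, **kwargs)
```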
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `telegram/user.py`
Content:
```
1 #!/usr/bin/env python
2 # pylint: disable=C0103,W0622
3 #
4 # A library that provides a Python interface to the Telegram Bot API
5 # Copyright (C) 2015-2020
6 # Leandro Toledo de Souza <[email protected]>
7 #
8 # This program is free software: you can redistribute it and/or modify
9 # it under the terms of the GNU Lesser Public License as published by
10 # the Free Software Foundation, either version 3 of the License, or
11 # (at your option) any later version.
12 #
13 # This program is distributed in the hope that it will be useful,
14 # but WITHOUT ANY WARRANTY; without even the implied warranty of
15 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 # GNU Lesser Public License for more details.
17 #
18 # You should have received a copy of the GNU Lesser Public License
19 # along with this program. If not, see [http://www.gnu.org/licenses/].
20 """This module contains an object that represents a Telegram User."""
21
22 from telegram import TelegramObject
23 from telegram.utils.helpers import mention_html as util_mention_html
24 from telegram.utils.helpers import mention_markdown as util_mention_markdown
25
26
27 class User(TelegramObject):
28 """This object represents a Telegram user or bot.
29
30 Attributes:
31 id (:obj:`int`): Unique identifier for this user or bot.
32 is_bot (:obj:`bool`): True, if this user is a bot
33 first_name (:obj:`str`): User's or bot's first name.
34 last_name (:obj:`str`): Optional. User's or bot's last name.
35 username (:obj:`str`): Optional. User's or bot's username.
36 language_code (:obj:`str`): Optional. IETF language tag of the user's language.
37 can_join_groups (:obj:`str`): Optional. True, if the bot can be invited to groups.
38 Returned only in :attr:`telegram.Bot.get_me` requests.
39 can_read_all_group_messages (:obj:`str`): Optional. True, if privacy mode is disabled
40 for the bot. Returned only in :attr:`telegram.Bot.get_me` requests.
41 supports_inline_queries (:obj:`str`): Optional. True, if the bot supports inline queries.
42 Returned only in :attr:`telegram.Bot.get_me` requests.
43 bot (:class:`telegram.Bot`): Optional. The Bot to use for instance methods.
44
45 Args:
46 id (:obj:`int`): Unique identifier for this user or bot.
47 is_bot (:obj:`bool`): True, if this user is a bot
48 first_name (:obj:`str`): User's or bot's first name.
49 last_name (:obj:`str`, optional): User's or bot's last name.
50 username (:obj:`str`, optional): User's or bot's username.
51 language_code (:obj:`str`, optional): IETF language tag of the user's language.
52 can_join_groups (:obj:`str`, optional): True, if the bot can be invited to groups.
53 Returned only in :attr:`telegram.Bot.get_me` requests.
54 can_read_all_group_messages (:obj:`str`, optional): True, if privacy mode is disabled
55 for the bot. Returned only in :attr:`telegram.Bot.get_me` requests.
56 supports_inline_queries (:obj:`str`, optional): True, if the bot supports inline queries.
57 Returned only in :attr:`telegram.Bot.get_me` requests.
58 bot (:class:`telegram.Bot`, optional): The Bot to use for instance methods.
59
60 """
61
62 def __init__(self,
63 id,
64 first_name,
65 is_bot,
66 last_name=None,
67 username=None,
68 language_code=None,
69 can_join_groups=None,
70 can_read_all_group_messages=None,
71 supports_inline_queries=None,
72 bot=None,
73 **kwargs):
74 # Required
75 self.id = int(id)
76 self.first_name = first_name
77 self.is_bot = is_bot
78 # Optionals
79 self.last_name = last_name
80 self.username = username
81 self.language_code = language_code
82 self.can_join_groups = can_join_groups
83 self.can_read_all_group_messages = can_read_all_group_messages
84 self.supports_inline_queries = supports_inline_queries
85 self.bot = bot
86
87 self._id_attrs = (self.id,)
88
89 @property
90 def name(self):
91 """:obj:`str`: Convenience property. If available, returns the user's :attr:`username`
92 prefixed with "@". If :attr:`username` is not available, returns :attr:`full_name`."""
93 if self.username:
94 return '@{}'.format(self.username)
95 return self.full_name
96
97 @property
98 def full_name(self):
99 """:obj:`str`: Convenience property. The user's :attr:`first_name`, followed by (if
100 available) :attr:`last_name`."""
101
102 if self.last_name:
103 return u'{} {}'.format(self.first_name, self.last_name)
104 return self.first_name
105
106 @property
107 def link(self):
108 """:obj:`str`: Convenience property. If :attr:`username` is available, returns a t.me link
109 of the user."""
110
111 if self.username:
112 return "https://t.me/{}".format(self.username)
113 return None
114
115 @classmethod
116 def de_json(cls, data, bot):
117 if not data:
118 return None
119 data = super(User, cls).de_json(data, bot)
120
121 return cls(bot=bot, **data)
122
123 def get_profile_photos(self, *args, **kwargs):
124 """
125 Shortcut for::
126
127 bot.get_user_profile_photos(update.message.from_user.id, *args, **kwargs)
128
129 """
130
131 return self.bot.get_user_profile_photos(self.id, *args, **kwargs)
132
133 @classmethod
134 def de_list(cls, data, bot):
135 if not data:
136 return []
137
138 users = list()
139 for user in data:
140 users.append(cls.de_json(user, bot))
141
142 return users
143
144 def mention_markdown(self, name=None):
145 """
146 Args:
147 name (:obj:`str`): The name used as a link for the user. Defaults to :attr:`full_name`.
148
149 Returns:
150 :obj:`str`: The inline mention for the user as markdown (version 1).
151
152 """
153 if name:
154 return util_mention_markdown(self.id, name)
155 return util_mention_markdown(self.id, self.full_name)
156
157 def mention_markdown_v2(self, name=None):
158 """
159 Args:
160 name (:obj:`str`): The name used as a link for the user. Defaults to :attr:`full_name`.
161
162 Returns:
163 :obj:`str`: The inline mention for the user as markdown (version 2).
164
165 """
166 if name:
167 return util_mention_markdown(self.id, name, version=2)
168 return util_mention_markdown(self.id, self.full_name, version=2)
169
170 def mention_html(self, name=None):
171 """
172 Args:
173 name (:obj:`str`): The name used as a link for the user. Defaults to :attr:`full_name`.
174
175 Returns:
176 :obj:`str`: The inline mention for the user as HTML.
177
178 """
179 if name:
180 return util_mention_html(self.id, name)
181 return util_mention_html(self.id, self.full_name)
182
183 def send_message(self, *args, **kwargs):
184 """Shortcut for::
185
186 bot.send_message(User.id, *args, **kwargs)
187
188 Where User is the current instance.
189
190 Returns:
191 :class:`telegram.Message`: On success, instance representing the message posted.
192
193 """
194 return self.bot.send_message(self.id, *args, **kwargs)
195
196 def send_photo(self, *args, **kwargs):
197 """Shortcut for::
198
199 bot.send_photo(User.id, *args, **kwargs)
200
201 Where User is the current instance.
202
203 Returns:
204 :class:`telegram.Message`: On success, instance representing the message posted.
205
206 """
207 return self.bot.send_photo(self.id, *args, **kwargs)
208
209 def send_audio(self, *args, **kwargs):
210 """Shortcut for::
211
212 bot.send_audio(User.id, *args, **kwargs)
213
214 Where User is the current instance.
215
216 Returns:
217 :class:`telegram.Message`: On success, instance representing the message posted.
218
219 """
220 return self.bot.send_audio(self.id, *args, **kwargs)
221
222 def send_document(self, *args, **kwargs):
223 """Shortcut for::
224
225 bot.send_document(User.id, *args, **kwargs)
226
227 Where User is the current instance.
228
229 Returns:
230 :class:`telegram.Message`: On success, instance representing the message posted.
231
232 """
233 return self.bot.send_document(self.id, *args, **kwargs)
234
235 def send_animation(self, *args, **kwargs):
236 """Shortcut for::
237
238 bot.send_animation(User.id, *args, **kwargs)
239
240 Where User is the current instance.
241
242 Returns:
243 :class:`telegram.Message`: On success, instance representing the message posted.
244
245 """
246 return self.bot.send_animation(self.id, *args, **kwargs)
247
248 def send_sticker(self, *args, **kwargs):
249 """Shortcut for::
250
251 bot.send_sticker(User.id, *args, **kwargs)
252
253 Where User is the current instance.
254
255 Returns:
256 :class:`telegram.Message`: On success, instance representing the message posted.
257
258 """
259 return self.bot.send_sticker(self.id, *args, **kwargs)
260
261 def send_video(self, *args, **kwargs):
262 """Shortcut for::
263
264 bot.send_video(User.id, *args, **kwargs)
265
266 Where User is the current instance.
267
268 Returns:
269 :class:`telegram.Message`: On success, instance representing the message posted.
270
271 """
272 return self.bot.send_video(self.id, *args, **kwargs)
273
274 def send_video_note(self, *args, **kwargs):
275 """Shortcut for::
276
277 bot.send_video_note(User.id, *args, **kwargs)
278
279 Where User is the current instance.
280
281 Returns:
282 :class:`telegram.Message`: On success, instance representing the message posted.
283
284 """
285 return self.bot.send_video_note(self.id, *args, **kwargs)
286
287 def send_voice(self, *args, **kwargs):
288 """Shortcut for::
289
290 bot.send_voice(User.id, *args, **kwargs)
291
292 Where User is the current instance.
293
294 Returns:
295 :class:`telegram.Message`: On success, instance representing the message posted.
296
297 """
298 return self.bot.send_voice(self.id, *args, **kwargs)
299
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/telegram/user.py b/telegram/user.py
--- a/telegram/user.py
+++ b/telegram/user.py
@@ -296,3 +296,16 @@
"""
return self.bot.send_voice(self.id, *args, **kwargs)
+
+ def send_poll(self, *args, **kwargs):
+ """Shortcut for::
+
+ bot.send_poll(User.id, *args, **kwargs)
+
+ Where User is the current instance.
+
+ Returns:
+ :class:`telegram.Message`: On success, instance representing the message posted.
+
+ """
+ return self.bot.send_poll(self.id, *args, **kwargs)
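A possible usage sketch of the shortcut added above (the handler shape and the poll arguments are illustrative assumptions, not part of the diff):

```python
# Illustrative only: inside an update handler, `update.message.from_user` is a
# User bound to a Bot, so the shortcut forwards to bot.send_poll(user.id, ...).
def poll_command(update, context):
    update.message.from_user.send_poll(
        question="Shall we meet today?",
        options=["Yes", "No"],
    )
```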
| {"golden_diff": "diff --git a/telegram/user.py b/telegram/user.py\n--- a/telegram/user.py\n+++ b/telegram/user.py\n@@ -296,3 +296,16 @@\n \n \"\"\"\n return self.bot.send_voice(self.id, *args, **kwargs)\n+\n+ def send_poll(self, *args, **kwargs):\n+ \"\"\"Shortcut for::\n+\n+ bot.send_poll(User.id, *args, **kwargs)\n+\n+ Where User is the current instance.\n+\n+ Returns:\n+ :class:`telegram.Message`: On success, instance representing the message posted.\n+\n+ \"\"\"\n+ return self.bot.send_poll(self.id, *args, **kwargs)\n", "issue": "[FEATURE] User.send_poll shortcut\nWe have `Chat.send_poll` as shortcut, but `User` was neglected in #1418\n", "before_files": [{"content": "#!/usr/bin/env python\n# pylint: disable=C0103,W0622\n#\n# A library that provides a Python interface to the Telegram Bot API\n# Copyright (C) 2015-2020\n# Leandro Toledo de Souza <[email protected]>\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Lesser Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Lesser Public License for more details.\n#\n# You should have received a copy of the GNU Lesser Public License\n# along with this program. If not, see [http://www.gnu.org/licenses/].\n\"\"\"This module contains an object that represents a Telegram User.\"\"\"\n\nfrom telegram import TelegramObject\nfrom telegram.utils.helpers import mention_html as util_mention_html\nfrom telegram.utils.helpers import mention_markdown as util_mention_markdown\n\n\nclass User(TelegramObject):\n \"\"\"This object represents a Telegram user or bot.\n\n Attributes:\n id (:obj:`int`): Unique identifier for this user or bot.\n is_bot (:obj:`bool`): True, if this user is a bot\n first_name (:obj:`str`): User's or bot's first name.\n last_name (:obj:`str`): Optional. User's or bot's last name.\n username (:obj:`str`): Optional. User's or bot's username.\n language_code (:obj:`str`): Optional. IETF language tag of the user's language.\n can_join_groups (:obj:`str`): Optional. True, if the bot can be invited to groups.\n Returned only in :attr:`telegram.Bot.get_me` requests.\n can_read_all_group_messages (:obj:`str`): Optional. True, if privacy mode is disabled\n for the bot. Returned only in :attr:`telegram.Bot.get_me` requests.\n supports_inline_queries (:obj:`str`): Optional. True, if the bot supports inline queries.\n Returned only in :attr:`telegram.Bot.get_me` requests.\n bot (:class:`telegram.Bot`): Optional. The Bot to use for instance methods.\n\n Args:\n id (:obj:`int`): Unique identifier for this user or bot.\n is_bot (:obj:`bool`): True, if this user is a bot\n first_name (:obj:`str`): User's or bot's first name.\n last_name (:obj:`str`, optional): User's or bot's last name.\n username (:obj:`str`, optional): User's or bot's username.\n language_code (:obj:`str`, optional): IETF language tag of the user's language.\n can_join_groups (:obj:`str`, optional): True, if the bot can be invited to groups.\n Returned only in :attr:`telegram.Bot.get_me` requests.\n can_read_all_group_messages (:obj:`str`, optional): True, if privacy mode is disabled\n for the bot. 
Returned only in :attr:`telegram.Bot.get_me` requests.\n supports_inline_queries (:obj:`str`, optional): True, if the bot supports inline queries.\n Returned only in :attr:`telegram.Bot.get_me` requests.\n bot (:class:`telegram.Bot`, optional): The Bot to use for instance methods.\n\n \"\"\"\n\n def __init__(self,\n id,\n first_name,\n is_bot,\n last_name=None,\n username=None,\n language_code=None,\n can_join_groups=None,\n can_read_all_group_messages=None,\n supports_inline_queries=None,\n bot=None,\n **kwargs):\n # Required\n self.id = int(id)\n self.first_name = first_name\n self.is_bot = is_bot\n # Optionals\n self.last_name = last_name\n self.username = username\n self.language_code = language_code\n self.can_join_groups = can_join_groups\n self.can_read_all_group_messages = can_read_all_group_messages\n self.supports_inline_queries = supports_inline_queries\n self.bot = bot\n\n self._id_attrs = (self.id,)\n\n @property\n def name(self):\n \"\"\":obj:`str`: Convenience property. If available, returns the user's :attr:`username`\n prefixed with \"@\". If :attr:`username` is not available, returns :attr:`full_name`.\"\"\"\n if self.username:\n return '@{}'.format(self.username)\n return self.full_name\n\n @property\n def full_name(self):\n \"\"\":obj:`str`: Convenience property. The user's :attr:`first_name`, followed by (if\n available) :attr:`last_name`.\"\"\"\n\n if self.last_name:\n return u'{} {}'.format(self.first_name, self.last_name)\n return self.first_name\n\n @property\n def link(self):\n \"\"\":obj:`str`: Convenience property. If :attr:`username` is available, returns a t.me link\n of the user.\"\"\"\n\n if self.username:\n return \"https://t.me/{}\".format(self.username)\n return None\n\n @classmethod\n def de_json(cls, data, bot):\n if not data:\n return None\n data = super(User, cls).de_json(data, bot)\n\n return cls(bot=bot, **data)\n\n def get_profile_photos(self, *args, **kwargs):\n \"\"\"\n Shortcut for::\n\n bot.get_user_profile_photos(update.message.from_user.id, *args, **kwargs)\n\n \"\"\"\n\n return self.bot.get_user_profile_photos(self.id, *args, **kwargs)\n\n @classmethod\n def de_list(cls, data, bot):\n if not data:\n return []\n\n users = list()\n for user in data:\n users.append(cls.de_json(user, bot))\n\n return users\n\n def mention_markdown(self, name=None):\n \"\"\"\n Args:\n name (:obj:`str`): The name used as a link for the user. Defaults to :attr:`full_name`.\n\n Returns:\n :obj:`str`: The inline mention for the user as markdown (version 1).\n\n \"\"\"\n if name:\n return util_mention_markdown(self.id, name)\n return util_mention_markdown(self.id, self.full_name)\n\n def mention_markdown_v2(self, name=None):\n \"\"\"\n Args:\n name (:obj:`str`): The name used as a link for the user. Defaults to :attr:`full_name`.\n\n Returns:\n :obj:`str`: The inline mention for the user as markdown (version 2).\n\n \"\"\"\n if name:\n return util_mention_markdown(self.id, name, version=2)\n return util_mention_markdown(self.id, self.full_name, version=2)\n\n def mention_html(self, name=None):\n \"\"\"\n Args:\n name (:obj:`str`): The name used as a link for the user. 
Defaults to :attr:`full_name`.\n\n Returns:\n :obj:`str`: The inline mention for the user as HTML.\n\n \"\"\"\n if name:\n return util_mention_html(self.id, name)\n return util_mention_html(self.id, self.full_name)\n\n def send_message(self, *args, **kwargs):\n \"\"\"Shortcut for::\n\n bot.send_message(User.id, *args, **kwargs)\n\n Where User is the current instance.\n\n Returns:\n :class:`telegram.Message`: On success, instance representing the message posted.\n\n \"\"\"\n return self.bot.send_message(self.id, *args, **kwargs)\n\n def send_photo(self, *args, **kwargs):\n \"\"\"Shortcut for::\n\n bot.send_photo(User.id, *args, **kwargs)\n\n Where User is the current instance.\n\n Returns:\n :class:`telegram.Message`: On success, instance representing the message posted.\n\n \"\"\"\n return self.bot.send_photo(self.id, *args, **kwargs)\n\n def send_audio(self, *args, **kwargs):\n \"\"\"Shortcut for::\n\n bot.send_audio(User.id, *args, **kwargs)\n\n Where User is the current instance.\n\n Returns:\n :class:`telegram.Message`: On success, instance representing the message posted.\n\n \"\"\"\n return self.bot.send_audio(self.id, *args, **kwargs)\n\n def send_document(self, *args, **kwargs):\n \"\"\"Shortcut for::\n\n bot.send_document(User.id, *args, **kwargs)\n\n Where User is the current instance.\n\n Returns:\n :class:`telegram.Message`: On success, instance representing the message posted.\n\n \"\"\"\n return self.bot.send_document(self.id, *args, **kwargs)\n\n def send_animation(self, *args, **kwargs):\n \"\"\"Shortcut for::\n\n bot.send_animation(User.id, *args, **kwargs)\n\n Where User is the current instance.\n\n Returns:\n :class:`telegram.Message`: On success, instance representing the message posted.\n\n \"\"\"\n return self.bot.send_animation(self.id, *args, **kwargs)\n\n def send_sticker(self, *args, **kwargs):\n \"\"\"Shortcut for::\n\n bot.send_sticker(User.id, *args, **kwargs)\n\n Where User is the current instance.\n\n Returns:\n :class:`telegram.Message`: On success, instance representing the message posted.\n\n \"\"\"\n return self.bot.send_sticker(self.id, *args, **kwargs)\n\n def send_video(self, *args, **kwargs):\n \"\"\"Shortcut for::\n\n bot.send_video(User.id, *args, **kwargs)\n\n Where User is the current instance.\n\n Returns:\n :class:`telegram.Message`: On success, instance representing the message posted.\n\n \"\"\"\n return self.bot.send_video(self.id, *args, **kwargs)\n\n def send_video_note(self, *args, **kwargs):\n \"\"\"Shortcut for::\n\n bot.send_video_note(User.id, *args, **kwargs)\n\n Where User is the current instance.\n\n Returns:\n :class:`telegram.Message`: On success, instance representing the message posted.\n\n \"\"\"\n return self.bot.send_video_note(self.id, *args, **kwargs)\n\n def send_voice(self, *args, **kwargs):\n \"\"\"Shortcut for::\n\n bot.send_voice(User.id, *args, **kwargs)\n\n Where User is the current instance.\n\n Returns:\n :class:`telegram.Message`: On success, instance representing the message posted.\n\n \"\"\"\n return self.bot.send_voice(self.id, *args, **kwargs)\n", "path": "telegram/user.py"}], "after_files": [{"content": "#!/usr/bin/env python\n# pylint: disable=C0103,W0622\n#\n# A library that provides a Python interface to the Telegram Bot API\n# Copyright (C) 2015-2020\n# Leandro Toledo de Souza <[email protected]>\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Lesser Public License as published by\n# the Free Software Foundation, either version 3 of the License, 
or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Lesser Public License for more details.\n#\n# You should have received a copy of the GNU Lesser Public License\n# along with this program. If not, see [http://www.gnu.org/licenses/].\n\"\"\"This module contains an object that represents a Telegram User.\"\"\"\n\nfrom telegram import TelegramObject\nfrom telegram.utils.helpers import mention_html as util_mention_html\nfrom telegram.utils.helpers import mention_markdown as util_mention_markdown\n\n\nclass User(TelegramObject):\n \"\"\"This object represents a Telegram user or bot.\n\n Attributes:\n id (:obj:`int`): Unique identifier for this user or bot.\n is_bot (:obj:`bool`): True, if this user is a bot\n first_name (:obj:`str`): User's or bot's first name.\n last_name (:obj:`str`): Optional. User's or bot's last name.\n username (:obj:`str`): Optional. User's or bot's username.\n language_code (:obj:`str`): Optional. IETF language tag of the user's language.\n can_join_groups (:obj:`str`): Optional. True, if the bot can be invited to groups.\n Returned only in :attr:`telegram.Bot.get_me` requests.\n can_read_all_group_messages (:obj:`str`): Optional. True, if privacy mode is disabled\n for the bot. Returned only in :attr:`telegram.Bot.get_me` requests.\n supports_inline_queries (:obj:`str`): Optional. True, if the bot supports inline queries.\n Returned only in :attr:`telegram.Bot.get_me` requests.\n bot (:class:`telegram.Bot`): Optional. The Bot to use for instance methods.\n\n Args:\n id (:obj:`int`): Unique identifier for this user or bot.\n is_bot (:obj:`bool`): True, if this user is a bot\n first_name (:obj:`str`): User's or bot's first name.\n last_name (:obj:`str`, optional): User's or bot's last name.\n username (:obj:`str`, optional): User's or bot's username.\n language_code (:obj:`str`, optional): IETF language tag of the user's language.\n can_join_groups (:obj:`str`, optional): True, if the bot can be invited to groups.\n Returned only in :attr:`telegram.Bot.get_me` requests.\n can_read_all_group_messages (:obj:`str`, optional): True, if privacy mode is disabled\n for the bot. Returned only in :attr:`telegram.Bot.get_me` requests.\n supports_inline_queries (:obj:`str`, optional): True, if the bot supports inline queries.\n Returned only in :attr:`telegram.Bot.get_me` requests.\n bot (:class:`telegram.Bot`, optional): The Bot to use for instance methods.\n\n \"\"\"\n\n def __init__(self,\n id,\n first_name,\n is_bot,\n last_name=None,\n username=None,\n language_code=None,\n can_join_groups=None,\n can_read_all_group_messages=None,\n supports_inline_queries=None,\n bot=None,\n **kwargs):\n # Required\n self.id = int(id)\n self.first_name = first_name\n self.is_bot = is_bot\n # Optionals\n self.last_name = last_name\n self.username = username\n self.language_code = language_code\n self.can_join_groups = can_join_groups\n self.can_read_all_group_messages = can_read_all_group_messages\n self.supports_inline_queries = supports_inline_queries\n self.bot = bot\n\n self._id_attrs = (self.id,)\n\n @property\n def name(self):\n \"\"\":obj:`str`: Convenience property. If available, returns the user's :attr:`username`\n prefixed with \"@\". 
If :attr:`username` is not available, returns :attr:`full_name`.\"\"\"\n if self.username:\n return '@{}'.format(self.username)\n return self.full_name\n\n @property\n def full_name(self):\n \"\"\":obj:`str`: Convenience property. The user's :attr:`first_name`, followed by (if\n available) :attr:`last_name`.\"\"\"\n\n if self.last_name:\n return u'{} {}'.format(self.first_name, self.last_name)\n return self.first_name\n\n @property\n def link(self):\n \"\"\":obj:`str`: Convenience property. If :attr:`username` is available, returns a t.me link\n of the user.\"\"\"\n\n if self.username:\n return \"https://t.me/{}\".format(self.username)\n return None\n\n @classmethod\n def de_json(cls, data, bot):\n if not data:\n return None\n data = super(User, cls).de_json(data, bot)\n\n return cls(bot=bot, **data)\n\n def get_profile_photos(self, *args, **kwargs):\n \"\"\"\n Shortcut for::\n\n bot.get_user_profile_photos(update.message.from_user.id, *args, **kwargs)\n\n \"\"\"\n\n return self.bot.get_user_profile_photos(self.id, *args, **kwargs)\n\n @classmethod\n def de_list(cls, data, bot):\n if not data:\n return []\n\n users = list()\n for user in data:\n users.append(cls.de_json(user, bot))\n\n return users\n\n def mention_markdown(self, name=None):\n \"\"\"\n Args:\n name (:obj:`str`): The name used as a link for the user. Defaults to :attr:`full_name`.\n\n Returns:\n :obj:`str`: The inline mention for the user as markdown (version 1).\n\n \"\"\"\n if name:\n return util_mention_markdown(self.id, name)\n return util_mention_markdown(self.id, self.full_name)\n\n def mention_markdown_v2(self, name=None):\n \"\"\"\n Args:\n name (:obj:`str`): The name used as a link for the user. Defaults to :attr:`full_name`.\n\n Returns:\n :obj:`str`: The inline mention for the user as markdown (version 2).\n\n \"\"\"\n if name:\n return util_mention_markdown(self.id, name, version=2)\n return util_mention_markdown(self.id, self.full_name, version=2)\n\n def mention_html(self, name=None):\n \"\"\"\n Args:\n name (:obj:`str`): The name used as a link for the user. 
Defaults to :attr:`full_name`.\n\n Returns:\n :obj:`str`: The inline mention for the user as HTML.\n\n \"\"\"\n if name:\n return util_mention_html(self.id, name)\n return util_mention_html(self.id, self.full_name)\n\n def send_message(self, *args, **kwargs):\n \"\"\"Shortcut for::\n\n bot.send_message(User.id, *args, **kwargs)\n\n Where User is the current instance.\n\n Returns:\n :class:`telegram.Message`: On success, instance representing the message posted.\n\n \"\"\"\n return self.bot.send_message(self.id, *args, **kwargs)\n\n def send_photo(self, *args, **kwargs):\n \"\"\"Shortcut for::\n\n bot.send_photo(User.id, *args, **kwargs)\n\n Where User is the current instance.\n\n Returns:\n :class:`telegram.Message`: On success, instance representing the message posted.\n\n \"\"\"\n return self.bot.send_photo(self.id, *args, **kwargs)\n\n def send_audio(self, *args, **kwargs):\n \"\"\"Shortcut for::\n\n bot.send_audio(User.id, *args, **kwargs)\n\n Where User is the current instance.\n\n Returns:\n :class:`telegram.Message`: On success, instance representing the message posted.\n\n \"\"\"\n return self.bot.send_audio(self.id, *args, **kwargs)\n\n def send_document(self, *args, **kwargs):\n \"\"\"Shortcut for::\n\n bot.send_document(User.id, *args, **kwargs)\n\n Where User is the current instance.\n\n Returns:\n :class:`telegram.Message`: On success, instance representing the message posted.\n\n \"\"\"\n return self.bot.send_document(self.id, *args, **kwargs)\n\n def send_animation(self, *args, **kwargs):\n \"\"\"Shortcut for::\n\n bot.send_animation(User.id, *args, **kwargs)\n\n Where User is the current instance.\n\n Returns:\n :class:`telegram.Message`: On success, instance representing the message posted.\n\n \"\"\"\n return self.bot.send_animation(self.id, *args, **kwargs)\n\n def send_sticker(self, *args, **kwargs):\n \"\"\"Shortcut for::\n\n bot.send_sticker(User.id, *args, **kwargs)\n\n Where User is the current instance.\n\n Returns:\n :class:`telegram.Message`: On success, instance representing the message posted.\n\n \"\"\"\n return self.bot.send_sticker(self.id, *args, **kwargs)\n\n def send_video(self, *args, **kwargs):\n \"\"\"Shortcut for::\n\n bot.send_video(User.id, *args, **kwargs)\n\n Where User is the current instance.\n\n Returns:\n :class:`telegram.Message`: On success, instance representing the message posted.\n\n \"\"\"\n return self.bot.send_video(self.id, *args, **kwargs)\n\n def send_video_note(self, *args, **kwargs):\n \"\"\"Shortcut for::\n\n bot.send_video_note(User.id, *args, **kwargs)\n\n Where User is the current instance.\n\n Returns:\n :class:`telegram.Message`: On success, instance representing the message posted.\n\n \"\"\"\n return self.bot.send_video_note(self.id, *args, **kwargs)\n\n def send_voice(self, *args, **kwargs):\n \"\"\"Shortcut for::\n\n bot.send_voice(User.id, *args, **kwargs)\n\n Where User is the current instance.\n\n Returns:\n :class:`telegram.Message`: On success, instance representing the message posted.\n\n \"\"\"\n return self.bot.send_voice(self.id, *args, **kwargs)\n\n def send_poll(self, *args, **kwargs):\n \"\"\"Shortcut for::\n\n bot.send_poll(User.id, *args, **kwargs)\n\n Where User is the current instance.\n\n Returns:\n :class:`telegram.Message`: On success, instance representing the message posted.\n\n \"\"\"\n return self.bot.send_poll(self.id, *args, **kwargs)\n", "path": "telegram/user.py"}]} | 3,388 | 148 |
gh_patches_debug_28652 | rasdani/github-patches | git_diff | svthalia__concrexit-1380 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
BankAccount last used should be updated to withdrawal date not processing date.
### Describe the bug
We keep the last date a mandate was used so that mandates that have not been used for more than <s>a year</s> three years can be revoked (because those are the SEPA rules). However, the ‘last used’ date we currently store is the processing date of the batch, not the indicated withdrawal date.
### Expected behaviour
Update it with the withdrawal date of the batch.
--- END ISSUE ---
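To make the intended behaviour concrete, here is a hedged sketch of the processing step the issue implies; the model and field names are taken from the code below, and `withdrawal_date` is assumed to be a field on `Batch`:

```python
# Sketch only: stamp each used mandate with the batch's withdrawal date
# instead of the moment the batch happens to be processed.
def stamp_mandates(batch):
    for payment in batch.payments_set.select_related("paid_by"):
        bank_account = payment.paid_by.bank_accounts.last()
        bank_account.last_used = batch.withdrawal_date  # rather than timezone.now()
        bank_account.save()
```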
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `website/payments/admin_views.py`
Content:
```
1 """Admin views provided by the payments package"""
2 import csv
3
4 from django.apps import apps
5 from django.contrib import messages
6 from django.contrib.admin.utils import model_ngettext
7 from django.contrib.admin.views.decorators import staff_member_required
8 from django.contrib.auth.decorators import permission_required
9 from django.db.models import Sum, Count, Min, Max
10 from django.http import HttpResponse
11 from django.core.exceptions import SuspiciousOperation, DisallowedRedirect
12 from django.shortcuts import redirect, get_object_or_404, render
13 from django.utils import timezone
14 from django.utils.text import capfirst
15 from django.utils.decorators import method_decorator
16 from django.utils.http import url_has_allowed_host_and_scheme
17 from django.utils.translation import gettext_lazy as _
18 from django.views import View
19
20 from payments import services
21 from .models import Payment, Batch, PaymentUser
22
23
24 @method_decorator(staff_member_required, name="dispatch")
25 @method_decorator(
26 permission_required("payments.process_payments"), name="dispatch",
27 )
28 class PaymentAdminView(View):
29 """
30 View that creates a payment
31 """
32
33 def post(self, request, *args, app_label, model_name, payable, **kwargs):
34 if "type" not in request.POST:
35 raise SuspiciousOperation("Missing POST parameters")
36
37 if "next" in request.POST and not url_has_allowed_host_and_scheme(
38 request.POST.get("next"), allowed_hosts={request.get_host()}
39 ):
40 raise DisallowedRedirect
41
42 payable_model = apps.get_model(app_label=app_label, model_name=model_name)
43 payable_obj = payable_model.objects.get(pk=payable)
44
45 result = services.create_payment(
46 payable_obj, self.request.member, request.POST["type"],
47 )
48 payable_obj.save()
49
50 if result:
51 messages.success(
52 request, _("Successfully paid %s.") % model_ngettext(payable_obj, 1),
53 )
54 else:
55 messages.error(
56 request, _("Could not pay %s.") % model_ngettext(payable_obj, 1),
57 )
58 return redirect(f"admin:{app_label}_{model_name}_change", payable_obj.pk)
59
60 if "next" in request.POST:
61 return redirect(request.POST["next"])
62
63 return redirect("admin:payments_payment_change", result.pk)
64
65
66 @method_decorator(staff_member_required, name="dispatch")
67 @method_decorator(
68 permission_required("payments.process_batches"), name="dispatch",
69 )
70 class BatchProcessAdminView(View):
71 """
72 View that processes a batch
73 """
74
75 def post(self, request, *args, **kwargs):
76 batch = Batch.objects.get(pk=kwargs["pk"])
77
78 if "next" in request.POST and not url_has_allowed_host_and_scheme(
79 request.POST.get("next"), allowed_hosts={request.get_host()}
80 ):
81 raise DisallowedRedirect
82
83 if batch.processed:
84 messages.error(
85 request, _("{} already processed.").format(model_ngettext(batch, 1))
86 )
87 else:
88 batch.processed = True
89 payments = batch.payments_set.select_related("paid_by")
90 for payment in payments:
91 bank_account = payment.paid_by.bank_accounts.last()
92 bank_account.last_used = timezone.now()
93 bank_account.save()
94
95 batch.save()
96
97 services.send_tpay_batch_processing_emails(batch)
98
99 messages.success(
100 request,
101 _("Successfully processed {}.").format(model_ngettext(batch, 1)),
102 )
103
104 if "next" in request.POST:
105 return redirect(request.POST["next"])
106
107 return redirect("admin:payments_batch_change", kwargs["pk"])
108
109
110 @method_decorator(staff_member_required, name="dispatch")
111 @method_decorator(
112 permission_required("payments.process_batches"), name="dispatch",
113 )
114 class BatchExportAdminView(View):
115 """
116 View that exports a batch
117 """
118
119 def post(self, request, *args, **kwargs):
120 batch = Batch.objects.get(pk=kwargs["pk"])
121
122 response = HttpResponse(content_type="text/csv")
123 response["Content-Disposition"] = 'attachment;filename="batch.csv"'
124 writer = csv.writer(response)
125 headers = [
126 _("Account holder"),
127 _("IBAN"),
128 _("Mandate Reference"),
129 _("Amount"),
130 _("Description"),
131 _("Mandate Date"),
132 ]
133 writer.writerow([capfirst(x) for x in headers])
134
135 member_rows = batch.payments_set.values("paid_by").annotate(total=Sum("amount"))
136
137 for row in member_rows:
138 member = PaymentUser.objects.get(id=row["paid_by"])
139 bankaccount = member.bank_accounts.last()
140 writer.writerow(
141 [
142 member.get_full_name(),
143 bankaccount.iban,
144 bankaccount.mandate_no,
145 f"{row['total']:.2f}",
146 batch.description,
147 bankaccount.valid_from,
148 ]
149 )
150 return response
151
152
153 @method_decorator(staff_member_required, name="dispatch")
154 @method_decorator(
155 permission_required("payments.process_batches"), name="dispatch",
156 )
157 class BatchTopicExportAdminView(View):
158 """
159 View that exports a batch per topic
160 """
161
162 def post(self, request, *args, **kwargs):
163 batch = Batch.objects.get(pk=kwargs["pk"])
164
165 response = HttpResponse(content_type="text/csv")
166 response["Content-Disposition"] = 'attachment;filename="batch-topic.csv"'
167 writer = csv.writer(response)
168 headers = [
169 _("Topic"),
170 _("No. of payments"),
171 _("First payment"),
172 _("Last payment"),
173 _("Total amount"),
174 ]
175 writer.writerow([capfirst(x) for x in headers])
176
177 topic_rows = (
178 batch.payments_set.values("topic")
179 .annotate(
180 total=Sum("amount"),
181 count=Count("paid_by"),
182 min_date=Min("created_at"),
183 max_date=Max("created_at"),
184 )
185 .order_by("topic")
186 )
187
188 for row in topic_rows:
189 writer.writerow(
190 [
191 row["topic"],
192 row["count"],
193 timezone.localtime(row["min_date"]).date(),
194 timezone.localtime(row["max_date"]).date(),
195 f"{row['total']:.2f}",
196 ]
197 )
198 return response
199
200
201 @method_decorator(staff_member_required, name="dispatch")
202 @method_decorator(
203 permission_required("payments.process_batches"), name="dispatch",
204 )
205 class BatchTopicDescriptionAdminView(View):
206 """
207 Shows the topic export as plain text
208 """
209
210 template_name = "admin/payments/batch_topic.html"
211
212 def post(self, request, *args, **kwargs):
213 context = {}
214 batch = get_object_or_404(Batch, pk=kwargs["pk"])
215 topic_rows = (
216 batch.payments_set.values("topic")
217 .annotate(
218 total=Sum("amount"),
219 count=Count("paid_by"),
220 min_date=Min("created_at"),
221 max_date=Max("created_at"),
222 )
223 .order_by("topic")
224 )
225
226 description = f"Batch {batch.id} - {batch.processing_date if batch.processing_date else timezone.now().date()}:\n"
227 for row in topic_rows:
228 description += f"- {row['topic']} ({row['count']}x) [{timezone.localtime(row['min_date']).date()} -- {timezone.localtime(row['max_date']).date()}], total €{row['total']:.2f}\n"
229 description += f"\n{batch.description}"
230
231 context["batch"] = batch
232 context["description"] = description
233 return render(request, self.template_name, context)
234
235
236 @method_decorator(staff_member_required, name="dispatch")
237 @method_decorator(
238 permission_required("payments.process_batches"), name="dispatch",
239 )
240 class BatchNewFilledAdminView(View):
241 """
242 View that adds a new batch filled with all payments that where not already in a batch.
243 """
244
245 def get(self, request, *args, **kwargs):
246 batch = Batch()
247 batch.save()
248
249 payments = Payment.objects.filter(type=Payment.TPAY, batch=None,)
250
251 payments.update(batch=batch)
252
253 return redirect("admin:payments_batch_change", object_id=batch.id)
254
```
Path: `website/payments/services.py`
Content:
```
1 """The services defined by the payments package"""
2 import datetime
3 from typing import Union
4
5 from django.conf import settings
6 from django.db.models import QuerySet, Q, Sum
7 from django.urls import reverse
8 from django.utils import timezone, translation
9 from django.utils.translation import gettext_lazy as _
10
11 from members.models import Member
12 from registrations.emails import _send_email
13 from .exceptions import PaymentError
14 from .models import Payment, BankAccount, Payable, PaymentUser
15
16
17 def create_payment(
18 payable: Payable,
19 processed_by: Member,
20 pay_type: Union[Payment.CASH, Payment.CARD, Payment.WIRE, Payment.TPAY],
21 ) -> Payment:
22 """
23 Create a new payment from a payable object
24
25 :param payable: Payable object
26 :param processed_by: PaymentUser that processed this payment
27 :param pay_type: Payment type
28 :return: Payment object
29 """
30 payer = (
31 PaymentUser.objects.get(pk=payable.payment_payer.pk)
32 if payable.payment_payer
33 else None
34 )
35
36 if pay_type == Payment.TPAY and not payer.tpay_enabled:
37 raise PaymentError(_("This user does not have Thalia Pay enabled"))
38
39 if payable.payment is not None:
40 payable.payment.amount = payable.payment_amount
41 payable.payment.notes = payable.payment_notes
42 payable.payment.topic = payable.payment_topic
43 payable.payment.paid_by = payer
44 payable.payment.processed_by = processed_by
45 payable.payment.type = pay_type
46 payable.payment.save()
47 else:
48 payable.payment = Payment.objects.create(
49 processed_by=processed_by,
50 amount=payable.payment_amount,
51 notes=payable.payment_notes,
52 topic=payable.payment_topic,
53 paid_by=payer,
54 type=pay_type,
55 )
56 return payable.payment
57
58
59 def delete_payment(payable: Payable):
60 """
61 Removes a payment from a payable object
62 :param payable: Payable object
63 :return:
64 """
65 payment = payable.payment
66 if payment.created_at < timezone.now() - timezone.timedelta(
67 seconds=settings.PAYMENT_CHANGE_WINDOW
68 ):
69 raise PaymentError(_("You are not authorized to delete this payment."))
70 if payment.batch and payment.batch.processed:
71 raise PaymentError(_("This payment has already been processed."))
72
73 payable.payment = None
74 payable.save()
75 payment.delete()
76
77
78 def update_last_used(queryset: QuerySet, date: datetime.date = None) -> int:
79 """
80 Update the last used field of a BankAccount queryset
81
82 :param queryset: Queryset of BankAccounts
83 :param date: date to set last_used to
84 :return: number of affected rows
85 """
86 if not date:
87 date = timezone.now().date()
88
89 result = queryset.filter(
90 (Q(valid_from__gte=timezone.now()) & Q(valid_until__lt=timezone.now()))
91 | Q(valid_until=None)
92 ).update(last_used=date)
93 return result
94
95
96 def revoke_old_mandates() -> int:
97 """
98 Revokes all mandates that have not been used for 36 months or more
99 :return: number of affected rows
100 """
101 return BankAccount.objects.filter(
102 last_used__lte=(timezone.now() - timezone.timedelta(days=36 * 30))
103 ).update(valid_until=timezone.now().date())
104
105
106 def send_tpay_batch_processing_emails(batch):
107 """Sends withdrawal notice emails to all members in a batch"""
108 member_payments = batch.payments_set.values("paid_by").annotate(total=Sum("amount"))
109 for member_row in member_payments:
110 member = PaymentUser.objects.get(pk=member_row["paid_by"])
111 total_amount = member_row["total"]
112
113 with translation.override(member.profile.language):
114 _send_email(
115 member.email,
116 _("Thalia Pay withdrawal notice"),
117 "payments/email/tpay_withdrawal_notice_mail.txt",
118 {
119 "name": member.get_full_name(),
120 "batch": batch,
121 "bank_account": member.bank_accounts.filter(
122 mandate_no__isnull=False
123 ).last(),
124 "creditor_id": settings.SEPA_CREDITOR_ID,
125 "payments": batch.payments_set.filter(paid_by=member),
126 "total_amount": total_amount,
127 "payments_url": (
128 settings.BASE_URL + reverse("payments:payment-list",)
129 ),
130 },
131 )
132 return len(member_payments)
133
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/website/payments/admin_views.py b/website/payments/admin_views.py
--- a/website/payments/admin_views.py
+++ b/website/payments/admin_views.py
@@ -85,17 +85,7 @@
request, _("{} already processed.").format(model_ngettext(batch, 1))
)
else:
- batch.processed = True
- payments = batch.payments_set.select_related("paid_by")
- for payment in payments:
- bank_account = payment.paid_by.bank_accounts.last()
- bank_account.last_used = timezone.now()
- bank_account.save()
-
- batch.save()
-
- services.send_tpay_batch_processing_emails(batch)
-
+ services.process_batch(batch)
messages.success(
request,
_("Successfully processed {}.").format(model_ngettext(batch, 1)),
diff --git a/website/payments/services.py b/website/payments/services.py
--- a/website/payments/services.py
+++ b/website/payments/services.py
@@ -103,6 +103,26 @@
).update(valid_until=timezone.now().date())
+def process_batch(batch):
+ """
+ Processes a Thalia Pay batch
+
+ :param batch: the batch to be processed
+ :return:
+ """
+ batch.processed = True
+
+ payments = batch.payments_set.select_related("paid_by")
+ for payment in payments:
+ bank_account = payment.paid_by.bank_accounts.last()
+ bank_account.last_used = batch.withdrawal_date
+ bank_account.save()
+
+ batch.save()
+
+ send_tpay_batch_processing_emails(batch)
+
+
def send_tpay_batch_processing_emails(batch):
"""Sends withdrawal notice emails to all members in a batch"""
member_payments = batch.payments_set.values("paid_by").annotate(total=Sum("amount"))
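For context, a stand-in illustration of why the stamped date matters for the 36-month revocation window used in `revoke_old_mandates`; this is a plain-Python sketch, not the actual Django query:

```python
# Stand-in check: a mandate whose last withdrawal was 37 months ago should be
# eligible for revocation, regardless of when the batch itself was processed.
import datetime

THIRTY_SIX_MONTHS = datetime.timedelta(days=36 * 30)


def may_revoke(last_used, today):
    return last_used <= today - THIRTY_SIX_MONTHS


today = datetime.date(2020, 11, 30)
last_withdrawal = today - datetime.timedelta(days=37 * 30)
assert may_revoke(last_withdrawal, today)
```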
| {"golden_diff": "diff --git a/website/payments/admin_views.py b/website/payments/admin_views.py\n--- a/website/payments/admin_views.py\n+++ b/website/payments/admin_views.py\n@@ -85,17 +85,7 @@\n request, _(\"{} already processed.\").format(model_ngettext(batch, 1))\n )\n else:\n- batch.processed = True\n- payments = batch.payments_set.select_related(\"paid_by\")\n- for payment in payments:\n- bank_account = payment.paid_by.bank_accounts.last()\n- bank_account.last_used = timezone.now()\n- bank_account.save()\n-\n- batch.save()\n-\n- services.send_tpay_batch_processing_emails(batch)\n-\n+ services.process_batch(batch)\n messages.success(\n request,\n _(\"Successfully processed {}.\").format(model_ngettext(batch, 1)),\ndiff --git a/website/payments/services.py b/website/payments/services.py\n--- a/website/payments/services.py\n+++ b/website/payments/services.py\n@@ -103,6 +103,26 @@\n ).update(valid_until=timezone.now().date())\n \n \n+def process_batch(batch):\n+ \"\"\"\n+ Processes a Thalia Pay batch\n+\n+ :param batch: the batch to be processed\n+ :return:\n+ \"\"\"\n+ batch.processed = True\n+\n+ payments = batch.payments_set.select_related(\"paid_by\")\n+ for payment in payments:\n+ bank_account = payment.paid_by.bank_accounts.last()\n+ bank_account.last_used = batch.withdrawal_date\n+ bank_account.save()\n+\n+ batch.save()\n+\n+ send_tpay_batch_processing_emails(batch)\n+\n+\n def send_tpay_batch_processing_emails(batch):\n \"\"\"Sends withdrawal notice emails to all members in a batch\"\"\"\n member_payments = batch.payments_set.values(\"paid_by\").annotate(total=Sum(\"amount\"))\n", "issue": "BankAccount last used should be updated to withdrawal date not processing date.\n### Describe the bug\r\nWe keep the last date a mandate was used so mandates that have not been used for more than <s>a year</s> three years can be revoked (because that are the sepa rules). 
However the \u2018last used\u2019 date is actually the indicated withdrawal date and not the processing date of the batch\r\n\r\n### Expected behaviour\r\nUpdate with the withdrawal date \n", "before_files": [{"content": "\"\"\"Admin views provided by the payments package\"\"\"\nimport csv\n\nfrom django.apps import apps\nfrom django.contrib import messages\nfrom django.contrib.admin.utils import model_ngettext\nfrom django.contrib.admin.views.decorators import staff_member_required\nfrom django.contrib.auth.decorators import permission_required\nfrom django.db.models import Sum, Count, Min, Max\nfrom django.http import HttpResponse\nfrom django.core.exceptions import SuspiciousOperation, DisallowedRedirect\nfrom django.shortcuts import redirect, get_object_or_404, render\nfrom django.utils import timezone\nfrom django.utils.text import capfirst\nfrom django.utils.decorators import method_decorator\nfrom django.utils.http import url_has_allowed_host_and_scheme\nfrom django.utils.translation import gettext_lazy as _\nfrom django.views import View\n\nfrom payments import services\nfrom .models import Payment, Batch, PaymentUser\n\n\n@method_decorator(staff_member_required, name=\"dispatch\")\n@method_decorator(\n permission_required(\"payments.process_payments\"), name=\"dispatch\",\n)\nclass PaymentAdminView(View):\n \"\"\"\n View that creates a payment\n \"\"\"\n\n def post(self, request, *args, app_label, model_name, payable, **kwargs):\n if \"type\" not in request.POST:\n raise SuspiciousOperation(\"Missing POST parameters\")\n\n if \"next\" in request.POST and not url_has_allowed_host_and_scheme(\n request.POST.get(\"next\"), allowed_hosts={request.get_host()}\n ):\n raise DisallowedRedirect\n\n payable_model = apps.get_model(app_label=app_label, model_name=model_name)\n payable_obj = payable_model.objects.get(pk=payable)\n\n result = services.create_payment(\n payable_obj, self.request.member, request.POST[\"type\"],\n )\n payable_obj.save()\n\n if result:\n messages.success(\n request, _(\"Successfully paid %s.\") % model_ngettext(payable_obj, 1),\n )\n else:\n messages.error(\n request, _(\"Could not pay %s.\") % model_ngettext(payable_obj, 1),\n )\n return redirect(f\"admin:{app_label}_{model_name}_change\", payable_obj.pk)\n\n if \"next\" in request.POST:\n return redirect(request.POST[\"next\"])\n\n return redirect(\"admin:payments_payment_change\", result.pk)\n\n\n@method_decorator(staff_member_required, name=\"dispatch\")\n@method_decorator(\n permission_required(\"payments.process_batches\"), name=\"dispatch\",\n)\nclass BatchProcessAdminView(View):\n \"\"\"\n View that processes a batch\n \"\"\"\n\n def post(self, request, *args, **kwargs):\n batch = Batch.objects.get(pk=kwargs[\"pk\"])\n\n if \"next\" in request.POST and not url_has_allowed_host_and_scheme(\n request.POST.get(\"next\"), allowed_hosts={request.get_host()}\n ):\n raise DisallowedRedirect\n\n if batch.processed:\n messages.error(\n request, _(\"{} already processed.\").format(model_ngettext(batch, 1))\n )\n else:\n batch.processed = True\n payments = batch.payments_set.select_related(\"paid_by\")\n for payment in payments:\n bank_account = payment.paid_by.bank_accounts.last()\n bank_account.last_used = timezone.now()\n bank_account.save()\n\n batch.save()\n\n services.send_tpay_batch_processing_emails(batch)\n\n messages.success(\n request,\n _(\"Successfully processed {}.\").format(model_ngettext(batch, 1)),\n )\n\n if \"next\" in request.POST:\n return redirect(request.POST[\"next\"])\n\n return 
redirect(\"admin:payments_batch_change\", kwargs[\"pk\"])\n\n\n@method_decorator(staff_member_required, name=\"dispatch\")\n@method_decorator(\n permission_required(\"payments.process_batches\"), name=\"dispatch\",\n)\nclass BatchExportAdminView(View):\n \"\"\"\n View that exports a batch\n \"\"\"\n\n def post(self, request, *args, **kwargs):\n batch = Batch.objects.get(pk=kwargs[\"pk\"])\n\n response = HttpResponse(content_type=\"text/csv\")\n response[\"Content-Disposition\"] = 'attachment;filename=\"batch.csv\"'\n writer = csv.writer(response)\n headers = [\n _(\"Account holder\"),\n _(\"IBAN\"),\n _(\"Mandate Reference\"),\n _(\"Amount\"),\n _(\"Description\"),\n _(\"Mandate Date\"),\n ]\n writer.writerow([capfirst(x) for x in headers])\n\n member_rows = batch.payments_set.values(\"paid_by\").annotate(total=Sum(\"amount\"))\n\n for row in member_rows:\n member = PaymentUser.objects.get(id=row[\"paid_by\"])\n bankaccount = member.bank_accounts.last()\n writer.writerow(\n [\n member.get_full_name(),\n bankaccount.iban,\n bankaccount.mandate_no,\n f\"{row['total']:.2f}\",\n batch.description,\n bankaccount.valid_from,\n ]\n )\n return response\n\n\n@method_decorator(staff_member_required, name=\"dispatch\")\n@method_decorator(\n permission_required(\"payments.process_batches\"), name=\"dispatch\",\n)\nclass BatchTopicExportAdminView(View):\n \"\"\"\n View that exports a batch per topic\n \"\"\"\n\n def post(self, request, *args, **kwargs):\n batch = Batch.objects.get(pk=kwargs[\"pk\"])\n\n response = HttpResponse(content_type=\"text/csv\")\n response[\"Content-Disposition\"] = 'attachment;filename=\"batch-topic.csv\"'\n writer = csv.writer(response)\n headers = [\n _(\"Topic\"),\n _(\"No. of payments\"),\n _(\"First payment\"),\n _(\"Last payment\"),\n _(\"Total amount\"),\n ]\n writer.writerow([capfirst(x) for x in headers])\n\n topic_rows = (\n batch.payments_set.values(\"topic\")\n .annotate(\n total=Sum(\"amount\"),\n count=Count(\"paid_by\"),\n min_date=Min(\"created_at\"),\n max_date=Max(\"created_at\"),\n )\n .order_by(\"topic\")\n )\n\n for row in topic_rows:\n writer.writerow(\n [\n row[\"topic\"],\n row[\"count\"],\n timezone.localtime(row[\"min_date\"]).date(),\n timezone.localtime(row[\"max_date\"]).date(),\n f\"{row['total']:.2f}\",\n ]\n )\n return response\n\n\n@method_decorator(staff_member_required, name=\"dispatch\")\n@method_decorator(\n permission_required(\"payments.process_batches\"), name=\"dispatch\",\n)\nclass BatchTopicDescriptionAdminView(View):\n \"\"\"\n Shows the topic export as plain text\n \"\"\"\n\n template_name = \"admin/payments/batch_topic.html\"\n\n def post(self, request, *args, **kwargs):\n context = {}\n batch = get_object_or_404(Batch, pk=kwargs[\"pk\"])\n topic_rows = (\n batch.payments_set.values(\"topic\")\n .annotate(\n total=Sum(\"amount\"),\n count=Count(\"paid_by\"),\n min_date=Min(\"created_at\"),\n max_date=Max(\"created_at\"),\n )\n .order_by(\"topic\")\n )\n\n description = f\"Batch {batch.id} - {batch.processing_date if batch.processing_date else timezone.now().date()}:\\n\"\n for row in topic_rows:\n description += f\"- {row['topic']} ({row['count']}x) [{timezone.localtime(row['min_date']).date()} -- {timezone.localtime(row['max_date']).date()}], total \u20ac{row['total']:.2f}\\n\"\n description += f\"\\n{batch.description}\"\n\n context[\"batch\"] = batch\n context[\"description\"] = description\n return render(request, self.template_name, context)\n\n\n@method_decorator(staff_member_required, name=\"dispatch\")\n@method_decorator(\n 
permission_required(\"payments.process_batches\"), name=\"dispatch\",\n)\nclass BatchNewFilledAdminView(View):\n \"\"\"\n View that adds a new batch filled with all payments that where not already in a batch.\n \"\"\"\n\n def get(self, request, *args, **kwargs):\n batch = Batch()\n batch.save()\n\n payments = Payment.objects.filter(type=Payment.TPAY, batch=None,)\n\n payments.update(batch=batch)\n\n return redirect(\"admin:payments_batch_change\", object_id=batch.id)\n", "path": "website/payments/admin_views.py"}, {"content": "\"\"\"The services defined by the payments package\"\"\"\nimport datetime\nfrom typing import Union\n\nfrom django.conf import settings\nfrom django.db.models import QuerySet, Q, Sum\nfrom django.urls import reverse\nfrom django.utils import timezone, translation\nfrom django.utils.translation import gettext_lazy as _\n\nfrom members.models import Member\nfrom registrations.emails import _send_email\nfrom .exceptions import PaymentError\nfrom .models import Payment, BankAccount, Payable, PaymentUser\n\n\ndef create_payment(\n payable: Payable,\n processed_by: Member,\n pay_type: Union[Payment.CASH, Payment.CARD, Payment.WIRE, Payment.TPAY],\n) -> Payment:\n \"\"\"\n Create a new payment from a payable object\n\n :param payable: Payable object\n :param processed_by: PaymentUser that processed this payment\n :param pay_type: Payment type\n :return: Payment object\n \"\"\"\n payer = (\n PaymentUser.objects.get(pk=payable.payment_payer.pk)\n if payable.payment_payer\n else None\n )\n\n if pay_type == Payment.TPAY and not payer.tpay_enabled:\n raise PaymentError(_(\"This user does not have Thalia Pay enabled\"))\n\n if payable.payment is not None:\n payable.payment.amount = payable.payment_amount\n payable.payment.notes = payable.payment_notes\n payable.payment.topic = payable.payment_topic\n payable.payment.paid_by = payer\n payable.payment.processed_by = processed_by\n payable.payment.type = pay_type\n payable.payment.save()\n else:\n payable.payment = Payment.objects.create(\n processed_by=processed_by,\n amount=payable.payment_amount,\n notes=payable.payment_notes,\n topic=payable.payment_topic,\n paid_by=payer,\n type=pay_type,\n )\n return payable.payment\n\n\ndef delete_payment(payable: Payable):\n \"\"\"\n Removes a payment from a payable object\n :param payable: Payable object\n :return:\n \"\"\"\n payment = payable.payment\n if payment.created_at < timezone.now() - timezone.timedelta(\n seconds=settings.PAYMENT_CHANGE_WINDOW\n ):\n raise PaymentError(_(\"You are not authorized to delete this payment.\"))\n if payment.batch and payment.batch.processed:\n raise PaymentError(_(\"This payment has already been processed.\"))\n\n payable.payment = None\n payable.save()\n payment.delete()\n\n\ndef update_last_used(queryset: QuerySet, date: datetime.date = None) -> int:\n \"\"\"\n Update the last used field of a BankAccount queryset\n\n :param queryset: Queryset of BankAccounts\n :param date: date to set last_used to\n :return: number of affected rows\n \"\"\"\n if not date:\n date = timezone.now().date()\n\n result = queryset.filter(\n (Q(valid_from__gte=timezone.now()) & Q(valid_until__lt=timezone.now()))\n | Q(valid_until=None)\n ).update(last_used=date)\n return result\n\n\ndef revoke_old_mandates() -> int:\n \"\"\"\n Revokes all mandates that have not been used for 36 months or more\n :return: number of affected rows\n \"\"\"\n return BankAccount.objects.filter(\n last_used__lte=(timezone.now() - timezone.timedelta(days=36 * 30))\n 
).update(valid_until=timezone.now().date())\n\n\ndef send_tpay_batch_processing_emails(batch):\n \"\"\"Sends withdrawal notice emails to all members in a batch\"\"\"\n member_payments = batch.payments_set.values(\"paid_by\").annotate(total=Sum(\"amount\"))\n for member_row in member_payments:\n member = PaymentUser.objects.get(pk=member_row[\"paid_by\"])\n total_amount = member_row[\"total\"]\n\n with translation.override(member.profile.language):\n _send_email(\n member.email,\n _(\"Thalia Pay withdrawal notice\"),\n \"payments/email/tpay_withdrawal_notice_mail.txt\",\n {\n \"name\": member.get_full_name(),\n \"batch\": batch,\n \"bank_account\": member.bank_accounts.filter(\n mandate_no__isnull=False\n ).last(),\n \"creditor_id\": settings.SEPA_CREDITOR_ID,\n \"payments\": batch.payments_set.filter(paid_by=member),\n \"total_amount\": total_amount,\n \"payments_url\": (\n settings.BASE_URL + reverse(\"payments:payment-list\",)\n ),\n },\n )\n return len(member_payments)\n", "path": "website/payments/services.py"}], "after_files": [{"content": "\"\"\"Admin views provided by the payments package\"\"\"\nimport csv\n\nfrom django.apps import apps\nfrom django.contrib import messages\nfrom django.contrib.admin.utils import model_ngettext\nfrom django.contrib.admin.views.decorators import staff_member_required\nfrom django.contrib.auth.decorators import permission_required\nfrom django.db.models import Sum, Count, Min, Max\nfrom django.http import HttpResponse\nfrom django.core.exceptions import SuspiciousOperation, DisallowedRedirect\nfrom django.shortcuts import redirect, get_object_or_404, render\nfrom django.utils import timezone\nfrom django.utils.text import capfirst\nfrom django.utils.decorators import method_decorator\nfrom django.utils.http import url_has_allowed_host_and_scheme\nfrom django.utils.translation import gettext_lazy as _\nfrom django.views import View\n\nfrom payments import services\nfrom .models import Payment, Batch, PaymentUser\n\n\n@method_decorator(staff_member_required, name=\"dispatch\")\n@method_decorator(\n permission_required(\"payments.process_payments\"), name=\"dispatch\",\n)\nclass PaymentAdminView(View):\n \"\"\"\n View that creates a payment\n \"\"\"\n\n def post(self, request, *args, app_label, model_name, payable, **kwargs):\n if \"type\" not in request.POST:\n raise SuspiciousOperation(\"Missing POST parameters\")\n\n if \"next\" in request.POST and not url_has_allowed_host_and_scheme(\n request.POST.get(\"next\"), allowed_hosts={request.get_host()}\n ):\n raise DisallowedRedirect\n\n payable_model = apps.get_model(app_label=app_label, model_name=model_name)\n payable_obj = payable_model.objects.get(pk=payable)\n\n result = services.create_payment(\n payable_obj, self.request.member, request.POST[\"type\"],\n )\n payable_obj.save()\n\n if result:\n messages.success(\n request, _(\"Successfully paid %s.\") % model_ngettext(payable_obj, 1),\n )\n else:\n messages.error(\n request, _(\"Could not pay %s.\") % model_ngettext(payable_obj, 1),\n )\n return redirect(f\"admin:{app_label}_{model_name}_change\", payable_obj.pk)\n\n if \"next\" in request.POST:\n return redirect(request.POST[\"next\"])\n\n return redirect(\"admin:payments_payment_change\", result.pk)\n\n\n@method_decorator(staff_member_required, name=\"dispatch\")\n@method_decorator(\n permission_required(\"payments.process_batches\"), name=\"dispatch\",\n)\nclass BatchProcessAdminView(View):\n \"\"\"\n View that processes a batch\n \"\"\"\n\n def post(self, request, *args, **kwargs):\n batch = 
Batch.objects.get(pk=kwargs[\"pk\"])\n\n if \"next\" in request.POST and not url_has_allowed_host_and_scheme(\n request.POST.get(\"next\"), allowed_hosts={request.get_host()}\n ):\n raise DisallowedRedirect\n\n if batch.processed:\n messages.error(\n request, _(\"{} already processed.\").format(model_ngettext(batch, 1))\n )\n else:\n services.process_batch(batch)\n messages.success(\n request,\n _(\"Successfully processed {}.\").format(model_ngettext(batch, 1)),\n )\n\n if \"next\" in request.POST:\n return redirect(request.POST[\"next\"])\n\n return redirect(\"admin:payments_batch_change\", kwargs[\"pk\"])\n\n\n@method_decorator(staff_member_required, name=\"dispatch\")\n@method_decorator(\n permission_required(\"payments.process_batches\"), name=\"dispatch\",\n)\nclass BatchExportAdminView(View):\n \"\"\"\n View that exports a batch\n \"\"\"\n\n def post(self, request, *args, **kwargs):\n batch = Batch.objects.get(pk=kwargs[\"pk\"])\n\n response = HttpResponse(content_type=\"text/csv\")\n response[\"Content-Disposition\"] = 'attachment;filename=\"batch.csv\"'\n writer = csv.writer(response)\n headers = [\n _(\"Account holder\"),\n _(\"IBAN\"),\n _(\"Mandate Reference\"),\n _(\"Amount\"),\n _(\"Description\"),\n _(\"Mandate Date\"),\n ]\n writer.writerow([capfirst(x) for x in headers])\n\n member_rows = batch.payments_set.values(\"paid_by\").annotate(total=Sum(\"amount\"))\n\n for row in member_rows:\n member = PaymentUser.objects.get(id=row[\"paid_by\"])\n bankaccount = member.bank_accounts.last()\n writer.writerow(\n [\n member.get_full_name(),\n bankaccount.iban,\n bankaccount.mandate_no,\n f\"{row['total']:.2f}\",\n batch.description,\n bankaccount.valid_from,\n ]\n )\n return response\n\n\n@method_decorator(staff_member_required, name=\"dispatch\")\n@method_decorator(\n permission_required(\"payments.process_batches\"), name=\"dispatch\",\n)\nclass BatchTopicExportAdminView(View):\n \"\"\"\n View that exports a batch per topic\n \"\"\"\n\n def post(self, request, *args, **kwargs):\n batch = Batch.objects.get(pk=kwargs[\"pk\"])\n\n response = HttpResponse(content_type=\"text/csv\")\n response[\"Content-Disposition\"] = 'attachment;filename=\"batch-topic.csv\"'\n writer = csv.writer(response)\n headers = [\n _(\"Topic\"),\n _(\"No. 
of payments\"),\n _(\"First payment\"),\n _(\"Last payment\"),\n _(\"Total amount\"),\n ]\n writer.writerow([capfirst(x) for x in headers])\n\n topic_rows = (\n batch.payments_set.values(\"topic\")\n .annotate(\n total=Sum(\"amount\"),\n count=Count(\"paid_by\"),\n min_date=Min(\"created_at\"),\n max_date=Max(\"created_at\"),\n )\n .order_by(\"topic\")\n )\n\n for row in topic_rows:\n writer.writerow(\n [\n row[\"topic\"],\n row[\"count\"],\n timezone.localtime(row[\"min_date\"]).date(),\n timezone.localtime(row[\"max_date\"]).date(),\n f\"{row['total']:.2f}\",\n ]\n )\n return response\n\n\n@method_decorator(staff_member_required, name=\"dispatch\")\n@method_decorator(\n permission_required(\"payments.process_batches\"), name=\"dispatch\",\n)\nclass BatchTopicDescriptionAdminView(View):\n \"\"\"\n Shows the topic export as plain text\n \"\"\"\n\n template_name = \"admin/payments/batch_topic.html\"\n\n def post(self, request, *args, **kwargs):\n context = {}\n batch = get_object_or_404(Batch, pk=kwargs[\"pk\"])\n topic_rows = (\n batch.payments_set.values(\"topic\")\n .annotate(\n total=Sum(\"amount\"),\n count=Count(\"paid_by\"),\n min_date=Min(\"created_at\"),\n max_date=Max(\"created_at\"),\n )\n .order_by(\"topic\")\n )\n\n description = f\"Batch {batch.id} - {batch.processing_date if batch.processing_date else timezone.now().date()}:\\n\"\n for row in topic_rows:\n description += f\"- {row['topic']} ({row['count']}x) [{timezone.localtime(row['min_date']).date()} -- {timezone.localtime(row['max_date']).date()}], total \u20ac{row['total']:.2f}\\n\"\n description += f\"\\n{batch.description}\"\n\n context[\"batch\"] = batch\n context[\"description\"] = description\n return render(request, self.template_name, context)\n\n\n@method_decorator(staff_member_required, name=\"dispatch\")\n@method_decorator(\n permission_required(\"payments.process_batches\"), name=\"dispatch\",\n)\nclass BatchNewFilledAdminView(View):\n \"\"\"\n View that adds a new batch filled with all payments that where not already in a batch.\n \"\"\"\n\n def get(self, request, *args, **kwargs):\n batch = Batch()\n batch.save()\n\n payments = Payment.objects.filter(type=Payment.TPAY, batch=None,)\n\n payments.update(batch=batch)\n\n return redirect(\"admin:payments_batch_change\", object_id=batch.id)\n", "path": "website/payments/admin_views.py"}, {"content": "\"\"\"The services defined by the payments package\"\"\"\nimport datetime\nfrom typing import Union\n\nfrom django.conf import settings\nfrom django.db.models import QuerySet, Q, Sum\nfrom django.urls import reverse\nfrom django.utils import timezone, translation\nfrom django.utils.translation import gettext_lazy as _\n\nfrom members.models import Member\nfrom registrations.emails import _send_email\nfrom .exceptions import PaymentError\nfrom .models import Payment, BankAccount, Payable, PaymentUser\n\n\ndef create_payment(\n payable: Payable,\n processed_by: Member,\n pay_type: Union[Payment.CASH, Payment.CARD, Payment.WIRE, Payment.TPAY],\n) -> Payment:\n \"\"\"\n Create a new payment from a payable object\n\n :param payable: Payable object\n :param processed_by: PaymentUser that processed this payment\n :param pay_type: Payment type\n :return: Payment object\n \"\"\"\n payer = (\n PaymentUser.objects.get(pk=payable.payment_payer.pk)\n if payable.payment_payer\n else None\n )\n\n if pay_type == Payment.TPAY and not payer.tpay_enabled:\n raise PaymentError(_(\"This user does not have Thalia Pay enabled\"))\n\n if payable.payment is not None:\n payable.payment.amount 
= payable.payment_amount\n payable.payment.notes = payable.payment_notes\n payable.payment.topic = payable.payment_topic\n payable.payment.paid_by = payer\n payable.payment.processed_by = processed_by\n payable.payment.type = pay_type\n payable.payment.save()\n else:\n payable.payment = Payment.objects.create(\n processed_by=processed_by,\n amount=payable.payment_amount,\n notes=payable.payment_notes,\n topic=payable.payment_topic,\n paid_by=payer,\n type=pay_type,\n )\n return payable.payment\n\n\ndef delete_payment(payable: Payable):\n \"\"\"\n Removes a payment from a payable object\n :param payable: Payable object\n :return:\n \"\"\"\n payment = payable.payment\n if payment.created_at < timezone.now() - timezone.timedelta(\n seconds=settings.PAYMENT_CHANGE_WINDOW\n ):\n raise PaymentError(_(\"You are not authorized to delete this payment.\"))\n if payment.batch and payment.batch.processed:\n raise PaymentError(_(\"This payment has already been processed.\"))\n\n payable.payment = None\n payable.save()\n payment.delete()\n\n\ndef update_last_used(queryset: QuerySet, date: datetime.date = None) -> int:\n \"\"\"\n Update the last used field of a BankAccount queryset\n\n :param queryset: Queryset of BankAccounts\n :param date: date to set last_used to\n :return: number of affected rows\n \"\"\"\n if not date:\n date = timezone.now().date()\n\n result = queryset.filter(\n (Q(valid_from__gte=timezone.now()) & Q(valid_until__lt=timezone.now()))\n | Q(valid_until=None)\n ).update(last_used=date)\n return result\n\n\ndef revoke_old_mandates() -> int:\n \"\"\"\n Revokes all mandates that have not been used for 36 months or more\n :return: number of affected rows\n \"\"\"\n return BankAccount.objects.filter(\n last_used__lte=(timezone.now() - timezone.timedelta(days=36 * 30))\n ).update(valid_until=timezone.now().date())\n\n\ndef process_batch(batch):\n \"\"\"\n Processes a Thalia Pay batch\n\n :param batch: the batch to be processed\n :return:\n \"\"\"\n batch.processed = True\n\n payments = batch.payments_set.select_related(\"paid_by\")\n for payment in payments:\n bank_account = payment.paid_by.bank_accounts.last()\n bank_account.last_used = batch.withdrawal_date\n bank_account.save()\n\n batch.save()\n\n send_tpay_batch_processing_emails(batch)\n\n\ndef send_tpay_batch_processing_emails(batch):\n \"\"\"Sends withdrawal notice emails to all members in a batch\"\"\"\n member_payments = batch.payments_set.values(\"paid_by\").annotate(total=Sum(\"amount\"))\n for member_row in member_payments:\n member = PaymentUser.objects.get(pk=member_row[\"paid_by\"])\n total_amount = member_row[\"total\"]\n\n with translation.override(member.profile.language):\n _send_email(\n member.email,\n _(\"Thalia Pay withdrawal notice\"),\n \"payments/email/tpay_withdrawal_notice_mail.txt\",\n {\n \"name\": member.get_full_name(),\n \"batch\": batch,\n \"bank_account\": member.bank_accounts.filter(\n mandate_no__isnull=False\n ).last(),\n \"creditor_id\": settings.SEPA_CREDITOR_ID,\n \"payments\": batch.payments_set.filter(paid_by=member),\n \"total_amount\": total_amount,\n \"payments_url\": (\n settings.BASE_URL + reverse(\"payments:payment-list\",)\n ),\n },\n )\n return len(member_payments)\n", "path": "website/payments/services.py"}]} | 3,966 | 407 |
gh_patches_debug_10795 | rasdani/github-patches | git_diff | pymedusa__Medusa-3141 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Rtorrent Not connecting
### Before submitting your issue:
Enable debug logging in Medusa settings, reproduce the error (be sure to disable after the bug is fixed)
**Branch/Commit:** Main/latest
**OS:** Ubuntu 16.x
**What you did:** Attempted to connect my seedbox's rTorrent instance; double-checked with the seedbox company to confirm my information is correct
**What happened:** "Could Not connect to rtorrent"
**What you expected:** A connection to rTorrent
**Logs:**
```
2017-09-06 14:43:36 DEBUG SNATCHQUEUE-MANUALSNATCH-72218 :: [3f23fd2] Traceback (most recent call last):
File "/opt/medusa/medusa/search/queue.py", line 487, in run
self.success = snatch_episode(result)
File "/opt/medusa/medusa/search/core.py", line 153, in snatch_episode
result_downloaded = client.send_torrent(result)
File "/opt/medusa/medusa/clients/torrent/generic.py", line 242, in send_torrent
if not self._get_auth():
File "/opt/medusa/medusa/clients/torrent/rtorrent_client.py", line 50, in _get_auth
self.auth = RTorrent(self.host, self.username, self.password, True, tp_kwargs=tp_kwargs)
File "/opt/medusa/lib/rtorrent/__init__.py", line 83, in __init__
self._verify_conn()
File "/opt/medusa/lib/rtorrent/__init__.py", line 122, in _verify_conn
assert "system.client_version" in self._get_rpc_methods(
File "/opt/medusa/lib/rtorrent/__init__.py", line 161, in _get_rpc_methods
return(self._rpc_methods or self._update_rpc_methods())
File "/opt/medusa/lib/rtorrent/__init__.py", line 150, in _update_rpc_methods
self._rpc_methods = self._get_conn().system.listMethods()
File "/usr/lib/python2.7/xmlrpclib.py", line 1243, in __call__
return self.__send(self.__name, args)
File "/usr/lib/python2.7/xmlrpclib.py", line 1602, in __request
verbose=self.__verbose
File "/opt/medusa/lib/rtorrent/lib/xmlrpc/requests_transport.py", line 150, in request
response.headers)
ProtocolError: <ProtocolError for http://nl4727.dediseedbox.com//rutorrent/plugins/httprpc/action.php: 401 Client Error: Unauthorized for url: http://nl4727.dediseedbox.com//rutorrent/plugins/httprpc/action.php Traceback (most recent call last):
File "/opt/medusa/lib/rtorrent/lib/xmlrpc/requests_transport.py", line 145, in request
response.raise_for_status()
File "/opt/medusa/lib/requests/models.py", line 844, in raise_for_status
raise HTTPError(http_error_msg, response=self)
HTTPError: 401 Client Error: Unauthorized for url: http://nl4727.dediseedbox.com//rutorrent/plugins/httprpc/action.php
```
Side note: this does work with CouchPotato.
--- END ISSUE ---
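The double slash in the failing URL above (`http://nl4727.dediseedbox.com//rutorrent/...`) is the visible symptom. Below is a minimal standalone sketch, not part of the repository, of how that URL is built when the XML-RPC handler string already begins with a slash (which the `//` in the traceback suggests); the host and handler values are copied from the traceback purely for illustration.
```python
# Standalone sketch of the URL construction behind the traceback above.
# Assumes the handler already carries a leading "/" (illustrative values).
host = "nl4727.dediseedbox.com"
handler = "/rutorrent/plugins/httprpc/action.php"

with_inserted_slash = "http://{host}/{handler}".format(host=host, handler=handler)
without_inserted_slash = "http://{host}{handler}".format(host=host, handler=handler)

print(with_inserted_slash)     # http://nl4727.dediseedbox.com//rutorrent/plugins/httprpc/action.php
print(without_inserted_slash)  # http://nl4727.dediseedbox.com/rutorrent/plugins/httprpc/action.php
```
Whether the duplicated slash alone explains the 401 depends on the server's URL routing, so treat this only as a pointer at the URL-building code in the transport file below.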
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `lib/rtorrent/lib/xmlrpc/requests_transport.py`
Content:
```
1 # Copyright (c) 2013-2015 Alexandre Beloin, <[email protected]>
2 #
3 # This program is free software: you can redistribute it and/or modify
4 # it under the terms of the GNU General Public License as published by
5 # the Free Software Foundation, either version 3 of the License, or
6 # (at your option) any later version.
7 #
8 # This program is distributed in the hope that it will be useful,
9 # but WITHOUT ANY WARRANTY; without even the implied warranty of
10 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 # GNU General Public License for more details.
12
13 # You should have received a copy of the GNU General Public License
14 # along with this program. If not, see <http://www.gnu.org/licenses/>.
15
16 """A transport for Python2/3 xmlrpc library using requests
17
18 Support:
19 -SSL with Basic and Digest authentication
20 -Proxies
21 """
22
23 try:
24 import xmlrpc.client as xmlrpc_client
25 except ImportError:
26 import xmlrpclib as xmlrpc_client
27
28 import traceback
29
30 import requests
31 from requests.exceptions import RequestException
32 from requests.auth import HTTPBasicAuth
33 from requests.auth import HTTPDigestAuth
34 from requests.packages.urllib3 import disable_warnings # @UnresolvedImport
35
36
37 class RequestsTransport(xmlrpc_client.Transport):
38
39 """Transport class for xmlrpc using requests"""
40
41 def __init__(self, use_https=True, authtype=None, username=None,
42 password=None, check_ssl_cert=True, proxies=None):
43 """Inits RequestsTransport.
44
45 Args:
46 use_https: If true, https else http
47 authtype: None, basic or digest
48 username: Username
49 password: Password
50 check_ssl_cert: Check SSL certificate
51 proxies: A dict of proxies(
52 Ex: {"http": "http://10.10.1.10:3128",
53 "https": "http://10.10.1.10:1080",})
54
55 Raises:
56 ValueError: Invalid info
57 """
58 # Python 2 can't use super on old style class.
59 if issubclass(xmlrpc_client.Transport, object):
60 super(RequestsTransport, self).__init__()
61 else:
62 xmlrpc_client.Transport.__init__(self)
63
64 self.user_agent = "Python Requests/" + requests.__version__
65
66 self._use_https = use_https
67 self._check_ssl_cert = check_ssl_cert
68
69 if authtype == "basic" or authtype == "digest":
70 self._authtype = authtype
71 else:
72 raise ValueError(
73 "Supported authentication are: basic and digest")
74 if authtype and (not username or not password):
75 raise ValueError(
76 "Username and password required when using authentication")
77
78 self._username = username
79 self._password = password
80 if proxies is None:
81 self._proxies = {}
82 else:
83 self._proxies = proxies
84
85 def request(self, host, handler, request_body, verbose=0):
86 """Replace the xmlrpc request function.
87
88 Process xmlrpc request via requests library.
89
90 Args:
91 host: Target host
92 handler: Target PRC handler.
93 request_body: XML-RPC request body.
94 verbose: Debugging flag.
95
96 Returns:
97 Parsed response.
98
99 Raises:
100 RequestException: Error in requests
101 """
102 if verbose:
103 self._debug()
104
105 if not self._check_ssl_cert:
106 disable_warnings()
107
108 headers = {'User-Agent': self.user_agent, 'Content-Type': 'text/xml', }
109
110 # Need to be done because the schema(http or https) is lost in
111 # xmlrpc.Transport's init.
112 if self._use_https:
113 url = "https://{host}/{handler}".format(host=host, handler=handler)
114 else:
115 url = "http://{host}/{handler}".format(host=host, handler=handler)
116
117 # TODO Construct kwargs query instead
118 try:
119 if self._authtype == "basic":
120 response = requests.post(
121 url,
122 data=request_body,
123 headers=headers,
124 verify=self._check_ssl_cert,
125 auth=HTTPBasicAuth(
126 self._username, self._password),
127 proxies=self._proxies)
128 elif self._authtype == "digest":
129 response = requests.post(
130 url,
131 data=request_body,
132 headers=headers,
133 verify=self._check_ssl_cert,
134 auth=HTTPDigestAuth(
135 self._username, self._password),
136 proxies=self._proxies)
137 else:
138 response = requests.post(
139 url,
140 data=request_body,
141 headers=headers,
142 verify=self._check_ssl_cert,
143 proxies=self._proxies)
144
145 response.raise_for_status()
146 except RequestException as error:
147 raise xmlrpc_client.ProtocolError(url,
148 error.message,
149 traceback.format_exc(),
150 response.headers)
151
152 return self.parse_response(response)
153
154 def parse_response(self, response):
155 """Replace the xmlrpc parse_response function.
156
157 Parse response.
158
159 Args:
160 response: Requests return data
161
162 Returns:
163 Response tuple and target method.
164 """
165 p, u = self.getparser()
166 p.feed(response.text.encode('utf-8'))
167 p.close()
168 return u.close()
169
170 def _debug(self):
171 """Debug requests module.
172
173 Enable verbose logging from requests
174 """
175 # TODO Ugly
176 import logging
177 try:
178 import http.client as http_client
179 except ImportError:
180 import httplib as http_client
181
182 http_client.HTTPConnection.debuglevel = 1
183
184 logging.basicConfig()
185 logging.getLogger().setLevel(logging.DEBUG)
186 requests_log = logging.getLogger("requests.packages.urllib3")
187 requests_log.setLevel(logging.DEBUG)
188 requests_log.propagate = True
189
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/lib/rtorrent/lib/xmlrpc/requests_transport.py b/lib/rtorrent/lib/xmlrpc/requests_transport.py
--- a/lib/rtorrent/lib/xmlrpc/requests_transport.py
+++ b/lib/rtorrent/lib/xmlrpc/requests_transport.py
@@ -110,9 +110,9 @@
# Need to be done because the schema(http or https) is lost in
# xmlrpc.Transport's init.
if self._use_https:
- url = "https://{host}/{handler}".format(host=host, handler=handler)
+ url = "https://{host}{handler}".format(host=host, handler=handler)
else:
- url = "http://{host}/{handler}".format(host=host, handler=handler)
+ url = "http://{host}{handler}".format(host=host, handler=handler)
# TODO Construct kwargs query instead
try:
| {"golden_diff": "diff --git a/lib/rtorrent/lib/xmlrpc/requests_transport.py b/lib/rtorrent/lib/xmlrpc/requests_transport.py\n--- a/lib/rtorrent/lib/xmlrpc/requests_transport.py\n+++ b/lib/rtorrent/lib/xmlrpc/requests_transport.py\n@@ -110,9 +110,9 @@\n # Need to be done because the schema(http or https) is lost in\n # xmlrpc.Transport's init.\n if self._use_https:\n- url = \"https://{host}/{handler}\".format(host=host, handler=handler)\n+ url = \"https://{host}{handler}\".format(host=host, handler=handler)\n else:\n- url = \"http://{host}/{handler}\".format(host=host, handler=handler)\n+ url = \"http://{host}{handler}\".format(host=host, handler=handler)\n \n # TODO Construct kwargs query instead\n try:\n", "issue": "Rtorrent Not connecting\n### Before submitting your issue:\r\n\r\nEnable debug logging in Medusa settings, reproduce the error (be sure to disable after the bug is fixed)\r\n\r\n**Branch/Commit:Main/latest\r\n**OS:Ubuntu 16.x\r\n**What you did: Attempted to connect my seedboxes rTorrent instance, doublechecked with seedbox company to confirm my information is correct\r\n**What happened: \"Could Not connect to rtorrent\"\r\n**What you expected: .... Connection to r torrent\r\n**Logs:**\r\n```\r\n2017-09-06 14:43:36 DEBUG SNATCHQUEUE-MANUALSNATCH-72218 :: [3f23fd2] Traceback (most recent call last):\r\n File \"/opt/medusa/medusa/search/queue.py\", line 487, in run\r\n self.success = snatch_episode(result)\r\n File \"/opt/medusa/medusa/search/core.py\", line 153, in snatch_episode\r\n result_downloaded = client.send_torrent(result)\r\n File \"/opt/medusa/medusa/clients/torrent/generic.py\", line 242, in send_torrent\r\n if not self._get_auth():\r\n File \"/opt/medusa/medusa/clients/torrent/rtorrent_client.py\", line 50, in _get_auth\r\n self.auth = RTorrent(self.host, self.username, self.password, True, tp_kwargs=tp_kwargs)\r\n File \"/opt/medusa/lib/rtorrent/__init__.py\", line 83, in __init__\r\n self._verify_conn()\r\n File \"/opt/medusa/lib/rtorrent/__init__.py\", line 122, in _verify_conn\r\n assert \"system.client_version\" in self._get_rpc_methods(\r\n File \"/opt/medusa/lib/rtorrent/__init__.py\", line 161, in _get_rpc_methods\r\n return(self._rpc_methods or self._update_rpc_methods())\r\n File \"/opt/medusa/lib/rtorrent/__init__.py\", line 150, in _update_rpc_methods\r\n self._rpc_methods = self._get_conn().system.listMethods()\r\n File \"/usr/lib/python2.7/xmlrpclib.py\", line 1243, in __call__\r\n return self.__send(self.__name, args)\r\n File \"/usr/lib/python2.7/xmlrpclib.py\", line 1602, in __request\r\n verbose=self.__verbose\r\n File \"/opt/medusa/lib/rtorrent/lib/xmlrpc/requests_transport.py\", line 150, in request\r\n response.headers)\r\nProtocolError: <ProtocolError for http://nl4727.dediseedbox.com//rutorrent/plugins/httprpc/action.php: 401 Client Error: Unauthorized for url: http://nl4727.dediseedbox.com//rutorrent/plugins/httprpc/action.php Traceback (most recent call last):\r\n File \"/opt/medusa/lib/rtorrent/lib/xmlrpc/requests_transport.py\", line 145, in request\r\n response.raise_for_status()\r\n File \"/opt/medusa/lib/requests/models.py\", line 844, in raise_for_status\r\n raise HTTPError(http_error_msg, response=self)\r\nHTTPError: 401 Client Error: Unauthorized for url: http://nl4727.dediseedbox.com//rutorrent/plugins/httprpc/action.php\r\n```\r\nSide Note, this does work with couchpotato\n", "before_files": [{"content": "# Copyright (c) 2013-2015 Alexandre Beloin, <[email protected]>\n#\n# This program is free software: you can redistribute it 
and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n\n# You should have received a copy of the GNU General Public License\n# along with this program. If not, see <http://www.gnu.org/licenses/>.\n\n\"\"\"A transport for Python2/3 xmlrpc library using requests\n\nSupport:\n-SSL with Basic and Digest authentication\n-Proxies\n\"\"\"\n\ntry:\n import xmlrpc.client as xmlrpc_client\nexcept ImportError:\n import xmlrpclib as xmlrpc_client\n\nimport traceback\n\nimport requests\nfrom requests.exceptions import RequestException\nfrom requests.auth import HTTPBasicAuth\nfrom requests.auth import HTTPDigestAuth\nfrom requests.packages.urllib3 import disable_warnings # @UnresolvedImport\n\n\nclass RequestsTransport(xmlrpc_client.Transport):\n\n \"\"\"Transport class for xmlrpc using requests\"\"\"\n\n def __init__(self, use_https=True, authtype=None, username=None,\n password=None, check_ssl_cert=True, proxies=None):\n \"\"\"Inits RequestsTransport.\n\n Args:\n use_https: If true, https else http\n authtype: None, basic or digest\n username: Username\n password: Password\n check_ssl_cert: Check SSL certificate\n proxies: A dict of proxies(\n Ex: {\"http\": \"http://10.10.1.10:3128\",\n \"https\": \"http://10.10.1.10:1080\",})\n\n Raises:\n ValueError: Invalid info\n \"\"\"\n # Python 2 can't use super on old style class.\n if issubclass(xmlrpc_client.Transport, object):\n super(RequestsTransport, self).__init__()\n else:\n xmlrpc_client.Transport.__init__(self)\n\n self.user_agent = \"Python Requests/\" + requests.__version__\n\n self._use_https = use_https\n self._check_ssl_cert = check_ssl_cert\n\n if authtype == \"basic\" or authtype == \"digest\":\n self._authtype = authtype\n else:\n raise ValueError(\n \"Supported authentication are: basic and digest\")\n if authtype and (not username or not password):\n raise ValueError(\n \"Username and password required when using authentication\")\n\n self._username = username\n self._password = password\n if proxies is None:\n self._proxies = {}\n else:\n self._proxies = proxies\n\n def request(self, host, handler, request_body, verbose=0):\n \"\"\"Replace the xmlrpc request function.\n\n Process xmlrpc request via requests library.\n\n Args:\n host: Target host\n handler: Target PRC handler.\n request_body: XML-RPC request body.\n verbose: Debugging flag.\n\n Returns:\n Parsed response.\n\n Raises:\n RequestException: Error in requests\n \"\"\"\n if verbose:\n self._debug()\n\n if not self._check_ssl_cert:\n disable_warnings()\n\n headers = {'User-Agent': self.user_agent, 'Content-Type': 'text/xml', }\n\n # Need to be done because the schema(http or https) is lost in\n # xmlrpc.Transport's init.\n if self._use_https:\n url = \"https://{host}/{handler}\".format(host=host, handler=handler)\n else:\n url = \"http://{host}/{handler}\".format(host=host, handler=handler)\n\n # TODO Construct kwargs query instead\n try:\n if self._authtype == \"basic\":\n response = requests.post(\n url,\n data=request_body,\n headers=headers,\n verify=self._check_ssl_cert,\n auth=HTTPBasicAuth(\n self._username, self._password),\n proxies=self._proxies)\n elif self._authtype == \"digest\":\n 
response = requests.post(\n url,\n data=request_body,\n headers=headers,\n verify=self._check_ssl_cert,\n auth=HTTPDigestAuth(\n self._username, self._password),\n proxies=self._proxies)\n else:\n response = requests.post(\n url,\n data=request_body,\n headers=headers,\n verify=self._check_ssl_cert,\n proxies=self._proxies)\n\n response.raise_for_status()\n except RequestException as error:\n raise xmlrpc_client.ProtocolError(url,\n error.message,\n traceback.format_exc(),\n response.headers)\n\n return self.parse_response(response)\n\n def parse_response(self, response):\n \"\"\"Replace the xmlrpc parse_response function.\n\n Parse response.\n\n Args:\n response: Requests return data\n\n Returns:\n Response tuple and target method.\n \"\"\"\n p, u = self.getparser()\n p.feed(response.text.encode('utf-8'))\n p.close()\n return u.close()\n\n def _debug(self):\n \"\"\"Debug requests module.\n\n Enable verbose logging from requests\n \"\"\"\n # TODO Ugly\n import logging\n try:\n import http.client as http_client\n except ImportError:\n import httplib as http_client\n\n http_client.HTTPConnection.debuglevel = 1\n\n logging.basicConfig()\n logging.getLogger().setLevel(logging.DEBUG)\n requests_log = logging.getLogger(\"requests.packages.urllib3\")\n requests_log.setLevel(logging.DEBUG)\n requests_log.propagate = True\n", "path": "lib/rtorrent/lib/xmlrpc/requests_transport.py"}], "after_files": [{"content": "# Copyright (c) 2013-2015 Alexandre Beloin, <[email protected]>\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n\n# You should have received a copy of the GNU General Public License\n# along with this program. 
If not, see <http://www.gnu.org/licenses/>.\n\n\"\"\"A transport for Python2/3 xmlrpc library using requests\n\nSupport:\n-SSL with Basic and Digest authentication\n-Proxies\n\"\"\"\n\ntry:\n import xmlrpc.client as xmlrpc_client\nexcept ImportError:\n import xmlrpclib as xmlrpc_client\n\nimport traceback\n\nimport requests\nfrom requests.exceptions import RequestException\nfrom requests.auth import HTTPBasicAuth\nfrom requests.auth import HTTPDigestAuth\nfrom requests.packages.urllib3 import disable_warnings # @UnresolvedImport\n\n\nclass RequestsTransport(xmlrpc_client.Transport):\n\n \"\"\"Transport class for xmlrpc using requests\"\"\"\n\n def __init__(self, use_https=True, authtype=None, username=None,\n password=None, check_ssl_cert=True, proxies=None):\n \"\"\"Inits RequestsTransport.\n\n Args:\n use_https: If true, https else http\n authtype: None, basic or digest\n username: Username\n password: Password\n check_ssl_cert: Check SSL certificate\n proxies: A dict of proxies(\n Ex: {\"http\": \"http://10.10.1.10:3128\",\n \"https\": \"http://10.10.1.10:1080\",})\n\n Raises:\n ValueError: Invalid info\n \"\"\"\n # Python 2 can't use super on old style class.\n if issubclass(xmlrpc_client.Transport, object):\n super(RequestsTransport, self).__init__()\n else:\n xmlrpc_client.Transport.__init__(self)\n\n self.user_agent = \"Python Requests/\" + requests.__version__\n\n self._use_https = use_https\n self._check_ssl_cert = check_ssl_cert\n\n if authtype == \"basic\" or authtype == \"digest\":\n self._authtype = authtype\n else:\n raise ValueError(\n \"Supported authentication are: basic and digest\")\n if authtype and (not username or not password):\n raise ValueError(\n \"Username and password required when using authentication\")\n\n self._username = username\n self._password = password\n if proxies is None:\n self._proxies = {}\n else:\n self._proxies = proxies\n\n def request(self, host, handler, request_body, verbose=0):\n \"\"\"Replace the xmlrpc request function.\n\n Process xmlrpc request via requests library.\n\n Args:\n host: Target host\n handler: Target PRC handler.\n request_body: XML-RPC request body.\n verbose: Debugging flag.\n\n Returns:\n Parsed response.\n\n Raises:\n RequestException: Error in requests\n \"\"\"\n if verbose:\n self._debug()\n\n if not self._check_ssl_cert:\n disable_warnings()\n\n headers = {'User-Agent': self.user_agent, 'Content-Type': 'text/xml', }\n\n # Need to be done because the schema(http or https) is lost in\n # xmlrpc.Transport's init.\n if self._use_https:\n url = \"https://{host}{handler}\".format(host=host, handler=handler)\n else:\n url = \"http://{host}{handler}\".format(host=host, handler=handler)\n\n # TODO Construct kwargs query instead\n try:\n if self._authtype == \"basic\":\n response = requests.post(\n url,\n data=request_body,\n headers=headers,\n verify=self._check_ssl_cert,\n auth=HTTPBasicAuth(\n self._username, self._password),\n proxies=self._proxies)\n elif self._authtype == \"digest\":\n response = requests.post(\n url,\n data=request_body,\n headers=headers,\n verify=self._check_ssl_cert,\n auth=HTTPDigestAuth(\n self._username, self._password),\n proxies=self._proxies)\n else:\n response = requests.post(\n url,\n data=request_body,\n headers=headers,\n verify=self._check_ssl_cert,\n proxies=self._proxies)\n\n response.raise_for_status()\n except RequestException as error:\n raise xmlrpc_client.ProtocolError(url,\n error.message,\n traceback.format_exc(),\n response.headers)\n\n return self.parse_response(response)\n\n 
def parse_response(self, response):\n \"\"\"Replace the xmlrpc parse_response function.\n\n Parse response.\n\n Args:\n response: Requests return data\n\n Returns:\n Response tuple and target method.\n \"\"\"\n p, u = self.getparser()\n p.feed(response.text.encode('utf-8'))\n p.close()\n return u.close()\n\n def _debug(self):\n \"\"\"Debug requests module.\n\n Enable verbose logging from requests\n \"\"\"\n # TODO Ugly\n import logging\n try:\n import http.client as http_client\n except ImportError:\n import httplib as http_client\n\n http_client.HTTPConnection.debuglevel = 1\n\n logging.basicConfig()\n logging.getLogger().setLevel(logging.DEBUG)\n requests_log = logging.getLogger(\"requests.packages.urllib3\")\n requests_log.setLevel(logging.DEBUG)\n requests_log.propagate = True\n", "path": "lib/rtorrent/lib/xmlrpc/requests_transport.py"}]} | 2,750 | 197 |
gh_patches_debug_8852 | rasdani/github-patches | git_diff | pwndbg__pwndbg-363 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Broken `entry` command
### Description
The `entry` command passes arguments differently than the `run` command.
### Steps to reproduce
```
[dc@dc:pwndbg|dev *$%]$ gdb python
Loaded 113 commands. Type pwndbg [filter] for a list.
Reading symbols from python...(no debugging symbols found)...done.
pwndbg> set exception-verbose on
Set whether to print a full stacktracefor exceptions raised in Pwndbg commands to True
pwndbg> run -c "print(1); print(2)"
Starting program: /usr/bin/python -c "print(1); print(2)"
[Thread debugging using libthread_db enabled]
Using host libthread_db library "/usr/lib/libthread_db.so.1".
1
2
[Inferior 1 (process 20590) exited normally]
pwndbg> entry -c "print(1); print(2)"
('-c', 'print(1); print(2)')
Running '%s' run -c print(1); print(2)
/bin/bash: -c: line 0: syntax error near unexpected token `('
/bin/bash: -c: line 0: `exec /usr/bin/python -c print(1); print(2)'
Traceback (most recent call last):
File "/home/dc/installed/pwndbg/pwndbg/commands/__init__.py", line 100, in __call__
return self.function(*args, **kwargs)
File "/home/dc/installed/pwndbg/pwndbg/commands/__init__.py", line 181, in _OnlyWithFile
return function(*a, **kw)
File "/home/dc/installed/pwndbg/pwndbg/commands/start.py", line 72, in entry
gdb.execute(run, from_tty=False)
gdb.error: During startup program exited with code 1.
If that is an issue, you can report it on https://github.com/pwndbg/pwndbg/issues
(Please don't forget to search if it hasn't been reported before)
PS: Pull requests are welcome
```
### My version
```
pwndbg> version
Gdb: GNU gdb (GDB) 8.0.1
Python: 3.6.3 (default, Oct 24 2017, 14:48:20) [GCC 7.2.0]
Pwndbg: 1.0.0 build: 5811010
```
--- END ISSUE ---
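The transcript shows the shell choking on the unquoted argument (`syntax error near unexpected token '('`): joining the arguments with a bare `' '.join(...)` loses the original argument boundaries. A small standalone sketch of the difference, using `shlex.quote` (Python 3) as one way to preserve those boundaries; the argument tuple is copied from the session above.
```python
# Standalone sketch of the quoting problem from the session above.
import shlex

args = ('-c', 'print(1); print(2)')

print('run ' + ' '.join(args))
# run -c print(1); print(2)        <- the shell re-splits this, as in the error above

print('run ' + ' '.join(shlex.quote(arg) for arg in args))
# run -c 'print(1); print(2)'      <- the second argument survives as one word
```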
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pwndbg/commands/start.py`
Content:
```
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3 """
4 Launches the target process after setting a breakpoint at a convenient
5 entry point.
6 """
7 from __future__ import absolute_import
8 from __future__ import division
9 from __future__ import print_function
10 from __future__ import unicode_literals
11
12 import gdb
13
14 import pwndbg.commands
15 import pwndbg.elf
16 import pwndbg.events
17 import pwndbg.symbol
18
19 break_on_first_instruction = False
20
21
22 @pwndbg.events.start
23 def on_start():
24 global break_on_first_instruction
25 if break_on_first_instruction:
26 spec = "*%#x" % (int(pwndbg.elf.entry()))
27 gdb.Breakpoint(spec, temporary=True)
28 break_on_first_instruction = False
29
30
31 @pwndbg.commands.Command
32 def start(*a):
33 """
34 Set a breakpoint at a convenient location in the binary,
35 generally 'main', 'init', or the entry point.
36 """
37 run = 'run ' + ' '.join(a)
38
39 symbols = ["main",
40 "_main",
41 "start",
42 "_start",
43 "init",
44 "_init"]
45
46 for symbol in symbols:
47 address = pwndbg.symbol.address(symbol)
48
49 if not address:
50 continue
51
52 b = gdb.Breakpoint(symbol, temporary=True)
53 gdb.execute(run, from_tty=False, to_string=True)
54 return
55
56 # Try a breakpoint at the binary entry
57 entry(*a)
58
59
60 @pwndbg.commands.Command
61 @pwndbg.commands.OnlyWithFile
62 def entry(*a):
63 """
64 Set a breakpoint at the first instruction executed in
65 the target binary.
66 """
67 global break_on_first_instruction
68 break_on_first_instruction = True
69 run = 'run ' + ' '.join(a)
70 gdb.execute(run, from_tty=False)
71
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pwndbg/commands/start.py b/pwndbg/commands/start.py
--- a/pwndbg/commands/start.py
+++ b/pwndbg/commands/start.py
@@ -9,6 +9,8 @@
from __future__ import print_function
from __future__ import unicode_literals
+import shlex
+
import gdb
import pwndbg.commands
@@ -66,5 +68,5 @@
"""
global break_on_first_instruction
break_on_first_instruction = True
- run = 'run ' + ' '.join(a)
+ run = 'run ' + ' '.join(map(shlex.quote, a))
gdb.execute(run, from_tty=False)
| {"golden_diff": "diff --git a/pwndbg/commands/start.py b/pwndbg/commands/start.py\n--- a/pwndbg/commands/start.py\n+++ b/pwndbg/commands/start.py\n@@ -9,6 +9,8 @@\n from __future__ import print_function\n from __future__ import unicode_literals\n \n+import shlex\n+\n import gdb\n \n import pwndbg.commands\n@@ -66,5 +68,5 @@\n \"\"\"\n global break_on_first_instruction\n break_on_first_instruction = True\n- run = 'run ' + ' '.join(a)\n+ run = 'run ' + ' '.join(map(shlex.quote, a))\n gdb.execute(run, from_tty=False)\n", "issue": "Broken `entry` command\n### Description\r\n\r\nThe `entry` command pass arguments differently then the `run` command.\r\n\r\n### Steps to reproduce\r\n\r\n```\r\n[dc@dc:pwndbg|dev *$%]$ gdb python\r\nLoaded 113 commands. Type pwndbg [filter] for a list.\r\nReading symbols from python...(no debugging symbols found)...done.\r\npwndbg> set exception-verbose on\r\nSet whether to print a full stacktracefor exceptions raised in Pwndbg commands to True\r\npwndbg> run -c \"print(1); print(2)\"\r\nStarting program: /usr/bin/python -c \"print(1); print(2)\"\r\n[Thread debugging using libthread_db enabled]\r\nUsing host libthread_db library \"/usr/lib/libthread_db.so.1\".\r\n1\r\n2\r\n[Inferior 1 (process 20590) exited normally]\r\npwndbg> entry -c \"print(1); print(2)\"\r\n('-c', 'print(1); print(2)')\r\nRunning '%s' run -c print(1); print(2)\r\n/bin/bash: -c: line 0: syntax error near unexpected token `('\r\n/bin/bash: -c: line 0: `exec /usr/bin/python -c print(1); print(2)'\r\nTraceback (most recent call last):\r\n File \"/home/dc/installed/pwndbg/pwndbg/commands/__init__.py\", line 100, in __call__\r\n return self.function(*args, **kwargs)\r\n File \"/home/dc/installed/pwndbg/pwndbg/commands/__init__.py\", line 181, in _OnlyWithFile\r\n return function(*a, **kw)\r\n File \"/home/dc/installed/pwndbg/pwndbg/commands/start.py\", line 72, in entry\r\n gdb.execute(run, from_tty=False)\r\ngdb.error: During startup program exited with code 1.\r\n\r\nIf that is an issue, you can report it on https://github.com/pwndbg/pwndbg/issues\r\n(Please don't forget to search if it hasn't been reported before)\r\nPS: Pull requests are welcome\r\n```\r\n\r\n### My version\r\n\r\n```\r\npwndbg> version\r\nGdb: GNU gdb (GDB) 8.0.1\r\nPython: 3.6.3 (default, Oct 24 2017, 14:48:20) [GCC 7.2.0]\r\nPwndbg: 1.0.0 build: 5811010\r\n```\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nLaunches the target process after setting a breakpoint at a convenient\nentry point.\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport gdb\n\nimport pwndbg.commands\nimport pwndbg.elf\nimport pwndbg.events\nimport pwndbg.symbol\n\nbreak_on_first_instruction = False\n\n\[email protected]\ndef on_start():\n global break_on_first_instruction\n if break_on_first_instruction:\n spec = \"*%#x\" % (int(pwndbg.elf.entry()))\n gdb.Breakpoint(spec, temporary=True)\n break_on_first_instruction = False\n\n\[email protected]\ndef start(*a):\n \"\"\"\n Set a breakpoint at a convenient location in the binary,\n generally 'main', 'init', or the entry point.\n \"\"\"\n run = 'run ' + ' '.join(a)\n\n symbols = [\"main\",\n \"_main\",\n \"start\",\n \"_start\",\n \"init\",\n \"_init\"]\n\n for symbol in symbols:\n address = pwndbg.symbol.address(symbol)\n\n if not address:\n continue\n\n b = gdb.Breakpoint(symbol, temporary=True)\n gdb.execute(run, from_tty=False, 
to_string=True)\n return\n\n # Try a breakpoint at the binary entry\n entry(*a)\n\n\[email protected]\[email protected]\ndef entry(*a):\n \"\"\"\n Set a breakpoint at the first instruction executed in\n the target binary.\n \"\"\"\n global break_on_first_instruction\n break_on_first_instruction = True\n run = 'run ' + ' '.join(a)\n gdb.execute(run, from_tty=False)\n", "path": "pwndbg/commands/start.py"}], "after_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nLaunches the target process after setting a breakpoint at a convenient\nentry point.\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport shlex\n\nimport gdb\n\nimport pwndbg.commands\nimport pwndbg.elf\nimport pwndbg.events\nimport pwndbg.symbol\n\nbreak_on_first_instruction = False\n\n\[email protected]\ndef on_start():\n global break_on_first_instruction\n if break_on_first_instruction:\n spec = \"*%#x\" % (int(pwndbg.elf.entry()))\n gdb.Breakpoint(spec, temporary=True)\n break_on_first_instruction = False\n\n\[email protected]\ndef start(*a):\n \"\"\"\n Set a breakpoint at a convenient location in the binary,\n generally 'main', 'init', or the entry point.\n \"\"\"\n run = 'run ' + ' '.join(a)\n\n symbols = [\"main\",\n \"_main\",\n \"start\",\n \"_start\",\n \"init\",\n \"_init\"]\n\n for symbol in symbols:\n address = pwndbg.symbol.address(symbol)\n\n if not address:\n continue\n\n b = gdb.Breakpoint(symbol, temporary=True)\n gdb.execute(run, from_tty=False, to_string=True)\n return\n\n # Try a breakpoint at the binary entry\n entry(*a)\n\n\[email protected]\[email protected]\ndef entry(*a):\n \"\"\"\n Set a breakpoint at the first instruction executed in\n the target binary.\n \"\"\"\n global break_on_first_instruction\n break_on_first_instruction = True\n run = 'run ' + ' '.join(map(shlex.quote, a))\n gdb.execute(run, from_tty=False)\n", "path": "pwndbg/commands/start.py"}]} | 1,351 | 151 |
gh_patches_debug_7911 | rasdani/github-patches | git_diff | edgedb__edgedb-1946 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
ISE when LIMIT/OFFSET correlated with query
```
sully> SELECT Object LIMIT len(<str>.id);
ERROR: InternalServerError: argument of LIMIT must not contain variables
```
```
sully> SELECT Object OFFSET len(<str>.id);
ERROR: InternalServerError: argument of OFFSET must not contain variables
```
Rejecting these is correct but we want a real error.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `edb/edgeql/compiler/clauses.py`
Content:
```
1 #
2 # This source file is part of the EdgeDB open source project.
3 #
4 # Copyright 2008-present MagicStack Inc. and the EdgeDB authors.
5 #
6 # Licensed under the Apache License, Version 2.0 (the "License");
7 # you may not use this file except in compliance with the License.
8 # You may obtain a copy of the License at
9 #
10 # http://www.apache.org/licenses/LICENSE-2.0
11 #
12 # Unless required by applicable law or agreed to in writing, software
13 # distributed under the License is distributed on an "AS IS" BASIS,
14 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 # See the License for the specific language governing permissions and
16 # limitations under the License.
17 #
18
19
20 """EdgeQL compiler functions to process shared clauses."""
21
22
23 from __future__ import annotations
24
25 from typing import *
26
27 from edb.edgeql import ast as qlast
28 from edb.ir import ast as irast
29
30 from edb import errors
31
32 from . import context
33 from . import dispatch
34 from . import inference
35 from . import polyres
36 from . import schemactx
37 from . import setgen
38
39
40 def compile_where_clause(
41 ir_stmt: irast.FilteredStmt,
42 where: Optional[qlast.Base], *,
43 ctx: context.ContextLevel) -> None:
44
45 if where is None:
46 return
47
48 with ctx.newscope(fenced=True) as subctx:
49 subctx.path_scope.unnest_fence = True
50 ir_expr = dispatch.compile(where, ctx=subctx)
51 bool_t = ctx.env.get_track_schema_type('std::bool')
52 ir_set = setgen.scoped_set(ir_expr, typehint=bool_t, ctx=subctx)
53
54 ir_stmt.where = ir_set
55
56
57 def compile_orderby_clause(
58 sortexprs: Optional[Iterable[qlast.SortExpr]], *,
59 ctx: context.ContextLevel) -> List[irast.SortExpr]:
60
61 result: List[irast.SortExpr] = []
62 if not sortexprs:
63 return result
64
65 with ctx.new() as subctx:
66 for sortexpr in sortexprs:
67 with subctx.newscope(fenced=True) as exprctx:
68 exprctx.path_scope.unnest_fence = True
69 ir_sortexpr = dispatch.compile(sortexpr.path, ctx=exprctx)
70 ir_sortexpr = setgen.scoped_set(
71 ir_sortexpr, force_reassign=True, ctx=exprctx)
72 ir_sortexpr.context = sortexpr.context
73
74 # Check that the sortexpr type is actually orderable
75 # with either '>' or '<' based on the DESC or ASC sort
76 # order.
77 env = exprctx.env
78 sort_type = inference.infer_type(ir_sortexpr, env)
79 # Postgres by default treats ASC as using '<' and DESC
80 # as using '>'. We should do the same.
81 if sortexpr.direction == qlast.SortDesc:
82 op_name = '>'
83 else:
84 op_name = '<'
85 opers = env.schema.get_operators(
86 op_name, module_aliases=exprctx.modaliases)
87
88 # Verify that a comparison operator is defined for 2
89 # sort_type expressions.
90 matched = polyres.find_callable(
91 opers,
92 args=[(sort_type, ir_sortexpr), (sort_type, ir_sortexpr)],
93 kwargs={},
94 ctx=exprctx)
95 if len(matched) != 1:
96 sort_type_name = schemactx.get_material_type(
97 sort_type, ctx=ctx).get_displayname(env.schema)
98 if len(matched) == 0:
99 raise errors.QueryError(
100 f'type {sort_type_name!r} cannot be used in '
101 f'ORDER BY clause because ordering is not '
102 f'defined for it',
103 context=sortexpr.context)
104
105 elif len(matched) > 1:
106 raise errors.QueryError(
107 f'type {sort_type_name!r} cannot be used in '
108 f'ORDER BY clause because ordering is '
109 f'ambiguous for it',
110 context=sortexpr.context)
111
112 result.append(
113 irast.SortExpr(
114 expr=ir_sortexpr,
115 direction=sortexpr.direction,
116 nones_order=sortexpr.nones_order))
117
118 return result
119
120
121 def compile_limit_offset_clause(
122 expr: Optional[qlast.Base], *,
123 ctx: context.ContextLevel) -> Optional[irast.Set]:
124 if expr is None:
125 ir_set = None
126 else:
127 with ctx.newscope(fenced=True) as subctx:
128 ir_expr = dispatch.compile(expr, ctx=subctx)
129 int_t = ctx.env.get_track_schema_type('std::int64')
130 ir_set = setgen.scoped_set(
131 ir_expr, force_reassign=True, typehint=int_t, ctx=subctx)
132 ir_set.context = expr.context
133
134 return ir_set
135
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/edb/edgeql/compiler/clauses.py b/edb/edgeql/compiler/clauses.py
--- a/edb/edgeql/compiler/clauses.py
+++ b/edb/edgeql/compiler/clauses.py
@@ -125,6 +125,10 @@
ir_set = None
else:
with ctx.newscope(fenced=True) as subctx:
+ # Clear out the partial_path_prefix, since we aren't in
+ # the scope of the select subject
+ subctx.partial_path_prefix = None
+
ir_expr = dispatch.compile(expr, ctx=subctx)
int_t = ctx.env.get_track_schema_type('std::int64')
ir_set = setgen.scoped_set(
| {"golden_diff": "diff --git a/edb/edgeql/compiler/clauses.py b/edb/edgeql/compiler/clauses.py\n--- a/edb/edgeql/compiler/clauses.py\n+++ b/edb/edgeql/compiler/clauses.py\n@@ -125,6 +125,10 @@\n ir_set = None\n else:\n with ctx.newscope(fenced=True) as subctx:\n+ # Clear out the partial_path_prefix, since we aren't in\n+ # the scope of the select subject\n+ subctx.partial_path_prefix = None\n+\n ir_expr = dispatch.compile(expr, ctx=subctx)\n int_t = ctx.env.get_track_schema_type('std::int64')\n ir_set = setgen.scoped_set(\n", "issue": "ISE when LIMIT/OFFSET correlated with query\n```\r\nsully> SELECT Object LIMIT len(<str>.id);\r\nERROR: InternalServerError: argument of LIMIT must not contain variables\r\n```\r\n\r\n```\r\nsully> SELECT Object OFFSET len(<str>.id);\r\nERROR: InternalServerError: argument of OFFSET must not contain variables\r\n```\r\n\r\nRejecting these is correct but we want a real error.\r\n\n", "before_files": [{"content": "#\n# This source file is part of the EdgeDB open source project.\n#\n# Copyright 2008-present MagicStack Inc. and the EdgeDB authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n\n\"\"\"EdgeQL compiler functions to process shared clauses.\"\"\"\n\n\nfrom __future__ import annotations\n\nfrom typing import *\n\nfrom edb.edgeql import ast as qlast\nfrom edb.ir import ast as irast\n\nfrom edb import errors\n\nfrom . import context\nfrom . import dispatch\nfrom . import inference\nfrom . import polyres\nfrom . import schemactx\nfrom . import setgen\n\n\ndef compile_where_clause(\n ir_stmt: irast.FilteredStmt,\n where: Optional[qlast.Base], *,\n ctx: context.ContextLevel) -> None:\n\n if where is None:\n return\n\n with ctx.newscope(fenced=True) as subctx:\n subctx.path_scope.unnest_fence = True\n ir_expr = dispatch.compile(where, ctx=subctx)\n bool_t = ctx.env.get_track_schema_type('std::bool')\n ir_set = setgen.scoped_set(ir_expr, typehint=bool_t, ctx=subctx)\n\n ir_stmt.where = ir_set\n\n\ndef compile_orderby_clause(\n sortexprs: Optional[Iterable[qlast.SortExpr]], *,\n ctx: context.ContextLevel) -> List[irast.SortExpr]:\n\n result: List[irast.SortExpr] = []\n if not sortexprs:\n return result\n\n with ctx.new() as subctx:\n for sortexpr in sortexprs:\n with subctx.newscope(fenced=True) as exprctx:\n exprctx.path_scope.unnest_fence = True\n ir_sortexpr = dispatch.compile(sortexpr.path, ctx=exprctx)\n ir_sortexpr = setgen.scoped_set(\n ir_sortexpr, force_reassign=True, ctx=exprctx)\n ir_sortexpr.context = sortexpr.context\n\n # Check that the sortexpr type is actually orderable\n # with either '>' or '<' based on the DESC or ASC sort\n # order.\n env = exprctx.env\n sort_type = inference.infer_type(ir_sortexpr, env)\n # Postgres by default treats ASC as using '<' and DESC\n # as using '>'. 
We should do the same.\n if sortexpr.direction == qlast.SortDesc:\n op_name = '>'\n else:\n op_name = '<'\n opers = env.schema.get_operators(\n op_name, module_aliases=exprctx.modaliases)\n\n # Verify that a comparison operator is defined for 2\n # sort_type expressions.\n matched = polyres.find_callable(\n opers,\n args=[(sort_type, ir_sortexpr), (sort_type, ir_sortexpr)],\n kwargs={},\n ctx=exprctx)\n if len(matched) != 1:\n sort_type_name = schemactx.get_material_type(\n sort_type, ctx=ctx).get_displayname(env.schema)\n if len(matched) == 0:\n raise errors.QueryError(\n f'type {sort_type_name!r} cannot be used in '\n f'ORDER BY clause because ordering is not '\n f'defined for it',\n context=sortexpr.context)\n\n elif len(matched) > 1:\n raise errors.QueryError(\n f'type {sort_type_name!r} cannot be used in '\n f'ORDER BY clause because ordering is '\n f'ambiguous for it',\n context=sortexpr.context)\n\n result.append(\n irast.SortExpr(\n expr=ir_sortexpr,\n direction=sortexpr.direction,\n nones_order=sortexpr.nones_order))\n\n return result\n\n\ndef compile_limit_offset_clause(\n expr: Optional[qlast.Base], *,\n ctx: context.ContextLevel) -> Optional[irast.Set]:\n if expr is None:\n ir_set = None\n else:\n with ctx.newscope(fenced=True) as subctx:\n ir_expr = dispatch.compile(expr, ctx=subctx)\n int_t = ctx.env.get_track_schema_type('std::int64')\n ir_set = setgen.scoped_set(\n ir_expr, force_reassign=True, typehint=int_t, ctx=subctx)\n ir_set.context = expr.context\n\n return ir_set\n", "path": "edb/edgeql/compiler/clauses.py"}], "after_files": [{"content": "#\n# This source file is part of the EdgeDB open source project.\n#\n# Copyright 2008-present MagicStack Inc. and the EdgeDB authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n\n\"\"\"EdgeQL compiler functions to process shared clauses.\"\"\"\n\n\nfrom __future__ import annotations\n\nfrom typing import *\n\nfrom edb.edgeql import ast as qlast\nfrom edb.ir import ast as irast\n\nfrom edb import errors\n\nfrom . import context\nfrom . import dispatch\nfrom . import inference\nfrom . import polyres\nfrom . import schemactx\nfrom . 
import setgen\n\n\ndef compile_where_clause(\n ir_stmt: irast.FilteredStmt,\n where: Optional[qlast.Base], *,\n ctx: context.ContextLevel) -> None:\n\n if where is None:\n return\n\n with ctx.newscope(fenced=True) as subctx:\n subctx.path_scope.unnest_fence = True\n ir_expr = dispatch.compile(where, ctx=subctx)\n bool_t = ctx.env.get_track_schema_type('std::bool')\n ir_set = setgen.scoped_set(ir_expr, typehint=bool_t, ctx=subctx)\n\n ir_stmt.where = ir_set\n\n\ndef compile_orderby_clause(\n sortexprs: Optional[Iterable[qlast.SortExpr]], *,\n ctx: context.ContextLevel) -> List[irast.SortExpr]:\n\n result: List[irast.SortExpr] = []\n if not sortexprs:\n return result\n\n with ctx.new() as subctx:\n for sortexpr in sortexprs:\n with subctx.newscope(fenced=True) as exprctx:\n exprctx.path_scope.unnest_fence = True\n ir_sortexpr = dispatch.compile(sortexpr.path, ctx=exprctx)\n ir_sortexpr = setgen.scoped_set(\n ir_sortexpr, force_reassign=True, ctx=exprctx)\n ir_sortexpr.context = sortexpr.context\n\n # Check that the sortexpr type is actually orderable\n # with either '>' or '<' based on the DESC or ASC sort\n # order.\n env = exprctx.env\n sort_type = inference.infer_type(ir_sortexpr, env)\n # Postgres by default treats ASC as using '<' and DESC\n # as using '>'. We should do the same.\n if sortexpr.direction == qlast.SortDesc:\n op_name = '>'\n else:\n op_name = '<'\n opers = env.schema.get_operators(\n op_name, module_aliases=exprctx.modaliases)\n\n # Verify that a comparison operator is defined for 2\n # sort_type expressions.\n matched = polyres.find_callable(\n opers,\n args=[(sort_type, ir_sortexpr), (sort_type, ir_sortexpr)],\n kwargs={},\n ctx=exprctx)\n if len(matched) != 1:\n sort_type_name = schemactx.get_material_type(\n sort_type, ctx=ctx).get_displayname(env.schema)\n if len(matched) == 0:\n raise errors.QueryError(\n f'type {sort_type_name!r} cannot be used in '\n f'ORDER BY clause because ordering is not '\n f'defined for it',\n context=sortexpr.context)\n\n elif len(matched) > 1:\n raise errors.QueryError(\n f'type {sort_type_name!r} cannot be used in '\n f'ORDER BY clause because ordering is '\n f'ambiguous for it',\n context=sortexpr.context)\n\n result.append(\n irast.SortExpr(\n expr=ir_sortexpr,\n direction=sortexpr.direction,\n nones_order=sortexpr.nones_order))\n\n return result\n\n\ndef compile_limit_offset_clause(\n expr: Optional[qlast.Base], *,\n ctx: context.ContextLevel) -> Optional[irast.Set]:\n if expr is None:\n ir_set = None\n else:\n with ctx.newscope(fenced=True) as subctx:\n # Clear out the partial_path_prefix, since we aren't in\n # the scope of the select subject\n subctx.partial_path_prefix = None\n\n ir_expr = dispatch.compile(expr, ctx=subctx)\n int_t = ctx.env.get_track_schema_type('std::int64')\n ir_set = setgen.scoped_set(\n ir_expr, force_reassign=True, typehint=int_t, ctx=subctx)\n ir_set.context = expr.context\n\n return ir_set\n", "path": "edb/edgeql/compiler/clauses.py"}]} | 1,686 | 161 |
gh_patches_debug_34984 | rasdani/github-patches | git_diff | DataDog__dd-trace-py-3177 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Psycopg patching doesn't properly handle execute_values
The `execute_values` extension in psycopg2 composes and executes the query with b-string, even if you passed the query as a string. Below is the full function from psycopg2.extras
```python
def execute_values(cur, sql, argslist, template=None, page_size=100, fetch=False):
from psycopg2.sql import Composable
if isinstance(sql, Composable):
sql = sql.as_string(cur)
# we can't just use sql % vals because vals is bytes: if sql is bytes
# there will be some decoding error because of stupid codec used, and Py3
# doesn't implement % on bytes.
if not isinstance(sql, bytes):
sql = sql.encode(_ext.encodings[cur.connection.encoding])
pre, post = _split_sql(sql)
result = [] if fetch else None
for page in _paginate(argslist, page_size=page_size):
if template is None:
template = b'(' + b','.join([b'%s'] * len(page[0])) + b')'
parts = pre[:]
for args in page:
parts.append(cur.mogrify(template, args))
parts.append(b',')
parts[-1:] = post
cur.execute(b''.join(parts))
if fetch:
result.extend(cur.fetchall())
return result
```
The problem is that ddtrace assumes that the "resource" added to a span is a string. The result is that when `span.finish()` is called in the datadog lambda handler and it tries to serialize the span to json, it blows up with "TypeError: Object of type bytes is not JSON serializable". Upon investigation, I discovered that the ddtrace.internal.encoder.py's JSONEncoder just does a simple json.dumps() on all the spans and the `resource` attribute on the span from the using `execute_values` is bytes, not a string.
I think the solution here is simply to update the Psycopg2TracedCursor class to decode the resource from bytes if it is bytes, like this:
```python
class Psycopg2TracedCursor(dbapi.TracedCursor):
"""TracedCursor for psycopg2"""
def _trace_method(self, method, name, resource, extra_tags, *args, **kwargs):
# treat psycopg2.sql.Composable resource objects as strings
if isinstance(resource, Composable):
resource = resource.as_string(self.__wrapped__)
# THIS IS THE NEW PART BELOW (next 2 lines)
if isinstance(resource, bytes):
resource = resource.decode('utf-8')
return super(Psycopg2TracedCursor, self)._trace_method(method, name, resource, extra_tags, *args, **kwargs)
```
### Which version of dd-trace-py are you using?
Lambda layer, v50.
### Which version of pip are you using?
n/a
### How can we reproduce your problem?
Use `execute_values` while inside a tracing context. It should have a 100% failure rate.
### What is the result that you get?
A type error when span.finish() is called and the metrics are furnished to DD.
### What is the result that you expected?
It should work as normal, with the resource decoded as a string.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ddtrace/internal/encoding.py`
Content:
```
1 import json
2 from typing import Any
3 from typing import Dict
4 from typing import List
5 from typing import Optional
6 from typing import TYPE_CHECKING
7
8 from ._encoding import ListStringTable
9 from ._encoding import MsgpackEncoderV03
10 from ._encoding import MsgpackEncoderV05
11 from .logger import get_logger
12
13
14 __all__ = ["MsgpackEncoderV03", "MsgpackEncoderV05", "ListStringTable", "MSGPACK_ENCODERS"]
15
16
17 if TYPE_CHECKING:
18 from ..span import Span
19
20
21 log = get_logger(__name__)
22
23
24 class _EncoderBase(object):
25 """
26 Encoder interface that provides the logic to encode traces and service.
27 """
28
29 def encode_traces(self, traces):
30 # type: (List[List[Span]]) -> str
31 """
32 Encodes a list of traces, expecting a list of items where each items
33 is a list of spans. Before dumping the string in a serialized format all
34 traces are normalized according to the encoding format. The trace
35 nesting is not changed.
36
37 :param traces: A list of traces that should be serialized
38 """
39 raise NotImplementedError()
40
41 def encode(self, obj):
42 # type: (List[List[Any]]) -> str
43 """
44 Defines the underlying format used during traces or services encoding.
45 This method must be implemented and should only be used by the internal
46 functions.
47 """
48 raise NotImplementedError()
49
50
51 class JSONEncoder(_EncoderBase):
52 content_type = "application/json"
53
54 def encode_traces(self, traces):
55 normalized_traces = [[span.to_dict() for span in trace] for trace in traces]
56 return self.encode(normalized_traces)
57
58 @staticmethod
59 def encode(obj):
60 # type: (Any) -> str
61 return json.dumps(obj)
62
63
64 class JSONEncoderV2(JSONEncoder):
65 """
66 JSONEncoderV2 encodes traces to the new intake API format.
67 """
68
69 content_type = "application/json"
70
71 def encode_traces(self, traces):
72 # type: (List[List[Span]]) -> str
73 normalized_traces = [[JSONEncoderV2._convert_span(span) for span in trace] for trace in traces]
74 return self.encode({"traces": normalized_traces})
75
76 @staticmethod
77 def _convert_span(span):
78 # type: (Span) -> Dict[str, Any]
79 sp = span.to_dict()
80 sp["trace_id"] = JSONEncoderV2._encode_id_to_hex(sp.get("trace_id"))
81 sp["parent_id"] = JSONEncoderV2._encode_id_to_hex(sp.get("parent_id"))
82 sp["span_id"] = JSONEncoderV2._encode_id_to_hex(sp.get("span_id"))
83 return sp
84
85 @staticmethod
86 def _encode_id_to_hex(dd_id):
87 # type: (Optional[int]) -> str
88 if not dd_id:
89 return "0000000000000000"
90 return "%0.16X" % int(dd_id)
91
92 @staticmethod
93 def _decode_id_to_hex(hex_id):
94 # type: (Optional[str]) -> int
95 if not hex_id:
96 return 0
97 return int(hex_id, 16)
98
99
100 MSGPACK_ENCODERS = {
101 "v0.3": MsgpackEncoderV03,
102 "v0.4": MsgpackEncoderV03,
103 "v0.5": MsgpackEncoderV05,
104 }
105
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/ddtrace/internal/encoding.py b/ddtrace/internal/encoding.py
--- a/ddtrace/internal/encoding.py
+++ b/ddtrace/internal/encoding.py
@@ -8,6 +8,9 @@
from ._encoding import ListStringTable
from ._encoding import MsgpackEncoderV03
from ._encoding import MsgpackEncoderV05
+from .compat import PY3
+from .compat import binary_type
+from .compat import ensure_text
from .logger import get_logger
@@ -48,17 +51,33 @@
raise NotImplementedError()
-class JSONEncoder(_EncoderBase):
+class JSONEncoder(json.JSONEncoder, _EncoderBase):
content_type = "application/json"
def encode_traces(self, traces):
- normalized_traces = [[span.to_dict() for span in trace] for trace in traces]
+ normalized_traces = [[JSONEncoder._normalize_span(span.to_dict()) for span in trace] for trace in traces]
return self.encode(normalized_traces)
@staticmethod
- def encode(obj):
- # type: (Any) -> str
- return json.dumps(obj)
+ def _normalize_span(span):
+ # Ensure all string attributes are actually strings and not bytes
+ # DEV: We are deferring meta/metrics to reduce any performance issues.
+ # Meta/metrics may still contain `bytes` and have encoding issues.
+ span["resource"] = JSONEncoder._normalize_str(span["resource"])
+ span["name"] = JSONEncoder._normalize_str(span["name"])
+ span["service"] = JSONEncoder._normalize_str(span["service"])
+ return span
+
+ @staticmethod
+ def _normalize_str(obj):
+ if obj is None:
+ return obj
+
+ if PY3:
+ return ensure_text(obj, errors="backslashreplace")
+ elif isinstance(obj, binary_type):
+ return obj.decode("utf-8", errors="replace")
+ return obj
class JSONEncoderV2(JSONEncoder):
@@ -77,6 +96,7 @@
def _convert_span(span):
# type: (Span) -> Dict[str, Any]
sp = span.to_dict()
+ sp = JSONEncoderV2._normalize_span(sp)
sp["trace_id"] = JSONEncoderV2._encode_id_to_hex(sp.get("trace_id"))
sp["parent_id"] = JSONEncoderV2._encode_id_to_hex(sp.get("parent_id"))
sp["span_id"] = JSONEncoderV2._encode_id_to_hex(sp.get("span_id"))
| {"golden_diff": "diff --git a/ddtrace/internal/encoding.py b/ddtrace/internal/encoding.py\n--- a/ddtrace/internal/encoding.py\n+++ b/ddtrace/internal/encoding.py\n@@ -8,6 +8,9 @@\n from ._encoding import ListStringTable\n from ._encoding import MsgpackEncoderV03\n from ._encoding import MsgpackEncoderV05\n+from .compat import PY3\n+from .compat import binary_type\n+from .compat import ensure_text\n from .logger import get_logger\n \n \n@@ -48,17 +51,33 @@\n raise NotImplementedError()\n \n \n-class JSONEncoder(_EncoderBase):\n+class JSONEncoder(json.JSONEncoder, _EncoderBase):\n content_type = \"application/json\"\n \n def encode_traces(self, traces):\n- normalized_traces = [[span.to_dict() for span in trace] for trace in traces]\n+ normalized_traces = [[JSONEncoder._normalize_span(span.to_dict()) for span in trace] for trace in traces]\n return self.encode(normalized_traces)\n \n @staticmethod\n- def encode(obj):\n- # type: (Any) -> str\n- return json.dumps(obj)\n+ def _normalize_span(span):\n+ # Ensure all string attributes are actually strings and not bytes\n+ # DEV: We are deferring meta/metrics to reduce any performance issues.\n+ # Meta/metrics may still contain `bytes` and have encoding issues.\n+ span[\"resource\"] = JSONEncoder._normalize_str(span[\"resource\"])\n+ span[\"name\"] = JSONEncoder._normalize_str(span[\"name\"])\n+ span[\"service\"] = JSONEncoder._normalize_str(span[\"service\"])\n+ return span\n+\n+ @staticmethod\n+ def _normalize_str(obj):\n+ if obj is None:\n+ return obj\n+\n+ if PY3:\n+ return ensure_text(obj, errors=\"backslashreplace\")\n+ elif isinstance(obj, binary_type):\n+ return obj.decode(\"utf-8\", errors=\"replace\")\n+ return obj\n \n \n class JSONEncoderV2(JSONEncoder):\n@@ -77,6 +96,7 @@\n def _convert_span(span):\n # type: (Span) -> Dict[str, Any]\n sp = span.to_dict()\n+ sp = JSONEncoderV2._normalize_span(sp)\n sp[\"trace_id\"] = JSONEncoderV2._encode_id_to_hex(sp.get(\"trace_id\"))\n sp[\"parent_id\"] = JSONEncoderV2._encode_id_to_hex(sp.get(\"parent_id\"))\n sp[\"span_id\"] = JSONEncoderV2._encode_id_to_hex(sp.get(\"span_id\"))\n", "issue": "Psycopg patching doesn't properly handle execute_values\nThe `execute_values` extension in psycopg2 composes and executes the query with b-string, even if you passed the query as a string. Below is the full function from psycopg2.extras\r\n\r\n```python\r\ndef execute_values(cur, sql, argslist, template=None, page_size=100, fetch=False):\r\n from psycopg2.sql import Composable\r\n if isinstance(sql, Composable):\r\n sql = sql.as_string(cur)\r\n\r\n # we can't just use sql % vals because vals is bytes: if sql is bytes\r\n # there will be some decoding error because of stupid codec used, and Py3\r\n # doesn't implement % on bytes.\r\n if not isinstance(sql, bytes):\r\n sql = sql.encode(_ext.encodings[cur.connection.encoding])\r\n pre, post = _split_sql(sql)\r\n\r\n result = [] if fetch else None\r\n for page in _paginate(argslist, page_size=page_size):\r\n if template is None:\r\n template = b'(' + b','.join([b'%s'] * len(page[0])) + b')'\r\n parts = pre[:]\r\n for args in page:\r\n parts.append(cur.mogrify(template, args))\r\n parts.append(b',')\r\n parts[-1:] = post\r\n cur.execute(b''.join(parts))\r\n if fetch:\r\n result.extend(cur.fetchall())\r\n\r\n return result\r\n```\r\n\r\nThe problem is that ddtrace assumes that the \"resource\" added to a span is a string. 
The result is that when `span.finish()` is called in the datadog lambda handler and it tries to serialize the span to json, it blows up with \"TypeError: Object of type bytes is not JSON serializable\". Upon investigation, I discovered that the ddtrace.internal.encoder.py's JSONEncoder just does a simple json.dumps() on all the spans and the `resource` attribute on the span from the using `execute_values` is bytes, not a string.\r\n\r\nI think the solution here is simply to update the Psycopg2TracedCursor class to decode the resource from bytes if it is bytes, like this:\r\n\r\n```python\r\nclass Psycopg2TracedCursor(dbapi.TracedCursor):\r\n \"\"\"TracedCursor for psycopg2\"\"\"\r\n\r\n def _trace_method(self, method, name, resource, extra_tags, *args, **kwargs):\r\n # treat psycopg2.sql.Composable resource objects as strings\r\n if isinstance(resource, Composable):\r\n resource = resource.as_string(self.__wrapped__)\r\n # THIS IS THE NEW PART BELOW (next 2 lines)\r\n if isinstance(resource, bytes):\r\n resource = resource.decode('utf-8')\r\n return super(Psycopg2TracedCursor, self)._trace_method(method, name, resource, extra_tags, *args, **kwargs)\r\n```\r\n\r\n### Which version of dd-trace-py are you using?\r\nLambda layer, v50.\r\n### Which version of pip are you using?\r\nn/a\r\n\r\n### How can we reproduce your problem?\r\nUse `execute_values` while inside a tracing context. It should have a 100% failure rate.\r\n\r\n### What is the result that you get?\r\nA type error when span.finish() is called and the metrics are furnished to DD.\r\n\r\n### What is the result that you expected?\r\nIt should work as normal, with the resource decoded as a string.\r\n\n", "before_files": [{"content": "import json\nfrom typing import Any\nfrom typing import Dict\nfrom typing import List\nfrom typing import Optional\nfrom typing import TYPE_CHECKING\n\nfrom ._encoding import ListStringTable\nfrom ._encoding import MsgpackEncoderV03\nfrom ._encoding import MsgpackEncoderV05\nfrom .logger import get_logger\n\n\n__all__ = [\"MsgpackEncoderV03\", \"MsgpackEncoderV05\", \"ListStringTable\", \"MSGPACK_ENCODERS\"]\n\n\nif TYPE_CHECKING:\n from ..span import Span\n\n\nlog = get_logger(__name__)\n\n\nclass _EncoderBase(object):\n \"\"\"\n Encoder interface that provides the logic to encode traces and service.\n \"\"\"\n\n def encode_traces(self, traces):\n # type: (List[List[Span]]) -> str\n \"\"\"\n Encodes a list of traces, expecting a list of items where each items\n is a list of spans. Before dumping the string in a serialized format all\n traces are normalized according to the encoding format. 
The trace\n nesting is not changed.\n\n :param traces: A list of traces that should be serialized\n \"\"\"\n raise NotImplementedError()\n\n def encode(self, obj):\n # type: (List[List[Any]]) -> str\n \"\"\"\n Defines the underlying format used during traces or services encoding.\n This method must be implemented and should only be used by the internal\n functions.\n \"\"\"\n raise NotImplementedError()\n\n\nclass JSONEncoder(_EncoderBase):\n content_type = \"application/json\"\n\n def encode_traces(self, traces):\n normalized_traces = [[span.to_dict() for span in trace] for trace in traces]\n return self.encode(normalized_traces)\n\n @staticmethod\n def encode(obj):\n # type: (Any) -> str\n return json.dumps(obj)\n\n\nclass JSONEncoderV2(JSONEncoder):\n \"\"\"\n JSONEncoderV2 encodes traces to the new intake API format.\n \"\"\"\n\n content_type = \"application/json\"\n\n def encode_traces(self, traces):\n # type: (List[List[Span]]) -> str\n normalized_traces = [[JSONEncoderV2._convert_span(span) for span in trace] for trace in traces]\n return self.encode({\"traces\": normalized_traces})\n\n @staticmethod\n def _convert_span(span):\n # type: (Span) -> Dict[str, Any]\n sp = span.to_dict()\n sp[\"trace_id\"] = JSONEncoderV2._encode_id_to_hex(sp.get(\"trace_id\"))\n sp[\"parent_id\"] = JSONEncoderV2._encode_id_to_hex(sp.get(\"parent_id\"))\n sp[\"span_id\"] = JSONEncoderV2._encode_id_to_hex(sp.get(\"span_id\"))\n return sp\n\n @staticmethod\n def _encode_id_to_hex(dd_id):\n # type: (Optional[int]) -> str\n if not dd_id:\n return \"0000000000000000\"\n return \"%0.16X\" % int(dd_id)\n\n @staticmethod\n def _decode_id_to_hex(hex_id):\n # type: (Optional[str]) -> int\n if not hex_id:\n return 0\n return int(hex_id, 16)\n\n\nMSGPACK_ENCODERS = {\n \"v0.3\": MsgpackEncoderV03,\n \"v0.4\": MsgpackEncoderV03,\n \"v0.5\": MsgpackEncoderV05,\n}\n", "path": "ddtrace/internal/encoding.py"}], "after_files": [{"content": "import json\nfrom typing import Any\nfrom typing import Dict\nfrom typing import List\nfrom typing import Optional\nfrom typing import TYPE_CHECKING\n\nfrom ._encoding import ListStringTable\nfrom ._encoding import MsgpackEncoderV03\nfrom ._encoding import MsgpackEncoderV05\nfrom .compat import PY3\nfrom .compat import binary_type\nfrom .compat import ensure_text\nfrom .logger import get_logger\n\n\n__all__ = [\"MsgpackEncoderV03\", \"MsgpackEncoderV05\", \"ListStringTable\", \"MSGPACK_ENCODERS\"]\n\n\nif TYPE_CHECKING:\n from ..span import Span\n\n\nlog = get_logger(__name__)\n\n\nclass _EncoderBase(object):\n \"\"\"\n Encoder interface that provides the logic to encode traces and service.\n \"\"\"\n\n def encode_traces(self, traces):\n # type: (List[List[Span]]) -> str\n \"\"\"\n Encodes a list of traces, expecting a list of items where each items\n is a list of spans. Before dumping the string in a serialized format all\n traces are normalized according to the encoding format. 
The trace\n nesting is not changed.\n\n :param traces: A list of traces that should be serialized\n \"\"\"\n raise NotImplementedError()\n\n def encode(self, obj):\n # type: (List[List[Any]]) -> str\n \"\"\"\n Defines the underlying format used during traces or services encoding.\n This method must be implemented and should only be used by the internal\n functions.\n \"\"\"\n raise NotImplementedError()\n\n\nclass JSONEncoder(json.JSONEncoder, _EncoderBase):\n content_type = \"application/json\"\n\n def encode_traces(self, traces):\n normalized_traces = [[JSONEncoder._normalize_span(span.to_dict()) for span in trace] for trace in traces]\n return self.encode(normalized_traces)\n\n @staticmethod\n def _normalize_span(span):\n # Ensure all string attributes are actually strings and not bytes\n # DEV: We are deferring meta/metrics to reduce any performance issues.\n # Meta/metrics may still contain `bytes` and have encoding issues.\n span[\"resource\"] = JSONEncoder._normalize_str(span[\"resource\"])\n span[\"name\"] = JSONEncoder._normalize_str(span[\"name\"])\n span[\"service\"] = JSONEncoder._normalize_str(span[\"service\"])\n return span\n\n @staticmethod\n def _normalize_str(obj):\n if obj is None:\n return obj\n\n if PY3:\n return ensure_text(obj, errors=\"backslashreplace\")\n elif isinstance(obj, binary_type):\n return obj.decode(\"utf-8\", errors=\"replace\")\n return obj\n\n\nclass JSONEncoderV2(JSONEncoder):\n \"\"\"\n JSONEncoderV2 encodes traces to the new intake API format.\n \"\"\"\n\n content_type = \"application/json\"\n\n def encode_traces(self, traces):\n # type: (List[List[Span]]) -> str\n normalized_traces = [[JSONEncoderV2._convert_span(span) for span in trace] for trace in traces]\n return self.encode({\"traces\": normalized_traces})\n\n @staticmethod\n def _convert_span(span):\n # type: (Span) -> Dict[str, Any]\n sp = span.to_dict()\n sp = JSONEncoderV2._normalize_span(sp)\n sp[\"trace_id\"] = JSONEncoderV2._encode_id_to_hex(sp.get(\"trace_id\"))\n sp[\"parent_id\"] = JSONEncoderV2._encode_id_to_hex(sp.get(\"parent_id\"))\n sp[\"span_id\"] = JSONEncoderV2._encode_id_to_hex(sp.get(\"span_id\"))\n return sp\n\n @staticmethod\n def _encode_id_to_hex(dd_id):\n # type: (Optional[int]) -> str\n if not dd_id:\n return \"0000000000000000\"\n return \"%0.16X\" % int(dd_id)\n\n @staticmethod\n def _decode_id_to_hex(hex_id):\n # type: (Optional[str]) -> int\n if not hex_id:\n return 0\n return int(hex_id, 16)\n\n\nMSGPACK_ENCODERS = {\n \"v0.3\": MsgpackEncoderV03,\n \"v0.4\": MsgpackEncoderV03,\n \"v0.5\": MsgpackEncoderV05,\n}\n", "path": "ddtrace/internal/encoding.py"}]} | 1,917 | 561 |
gh_patches_debug_30436 | rasdani/github-patches | git_diff | uccser__cs-unplugged-463 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add sorting networks lessons
- [ ] Lesson 2 (8-10) - needs generated resources
- [ ] Lesson 1 (11-14)
- [ ] Lesson 2 (11-14)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `csunplugged/resources/views/sorting_network_cards.py`
Content:
```
1 """Module for generating Sorting Network Cards resource."""
2
3 from random import sample
4 from PIL import Image, ImageDraw, ImageFont
5 from utils.retrieve_query_parameter import retrieve_query_parameter
6
7
8 def resource_image(request, resource):
9 """Create a image for Sorting Network Cards resource.
10
11 Args:
12 request: HTTP request object.
13 resource: Object of resource data.
14
15 Returns:
16 A list of Pillow image objects.
17 """
18 IMAGE_SIZE_X = 2000
19 IMAGE_SIZE_Y = 3000
20 LINE_COLOUR = "#000000"
21 LINE_WIDTH = 3
22 font_path = "static/fonts/PatrickHand-Regular.ttf"
23
24 # Retrieve parameters
25 parameter_options = valid_options()
26 card_type = retrieve_query_parameter(request, "type", parameter_options["type"])
27
28 # Create card outlines
29 card_outlines = Image.new("RGB", (IMAGE_SIZE_X, IMAGE_SIZE_Y), "#fff")
30 draw = ImageDraw.Draw(card_outlines)
31 for x_coord in range(0, IMAGE_SIZE_X, IMAGE_SIZE_X - LINE_WIDTH):
32 draw.line([(x_coord, 0), (x_coord, IMAGE_SIZE_Y)], fill=LINE_COLOUR, width=LINE_WIDTH)
33 for y_coord in range(0, IMAGE_SIZE_Y, int(IMAGE_SIZE_Y / 2 - LINE_WIDTH)):
34 draw.line([(0, y_coord), (IMAGE_SIZE_X, y_coord)], fill=LINE_COLOUR, width=LINE_WIDTH)
35
36 # Prepare text data
37 if card_type == "small_numbers":
38 font_size = 800
39 text = ["1", "2", "3", "4", "5", "6"]
40 elif card_type == "large_numbers":
41 font_size = 500
42 text = []
43 numbers = sample(range(1700000, 2100000), 6)
44 for number in numbers:
45 text.append("{:,}".format(number))
46 elif card_type == "fractions":
47 font_size = 900
48 font_path = "static/fonts/NotoSans-Regular.ttf"
49 text = [u"\u00bd", u"\u2153", u"\u2154", u"\u215c", u"\u00be", u"\u215d"]
50 else:
51 font_size = 300
52 text = [
53 "tahi",
54 "rua",
55 "toru",
56 "whā",
57 "rima",
58 "ono",
59 "whitu",
60 "waru",
61 "iwa",
62 "tekau",
63 "tekau mā tahi",
64 "tekau mā waru",
65 "tekau mā toru",
66 "tekau mā whā",
67 "rua tekau",
68 "rua tekau mā ono",
69 ]
70
71 font = ImageFont.truetype(font_path, font_size)
72 card_centers = [
73 (IMAGE_SIZE_X / 2, IMAGE_SIZE_Y / 4),
74 (IMAGE_SIZE_X / 2, (IMAGE_SIZE_Y / 4) * 3),
75 ]
76
77 # Add text to cards
78 images = []
79 for (text_number, text_string) in enumerate(text):
80 if text_number % 2 == 0:
81 page = card_outlines.copy()
82 draw = ImageDraw.Draw(page)
83 (x, y) = card_centers[0]
84 else:
85 (x, y) = card_centers[1]
86
87 text_width, text_height = draw.textsize(text_string, font=font)
88 coord_x = x - (text_width / 2)
89 coord_y = y - (text_height / 1.5)
90 draw.text(
91 (coord_x, coord_y),
92 text_string,
93 font=font,
94 fill="#000"
95 )
96 # If text on second card but not last page
97 if text_number % 2 == 1 and text_number != len(text) - 1:
98 images.append(page)
99 images.append(page)
100
101 return images
102
103
104 def subtitle(request, resource):
105 """Return the subtitle string of the resource.
106
107 Used after the resource name in the filename, and
108 also on the resource image.
109
110 Args:
111 request: HTTP request object
112 resource: Object of resource data.
113
114 Returns:
115 text for subtitle (string)
116 """
117 return "{} - {}".format(
118 retrieve_query_parameter(request, "type").replace("_", " "),
119 retrieve_query_parameter(request, "paper_size")
120 )
121
122
123 def valid_options():
124 """Provide dictionary of all valid parameters.
125
126 This excludes the header text parameter.
127
128 Returns:
129 All valid options (dict).
130 """
131 return {
132 "type": ["small_numbers", "large_numbers", "fractions", "maori_numbers"],
133 "paper_size": ["a4", "letter"],
134 }
135
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/csunplugged/resources/views/sorting_network_cards.py b/csunplugged/resources/views/sorting_network_cards.py
--- a/csunplugged/resources/views/sorting_network_cards.py
+++ b/csunplugged/resources/views/sorting_network_cards.py
@@ -47,25 +47,24 @@
font_size = 900
font_path = "static/fonts/NotoSans-Regular.ttf"
text = [u"\u00bd", u"\u2153", u"\u2154", u"\u215c", u"\u00be", u"\u215d"]
- else:
+ elif card_type == "maori_numbers":
font_size = 300
text = [
- "tahi",
- "rua",
- "toru",
- "whā",
- "rima",
- "ono",
- "whitu",
- "waru",
- "iwa",
- "tekau",
- "tekau mā tahi",
- "tekau mā waru",
- "tekau mā toru",
- "tekau mā whā",
- "rua tekau",
- "rua tekau mā ono",
+ "tahi", "rua", "toru", "whā", "rima", "ono", "whitu", "waru",
+ "iwa", "tekau", "tekau mā tahi", "tekau mā waru", "tekau mā toru",
+ "tekau mā whā", "rua tekau", "rua tekau mā ono"
+ ]
+ elif card_type == "words":
+ font_size = 500
+ text = ["crocodile", "crochet", "kiwi", "weka", "kiwi", "kiwano"]
+ elif card_type == "letters":
+ font_size = 800
+ text = ["L", "O", "N", "K", "E", "D", "S", "P", "G", "B", "I", "Y"]
+ else:
+ font_size = 500
+ text = [
+ "whero", "kākāriki", "kiwikiwi", "karaka",
+ "kōwhai", "pango", "māwhero", "mā"
]
font = ImageFont.truetype(font_path, font_size)
@@ -129,6 +128,9 @@
All valid options (dict).
"""
return {
- "type": ["small_numbers", "large_numbers", "fractions", "maori_numbers"],
+ "type": [
+ "small_numbers", "large_numbers", "fractions", "maori_numbers",
+ "words", "letters", "maori_colours"
+ ],
"paper_size": ["a4", "letter"],
}
| {"golden_diff": "diff --git a/csunplugged/resources/views/sorting_network_cards.py b/csunplugged/resources/views/sorting_network_cards.py\n--- a/csunplugged/resources/views/sorting_network_cards.py\n+++ b/csunplugged/resources/views/sorting_network_cards.py\n@@ -47,25 +47,24 @@\n font_size = 900\n font_path = \"static/fonts/NotoSans-Regular.ttf\"\n text = [u\"\\u00bd\", u\"\\u2153\", u\"\\u2154\", u\"\\u215c\", u\"\\u00be\", u\"\\u215d\"]\n- else:\n+ elif card_type == \"maori_numbers\":\n font_size = 300\n text = [\n- \"tahi\",\n- \"rua\",\n- \"toru\",\n- \"wh\u0101\",\n- \"rima\",\n- \"ono\",\n- \"whitu\",\n- \"waru\",\n- \"iwa\",\n- \"tekau\",\n- \"tekau m\u0101 tahi\",\n- \"tekau m\u0101 waru\",\n- \"tekau m\u0101 toru\",\n- \"tekau m\u0101 wh\u0101\",\n- \"rua tekau\",\n- \"rua tekau m\u0101 ono\",\n+ \"tahi\", \"rua\", \"toru\", \"wh\u0101\", \"rima\", \"ono\", \"whitu\", \"waru\",\n+ \"iwa\", \"tekau\", \"tekau m\u0101 tahi\", \"tekau m\u0101 waru\", \"tekau m\u0101 toru\",\n+ \"tekau m\u0101 wh\u0101\", \"rua tekau\", \"rua tekau m\u0101 ono\"\n+ ]\n+ elif card_type == \"words\":\n+ font_size = 500\n+ text = [\"crocodile\", \"crochet\", \"kiwi\", \"weka\", \"kiwi\", \"kiwano\"]\n+ elif card_type == \"letters\":\n+ font_size = 800\n+ text = [\"L\", \"O\", \"N\", \"K\", \"E\", \"D\", \"S\", \"P\", \"G\", \"B\", \"I\", \"Y\"]\n+ else:\n+ font_size = 500\n+ text = [\n+ \"whero\", \"k\u0101k\u0101riki\", \"kiwikiwi\", \"karaka\",\n+ \"k\u014dwhai\", \"pango\", \"m\u0101whero\", \"m\u0101\"\n ]\n \n font = ImageFont.truetype(font_path, font_size)\n@@ -129,6 +128,9 @@\n All valid options (dict).\n \"\"\"\n return {\n- \"type\": [\"small_numbers\", \"large_numbers\", \"fractions\", \"maori_numbers\"],\n+ \"type\": [\n+ \"small_numbers\", \"large_numbers\", \"fractions\", \"maori_numbers\",\n+ \"words\", \"letters\", \"maori_colours\"\n+ ],\n \"paper_size\": [\"a4\", \"letter\"],\n }\n", "issue": "Add sorting networks lessons\n- [ ] Lesson 2 (8-10) - needs generated resources\r\n- [ ] Lesson 1 (11-14)\r\n- [ ] Lesson 2 (11-14)\n", "before_files": [{"content": "\"\"\"Module for generating Sorting Network Cards resource.\"\"\"\n\nfrom random import sample\nfrom PIL import Image, ImageDraw, ImageFont\nfrom utils.retrieve_query_parameter import retrieve_query_parameter\n\n\ndef resource_image(request, resource):\n \"\"\"Create a image for Sorting Network Cards resource.\n\n Args:\n request: HTTP request object.\n resource: Object of resource data.\n\n Returns:\n A list of Pillow image objects.\n \"\"\"\n IMAGE_SIZE_X = 2000\n IMAGE_SIZE_Y = 3000\n LINE_COLOUR = \"#000000\"\n LINE_WIDTH = 3\n font_path = \"static/fonts/PatrickHand-Regular.ttf\"\n\n # Retrieve parameters\n parameter_options = valid_options()\n card_type = retrieve_query_parameter(request, \"type\", parameter_options[\"type\"])\n\n # Create card outlines\n card_outlines = Image.new(\"RGB\", (IMAGE_SIZE_X, IMAGE_SIZE_Y), \"#fff\")\n draw = ImageDraw.Draw(card_outlines)\n for x_coord in range(0, IMAGE_SIZE_X, IMAGE_SIZE_X - LINE_WIDTH):\n draw.line([(x_coord, 0), (x_coord, IMAGE_SIZE_Y)], fill=LINE_COLOUR, width=LINE_WIDTH)\n for y_coord in range(0, IMAGE_SIZE_Y, int(IMAGE_SIZE_Y / 2 - LINE_WIDTH)):\n draw.line([(0, y_coord), (IMAGE_SIZE_X, y_coord)], fill=LINE_COLOUR, width=LINE_WIDTH)\n\n # Prepare text data\n if card_type == \"small_numbers\":\n font_size = 800\n text = [\"1\", \"2\", \"3\", \"4\", \"5\", \"6\"]\n elif card_type == \"large_numbers\":\n font_size = 500\n text = []\n numbers = sample(range(1700000, 2100000), 6)\n 
for number in numbers:\n text.append(\"{:,}\".format(number))\n elif card_type == \"fractions\":\n font_size = 900\n font_path = \"static/fonts/NotoSans-Regular.ttf\"\n text = [u\"\\u00bd\", u\"\\u2153\", u\"\\u2154\", u\"\\u215c\", u\"\\u00be\", u\"\\u215d\"]\n else:\n font_size = 300\n text = [\n \"tahi\",\n \"rua\",\n \"toru\",\n \"wh\u0101\",\n \"rima\",\n \"ono\",\n \"whitu\",\n \"waru\",\n \"iwa\",\n \"tekau\",\n \"tekau m\u0101 tahi\",\n \"tekau m\u0101 waru\",\n \"tekau m\u0101 toru\",\n \"tekau m\u0101 wh\u0101\",\n \"rua tekau\",\n \"rua tekau m\u0101 ono\",\n ]\n\n font = ImageFont.truetype(font_path, font_size)\n card_centers = [\n (IMAGE_SIZE_X / 2, IMAGE_SIZE_Y / 4),\n (IMAGE_SIZE_X / 2, (IMAGE_SIZE_Y / 4) * 3),\n ]\n\n # Add text to cards\n images = []\n for (text_number, text_string) in enumerate(text):\n if text_number % 2 == 0:\n page = card_outlines.copy()\n draw = ImageDraw.Draw(page)\n (x, y) = card_centers[0]\n else:\n (x, y) = card_centers[1]\n\n text_width, text_height = draw.textsize(text_string, font=font)\n coord_x = x - (text_width / 2)\n coord_y = y - (text_height / 1.5)\n draw.text(\n (coord_x, coord_y),\n text_string,\n font=font,\n fill=\"#000\"\n )\n # If text on second card but not last page\n if text_number % 2 == 1 and text_number != len(text) - 1:\n images.append(page)\n images.append(page)\n\n return images\n\n\ndef subtitle(request, resource):\n \"\"\"Return the subtitle string of the resource.\n\n Used after the resource name in the filename, and\n also on the resource image.\n\n Args:\n request: HTTP request object\n resource: Object of resource data.\n\n Returns:\n text for subtitle (string)\n \"\"\"\n return \"{} - {}\".format(\n retrieve_query_parameter(request, \"type\").replace(\"_\", \" \"),\n retrieve_query_parameter(request, \"paper_size\")\n )\n\n\ndef valid_options():\n \"\"\"Provide dictionary of all valid parameters.\n\n This excludes the header text parameter.\n\n Returns:\n All valid options (dict).\n \"\"\"\n return {\n \"type\": [\"small_numbers\", \"large_numbers\", \"fractions\", \"maori_numbers\"],\n \"paper_size\": [\"a4\", \"letter\"],\n }\n", "path": "csunplugged/resources/views/sorting_network_cards.py"}], "after_files": [{"content": "\"\"\"Module for generating Sorting Network Cards resource.\"\"\"\n\nfrom random import sample\nfrom PIL import Image, ImageDraw, ImageFont\nfrom utils.retrieve_query_parameter import retrieve_query_parameter\n\n\ndef resource_image(request, resource):\n \"\"\"Create a image for Sorting Network Cards resource.\n\n Args:\n request: HTTP request object.\n resource: Object of resource data.\n\n Returns:\n A list of Pillow image objects.\n \"\"\"\n IMAGE_SIZE_X = 2000\n IMAGE_SIZE_Y = 3000\n LINE_COLOUR = \"#000000\"\n LINE_WIDTH = 3\n font_path = \"static/fonts/PatrickHand-Regular.ttf\"\n\n # Retrieve parameters\n parameter_options = valid_options()\n card_type = retrieve_query_parameter(request, \"type\", parameter_options[\"type\"])\n\n # Create card outlines\n card_outlines = Image.new(\"RGB\", (IMAGE_SIZE_X, IMAGE_SIZE_Y), \"#fff\")\n draw = ImageDraw.Draw(card_outlines)\n for x_coord in range(0, IMAGE_SIZE_X, IMAGE_SIZE_X - LINE_WIDTH):\n draw.line([(x_coord, 0), (x_coord, IMAGE_SIZE_Y)], fill=LINE_COLOUR, width=LINE_WIDTH)\n for y_coord in range(0, IMAGE_SIZE_Y, int(IMAGE_SIZE_Y / 2 - LINE_WIDTH)):\n draw.line([(0, y_coord), (IMAGE_SIZE_X, y_coord)], fill=LINE_COLOUR, width=LINE_WIDTH)\n\n # Prepare text data\n if card_type == \"small_numbers\":\n font_size = 800\n text = [\"1\", \"2\", \"3\", 
\"4\", \"5\", \"6\"]\n elif card_type == \"large_numbers\":\n font_size = 500\n text = []\n numbers = sample(range(1700000, 2100000), 6)\n for number in numbers:\n text.append(\"{:,}\".format(number))\n elif card_type == \"fractions\":\n font_size = 900\n font_path = \"static/fonts/NotoSans-Regular.ttf\"\n text = [u\"\\u00bd\", u\"\\u2153\", u\"\\u2154\", u\"\\u215c\", u\"\\u00be\", u\"\\u215d\"]\n elif card_type == \"maori_numbers\":\n font_size = 300\n text = [\n \"tahi\", \"rua\", \"toru\", \"wh\u0101\", \"rima\", \"ono\", \"whitu\", \"waru\",\n \"iwa\", \"tekau\", \"tekau m\u0101 tahi\", \"tekau m\u0101 waru\", \"tekau m\u0101 toru\",\n \"tekau m\u0101 wh\u0101\", \"rua tekau\", \"rua tekau m\u0101 ono\"\n ]\n elif card_type == \"words\":\n font_size = 500\n text = [\"crocodile\", \"crochet\", \"kiwi\", \"weka\", \"kiwi\", \"kiwano\"]\n elif card_type == \"letters\":\n font_size = 800\n text = [\"L\", \"O\", \"N\", \"K\", \"E\", \"D\", \"S\", \"P\", \"G\", \"B\", \"I\", \"Y\"]\n else:\n font_size = 500\n text = [\n \"whero\", \"k\u0101k\u0101riki\", \"kiwikiwi\", \"karaka\",\n \"k\u014dwhai\", \"pango\", \"m\u0101whero\", \"m\u0101\"\n ]\n\n font = ImageFont.truetype(font_path, font_size)\n card_centers = [\n (IMAGE_SIZE_X / 2, IMAGE_SIZE_Y / 4),\n (IMAGE_SIZE_X / 2, (IMAGE_SIZE_Y / 4) * 3),\n ]\n\n # Add text to cards\n images = []\n for (text_number, text_string) in enumerate(text):\n if text_number % 2 == 0:\n page = card_outlines.copy()\n draw = ImageDraw.Draw(page)\n (x, y) = card_centers[0]\n else:\n (x, y) = card_centers[1]\n\n text_width, text_height = draw.textsize(text_string, font=font)\n coord_x = x - (text_width / 2)\n coord_y = y - (text_height / 1.5)\n draw.text(\n (coord_x, coord_y),\n text_string,\n font=font,\n fill=\"#000\"\n )\n # If text on second card but not last page\n if text_number % 2 == 1 and text_number != len(text) - 1:\n images.append(page)\n images.append(page)\n\n return images\n\n\ndef subtitle(request, resource):\n \"\"\"Return the subtitle string of the resource.\n\n Used after the resource name in the filename, and\n also on the resource image.\n\n Args:\n request: HTTP request object\n resource: Object of resource data.\n\n Returns:\n text for subtitle (string)\n \"\"\"\n return \"{} - {}\".format(\n retrieve_query_parameter(request, \"type\").replace(\"_\", \" \"),\n retrieve_query_parameter(request, \"paper_size\")\n )\n\n\ndef valid_options():\n \"\"\"Provide dictionary of all valid parameters.\n\n This excludes the header text parameter.\n\n Returns:\n All valid options (dict).\n \"\"\"\n return {\n \"type\": [\n \"small_numbers\", \"large_numbers\", \"fractions\", \"maori_numbers\",\n \"words\", \"letters\", \"maori_colours\"\n ],\n \"paper_size\": [\"a4\", \"letter\"],\n }\n", "path": "csunplugged/resources/views/sorting_network_cards.py"}]} | 1,676 | 676 |
gh_patches_debug_38932 | rasdani/github-patches | git_diff | ansible-collections__community.general-7352 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
cargo fails to find standard homedir path for cargo binary
### Summary
the cargo module fails with -
`TASK [Install tokei Rust package] ***************************************************************************************************************************
fatal: [hostname]: FAILED! => {"changed": false, "msg": "Failed to find required executable \"cargo\" in paths: /usr/local/bin:/usr/bin:/bin:/usr/games:/sbin:/usr/sbin:/usr/local/sbin"}`
cargo executable is located in default rustup install location `/home/username/.cargo/bin/`
### Issue Type
Bug Report
### Component Name
cargo
### Ansible Version
```console (paste below)
$ ansible --version
ansible [core 2.12.3]
config file = /home/username/foo/ansible.cfg
configured module search path = ['/home/username/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']
ansible python module location = /usr/lib/python3/dist-packages/ansible
ansible collection location = /home/username/.ansible/collections:/usr/share/ansible/collections
executable location = /usr/bin/ansible
python version = 3.9.10 (main, Feb 22 2022, 13:54:07) [GCC 11.2.0]
jinja version = 3.0.3
libyaml = True
```
### Community.general Version
```console (paste below)
$ ansible-galaxy collection list community.general
# /usr/lib/python3/dist-packages/ansible_collections
Collection Version
----------------- -------
community.general 4.5.0
```
### Configuration
```console (paste below)
$ ansible-config dump --only-changed
DEFAULT_HOST_LIST(/home/username/foo/ansible.cfg) = ['/home/username/foo/HOSTS']
```
### OS / Environment
Debian Bookworm targeting Bookworm
### Steps to Reproduce
<!--- Paste example playbooks or commands between quotes below -->
```yaml (paste below)
```
Run this module against home directory with the default rustup install location.
### Expected Results
cargo installs the package
### Actual Results
```console (paste below)
TASK [Install tokei Rust package] ***************************************************************************************************************************
fatal: [hostname]: FAILED! => {"changed": false, "msg": "Failed to find required executable \"cargo\" in paths: /usr/local/bin:/usr/bin:/bin:/usr/games:/sbin:/usr/sbin:/usr/local/sbin"}
```
### Code of Conduct
- [X] I agree to follow the Ansible Code of Conduct
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `plugins/modules/cargo.py`
Content:
```
1 #!/usr/bin/python
2 # -*- coding: utf-8 -*-
3 # Copyright (c) 2021 Radek Sprta <[email protected]>
4 # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
5 # SPDX-License-Identifier: GPL-3.0-or-later
6
7 from __future__ import absolute_import, division, print_function
8
9 __metaclass__ = type
10
11
12 DOCUMENTATION = r"""
13 ---
14 module: cargo
15 short_description: Manage Rust packages with cargo
16 version_added: 4.3.0
17 description:
18 - Manage Rust packages with cargo.
19 author: "Radek Sprta (@radek-sprta)"
20 extends_documentation_fragment:
21 - community.general.attributes
22 attributes:
23 check_mode:
24 support: full
25 diff_mode:
26 support: none
27 options:
28 name:
29 description:
30 - The name of a Rust package to install.
31 type: list
32 elements: str
33 required: true
34 path:
35 description:
36 ->
37 The base path where to install the Rust packages. Cargo automatically appends
38 V(/bin). In other words, V(/usr/local) will become V(/usr/local/bin).
39 type: path
40 version:
41 description:
42 ->
43 The version to install. If O(name) contains multiple values, the module will
44 try to install all of them in this version.
45 type: str
46 required: false
47 locked:
48 description:
49 - Install with locked dependencies.
50 - This is only used when installing packages.
51 required: false
52 type: bool
53 default: false
54 version_added: 7.5.0
55 state:
56 description:
57 - The state of the Rust package.
58 required: false
59 type: str
60 default: present
61 choices: [ "present", "absent", "latest" ]
62 requirements:
63 - cargo installed in bin path (recommended /usr/local/bin)
64 """
65
66 EXAMPLES = r"""
67 - name: Install "ludusavi" Rust package
68 community.general.cargo:
69 name: ludusavi
70
71 - name: Install "ludusavi" Rust package with locked dependencies
72 community.general.cargo:
73 name: ludusavi
74 locked: true
75
76 - name: Install "ludusavi" Rust package in version 0.10.0
77 community.general.cargo:
78 name: ludusavi
79 version: '0.10.0'
80
81 - name: Install "ludusavi" Rust package to global location
82 community.general.cargo:
83 name: ludusavi
84 path: /usr/local
85
86 - name: Remove "ludusavi" Rust package
87 community.general.cargo:
88 name: ludusavi
89 state: absent
90
91 - name: Update "ludusavi" Rust package its latest version
92 community.general.cargo:
93 name: ludusavi
94 state: latest
95 """
96
97 import os
98 import re
99
100 from ansible.module_utils.basic import AnsibleModule
101
102
103 class Cargo(object):
104 def __init__(self, module, **kwargs):
105 self.module = module
106 self.name = kwargs["name"]
107 self.path = kwargs["path"]
108 self.state = kwargs["state"]
109 self.version = kwargs["version"]
110 self.locked = kwargs["locked"]
111
112 self.executable = [module.get_bin_path("cargo", True)]
113
114 @property
115 def path(self):
116 return self._path
117
118 @path.setter
119 def path(self, path):
120 if path is not None and not os.path.isdir(path):
121 self.module.fail_json(msg="Path %s is not a directory" % path)
122 self._path = path
123
124 def _exec(
125 self, args, run_in_check_mode=False, check_rc=True, add_package_name=True
126 ):
127 if not self.module.check_mode or (self.module.check_mode and run_in_check_mode):
128 cmd = self.executable + args
129 rc, out, err = self.module.run_command(cmd, check_rc=check_rc)
130 return out, err
131 return "", ""
132
133 def get_installed(self):
134 cmd = ["install", "--list"]
135 data, dummy = self._exec(cmd, True, False, False)
136
137 package_regex = re.compile(r"^([\w\-]+) v(.+):$")
138 installed = {}
139 for line in data.splitlines():
140 package_info = package_regex.match(line)
141 if package_info:
142 installed[package_info.group(1)] = package_info.group(2)
143
144 return installed
145
146 def install(self, packages=None):
147 cmd = ["install"]
148 cmd.extend(packages or self.name)
149 if self.locked:
150 cmd.append("--locked")
151 if self.path:
152 cmd.append("--root")
153 cmd.append(self.path)
154 if self.version:
155 cmd.append("--version")
156 cmd.append(self.version)
157 return self._exec(cmd)
158
159 def is_outdated(self, name):
160 installed_version = self.get_installed().get(name)
161
162 cmd = ["search", name, "--limit", "1"]
163 data, dummy = self._exec(cmd, True, False, False)
164
165 match = re.search(r'"(.+)"', data)
166 if match:
167 latest_version = match.group(1)
168
169 return installed_version != latest_version
170
171 def uninstall(self, packages=None):
172 cmd = ["uninstall"]
173 cmd.extend(packages or self.name)
174 return self._exec(cmd)
175
176
177 def main():
178 arg_spec = dict(
179 name=dict(required=True, type="list", elements="str"),
180 path=dict(default=None, type="path"),
181 state=dict(default="present", choices=["present", "absent", "latest"]),
182 version=dict(default=None, type="str"),
183 locked=dict(default=False, type="bool"),
184 )
185 module = AnsibleModule(argument_spec=arg_spec, supports_check_mode=True)
186
187 name = module.params["name"]
188 path = module.params["path"]
189 state = module.params["state"]
190 version = module.params["version"]
191 locked = module.params["locked"]
192
193 if not name:
194 module.fail_json(msg="Package name must be specified")
195
196 # Set LANG env since we parse stdout
197 module.run_command_environ_update = dict(
198 LANG="C", LC_ALL="C", LC_MESSAGES="C", LC_CTYPE="C"
199 )
200
201 cargo = Cargo(module, name=name, path=path, state=state, version=version, locked=locked)
202 changed, out, err = False, None, None
203 installed_packages = cargo.get_installed()
204 if state == "present":
205 to_install = [
206 n
207 for n in name
208 if (n not in installed_packages)
209 or (version and version != installed_packages[n])
210 ]
211 if to_install:
212 changed = True
213 out, err = cargo.install(to_install)
214 elif state == "latest":
215 to_update = [
216 n for n in name if n not in installed_packages or cargo.is_outdated(n)
217 ]
218 if to_update:
219 changed = True
220 out, err = cargo.install(to_update)
221 else: # absent
222 to_uninstall = [n for n in name if n in installed_packages]
223 if to_uninstall:
224 changed = True
225 out, err = cargo.uninstall(to_uninstall)
226
227 module.exit_json(changed=changed, stdout=out, stderr=err)
228
229
230 if __name__ == "__main__":
231 main()
232
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/plugins/modules/cargo.py b/plugins/modules/cargo.py
--- a/plugins/modules/cargo.py
+++ b/plugins/modules/cargo.py
@@ -25,6 +25,12 @@
diff_mode:
support: none
options:
+ executable:
+ description:
+ - Path to the C(cargo) installed in the system.
+ - If not specified, the module will look C(cargo) in E(PATH).
+ type: path
+ version_added: 7.5.0
name:
description:
- The name of a Rust package to install.
@@ -60,7 +66,7 @@
default: present
choices: [ "present", "absent", "latest" ]
requirements:
- - cargo installed in bin path (recommended /usr/local/bin)
+ - cargo installed
"""
EXAMPLES = r"""
@@ -103,14 +109,13 @@
class Cargo(object):
def __init__(self, module, **kwargs):
self.module = module
+ self.executable = [kwargs["executable"] or module.get_bin_path("cargo", True)]
self.name = kwargs["name"]
self.path = kwargs["path"]
self.state = kwargs["state"]
self.version = kwargs["version"]
self.locked = kwargs["locked"]
- self.executable = [module.get_bin_path("cargo", True)]
-
@property
def path(self):
return self._path
@@ -176,6 +181,7 @@
def main():
arg_spec = dict(
+ executable=dict(default=None, type="path"),
name=dict(required=True, type="list", elements="str"),
path=dict(default=None, type="path"),
state=dict(default="present", choices=["present", "absent", "latest"]),
@@ -185,10 +191,8 @@
module = AnsibleModule(argument_spec=arg_spec, supports_check_mode=True)
name = module.params["name"]
- path = module.params["path"]
state = module.params["state"]
version = module.params["version"]
- locked = module.params["locked"]
if not name:
module.fail_json(msg="Package name must be specified")
@@ -198,7 +202,7 @@
LANG="C", LC_ALL="C", LC_MESSAGES="C", LC_CTYPE="C"
)
- cargo = Cargo(module, name=name, path=path, state=state, version=version, locked=locked)
+ cargo = Cargo(module, **module.params)
changed, out, err = False, None, None
installed_packages = cargo.get_installed()
if state == "present":
| {"golden_diff": "diff --git a/plugins/modules/cargo.py b/plugins/modules/cargo.py\n--- a/plugins/modules/cargo.py\n+++ b/plugins/modules/cargo.py\n@@ -25,6 +25,12 @@\n diff_mode:\n support: none\n options:\n+ executable:\n+ description:\n+ - Path to the C(cargo) installed in the system.\n+ - If not specified, the module will look C(cargo) in E(PATH).\n+ type: path\n+ version_added: 7.5.0\n name:\n description:\n - The name of a Rust package to install.\n@@ -60,7 +66,7 @@\n default: present\n choices: [ \"present\", \"absent\", \"latest\" ]\n requirements:\n- - cargo installed in bin path (recommended /usr/local/bin)\n+ - cargo installed\n \"\"\"\n \n EXAMPLES = r\"\"\"\n@@ -103,14 +109,13 @@\n class Cargo(object):\n def __init__(self, module, **kwargs):\n self.module = module\n+ self.executable = [kwargs[\"executable\"] or module.get_bin_path(\"cargo\", True)]\n self.name = kwargs[\"name\"]\n self.path = kwargs[\"path\"]\n self.state = kwargs[\"state\"]\n self.version = kwargs[\"version\"]\n self.locked = kwargs[\"locked\"]\n \n- self.executable = [module.get_bin_path(\"cargo\", True)]\n-\n @property\n def path(self):\n return self._path\n@@ -176,6 +181,7 @@\n \n def main():\n arg_spec = dict(\n+ executable=dict(default=None, type=\"path\"),\n name=dict(required=True, type=\"list\", elements=\"str\"),\n path=dict(default=None, type=\"path\"),\n state=dict(default=\"present\", choices=[\"present\", \"absent\", \"latest\"]),\n@@ -185,10 +191,8 @@\n module = AnsibleModule(argument_spec=arg_spec, supports_check_mode=True)\n \n name = module.params[\"name\"]\n- path = module.params[\"path\"]\n state = module.params[\"state\"]\n version = module.params[\"version\"]\n- locked = module.params[\"locked\"]\n \n if not name:\n module.fail_json(msg=\"Package name must be specified\")\n@@ -198,7 +202,7 @@\n LANG=\"C\", LC_ALL=\"C\", LC_MESSAGES=\"C\", LC_CTYPE=\"C\"\n )\n \n- cargo = Cargo(module, name=name, path=path, state=state, version=version, locked=locked)\n+ cargo = Cargo(module, **module.params)\n changed, out, err = False, None, None\n installed_packages = cargo.get_installed()\n if state == \"present\":\n", "issue": "cargo fails to find standard homedir path for cargo binary\n### Summary\r\n\r\nthe cargo module fails with -\r\n\r\n`TASK [Install tokei Rust package] ***************************************************************************************************************************\r\nfatal: [hostname]: FAILED! 
=> {\"changed\": false, \"msg\": \"Failed to find required executable \\\"cargo\\\" in paths: /usr/local/bin:/usr/bin:/bin:/usr/games:/sbin:/usr/sbin:/usr/local/sbin\"}`\r\n\r\ncargo executable is located in default rustup install location `/home/username/.cargo/bin/` \r\n\r\n### Issue Type\r\n\r\nBug Report\r\n\r\n### Component Name\r\n\r\ncargo\r\n\r\n### Ansible Version\r\n\r\n```console (paste below)\r\n$ ansible --version\r\nansible [core 2.12.3]\r\n config file = /home/username/foo/ansible.cfg\r\n configured module search path = ['/home/username/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']\r\n ansible python module location = /usr/lib/python3/dist-packages/ansible\r\n ansible collection location = /home/username/.ansible/collections:/usr/share/ansible/collections\r\n executable location = /usr/bin/ansible\r\n python version = 3.9.10 (main, Feb 22 2022, 13:54:07) [GCC 11.2.0]\r\n jinja version = 3.0.3\r\n libyaml = True\r\n```\r\n\r\n\r\n### Community.general Version\r\n\r\n```console (paste below)\r\n$ ansible-galaxy collection list community.general\r\n\r\n# /usr/lib/python3/dist-packages/ansible_collections\r\nCollection Version\r\n----------------- -------\r\ncommunity.general 4.5.0 \r\n```\r\n\r\n\r\n### Configuration\r\n\r\n```console (paste below)\r\n$ ansible-config dump --only-changed\r\nDEFAULT_HOST_LIST(/home/username/foo/ansible.cfg) = ['/home/username/foo/HOSTS']\r\n```\r\n\r\n\r\n### OS / Environment\r\n\r\nDebian Bookworm targeting Bookworm\r\n\r\n### Steps to Reproduce\r\n\r\n<!--- Paste example playbooks or commands between quotes below -->\r\n```yaml (paste below)\r\n\r\n```\r\nRun this module against home directory with the default rustup install location.\r\n\r\n### Expected Results\r\n\r\ncargo installs the package\r\n\r\n### Actual Results\r\n\r\n```console (paste below)\r\nTASK [Install tokei Rust package] ***************************************************************************************************************************\r\nfatal: [hostname]: FAILED! => {\"changed\": false, \"msg\": \"Failed to find required executable \\\"cargo\\\" in paths: /usr/local/bin:/usr/bin:/bin:/usr/games:/sbin:/usr/sbin:/usr/local/sbin\"}\r\n```\r\n\r\n\r\n### Code of Conduct\r\n\r\n- [X] I agree to follow the Ansible Code of Conduct\n", "before_files": [{"content": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n# Copyright (c) 2021 Radek Sprta <[email protected]>\n# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)\n# SPDX-License-Identifier: GPL-3.0-or-later\n\nfrom __future__ import absolute_import, division, print_function\n\n__metaclass__ = type\n\n\nDOCUMENTATION = r\"\"\"\n---\nmodule: cargo\nshort_description: Manage Rust packages with cargo\nversion_added: 4.3.0\ndescription:\n - Manage Rust packages with cargo.\nauthor: \"Radek Sprta (@radek-sprta)\"\nextends_documentation_fragment:\n - community.general.attributes\nattributes:\n check_mode:\n support: full\n diff_mode:\n support: none\noptions:\n name:\n description:\n - The name of a Rust package to install.\n type: list\n elements: str\n required: true\n path:\n description:\n ->\n The base path where to install the Rust packages. Cargo automatically appends\n V(/bin). In other words, V(/usr/local) will become V(/usr/local/bin).\n type: path\n version:\n description:\n ->\n The version to install. 
If O(name) contains multiple values, the module will\n try to install all of them in this version.\n type: str\n required: false\n locked:\n description:\n - Install with locked dependencies.\n - This is only used when installing packages.\n required: false\n type: bool\n default: false\n version_added: 7.5.0\n state:\n description:\n - The state of the Rust package.\n required: false\n type: str\n default: present\n choices: [ \"present\", \"absent\", \"latest\" ]\nrequirements:\n - cargo installed in bin path (recommended /usr/local/bin)\n\"\"\"\n\nEXAMPLES = r\"\"\"\n- name: Install \"ludusavi\" Rust package\n community.general.cargo:\n name: ludusavi\n\n- name: Install \"ludusavi\" Rust package with locked dependencies\n community.general.cargo:\n name: ludusavi\n locked: true\n\n- name: Install \"ludusavi\" Rust package in version 0.10.0\n community.general.cargo:\n name: ludusavi\n version: '0.10.0'\n\n- name: Install \"ludusavi\" Rust package to global location\n community.general.cargo:\n name: ludusavi\n path: /usr/local\n\n- name: Remove \"ludusavi\" Rust package\n community.general.cargo:\n name: ludusavi\n state: absent\n\n- name: Update \"ludusavi\" Rust package its latest version\n community.general.cargo:\n name: ludusavi\n state: latest\n\"\"\"\n\nimport os\nimport re\n\nfrom ansible.module_utils.basic import AnsibleModule\n\n\nclass Cargo(object):\n def __init__(self, module, **kwargs):\n self.module = module\n self.name = kwargs[\"name\"]\n self.path = kwargs[\"path\"]\n self.state = kwargs[\"state\"]\n self.version = kwargs[\"version\"]\n self.locked = kwargs[\"locked\"]\n\n self.executable = [module.get_bin_path(\"cargo\", True)]\n\n @property\n def path(self):\n return self._path\n\n @path.setter\n def path(self, path):\n if path is not None and not os.path.isdir(path):\n self.module.fail_json(msg=\"Path %s is not a directory\" % path)\n self._path = path\n\n def _exec(\n self, args, run_in_check_mode=False, check_rc=True, add_package_name=True\n ):\n if not self.module.check_mode or (self.module.check_mode and run_in_check_mode):\n cmd = self.executable + args\n rc, out, err = self.module.run_command(cmd, check_rc=check_rc)\n return out, err\n return \"\", \"\"\n\n def get_installed(self):\n cmd = [\"install\", \"--list\"]\n data, dummy = self._exec(cmd, True, False, False)\n\n package_regex = re.compile(r\"^([\\w\\-]+) v(.+):$\")\n installed = {}\n for line in data.splitlines():\n package_info = package_regex.match(line)\n if package_info:\n installed[package_info.group(1)] = package_info.group(2)\n\n return installed\n\n def install(self, packages=None):\n cmd = [\"install\"]\n cmd.extend(packages or self.name)\n if self.locked:\n cmd.append(\"--locked\")\n if self.path:\n cmd.append(\"--root\")\n cmd.append(self.path)\n if self.version:\n cmd.append(\"--version\")\n cmd.append(self.version)\n return self._exec(cmd)\n\n def is_outdated(self, name):\n installed_version = self.get_installed().get(name)\n\n cmd = [\"search\", name, \"--limit\", \"1\"]\n data, dummy = self._exec(cmd, True, False, False)\n\n match = re.search(r'\"(.+)\"', data)\n if match:\n latest_version = match.group(1)\n\n return installed_version != latest_version\n\n def uninstall(self, packages=None):\n cmd = [\"uninstall\"]\n cmd.extend(packages or self.name)\n return self._exec(cmd)\n\n\ndef main():\n arg_spec = dict(\n name=dict(required=True, type=\"list\", elements=\"str\"),\n path=dict(default=None, type=\"path\"),\n state=dict(default=\"present\", choices=[\"present\", \"absent\", 
\"latest\"]),\n version=dict(default=None, type=\"str\"),\n locked=dict(default=False, type=\"bool\"),\n )\n module = AnsibleModule(argument_spec=arg_spec, supports_check_mode=True)\n\n name = module.params[\"name\"]\n path = module.params[\"path\"]\n state = module.params[\"state\"]\n version = module.params[\"version\"]\n locked = module.params[\"locked\"]\n\n if not name:\n module.fail_json(msg=\"Package name must be specified\")\n\n # Set LANG env since we parse stdout\n module.run_command_environ_update = dict(\n LANG=\"C\", LC_ALL=\"C\", LC_MESSAGES=\"C\", LC_CTYPE=\"C\"\n )\n\n cargo = Cargo(module, name=name, path=path, state=state, version=version, locked=locked)\n changed, out, err = False, None, None\n installed_packages = cargo.get_installed()\n if state == \"present\":\n to_install = [\n n\n for n in name\n if (n not in installed_packages)\n or (version and version != installed_packages[n])\n ]\n if to_install:\n changed = True\n out, err = cargo.install(to_install)\n elif state == \"latest\":\n to_update = [\n n for n in name if n not in installed_packages or cargo.is_outdated(n)\n ]\n if to_update:\n changed = True\n out, err = cargo.install(to_update)\n else: # absent\n to_uninstall = [n for n in name if n in installed_packages]\n if to_uninstall:\n changed = True\n out, err = cargo.uninstall(to_uninstall)\n\n module.exit_json(changed=changed, stdout=out, stderr=err)\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "plugins/modules/cargo.py"}], "after_files": [{"content": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n# Copyright (c) 2021 Radek Sprta <[email protected]>\n# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)\n# SPDX-License-Identifier: GPL-3.0-or-later\n\nfrom __future__ import absolute_import, division, print_function\n\n__metaclass__ = type\n\n\nDOCUMENTATION = r\"\"\"\n---\nmodule: cargo\nshort_description: Manage Rust packages with cargo\nversion_added: 4.3.0\ndescription:\n - Manage Rust packages with cargo.\nauthor: \"Radek Sprta (@radek-sprta)\"\nextends_documentation_fragment:\n - community.general.attributes\nattributes:\n check_mode:\n support: full\n diff_mode:\n support: none\noptions:\n executable:\n description:\n - Path to the C(cargo) installed in the system.\n - If not specified, the module will look C(cargo) in E(PATH).\n type: path\n version_added: 7.5.0\n name:\n description:\n - The name of a Rust package to install.\n type: list\n elements: str\n required: true\n path:\n description:\n ->\n The base path where to install the Rust packages. Cargo automatically appends\n V(/bin). In other words, V(/usr/local) will become V(/usr/local/bin).\n type: path\n version:\n description:\n ->\n The version to install. 
If O(name) contains multiple values, the module will\n try to install all of them in this version.\n type: str\n required: false\n locked:\n description:\n - Install with locked dependencies.\n - This is only used when installing packages.\n required: false\n type: bool\n default: false\n version_added: 7.5.0\n state:\n description:\n - The state of the Rust package.\n required: false\n type: str\n default: present\n choices: [ \"present\", \"absent\", \"latest\" ]\nrequirements:\n - cargo installed\n\"\"\"\n\nEXAMPLES = r\"\"\"\n- name: Install \"ludusavi\" Rust package\n community.general.cargo:\n name: ludusavi\n\n- name: Install \"ludusavi\" Rust package with locked dependencies\n community.general.cargo:\n name: ludusavi\n locked: true\n\n- name: Install \"ludusavi\" Rust package in version 0.10.0\n community.general.cargo:\n name: ludusavi\n version: '0.10.0'\n\n- name: Install \"ludusavi\" Rust package to global location\n community.general.cargo:\n name: ludusavi\n path: /usr/local\n\n- name: Remove \"ludusavi\" Rust package\n community.general.cargo:\n name: ludusavi\n state: absent\n\n- name: Update \"ludusavi\" Rust package its latest version\n community.general.cargo:\n name: ludusavi\n state: latest\n\"\"\"\n\nimport os\nimport re\n\nfrom ansible.module_utils.basic import AnsibleModule\n\n\nclass Cargo(object):\n def __init__(self, module, **kwargs):\n self.module = module\n self.executable = [kwargs[\"executable\"] or module.get_bin_path(\"cargo\", True)]\n self.name = kwargs[\"name\"]\n self.path = kwargs[\"path\"]\n self.state = kwargs[\"state\"]\n self.version = kwargs[\"version\"]\n self.locked = kwargs[\"locked\"]\n\n @property\n def path(self):\n return self._path\n\n @path.setter\n def path(self, path):\n if path is not None and not os.path.isdir(path):\n self.module.fail_json(msg=\"Path %s is not a directory\" % path)\n self._path = path\n\n def _exec(\n self, args, run_in_check_mode=False, check_rc=True, add_package_name=True\n ):\n if not self.module.check_mode or (self.module.check_mode and run_in_check_mode):\n cmd = self.executable + args\n rc, out, err = self.module.run_command(cmd, check_rc=check_rc)\n return out, err\n return \"\", \"\"\n\n def get_installed(self):\n cmd = [\"install\", \"--list\"]\n data, dummy = self._exec(cmd, True, False, False)\n\n package_regex = re.compile(r\"^([\\w\\-]+) v(.+):$\")\n installed = {}\n for line in data.splitlines():\n package_info = package_regex.match(line)\n if package_info:\n installed[package_info.group(1)] = package_info.group(2)\n\n return installed\n\n def install(self, packages=None):\n cmd = [\"install\"]\n cmd.extend(packages or self.name)\n if self.locked:\n cmd.append(\"--locked\")\n if self.path:\n cmd.append(\"--root\")\n cmd.append(self.path)\n if self.version:\n cmd.append(\"--version\")\n cmd.append(self.version)\n return self._exec(cmd)\n\n def is_outdated(self, name):\n installed_version = self.get_installed().get(name)\n\n cmd = [\"search\", name, \"--limit\", \"1\"]\n data, dummy = self._exec(cmd, True, False, False)\n\n match = re.search(r'\"(.+)\"', data)\n if match:\n latest_version = match.group(1)\n\n return installed_version != latest_version\n\n def uninstall(self, packages=None):\n cmd = [\"uninstall\"]\n cmd.extend(packages or self.name)\n return self._exec(cmd)\n\n\ndef main():\n arg_spec = dict(\n executable=dict(default=None, type=\"path\"),\n name=dict(required=True, type=\"list\", elements=\"str\"),\n path=dict(default=None, type=\"path\"),\n state=dict(default=\"present\", 
choices=[\"present\", \"absent\", \"latest\"]),\n version=dict(default=None, type=\"str\"),\n locked=dict(default=False, type=\"bool\"),\n )\n module = AnsibleModule(argument_spec=arg_spec, supports_check_mode=True)\n\n name = module.params[\"name\"]\n state = module.params[\"state\"]\n version = module.params[\"version\"]\n\n if not name:\n module.fail_json(msg=\"Package name must be specified\")\n\n # Set LANG env since we parse stdout\n module.run_command_environ_update = dict(\n LANG=\"C\", LC_ALL=\"C\", LC_MESSAGES=\"C\", LC_CTYPE=\"C\"\n )\n\n cargo = Cargo(module, **module.params)\n changed, out, err = False, None, None\n installed_packages = cargo.get_installed()\n if state == \"present\":\n to_install = [\n n\n for n in name\n if (n not in installed_packages)\n or (version and version != installed_packages[n])\n ]\n if to_install:\n changed = True\n out, err = cargo.install(to_install)\n elif state == \"latest\":\n to_update = [\n n for n in name if n not in installed_packages or cargo.is_outdated(n)\n ]\n if to_update:\n changed = True\n out, err = cargo.install(to_update)\n else: # absent\n to_uninstall = [n for n in name if n in installed_packages]\n if to_uninstall:\n changed = True\n out, err = cargo.uninstall(to_uninstall)\n\n module.exit_json(changed=changed, stdout=out, stderr=err)\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "plugins/modules/cargo.py"}]} | 3,035 | 600 |
gh_patches_debug_10063 | rasdani/github-patches | git_diff | kymatio__kymatio-316 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Unused 2D Psi filters
The generated 2D filterbank includes filters that are not used in the computation.
Let's create a scattering transform:
```
>>> import kymatio
>>> s = kymatio.Scattering2D(J=2, shape=(32, 32))
```
Let's look at the shapes of the generated filters:
```
>>> print("\n".join('{' + ', '.join('%s: %s' % (k, getattr(v, 'shape', v)) for k, v in psi.items()) + '}' for psi in s.Psi))
{j: 0, theta: 0, 0: torch.Size([40, 40, 2])}
{j: 0, theta: 1, 0: torch.Size([40, 40, 2])}
{j: 0, theta: 2, 0: torch.Size([40, 40, 2])}
{j: 0, theta: 3, 0: torch.Size([40, 40, 2])}
{j: 0, theta: 4, 0: torch.Size([40, 40, 2])}
{j: 0, theta: 5, 0: torch.Size([40, 40, 2])}
{j: 0, theta: 6, 0: torch.Size([40, 40, 2])}
{j: 0, theta: 7, 0: torch.Size([40, 40, 2])}
{j: 1, theta: 0, 0: torch.Size([40, 40, 2]), 1: torch.Size([20, 20, 2])}
{j: 1, theta: 1, 0: torch.Size([40, 40, 2]), 1: torch.Size([20, 20, 2])}
{j: 1, theta: 2, 0: torch.Size([40, 40, 2]), 1: torch.Size([20, 20, 2])}
{j: 1, theta: 3, 0: torch.Size([40, 40, 2]), 1: torch.Size([20, 20, 2])}
{j: 1, theta: 4, 0: torch.Size([40, 40, 2]), 1: torch.Size([20, 20, 2])}
{j: 1, theta: 5, 0: torch.Size([40, 40, 2]), 1: torch.Size([20, 20, 2])}
{j: 1, theta: 6, 0: torch.Size([40, 40, 2]), 1: torch.Size([20, 20, 2])}
{j: 1, theta: 7, 0: torch.Size([40, 40, 2]), 1: torch.Size([20, 20, 2])}
```
The 20x20 filters are all unused. Demonstration:
```
>>> for p in s.Psi:
... if 1 in p:
... p[1] = 'foo'
...
>>> import torch
>>> x = torch.rand((1, 1, 32, 32)).cuda()
>>> s = s.cuda()
>>> y = s(x)
```
They should probably not be part of the filterbank in the first place?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `kymatio/scattering2d/filter_bank.py`
Content:
```
1 """
2 Authors: Eugene Belilovsky, Edouard Oyallon and Sergey Zagoruyko
3 All rights reserved, 2017.
4 """
5
6 __all__ = ['filter_bank']
7
8 import torch
9 import numpy as np
10 from .utils import fft2
11
12
13 def filter_bank(M, N, J, L=8):
14 """
15 Builds in Fourier the Morlet filters used for the scattering transform.
16 Each single filter is provided as a dictionary with the following keys:
17 * 'j' : scale
18 * 'theta' : angle used
19 Parameters
20 ----------
21 M, N : int
22 spatial support of the input
23 J : int
24 logscale of the scattering
25 L : int, optional
26 number of angles used for the wavelet transform
27 Returns
28 -------
29 filters : list
30 A two list of dictionary containing respectively the low-pass and
31 wavelet filters.
32 Notes
33 -----
34 The design of the filters is optimized for the value L = 8.
35 """
36 filters = {}
37 filters['psi'] = []
38
39 offset_unpad = 0
40 for j in range(J):
41 for theta in range(L):
42 psi = {}
43 psi['j'] = j
44 psi['theta'] = theta
45 psi_signal = morlet_2d(M, N, 0.8 * 2**j,
46 (int(L-L/2-1)-theta) * np.pi / L,
47 3.0 / 4.0 * np.pi /2**j, 4.0/L, offset=offset_unpad)
48 psi_signal_fourier = fft2(psi_signal)
49 for res in range(j + 1):
50 psi_signal_fourier_res = periodize_filter_fft(
51 psi_signal_fourier, res)
52 psi[res] = torch.FloatTensor(
53 np.stack((np.real(psi_signal_fourier_res),
54 np.imag(psi_signal_fourier_res)), axis=2))
55 # Normalization to avoid doing it with the FFT.
56 psi[res].div_(M*N// 2**(2*j))
57 filters['psi'].append(psi)
58
59 filters['phi'] = {}
60 phi_signal = gabor_2d(M, N, 0.8 * 2**(J-1), 0, 0, offset=offset_unpad)
61 phi_signal_fourier = fft2(phi_signal)
62 filters['phi']['j'] = J
63 for res in range(J):
64 phi_signal_fourier_res = periodize_filter_fft(phi_signal_fourier, res)
65 filters['phi'][res] = torch.FloatTensor(np.stack(
66 (np.real(phi_signal_fourier_res), np.imag(phi_signal_fourier_res)),
67 axis=2))
68 filters['phi'][res].div_(M*N // 2 ** (2 * J))
69
70 return filters
71
72
73 def periodize_filter_fft(x, res):
74 """
75 Parameters
76 ----------
77 x : numpy array
78 signal to periodize in Fourier
79 res :
80 resolution to which the signal is cropped.
81
82 Returns
83 -------
84 crop : numpy array
85 It returns a crop version of the filter, assuming that
86 the convolutions will be done via compactly supported signals.
87 """
88 M = x.shape[0]
89 N = x.shape[1]
90
91 crop = np.zeros((M // 2 ** res, N // 2 ** res), np.complex64)
92
93 mask = np.ones(x.shape, np.float32)
94 len_x = int(M * (1 - 2 ** (-res)))
95 start_x = int(M * 2 ** (-res - 1))
96 len_y = int(N * (1 - 2 ** (-res)))
97 start_y = int(N * 2 ** (-res - 1))
98 mask[start_x:start_x + len_x,:] = 0
99 mask[:, start_y:start_y + len_y] = 0
100 x = np.multiply(x,mask)
101
102 for k in range(int(M / 2 ** res)):
103 for l in range(int(N / 2 ** res)):
104 for i in range(int(2 ** res)):
105 for j in range(int(2 ** res)):
106 crop[k, l] += x[k + i * int(M / 2 ** res), l + j * int(N / 2 ** res)]
107
108 return crop
109
110
111 def morlet_2d(M, N, sigma, theta, xi, slant=0.5, offset=0, fft_shift=False):
112 """
113 Computes a 2D Morlet filter.
114 A Morlet filter is the sum of a Gabor filter and a low-pass filter
115 to ensure that the sum has exactly zero mean in the temporal domain.
116 It is defined by the following formula in space:
117 psi(u) = g_{sigma}(u) (e^(i xi^T u) - beta)
118 where g_{sigma} is a Gaussian envelope, xi is a frequency and beta is
119 the cancelling parameter.
120
121 Parameters
122 ----------
123 M, N : int
124 spatial sizes
125 sigma : float
126 bandwidth parameter
127 xi : float
128 central frequency (in [0, 1])
129 theta : float
130 angle in [0, pi]
131 slant : float, optional
132 parameter which guides the elipsoidal shape of the morlet
133 offset : int, optional
134 offset by which the signal starts
135 fft_shift : boolean
136 if true, shift the signal in a numpy style
137
138 Returns
139 -------
140 morlet_fft : ndarray
141 numpy array of size (M, N)
142 """
143 wv = gabor_2d(M, N, sigma, theta, xi, slant, offset, fft_shift)
144 wv_modulus = gabor_2d(M, N, sigma, theta, 0, slant, offset, fft_shift)
145 K = np.sum(wv) / np.sum(wv_modulus)
146
147 mor = wv - K * wv_modulus
148 return mor
149
150
151 def gabor_2d(M, N, sigma, theta, xi, slant=1.0, offset=0, fft_shift=False):
152 """
153 Computes a 2D Gabor filter.
154 A Gabor filter is defined by the following formula in space:
155 psi(u) = g_{sigma}(u) e^(i xi^T u)
156 where g_{sigma} is a Gaussian envelope and xi is a frequency.
157
158 Parameters
159 ----------
160 M, N : int
161 spatial sizes
162 sigma : float
163 bandwidth parameter
164 xi : float
165 central frequency (in [0, 1])
166 theta : float
167 angle in [0, pi]
168 slant : float, optional
169 parameter which guides the elipsoidal shape of the morlet
170 offset : int, optional
171 offset by which the signal starts
172 fft_shift : boolean
173 if true, shift the signal in a numpy style
174
175 Returns
176 -------
177 morlet_fft : ndarray
178 numpy array of size (M, N)
179 """
180 gab = np.zeros((M, N), np.complex64)
181 R = np.array([[np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]], np.float32)
182 R_inv = np.array([[np.cos(theta), np.sin(theta)], [-np.sin(theta), np.cos(theta)]], np.float32)
183 D = np.array([[1, 0], [0, slant * slant]])
184 curv = np.dot(R, np.dot(D, R_inv)) / ( 2 * sigma * sigma)
185
186 for ex in [-2, -1, 0, 1, 2]:
187 for ey in [-2, -1, 0, 1, 2]:
188 [xx, yy] = np.mgrid[offset + ex * M:offset + M + ex * M, offset + ey * N:offset + N + ey * N]
189 arg = -(curv[0, 0] * np.multiply(xx, xx) + (curv[0, 1] + curv[1, 0]) * np.multiply(xx, yy) + curv[
190 1, 1] * np.multiply(yy, yy)) + 1.j * (xx * xi * np.cos(theta) + yy * xi * np.sin(theta))
191 gab = gab + np.exp(arg)
192
193 norm_factor = (2 * 3.1415 * sigma * sigma / slant)
194 gab = gab / norm_factor
195
196 if (fft_shift):
197 gab = np.fft.fftshift(gab, axes=(0, 1))
198 return gab
199
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/kymatio/scattering2d/filter_bank.py b/kymatio/scattering2d/filter_bank.py
--- a/kymatio/scattering2d/filter_bank.py
+++ b/kymatio/scattering2d/filter_bank.py
@@ -46,7 +46,7 @@
(int(L-L/2-1)-theta) * np.pi / L,
3.0 / 4.0 * np.pi /2**j, 4.0/L, offset=offset_unpad)
psi_signal_fourier = fft2(psi_signal)
- for res in range(j + 1):
+ for res in range(min(j + 1, J - 1)):
psi_signal_fourier_res = periodize_filter_fft(
psi_signal_fourier, res)
psi[res] = torch.FloatTensor(
| {"golden_diff": "diff --git a/kymatio/scattering2d/filter_bank.py b/kymatio/scattering2d/filter_bank.py\n--- a/kymatio/scattering2d/filter_bank.py\n+++ b/kymatio/scattering2d/filter_bank.py\n@@ -46,7 +46,7 @@\n (int(L-L/2-1)-theta) * np.pi / L,\n 3.0 / 4.0 * np.pi /2**j, 4.0/L, offset=offset_unpad)\n psi_signal_fourier = fft2(psi_signal)\n- for res in range(j + 1):\n+ for res in range(min(j + 1, J - 1)):\n psi_signal_fourier_res = periodize_filter_fft(\n psi_signal_fourier, res)\n psi[res] = torch.FloatTensor(\n", "issue": "Unused 2D Psi filters\nThe generated 2D filterbank includes filters that are not used in the computation.\r\n\r\nLet's create a scattering transform:\r\n```\r\n>>> import kymatio\r\n>>> s = kymatio.Scattering2D(J=2, shape=(32, 32))\r\n```\r\n\r\nLet's look at the shapes of the generated filters:\r\n```\r\n>>> print(\"\\n\".join('{' + ', '.join('%s: %s' % (k, getattr(v, 'shape', v)) for k, v in psi.items()) + '}' for psi in s.Psi))\r\n{j: 0, theta: 0, 0: torch.Size([40, 40, 2])}\r\n{j: 0, theta: 1, 0: torch.Size([40, 40, 2])}\r\n{j: 0, theta: 2, 0: torch.Size([40, 40, 2])}\r\n{j: 0, theta: 3, 0: torch.Size([40, 40, 2])}\r\n{j: 0, theta: 4, 0: torch.Size([40, 40, 2])}\r\n{j: 0, theta: 5, 0: torch.Size([40, 40, 2])}\r\n{j: 0, theta: 6, 0: torch.Size([40, 40, 2])}\r\n{j: 0, theta: 7, 0: torch.Size([40, 40, 2])}\r\n{j: 1, theta: 0, 0: torch.Size([40, 40, 2]), 1: torch.Size([20, 20, 2])}\r\n{j: 1, theta: 1, 0: torch.Size([40, 40, 2]), 1: torch.Size([20, 20, 2])}\r\n{j: 1, theta: 2, 0: torch.Size([40, 40, 2]), 1: torch.Size([20, 20, 2])}\r\n{j: 1, theta: 3, 0: torch.Size([40, 40, 2]), 1: torch.Size([20, 20, 2])}\r\n{j: 1, theta: 4, 0: torch.Size([40, 40, 2]), 1: torch.Size([20, 20, 2])}\r\n{j: 1, theta: 5, 0: torch.Size([40, 40, 2]), 1: torch.Size([20, 20, 2])}\r\n{j: 1, theta: 6, 0: torch.Size([40, 40, 2]), 1: torch.Size([20, 20, 2])}\r\n{j: 1, theta: 7, 0: torch.Size([40, 40, 2]), 1: torch.Size([20, 20, 2])}\r\n```\r\n\r\nThe 20x20 filters are all unused. Demonstration:\r\n```\r\n>>> for p in s.Psi:\r\n... if 1 in p:\r\n... 
p[1] = 'foo'\r\n...\r\n>>> import torch\r\n>>> x = torch.rand((1, 1, 32, 32)).cuda()\r\n>>> s = s.cuda()\r\n>>> y = s(x)\r\n```\r\n\r\nThey should probably not be part of the filterbank in the first place?\n", "before_files": [{"content": "\"\"\"\nAuthors: Eugene Belilovsky, Edouard Oyallon and Sergey Zagoruyko\nAll rights reserved, 2017.\n\"\"\"\n\n__all__ = ['filter_bank']\n\nimport torch\nimport numpy as np\nfrom .utils import fft2\n\n\ndef filter_bank(M, N, J, L=8):\n \"\"\"\n Builds in Fourier the Morlet filters used for the scattering transform.\n Each single filter is provided as a dictionary with the following keys:\n * 'j' : scale\n * 'theta' : angle used\n Parameters\n ----------\n M, N : int\n spatial support of the input\n J : int\n logscale of the scattering\n L : int, optional\n number of angles used for the wavelet transform\n Returns\n -------\n filters : list\n A two list of dictionary containing respectively the low-pass and\n wavelet filters.\n Notes\n -----\n The design of the filters is optimized for the value L = 8.\n \"\"\"\n filters = {}\n filters['psi'] = []\n\n offset_unpad = 0\n for j in range(J):\n for theta in range(L):\n psi = {}\n psi['j'] = j\n psi['theta'] = theta\n psi_signal = morlet_2d(M, N, 0.8 * 2**j,\n (int(L-L/2-1)-theta) * np.pi / L,\n 3.0 / 4.0 * np.pi /2**j, 4.0/L, offset=offset_unpad)\n psi_signal_fourier = fft2(psi_signal)\n for res in range(j + 1):\n psi_signal_fourier_res = periodize_filter_fft(\n psi_signal_fourier, res)\n psi[res] = torch.FloatTensor(\n np.stack((np.real(psi_signal_fourier_res),\n np.imag(psi_signal_fourier_res)), axis=2))\n # Normalization to avoid doing it with the FFT.\n psi[res].div_(M*N// 2**(2*j))\n filters['psi'].append(psi)\n\n filters['phi'] = {}\n phi_signal = gabor_2d(M, N, 0.8 * 2**(J-1), 0, 0, offset=offset_unpad)\n phi_signal_fourier = fft2(phi_signal)\n filters['phi']['j'] = J\n for res in range(J):\n phi_signal_fourier_res = periodize_filter_fft(phi_signal_fourier, res)\n filters['phi'][res] = torch.FloatTensor(np.stack(\n (np.real(phi_signal_fourier_res), np.imag(phi_signal_fourier_res)),\n axis=2))\n filters['phi'][res].div_(M*N // 2 ** (2 * J))\n\n return filters\n\n\ndef periodize_filter_fft(x, res):\n \"\"\"\n Parameters\n ----------\n x : numpy array\n signal to periodize in Fourier\n res :\n resolution to which the signal is cropped.\n\n Returns\n -------\n crop : numpy array\n It returns a crop version of the filter, assuming that\n the convolutions will be done via compactly supported signals.\n \"\"\"\n M = x.shape[0]\n N = x.shape[1]\n\n crop = np.zeros((M // 2 ** res, N // 2 ** res), np.complex64)\n\n mask = np.ones(x.shape, np.float32)\n len_x = int(M * (1 - 2 ** (-res)))\n start_x = int(M * 2 ** (-res - 1))\n len_y = int(N * (1 - 2 ** (-res)))\n start_y = int(N * 2 ** (-res - 1))\n mask[start_x:start_x + len_x,:] = 0\n mask[:, start_y:start_y + len_y] = 0\n x = np.multiply(x,mask)\n\n for k in range(int(M / 2 ** res)):\n for l in range(int(N / 2 ** res)):\n for i in range(int(2 ** res)):\n for j in range(int(2 ** res)):\n crop[k, l] += x[k + i * int(M / 2 ** res), l + j * int(N / 2 ** res)]\n\n return crop\n\n\ndef morlet_2d(M, N, sigma, theta, xi, slant=0.5, offset=0, fft_shift=False):\n \"\"\"\n Computes a 2D Morlet filter.\n A Morlet filter is the sum of a Gabor filter and a low-pass filter\n to ensure that the sum has exactly zero mean in the temporal domain.\n It is defined by the following formula in space:\n psi(u) = g_{sigma}(u) (e^(i xi^T u) - beta)\n where g_{sigma} is a Gaussian 
envelope, xi is a frequency and beta is\n the cancelling parameter.\n\n Parameters\n ----------\n M, N : int\n spatial sizes\n sigma : float\n bandwidth parameter\n xi : float\n central frequency (in [0, 1])\n theta : float\n angle in [0, pi]\n slant : float, optional\n parameter which guides the elipsoidal shape of the morlet\n offset : int, optional\n offset by which the signal starts\n fft_shift : boolean\n if true, shift the signal in a numpy style\n\n Returns\n -------\n morlet_fft : ndarray\n numpy array of size (M, N)\n \"\"\"\n wv = gabor_2d(M, N, sigma, theta, xi, slant, offset, fft_shift)\n wv_modulus = gabor_2d(M, N, sigma, theta, 0, slant, offset, fft_shift)\n K = np.sum(wv) / np.sum(wv_modulus)\n\n mor = wv - K * wv_modulus\n return mor\n\n\ndef gabor_2d(M, N, sigma, theta, xi, slant=1.0, offset=0, fft_shift=False):\n \"\"\"\n Computes a 2D Gabor filter.\n A Gabor filter is defined by the following formula in space:\n psi(u) = g_{sigma}(u) e^(i xi^T u)\n where g_{sigma} is a Gaussian envelope and xi is a frequency.\n\n Parameters\n ----------\n M, N : int\n spatial sizes\n sigma : float\n bandwidth parameter\n xi : float\n central frequency (in [0, 1])\n theta : float\n angle in [0, pi]\n slant : float, optional\n parameter which guides the elipsoidal shape of the morlet\n offset : int, optional\n offset by which the signal starts\n fft_shift : boolean\n if true, shift the signal in a numpy style\n\n Returns\n -------\n morlet_fft : ndarray\n numpy array of size (M, N)\n \"\"\"\n gab = np.zeros((M, N), np.complex64)\n R = np.array([[np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]], np.float32)\n R_inv = np.array([[np.cos(theta), np.sin(theta)], [-np.sin(theta), np.cos(theta)]], np.float32)\n D = np.array([[1, 0], [0, slant * slant]])\n curv = np.dot(R, np.dot(D, R_inv)) / ( 2 * sigma * sigma)\n\n for ex in [-2, -1, 0, 1, 2]:\n for ey in [-2, -1, 0, 1, 2]:\n [xx, yy] = np.mgrid[offset + ex * M:offset + M + ex * M, offset + ey * N:offset + N + ey * N]\n arg = -(curv[0, 0] * np.multiply(xx, xx) + (curv[0, 1] + curv[1, 0]) * np.multiply(xx, yy) + curv[\n 1, 1] * np.multiply(yy, yy)) + 1.j * (xx * xi * np.cos(theta) + yy * xi * np.sin(theta))\n gab = gab + np.exp(arg)\n\n norm_factor = (2 * 3.1415 * sigma * sigma / slant)\n gab = gab / norm_factor\n\n if (fft_shift):\n gab = np.fft.fftshift(gab, axes=(0, 1))\n return gab\n", "path": "kymatio/scattering2d/filter_bank.py"}], "after_files": [{"content": "\"\"\"\nAuthors: Eugene Belilovsky, Edouard Oyallon and Sergey Zagoruyko\nAll rights reserved, 2017.\n\"\"\"\n\n__all__ = ['filter_bank']\n\nimport torch\nimport numpy as np\nfrom .utils import fft2\n\n\ndef filter_bank(M, N, J, L=8):\n \"\"\"\n Builds in Fourier the Morlet filters used for the scattering transform.\n Each single filter is provided as a dictionary with the following keys:\n * 'j' : scale\n * 'theta' : angle used\n Parameters\n ----------\n M, N : int\n spatial support of the input\n J : int\n logscale of the scattering\n L : int, optional\n number of angles used for the wavelet transform\n Returns\n -------\n filters : list\n A two list of dictionary containing respectively the low-pass and\n wavelet filters.\n Notes\n -----\n The design of the filters is optimized for the value L = 8.\n \"\"\"\n filters = {}\n filters['psi'] = []\n\n offset_unpad = 0\n for j in range(J):\n for theta in range(L):\n psi = {}\n psi['j'] = j\n psi['theta'] = theta\n psi_signal = morlet_2d(M, N, 0.8 * 2**j,\n (int(L-L/2-1)-theta) * np.pi / L,\n 3.0 / 4.0 * np.pi /2**j, 
4.0/L, offset=offset_unpad)\n psi_signal_fourier = fft2(psi_signal)\n for res in range(min(j + 1, J - 1)):\n psi_signal_fourier_res = periodize_filter_fft(\n psi_signal_fourier, res)\n psi[res] = torch.FloatTensor(\n np.stack((np.real(psi_signal_fourier_res),\n np.imag(psi_signal_fourier_res)), axis=2))\n # Normalization to avoid doing it with the FFT.\n psi[res].div_(M*N// 2**(2*j))\n filters['psi'].append(psi)\n\n filters['phi'] = {}\n phi_signal = gabor_2d(M, N, 0.8 * 2**(J-1), 0, 0, offset=offset_unpad)\n phi_signal_fourier = fft2(phi_signal)\n filters['phi']['j'] = J\n for res in range(J):\n phi_signal_fourier_res = periodize_filter_fft(phi_signal_fourier, res)\n filters['phi'][res] = torch.FloatTensor(np.stack(\n (np.real(phi_signal_fourier_res), np.imag(phi_signal_fourier_res)),\n axis=2))\n filters['phi'][res].div_(M*N // 2 ** (2 * J))\n\n return filters\n\n\ndef periodize_filter_fft(x, res):\n \"\"\"\n Parameters\n ----------\n x : numpy array\n signal to periodize in Fourier\n res :\n resolution to which the signal is cropped.\n\n Returns\n -------\n crop : numpy array\n It returns a crop version of the filter, assuming that\n the convolutions will be done via compactly supported signals.\n \"\"\"\n M = x.shape[0]\n N = x.shape[1]\n\n crop = np.zeros((M // 2 ** res, N // 2 ** res), np.complex64)\n\n mask = np.ones(x.shape, np.float32)\n len_x = int(M * (1 - 2 ** (-res)))\n start_x = int(M * 2 ** (-res - 1))\n len_y = int(N * (1 - 2 ** (-res)))\n start_y = int(N * 2 ** (-res - 1))\n mask[start_x:start_x + len_x,:] = 0\n mask[:, start_y:start_y + len_y] = 0\n x = np.multiply(x,mask)\n\n for k in range(int(M / 2 ** res)):\n for l in range(int(N / 2 ** res)):\n for i in range(int(2 ** res)):\n for j in range(int(2 ** res)):\n crop[k, l] += x[k + i * int(M / 2 ** res), l + j * int(N / 2 ** res)]\n\n return crop\n\n\ndef morlet_2d(M, N, sigma, theta, xi, slant=0.5, offset=0, fft_shift=False):\n \"\"\"\n Computes a 2D Morlet filter.\n A Morlet filter is the sum of a Gabor filter and a low-pass filter\n to ensure that the sum has exactly zero mean in the temporal domain.\n It is defined by the following formula in space:\n psi(u) = g_{sigma}(u) (e^(i xi^T u) - beta)\n where g_{sigma} is a Gaussian envelope, xi is a frequency and beta is\n the cancelling parameter.\n\n Parameters\n ----------\n M, N : int\n spatial sizes\n sigma : float\n bandwidth parameter\n xi : float\n central frequency (in [0, 1])\n theta : float\n angle in [0, pi]\n slant : float, optional\n parameter which guides the elipsoidal shape of the morlet\n offset : int, optional\n offset by which the signal starts\n fft_shift : boolean\n if true, shift the signal in a numpy style\n\n Returns\n -------\n morlet_fft : ndarray\n numpy array of size (M, N)\n \"\"\"\n wv = gabor_2d(M, N, sigma, theta, xi, slant, offset, fft_shift)\n wv_modulus = gabor_2d(M, N, sigma, theta, 0, slant, offset, fft_shift)\n K = np.sum(wv) / np.sum(wv_modulus)\n\n mor = wv - K * wv_modulus\n return mor\n\n\ndef gabor_2d(M, N, sigma, theta, xi, slant=1.0, offset=0, fft_shift=False):\n \"\"\"\n Computes a 2D Gabor filter.\n A Gabor filter is defined by the following formula in space:\n psi(u) = g_{sigma}(u) e^(i xi^T u)\n where g_{sigma} is a Gaussian envelope and xi is a frequency.\n\n Parameters\n ----------\n M, N : int\n spatial sizes\n sigma : float\n bandwidth parameter\n xi : float\n central frequency (in [0, 1])\n theta : float\n angle in [0, pi]\n slant : float, optional\n parameter which guides the elipsoidal shape of the morlet\n offset : 
int, optional\n offset by which the signal starts\n fft_shift : boolean\n if true, shift the signal in a numpy style\n\n Returns\n -------\n morlet_fft : ndarray\n numpy array of size (M, N)\n \"\"\"\n gab = np.zeros((M, N), np.complex64)\n R = np.array([[np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]], np.float32)\n R_inv = np.array([[np.cos(theta), np.sin(theta)], [-np.sin(theta), np.cos(theta)]], np.float32)\n D = np.array([[1, 0], [0, slant * slant]])\n curv = np.dot(R, np.dot(D, R_inv)) / ( 2 * sigma * sigma)\n\n for ex in [-2, -1, 0, 1, 2]:\n for ey in [-2, -1, 0, 1, 2]:\n [xx, yy] = np.mgrid[offset + ex * M:offset + M + ex * M, offset + ey * N:offset + N + ey * N]\n arg = -(curv[0, 0] * np.multiply(xx, xx) + (curv[0, 1] + curv[1, 0]) * np.multiply(xx, yy) + curv[\n 1, 1] * np.multiply(yy, yy)) + 1.j * (xx * xi * np.cos(theta) + yy * xi * np.sin(theta))\n gab = gab + np.exp(arg)\n\n norm_factor = (2 * 3.1415 * sigma * sigma / slant)\n gab = gab / norm_factor\n\n if (fft_shift):\n gab = np.fft.fftshift(gab, axes=(0, 1))\n return gab\n", "path": "kymatio/scattering2d/filter_bank.py"}]} | 3,419 | 180 |
gh_patches_debug_8121 | rasdani/github-patches | git_diff | netket__netket-852 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Broken equations in docs
See for example the unrendered TeX in https://www.netket.org/docs/sr.html
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `docs/conf.py`
Content:
```
1 import sphinx_bootstrap_theme
2
3 # -- Project information -----------------------------------------------------
4
5 project = "netket"
6 copyright = "2019-2021, The Netket authors - All rights reserved"
7 author = "Giuseppe Carleo et al."
8
9 # The full version, including alpha/beta/rc tags
10 release = "v3.0"
11
12
13 # -- General configuration ---------------------------------------------------
14
15 # Add any Sphinx extension module names here, as strings. They can be
16 # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
17 # ones.
18 extensions = [
19 "sphinx.ext.napoleon",
20 "sphinx.ext.autodoc",
21 "sphinx_autodoc_typehints",
22 "sphinx.ext.autosummary",
23 "sphinx.ext.doctest",
24 "sphinx.ext.intersphinx",
25 "sphinx.ext.mathjax",
26 "sphinx.ext.viewcode",
27 "sphinx.ext.autosectionlabel",
28 "sphinx_reredirects",
29 "sphinx_panels",
30 "nbsphinx",
31 "myst_parser",
32 ]
33
34 # Napoleon settings
35 autodoc_docstring_signature = True
36 autodoc_inherit_docstrings = True
37 allow_inherited = True
38 autosummary_generate = True
39 napoleon_preprocess_types = True
40
41 panels_add_bootstrap_css = False
42
43 master_doc = "index"
44
45 autoclass_content = "class"
46 autodoc_class_signature = "separated"
47 autodoc_typehints = "description"
48
49 # Add any paths that contain templates here, relative to this directory.
50 templates_path = ["_templates", "_templates/autosummary"]
51
52 # List of patterns, relative to source directory, that match files and
53 # directories to ignore when looking for source files.
54 # This pattern also affects html_static_path and html_extra_path.
55 exclude_patterns = ["_build", "**.ipynb_checkpoints"]
56
57 # The suffix(es) of source filenames.
58 # Note: important to list ipynb before md here: we have both md and ipynb
59 # copies of each notebook, and myst will choose which to convert based on
60 # the order in the source_suffix list. Notebooks which are not executed have
61 # outputs stored in ipynb but not in md, so we must convert the ipynb.
62 source_suffix = [".rst", ".ipynb", ".md"]
63
64 # Markdown parser latex support
65 myst_enable_extensions = ["dollarmath", "amsmath"]
66
67 mathjax_path = "https://cdn.jsdelivr.net/npm/mathjax@3/es5/tex-mml-chtml.js"
68
69
70 # -- Options for HTML output -------------------------------------------------
71
72 html_theme = "bootstrap"
73 html_theme_path = sphinx_bootstrap_theme.get_html_theme_path()
74
75 # Add any paths that contain custom static files (such as style sheets) here,
76 # relative to this directory. They are copied after the builtin static files,
77 # so a file named "default.css" will overwrite the builtin "default.css".
78 html_static_path = ["_static"]
79
80 # html_js_files = ["https://kit.fontawesome.com/7c145f31db.js"]
81 html_css_files = [
82 "jumbo-style.css",
83 "css/all.min.css",
84 "css/custom.css",
85 "css/rtd_theme.css",
86 ]
87
88 html_js_files = [
89 "js/rtd_theme.js",
90 ]
91
92 # Example configuration for intersphinx: refer to the Python standard library.
93 intersphinx_mapping = {
94 "python": ("https://docs.python.org/3/", None),
95 "numpy": ("https://numpy.org/doc/stable/", None),
96 "scipy": ("https://docs.scipy.org/doc/scipy/reference/", None),
97 "jax": ("https://jax.readthedocs.io/en/latest/", None),
98 "flax": ("https://flax.readthedocs.io/en/latest/", None),
99 # "networkx": ("https://networkx.org/doc/reference/", None),
100 }
101
102 # (Optional) Logo. Should be small enough to fit the navbar (ideally 24x24).
103 # Path should be relative to the ``_static`` files directory.
104 html_logo = "_static/logonav.png"
105
106 # Theme options are theme-specific and customize the look and feel of a
107 # theme further.
108 html_theme_options = {
109 # Navigation bar title. (Default: ``project`` value)
110 "navbar_title": "NetKet",
111 # Tab name for entire site. (Default: "Site")
112 "navbar_site_name": "Site",
113 # A list of tuples containing pages or urls to link to.
114 # Valid tuples should be in the following forms:
115 # (name, page) # a link to a page
116 # (name, "/aa/bb", 1) # a link to an arbitrary relative url
117 # (name, "http://example.com", True) # arbitrary absolute url
118 # Note the "1" or "True" value above as the third argument to indicate
119 # an arbitrary url.
120 "navbar_links": [
121 ("Get Started", "getting_started"),
122 ("Documentation", "docs/getting_started"),
123 ("Tutorials", "tutorials"),
124 ("Citing NetKet", "citing"),
125 ("About", "about"),
126 (
127 '<i class="fab fa-github" aria-hidden="true"></i>',
128 "https://github.com/netket/netket",
129 True,
130 ),
131 (
132 '<i class="fab fa-twitter" aria-hidden="true"></i>',
133 "https://twitter.com/NetKetOrg",
134 True,
135 ),
136 ],
137 # Render the next and previous page links in navbar. (Default: true)
138 "navbar_sidebarrel": False,
139 # Render the current pages TOC in the navbar. (Default: true)
140 "navbar_pagenav": False,
141 # Tab name for the current pages TOC. (Default: "Page")
142 "navbar_pagenav_name": "Page",
143 # Global TOC depth for "site" navbar tab. (Default: 1)
144 # Switching to -1 shows all levels.
145 "globaltoc_depth": 10,
146 # Include hidden TOCs in Site navbar?
147 #
148 # Note: If this is "false", you cannot have mixed ``:hidden:`` and
149 # non-hidden ``toctree`` directives in the same page, or else the build
150 # will break.
151 #
152 # Values: "true" (default) or "false"
153 "globaltoc_includehidden": "false",
154 # HTML navbar class (Default: "navbar") to attach to <div> element.
155 # For black navbar, do "navbar navbar-inverse"
156 "navbar_class": "navbar",
157 # Fix navigation bar to top of page?
158 # Values: "true" (default) or "false"
159 "navbar_fixed_top": "true",
160 # Location of link to source.
161 # Options are "nav" (default), "footer" or anything else to exclude.
162 "source_link_position": "none",
163 # Bootswatch (http://bootswatch.com/) theme.
164 #
165 # Options are nothing (default) or the name of a valid theme
166 # such as "cosmo" or "sandstone".
167 #
168 # The set of valid themes depend on the version of Bootstrap
169 # that's used (the next config option).
170 #
171 # Currently, the supported themes are:
172 # - Bootstrap 2: https://bootswatch.com/2
173 # - Bootstrap 3: https://bootswatch.com/3
174 "bootswatch_theme": "flatly",
175 # Choose Bootstrap version.
176 # Values: "3" (default) or "2" (in quotes)
177 "bootstrap_version": "3",
178 }
179
180 html_sidebars = {
181 "docs/*": ["custom_localtoc.html"],
182 "docs/_generated/**/*": ["custom_localtoc.html"],
183 "modules/*": ["custom_localtoc.html"],
184 }
185
186 ## redirects
187 redirects = {
188 "documentation": "docs/getting_started.html",
189 }
190
191
192 # do not show __init__ if it does not have a docstring
193 def autodoc_skip_member(app, what, name, obj, skip, options):
194 # Ref: https://stackoverflow.com/a/21449475/
195 exclusions = (
196 "__weakref__", # special-members
197 "__doc__",
198 "__module__",
199 "__dict__", # undoc-members
200 )
201 exclude = name in exclusions
202 if name == "__init__":
203 exclude = True if obj.__doc__ is None else False
204 return True if (skip or exclude) else None
205
206
207 ## bug in sphinx: take docstring
208 # def warn_undocumented_members(app, what, name, obj, options, lines):
209 # if name.startswith("netket"):
210 # print(f"Autodoc dostuff: {what}, {name}, {obj}, {lines}, {options}")
211 # print(f"the type is {type(obj)}")
212 # if obj.__doc__ == None:
213 #
214 # else:
215 # print(f"Autodoc cacca: {what}, {name}, {obj}, {lines}, {options}")
216
217
218 def setup(app):
219 app.connect("autodoc-skip-member", autodoc_skip_member)
220 # app.connect('autodoc-process-docstring', warn_undocumented_members);
221
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/docs/conf.py b/docs/conf.py
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -62,11 +62,10 @@
source_suffix = [".rst", ".ipynb", ".md"]
# Markdown parser latex support
-myst_enable_extensions = ["dollarmath", "amsmath"]
-
+myst_enable_extensions = ["dollarmath", "amsmath", "braket"]
+myst_update_mathjax = False
mathjax_path = "https://cdn.jsdelivr.net/npm/mathjax@3/es5/tex-mml-chtml.js"
-
# -- Options for HTML output -------------------------------------------------
html_theme = "bootstrap"
| {"golden_diff": "diff --git a/docs/conf.py b/docs/conf.py\n--- a/docs/conf.py\n+++ b/docs/conf.py\n@@ -62,11 +62,10 @@\n source_suffix = [\".rst\", \".ipynb\", \".md\"]\n \n # Markdown parser latex support\n-myst_enable_extensions = [\"dollarmath\", \"amsmath\"]\n-\n+myst_enable_extensions = [\"dollarmath\", \"amsmath\", \"braket\"]\n+myst_update_mathjax = False\n mathjax_path = \"https://cdn.jsdelivr.net/npm/mathjax@3/es5/tex-mml-chtml.js\"\n \n-\n # -- Options for HTML output -------------------------------------------------\n \n html_theme = \"bootstrap\"\n", "issue": "Broken equations in docs\nSee for example the unredered tex in https://www.netket.org/docs/sr.html \n", "before_files": [{"content": "import sphinx_bootstrap_theme\n\n# -- Project information -----------------------------------------------------\n\nproject = \"netket\"\ncopyright = \"2019-2021, The Netket authors - All rights reserved\"\nauthor = \"Giuseppe Carleo et al.\"\n\n# The full version, including alpha/beta/rc tags\nrelease = \"v3.0\"\n\n\n# -- General configuration ---------------------------------------------------\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n \"sphinx.ext.napoleon\",\n \"sphinx.ext.autodoc\",\n \"sphinx_autodoc_typehints\",\n \"sphinx.ext.autosummary\",\n \"sphinx.ext.doctest\",\n \"sphinx.ext.intersphinx\",\n \"sphinx.ext.mathjax\",\n \"sphinx.ext.viewcode\",\n \"sphinx.ext.autosectionlabel\",\n \"sphinx_reredirects\",\n \"sphinx_panels\",\n \"nbsphinx\",\n \"myst_parser\",\n]\n\n# Napoleon settings\nautodoc_docstring_signature = True\nautodoc_inherit_docstrings = True\nallow_inherited = True\nautosummary_generate = True\nnapoleon_preprocess_types = True\n\npanels_add_bootstrap_css = False\n\nmaster_doc = \"index\"\n\nautoclass_content = \"class\"\nautodoc_class_signature = \"separated\"\nautodoc_typehints = \"description\"\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = [\"_templates\", \"_templates/autosummary\"]\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This pattern also affects html_static_path and html_extra_path.\nexclude_patterns = [\"_build\", \"**.ipynb_checkpoints\"]\n\n# The suffix(es) of source filenames.\n# Note: important to list ipynb before md here: we have both md and ipynb\n# copies of each notebook, and myst will choose which to convert based on\n# the order in the source_suffix list. Notebooks which are not executed have\n# outputs stored in ipynb but not in md, so we must convert the ipynb.\nsource_suffix = [\".rst\", \".ipynb\", \".md\"]\n\n# Markdown parser latex support\nmyst_enable_extensions = [\"dollarmath\", \"amsmath\"]\n\nmathjax_path = \"https://cdn.jsdelivr.net/npm/mathjax@3/es5/tex-mml-chtml.js\"\n\n\n# -- Options for HTML output -------------------------------------------------\n\nhtml_theme = \"bootstrap\"\nhtml_theme_path = sphinx_bootstrap_theme.get_html_theme_path()\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. 
They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = [\"_static\"]\n\n# html_js_files = [\"https://kit.fontawesome.com/7c145f31db.js\"]\nhtml_css_files = [\n \"jumbo-style.css\",\n \"css/all.min.css\",\n \"css/custom.css\",\n \"css/rtd_theme.css\",\n]\n\nhtml_js_files = [\n \"js/rtd_theme.js\",\n]\n\n# Example configuration for intersphinx: refer to the Python standard library.\nintersphinx_mapping = {\n \"python\": (\"https://docs.python.org/3/\", None),\n \"numpy\": (\"https://numpy.org/doc/stable/\", None),\n \"scipy\": (\"https://docs.scipy.org/doc/scipy/reference/\", None),\n \"jax\": (\"https://jax.readthedocs.io/en/latest/\", None),\n \"flax\": (\"https://flax.readthedocs.io/en/latest/\", None),\n # \"networkx\": (\"https://networkx.org/doc/reference/\", None),\n}\n\n# (Optional) Logo. Should be small enough to fit the navbar (ideally 24x24).\n# Path should be relative to the ``_static`` files directory.\nhtml_logo = \"_static/logonav.png\"\n\n# Theme options are theme-specific and customize the look and feel of a\n# theme further.\nhtml_theme_options = {\n # Navigation bar title. (Default: ``project`` value)\n \"navbar_title\": \"NetKet\",\n # Tab name for entire site. (Default: \"Site\")\n \"navbar_site_name\": \"Site\",\n # A list of tuples containing pages or urls to link to.\n # Valid tuples should be in the following forms:\n # (name, page) # a link to a page\n # (name, \"/aa/bb\", 1) # a link to an arbitrary relative url\n # (name, \"http://example.com\", True) # arbitrary absolute url\n # Note the \"1\" or \"True\" value above as the third argument to indicate\n # an arbitrary url.\n \"navbar_links\": [\n (\"Get Started\", \"getting_started\"),\n (\"Documentation\", \"docs/getting_started\"),\n (\"Tutorials\", \"tutorials\"),\n (\"Citing NetKet\", \"citing\"),\n (\"About\", \"about\"),\n (\n '<i class=\"fab fa-github\" aria-hidden=\"true\"></i>',\n \"https://github.com/netket/netket\",\n True,\n ),\n (\n '<i class=\"fab fa-twitter\" aria-hidden=\"true\"></i>',\n \"https://twitter.com/NetKetOrg\",\n True,\n ),\n ],\n # Render the next and previous page links in navbar. (Default: true)\n \"navbar_sidebarrel\": False,\n # Render the current pages TOC in the navbar. (Default: true)\n \"navbar_pagenav\": False,\n # Tab name for the current pages TOC. (Default: \"Page\")\n \"navbar_pagenav_name\": \"Page\",\n # Global TOC depth for \"site\" navbar tab. 
(Default: 1)\n # Switching to -1 shows all levels.\n \"globaltoc_depth\": 10,\n # Include hidden TOCs in Site navbar?\n #\n # Note: If this is \"false\", you cannot have mixed ``:hidden:`` and\n # non-hidden ``toctree`` directives in the same page, or else the build\n # will break.\n #\n # Values: \"true\" (default) or \"false\"\n \"globaltoc_includehidden\": \"false\",\n # HTML navbar class (Default: \"navbar\") to attach to <div> element.\n # For black navbar, do \"navbar navbar-inverse\"\n \"navbar_class\": \"navbar\",\n # Fix navigation bar to top of page?\n # Values: \"true\" (default) or \"false\"\n \"navbar_fixed_top\": \"true\",\n # Location of link to source.\n # Options are \"nav\" (default), \"footer\" or anything else to exclude.\n \"source_link_position\": \"none\",\n # Bootswatch (http://bootswatch.com/) theme.\n #\n # Options are nothing (default) or the name of a valid theme\n # such as \"cosmo\" or \"sandstone\".\n #\n # The set of valid themes depend on the version of Bootstrap\n # that's used (the next config option).\n #\n # Currently, the supported themes are:\n # - Bootstrap 2: https://bootswatch.com/2\n # - Bootstrap 3: https://bootswatch.com/3\n \"bootswatch_theme\": \"flatly\",\n # Choose Bootstrap version.\n # Values: \"3\" (default) or \"2\" (in quotes)\n \"bootstrap_version\": \"3\",\n}\n\nhtml_sidebars = {\n \"docs/*\": [\"custom_localtoc.html\"],\n \"docs/_generated/**/*\": [\"custom_localtoc.html\"],\n \"modules/*\": [\"custom_localtoc.html\"],\n}\n\n## redirects\nredirects = {\n \"documentation\": \"docs/getting_started.html\",\n}\n\n\n# do not show __init__ if it does not have a docstring\ndef autodoc_skip_member(app, what, name, obj, skip, options):\n # Ref: https://stackoverflow.com/a/21449475/\n exclusions = (\n \"__weakref__\", # special-members\n \"__doc__\",\n \"__module__\",\n \"__dict__\", # undoc-members\n )\n exclude = name in exclusions\n if name == \"__init__\":\n exclude = True if obj.__doc__ is None else False\n return True if (skip or exclude) else None\n\n\n## bug in sphinx: take docstring\n# def warn_undocumented_members(app, what, name, obj, options, lines):\n# if name.startswith(\"netket\"):\n# print(f\"Autodoc dostuff: {what}, {name}, {obj}, {lines}, {options}\")\n# print(f\"the type is {type(obj)}\")\n# if obj.__doc__ == None:\n#\n# else:\n# print(f\"Autodoc cacca: {what}, {name}, {obj}, {lines}, {options}\")\n\n\ndef setup(app):\n app.connect(\"autodoc-skip-member\", autodoc_skip_member)\n # app.connect('autodoc-process-docstring', warn_undocumented_members);\n", "path": "docs/conf.py"}], "after_files": [{"content": "import sphinx_bootstrap_theme\n\n# -- Project information -----------------------------------------------------\n\nproject = \"netket\"\ncopyright = \"2019-2021, The Netket authors - All rights reserved\"\nauthor = \"Giuseppe Carleo et al.\"\n\n# The full version, including alpha/beta/rc tags\nrelease = \"v3.0\"\n\n\n# -- General configuration ---------------------------------------------------\n\n# Add any Sphinx extension module names here, as strings. 
They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n \"sphinx.ext.napoleon\",\n \"sphinx.ext.autodoc\",\n \"sphinx_autodoc_typehints\",\n \"sphinx.ext.autosummary\",\n \"sphinx.ext.doctest\",\n \"sphinx.ext.intersphinx\",\n \"sphinx.ext.mathjax\",\n \"sphinx.ext.viewcode\",\n \"sphinx.ext.autosectionlabel\",\n \"sphinx_reredirects\",\n \"sphinx_panels\",\n \"nbsphinx\",\n \"myst_parser\",\n]\n\n# Napoleon settings\nautodoc_docstring_signature = True\nautodoc_inherit_docstrings = True\nallow_inherited = True\nautosummary_generate = True\nnapoleon_preprocess_types = True\n\npanels_add_bootstrap_css = False\n\nmaster_doc = \"index\"\n\nautoclass_content = \"class\"\nautodoc_class_signature = \"separated\"\nautodoc_typehints = \"description\"\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = [\"_templates\", \"_templates/autosummary\"]\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This pattern also affects html_static_path and html_extra_path.\nexclude_patterns = [\"_build\", \"**.ipynb_checkpoints\"]\n\n# The suffix(es) of source filenames.\n# Note: important to list ipynb before md here: we have both md and ipynb\n# copies of each notebook, and myst will choose which to convert based on\n# the order in the source_suffix list. Notebooks which are not executed have\n# outputs stored in ipynb but not in md, so we must convert the ipynb.\nsource_suffix = [\".rst\", \".ipynb\", \".md\"]\n\n# Markdown parser latex support\nmyst_enable_extensions = [\"dollarmath\", \"amsmath\", \"braket\"]\nmyst_update_mathjax = False\nmathjax_path = \"https://cdn.jsdelivr.net/npm/mathjax@3/es5/tex-mml-chtml.js\"\n\n# -- Options for HTML output -------------------------------------------------\n\nhtml_theme = \"bootstrap\"\nhtml_theme_path = sphinx_bootstrap_theme.get_html_theme_path()\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = [\"_static\"]\n\n# html_js_files = [\"https://kit.fontawesome.com/7c145f31db.js\"]\nhtml_css_files = [\n \"jumbo-style.css\",\n \"css/all.min.css\",\n \"css/custom.css\",\n \"css/rtd_theme.css\",\n]\n\nhtml_js_files = [\n \"js/rtd_theme.js\",\n]\n\n# Example configuration for intersphinx: refer to the Python standard library.\nintersphinx_mapping = {\n \"python\": (\"https://docs.python.org/3/\", None),\n \"numpy\": (\"https://numpy.org/doc/stable/\", None),\n \"scipy\": (\"https://docs.scipy.org/doc/scipy/reference/\", None),\n \"jax\": (\"https://jax.readthedocs.io/en/latest/\", None),\n \"flax\": (\"https://flax.readthedocs.io/en/latest/\", None),\n # \"networkx\": (\"https://networkx.org/doc/reference/\", None),\n}\n\n# (Optional) Logo. Should be small enough to fit the navbar (ideally 24x24).\n# Path should be relative to the ``_static`` files directory.\nhtml_logo = \"_static/logonav.png\"\n\n# Theme options are theme-specific and customize the look and feel of a\n# theme further.\nhtml_theme_options = {\n # Navigation bar title. (Default: ``project`` value)\n \"navbar_title\": \"NetKet\",\n # Tab name for entire site. 
(Default: \"Site\")\n \"navbar_site_name\": \"Site\",\n # A list of tuples containing pages or urls to link to.\n # Valid tuples should be in the following forms:\n # (name, page) # a link to a page\n # (name, \"/aa/bb\", 1) # a link to an arbitrary relative url\n # (name, \"http://example.com\", True) # arbitrary absolute url\n # Note the \"1\" or \"True\" value above as the third argument to indicate\n # an arbitrary url.\n \"navbar_links\": [\n (\"Get Started\", \"getting_started\"),\n (\"Documentation\", \"docs/getting_started\"),\n (\"Tutorials\", \"tutorials\"),\n (\"Citing NetKet\", \"citing\"),\n (\"About\", \"about\"),\n (\n '<i class=\"fab fa-github\" aria-hidden=\"true\"></i>',\n \"https://github.com/netket/netket\",\n True,\n ),\n (\n '<i class=\"fab fa-twitter\" aria-hidden=\"true\"></i>',\n \"https://twitter.com/NetKetOrg\",\n True,\n ),\n ],\n # Render the next and previous page links in navbar. (Default: true)\n \"navbar_sidebarrel\": False,\n # Render the current pages TOC in the navbar. (Default: true)\n \"navbar_pagenav\": False,\n # Tab name for the current pages TOC. (Default: \"Page\")\n \"navbar_pagenav_name\": \"Page\",\n # Global TOC depth for \"site\" navbar tab. (Default: 1)\n # Switching to -1 shows all levels.\n \"globaltoc_depth\": 10,\n # Include hidden TOCs in Site navbar?\n #\n # Note: If this is \"false\", you cannot have mixed ``:hidden:`` and\n # non-hidden ``toctree`` directives in the same page, or else the build\n # will break.\n #\n # Values: \"true\" (default) or \"false\"\n \"globaltoc_includehidden\": \"false\",\n # HTML navbar class (Default: \"navbar\") to attach to <div> element.\n # For black navbar, do \"navbar navbar-inverse\"\n \"navbar_class\": \"navbar\",\n # Fix navigation bar to top of page?\n # Values: \"true\" (default) or \"false\"\n \"navbar_fixed_top\": \"true\",\n # Location of link to source.\n # Options are \"nav\" (default), \"footer\" or anything else to exclude.\n \"source_link_position\": \"none\",\n # Bootswatch (http://bootswatch.com/) theme.\n #\n # Options are nothing (default) or the name of a valid theme\n # such as \"cosmo\" or \"sandstone\".\n #\n # The set of valid themes depend on the version of Bootstrap\n # that's used (the next config option).\n #\n # Currently, the supported themes are:\n # - Bootstrap 2: https://bootswatch.com/2\n # - Bootstrap 3: https://bootswatch.com/3\n \"bootswatch_theme\": \"flatly\",\n # Choose Bootstrap version.\n # Values: \"3\" (default) or \"2\" (in quotes)\n \"bootstrap_version\": \"3\",\n}\n\nhtml_sidebars = {\n \"docs/*\": [\"custom_localtoc.html\"],\n \"docs/_generated/**/*\": [\"custom_localtoc.html\"],\n \"modules/*\": [\"custom_localtoc.html\"],\n}\n\n## redirects\nredirects = {\n \"documentation\": \"docs/getting_started.html\",\n}\n\n\n# do not show __init__ if it does not have a docstring\ndef autodoc_skip_member(app, what, name, obj, skip, options):\n # Ref: https://stackoverflow.com/a/21449475/\n exclusions = (\n \"__weakref__\", # special-members\n \"__doc__\",\n \"__module__\",\n \"__dict__\", # undoc-members\n )\n exclude = name in exclusions\n if name == \"__init__\":\n exclude = True if obj.__doc__ is None else False\n return True if (skip or exclude) else None\n\n\n## bug in sphinx: take docstring\n# def warn_undocumented_members(app, what, name, obj, options, lines):\n# if name.startswith(\"netket\"):\n# print(f\"Autodoc dostuff: {what}, {name}, {obj}, {lines}, {options}\")\n# print(f\"the type is {type(obj)}\")\n# if obj.__doc__ == None:\n#\n# else:\n# 
print(f\"Autodoc cacca: {what}, {name}, {obj}, {lines}, {options}\")\n\n\ndef setup(app):\n app.connect(\"autodoc-skip-member\", autodoc_skip_member)\n # app.connect('autodoc-process-docstring', warn_undocumented_members);\n", "path": "docs/conf.py"}]} | 2,856 | 145 |
gh_patches_debug_26005 | rasdani/github-patches | git_diff | internetarchive__openlibrary-8350 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Subject tag autocomplete no longer sorts by frequency
When I developed autocomplete for subject tags, the results returned were automatically sorted by frequency — the most used subjects that matched were first on the list. This not only produced useful results, but helped concentrate the usage of particular subjects where multiple similar labels exist for a single topic, improving the quality of subject pages.
In subsequent refactoring of autocomplete support, this frequency sorting was lost.
### Steps to Reproduce
Edit a work and start typing in one of the subject fields to display the list of matching subjects.
* Actual: Subject suggestions are sorted in an apparently random order
* Expected: Subjects should be sorted with the "better" (more frequently used) choices at the top.
### Proposal & Constraints
I will be submitting a PR that restores the SOLR query that produces the frequency-sorted results.
### Stakeholders
@mheiman @cdrini
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `openlibrary/plugins/worksearch/autocomplete.py`
Content:
```
1 import itertools
2 import web
3 import json
4
5
6 from infogami.utils import delegate
7 from infogami.utils.view import safeint
8 from openlibrary.core.models import Thing
9 from openlibrary.plugins.upstream import utils
10 from openlibrary.plugins.worksearch.search import get_solr
11 from openlibrary.utils import (
12 find_olid_in_string,
13 olid_to_key,
14 )
15
16
17 def to_json(d):
18 web.header('Content-Type', 'application/json')
19 return delegate.RawText(json.dumps(d))
20
21
22 class autocomplete(delegate.page):
23 path = "/_autocomplete"
24 fq = ['-type:edition']
25 fl = 'key,type,name,title,score'
26 olid_suffix: str | None = None
27 query = 'title:"{q}"^2 OR title:({q}*) OR name:"{q}"^2 OR name:({q}*)'
28
29 def db_fetch(self, key: str) -> Thing | None:
30 if thing := web.ctx.site.get(key):
31 return thing.as_fake_solr_record()
32 else:
33 return None
34
35 def doc_wrap(self, doc: dict):
36 """Modify the returned solr document in place."""
37 if 'name' not in doc:
38 doc['name'] = doc.get('title')
39
40 def doc_filter(self, doc: dict) -> bool:
41 """Exclude certain documents"""
42 return True
43
44 def GET(self):
45 return self.direct_get()
46
47 def direct_get(self, fq: list[str] | None = None):
48 i = web.input(q="", limit=5)
49 i.limit = safeint(i.limit, 5)
50
51 solr = get_solr()
52
53 # look for ID in query string here
54 q = solr.escape(i.q).strip()
55 embedded_olid = None
56 if self.olid_suffix:
57 embedded_olid = find_olid_in_string(q, self.olid_suffix)
58
59 if embedded_olid:
60 solr_q = f'key:"{olid_to_key(embedded_olid)}"'
61 else:
62 solr_q = self.query.format(q=q)
63
64 fq = fq or self.fq
65 params = {
66 'q_op': 'AND',
67 'rows': i.limit,
68 **({'fq': fq} if fq else {}),
69 # limit the fields returned for better performance
70 'fl': self.fl,
71 }
72
73 data = solr.select(solr_q, **params)
74 docs = data['docs']
75
76 if embedded_olid and not docs:
77 # Grumble! Work not in solr yet. Create a dummy.
78 fake_doc = self.db_fetch(olid_to_key(embedded_olid))
79 if fake_doc:
80 docs = [fake_doc]
81
82 result_docs = []
83 for d in docs:
84 if self.doc_filter(d):
85 self.doc_wrap(d)
86 result_docs.append(d)
87
88 return to_json(result_docs)
89
90
91 class languages_autocomplete(delegate.page):
92 path = "/languages/_autocomplete"
93
94 def GET(self):
95 i = web.input(q="", limit=5)
96 i.limit = safeint(i.limit, 5)
97 return to_json(
98 list(itertools.islice(utils.autocomplete_languages(i.q), i.limit))
99 )
100
101
102 class works_autocomplete(autocomplete):
103 path = "/works/_autocomplete"
104 fq = ['type:work']
105 fl = 'key,title,subtitle,cover_i,first_publish_year,author_name,edition_count'
106 olid_suffix = 'W'
107 query = 'title:"{q}"^2 OR title:({q}*)'
108
109 def doc_filter(self, doc: dict) -> bool:
110 # Exclude orphaned editions from autocomplete results
111 # Note: Do this here instead of with an `fq=key:*W` for performance
112 # reasons.
113 return doc['key'][-1] == 'W'
114
115 def doc_wrap(self, doc: dict):
116 doc['full_title'] = doc['title']
117 if 'subtitle' in doc:
118 doc['full_title'] += ": " + doc['subtitle']
119 doc['name'] = doc.get('title')
120
121
122 class authors_autocomplete(autocomplete):
123 path = "/authors/_autocomplete"
124 fq = ['type:author']
125 fl = 'key,name,alternate_names,birth_date,death_date,work_count,top_work,top_subjects'
126 olid_suffix = 'A'
127 query = 'name:({q}*) OR alternate_names:({q}*) OR name:"{q}"^2 OR alternate_names:"{q}"^2'
128
129 def doc_wrap(self, doc: dict):
130 if 'top_work' in doc:
131 doc['works'] = [doc.pop('top_work')]
132 else:
133 doc['works'] = []
134 doc['subjects'] = doc.pop('top_subjects', [])
135
136
137 class subjects_autocomplete(autocomplete):
138 # can't use /subjects/_autocomplete because the subjects endpoint = /subjects/[^/]+
139 path = "/subjects_autocomplete"
140 fq = ['type:subject']
141 fl = 'key,name'
142 query = 'name:({q}*)'
143
144 def GET(self):
145 i = web.input(type="")
146 fq = self.fq
147 if i.type:
148 fq = fq + [f'subject_type:{i.type}']
149
150 return super().direct_get(fq=fq)
151
152
153 def setup():
154 """Do required setup."""
155 pass
156
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/openlibrary/plugins/worksearch/autocomplete.py b/openlibrary/plugins/worksearch/autocomplete.py
--- a/openlibrary/plugins/worksearch/autocomplete.py
+++ b/openlibrary/plugins/worksearch/autocomplete.py
@@ -24,6 +24,7 @@
fq = ['-type:edition']
fl = 'key,type,name,title,score'
olid_suffix: str | None = None
+ sort: str | None = None
query = 'title:"{q}"^2 OR title:({q}*) OR name:"{q}"^2 OR name:({q}*)'
def db_fetch(self, key: str) -> Thing | None:
@@ -68,6 +69,7 @@
**({'fq': fq} if fq else {}),
# limit the fields returned for better performance
'fl': self.fl,
+ **({'sort': self.sort} if self.sort else {}),
}
data = solr.select(solr_q, **params)
@@ -138,8 +140,9 @@
# can't use /subjects/_autocomplete because the subjects endpoint = /subjects/[^/]+
path = "/subjects_autocomplete"
fq = ['type:subject']
- fl = 'key,name'
+ fl = 'key,name,work_count'
query = 'name:({q}*)'
+ sort = 'work_count desc'
def GET(self):
i = web.input(type="")
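
A minimal standalone sketch of the frequency-sorted query that this patch restores, written against a plain Solr `/select` endpoint. The base URL, core name, and the use of `requests` are illustrative assumptions rather than details taken from the openlibrary codebase; only the query shape (`name:({q}*)`, `fq=type:subject`, `fl=key,name,work_count`, `sort=work_count desc`) mirrors the patched `subjects_autocomplete` class.

```python
import requests

# Hypothetical Solr endpoint; the real openlibrary deployment details are not shown in this row.
SOLR_SELECT_URL = "http://localhost:8983/solr/openlibrary/select"


def subject_autocomplete(prefix: str, limit: int = 5) -> list:
    """Return subject docs matching `prefix` (assumed already escaped), most-used subjects first."""
    params = {
        "q": f"name:({prefix}*)",      # same query template as subjects_autocomplete.query
        "q.op": "AND",                 # raw-Solr spelling of the wrapper's q_op parameter
        "fq": "type:subject",          # restrict results to subject documents
        "fl": "key,name,work_count",   # fetch the field the ranking relies on
        "sort": "work_count desc",     # the frequency ordering the patch reinstates
        "rows": limit,
        "wt": "json",
    }
    response = requests.get(SOLR_SELECT_URL, params=params, timeout=10)
    response.raise_for_status()
    return response.json()["response"]["docs"]


if __name__ == "__main__":
    for doc in subject_autocomplete("hist"):
        print(doc.get("work_count"), doc.get("name"))
```

Sorting on `work_count desc` is what pushes the most frequently used subject labels to the top of the suggestion list, which is the behaviour the issue asks to restore.
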
| {"golden_diff": "diff --git a/openlibrary/plugins/worksearch/autocomplete.py b/openlibrary/plugins/worksearch/autocomplete.py\n--- a/openlibrary/plugins/worksearch/autocomplete.py\n+++ b/openlibrary/plugins/worksearch/autocomplete.py\n@@ -24,6 +24,7 @@\n fq = ['-type:edition']\n fl = 'key,type,name,title,score'\n olid_suffix: str | None = None\n+ sort: str | None = None\n query = 'title:\"{q}\"^2 OR title:({q}*) OR name:\"{q}\"^2 OR name:({q}*)'\n \n def db_fetch(self, key: str) -> Thing | None:\n@@ -68,6 +69,7 @@\n **({'fq': fq} if fq else {}),\n # limit the fields returned for better performance\n 'fl': self.fl,\n+ **({'sort': self.sort} if self.sort else {}),\n }\n \n data = solr.select(solr_q, **params)\n@@ -138,8 +140,9 @@\n # can't use /subjects/_autocomplete because the subjects endpoint = /subjects/[^/]+\n path = \"/subjects_autocomplete\"\n fq = ['type:subject']\n- fl = 'key,name'\n+ fl = 'key,name,work_count'\n query = 'name:({q}*)'\n+ sort = 'work_count desc'\n \n def GET(self):\n i = web.input(type=\"\")\n", "issue": "Subject tag autocomplete no longer sorts by frequency\nWhen I developed autocomplete for subject tags, the results returned were automatically sorted by frequency \u2014 the most used subjects that matched were first on the list. This not only produced useful results, but helped concentrate the usage of particular subjects where multiple similar labels exist for a single topic, improving the quality of subject pages.\r\n\r\nIn subsequent refactoring of autocomplete support, this frequency sorting was lost.\r\n\r\n### Steps to Reproduce\r\nEdit a work and start typing in one of the subject fields to display the list of matching subjects.\r\n\r\n<!-- What actually happened after these steps? What did you expect to happen? -->\r\n* Actual: Subject suggestions are sorted in an apparently random order\r\n* Expected: Subjects should be sorted with the \"better\" (more frequently used) choices at the top.\r\n\r\n### Proposal & Constraints\r\nI will be submitting a PR that restores the SOLR query that produces the frequency-sorted results.\r\n\r\n### Stakeholders\r\n@mheiman @cdrini \r\n\n", "before_files": [{"content": "import itertools\nimport web\nimport json\n\n\nfrom infogami.utils import delegate\nfrom infogami.utils.view import safeint\nfrom openlibrary.core.models import Thing\nfrom openlibrary.plugins.upstream import utils\nfrom openlibrary.plugins.worksearch.search import get_solr\nfrom openlibrary.utils import (\n find_olid_in_string,\n olid_to_key,\n)\n\n\ndef to_json(d):\n web.header('Content-Type', 'application/json')\n return delegate.RawText(json.dumps(d))\n\n\nclass autocomplete(delegate.page):\n path = \"/_autocomplete\"\n fq = ['-type:edition']\n fl = 'key,type,name,title,score'\n olid_suffix: str | None = None\n query = 'title:\"{q}\"^2 OR title:({q}*) OR name:\"{q}\"^2 OR name:({q}*)'\n\n def db_fetch(self, key: str) -> Thing | None:\n if thing := web.ctx.site.get(key):\n return thing.as_fake_solr_record()\n else:\n return None\n\n def doc_wrap(self, doc: dict):\n \"\"\"Modify the returned solr document in place.\"\"\"\n if 'name' not in doc:\n doc['name'] = doc.get('title')\n\n def doc_filter(self, doc: dict) -> bool:\n \"\"\"Exclude certain documents\"\"\"\n return True\n\n def GET(self):\n return self.direct_get()\n\n def direct_get(self, fq: list[str] | None = None):\n i = web.input(q=\"\", limit=5)\n i.limit = safeint(i.limit, 5)\n\n solr = get_solr()\n\n # look for ID in query string here\n q = solr.escape(i.q).strip()\n embedded_olid = None\n 
if self.olid_suffix:\n embedded_olid = find_olid_in_string(q, self.olid_suffix)\n\n if embedded_olid:\n solr_q = f'key:\"{olid_to_key(embedded_olid)}\"'\n else:\n solr_q = self.query.format(q=q)\n\n fq = fq or self.fq\n params = {\n 'q_op': 'AND',\n 'rows': i.limit,\n **({'fq': fq} if fq else {}),\n # limit the fields returned for better performance\n 'fl': self.fl,\n }\n\n data = solr.select(solr_q, **params)\n docs = data['docs']\n\n if embedded_olid and not docs:\n # Grumble! Work not in solr yet. Create a dummy.\n fake_doc = self.db_fetch(olid_to_key(embedded_olid))\n if fake_doc:\n docs = [fake_doc]\n\n result_docs = []\n for d in docs:\n if self.doc_filter(d):\n self.doc_wrap(d)\n result_docs.append(d)\n\n return to_json(result_docs)\n\n\nclass languages_autocomplete(delegate.page):\n path = \"/languages/_autocomplete\"\n\n def GET(self):\n i = web.input(q=\"\", limit=5)\n i.limit = safeint(i.limit, 5)\n return to_json(\n list(itertools.islice(utils.autocomplete_languages(i.q), i.limit))\n )\n\n\nclass works_autocomplete(autocomplete):\n path = \"/works/_autocomplete\"\n fq = ['type:work']\n fl = 'key,title,subtitle,cover_i,first_publish_year,author_name,edition_count'\n olid_suffix = 'W'\n query = 'title:\"{q}\"^2 OR title:({q}*)'\n\n def doc_filter(self, doc: dict) -> bool:\n # Exclude orphaned editions from autocomplete results\n # Note: Do this here instead of with an `fq=key:*W` for performance\n # reasons.\n return doc['key'][-1] == 'W'\n\n def doc_wrap(self, doc: dict):\n doc['full_title'] = doc['title']\n if 'subtitle' in doc:\n doc['full_title'] += \": \" + doc['subtitle']\n doc['name'] = doc.get('title')\n\n\nclass authors_autocomplete(autocomplete):\n path = \"/authors/_autocomplete\"\n fq = ['type:author']\n fl = 'key,name,alternate_names,birth_date,death_date,work_count,top_work,top_subjects'\n olid_suffix = 'A'\n query = 'name:({q}*) OR alternate_names:({q}*) OR name:\"{q}\"^2 OR alternate_names:\"{q}\"^2'\n\n def doc_wrap(self, doc: dict):\n if 'top_work' in doc:\n doc['works'] = [doc.pop('top_work')]\n else:\n doc['works'] = []\n doc['subjects'] = doc.pop('top_subjects', [])\n\n\nclass subjects_autocomplete(autocomplete):\n # can't use /subjects/_autocomplete because the subjects endpoint = /subjects/[^/]+\n path = \"/subjects_autocomplete\"\n fq = ['type:subject']\n fl = 'key,name'\n query = 'name:({q}*)'\n\n def GET(self):\n i = web.input(type=\"\")\n fq = self.fq\n if i.type:\n fq = fq + [f'subject_type:{i.type}']\n\n return super().direct_get(fq=fq)\n\n\ndef setup():\n \"\"\"Do required setup.\"\"\"\n pass\n", "path": "openlibrary/plugins/worksearch/autocomplete.py"}], "after_files": [{"content": "import itertools\nimport web\nimport json\n\n\nfrom infogami.utils import delegate\nfrom infogami.utils.view import safeint\nfrom openlibrary.core.models import Thing\nfrom openlibrary.plugins.upstream import utils\nfrom openlibrary.plugins.worksearch.search import get_solr\nfrom openlibrary.utils import (\n find_olid_in_string,\n olid_to_key,\n)\n\n\ndef to_json(d):\n web.header('Content-Type', 'application/json')\n return delegate.RawText(json.dumps(d))\n\n\nclass autocomplete(delegate.page):\n path = \"/_autocomplete\"\n fq = ['-type:edition']\n fl = 'key,type,name,title,score'\n olid_suffix: str | None = None\n sort: str | None = None\n query = 'title:\"{q}\"^2 OR title:({q}*) OR name:\"{q}\"^2 OR name:({q}*)'\n\n def db_fetch(self, key: str) -> Thing | None:\n if thing := web.ctx.site.get(key):\n return thing.as_fake_solr_record()\n else:\n return None\n\n def 
doc_wrap(self, doc: dict):\n \"\"\"Modify the returned solr document in place.\"\"\"\n if 'name' not in doc:\n doc['name'] = doc.get('title')\n\n def doc_filter(self, doc: dict) -> bool:\n \"\"\"Exclude certain documents\"\"\"\n return True\n\n def GET(self):\n return self.direct_get()\n\n def direct_get(self, fq: list[str] | None = None):\n i = web.input(q=\"\", limit=5)\n i.limit = safeint(i.limit, 5)\n\n solr = get_solr()\n\n # look for ID in query string here\n q = solr.escape(i.q).strip()\n embedded_olid = None\n if self.olid_suffix:\n embedded_olid = find_olid_in_string(q, self.olid_suffix)\n\n if embedded_olid:\n solr_q = f'key:\"{olid_to_key(embedded_olid)}\"'\n else:\n solr_q = self.query.format(q=q)\n\n fq = fq or self.fq\n params = {\n 'q_op': 'AND',\n 'rows': i.limit,\n **({'fq': fq} if fq else {}),\n # limit the fields returned for better performance\n 'fl': self.fl,\n **({'sort': self.sort} if self.sort else {}),\n }\n\n data = solr.select(solr_q, **params)\n docs = data['docs']\n\n if embedded_olid and not docs:\n # Grumble! Work not in solr yet. Create a dummy.\n fake_doc = self.db_fetch(olid_to_key(embedded_olid))\n if fake_doc:\n docs = [fake_doc]\n\n result_docs = []\n for d in docs:\n if self.doc_filter(d):\n self.doc_wrap(d)\n result_docs.append(d)\n\n return to_json(result_docs)\n\n\nclass languages_autocomplete(delegate.page):\n path = \"/languages/_autocomplete\"\n\n def GET(self):\n i = web.input(q=\"\", limit=5)\n i.limit = safeint(i.limit, 5)\n return to_json(\n list(itertools.islice(utils.autocomplete_languages(i.q), i.limit))\n )\n\n\nclass works_autocomplete(autocomplete):\n path = \"/works/_autocomplete\"\n fq = ['type:work']\n fl = 'key,title,subtitle,cover_i,first_publish_year,author_name,edition_count'\n olid_suffix = 'W'\n query = 'title:\"{q}\"^2 OR title:({q}*)'\n\n def doc_filter(self, doc: dict) -> bool:\n # Exclude orphaned editions from autocomplete results\n # Note: Do this here instead of with an `fq=key:*W` for performance\n # reasons.\n return doc['key'][-1] == 'W'\n\n def doc_wrap(self, doc: dict):\n doc['full_title'] = doc['title']\n if 'subtitle' in doc:\n doc['full_title'] += \": \" + doc['subtitle']\n doc['name'] = doc.get('title')\n\n\nclass authors_autocomplete(autocomplete):\n path = \"/authors/_autocomplete\"\n fq = ['type:author']\n fl = 'key,name,alternate_names,birth_date,death_date,work_count,top_work,top_subjects'\n olid_suffix = 'A'\n query = 'name:({q}*) OR alternate_names:({q}*) OR name:\"{q}\"^2 OR alternate_names:\"{q}\"^2'\n\n def doc_wrap(self, doc: dict):\n if 'top_work' in doc:\n doc['works'] = [doc.pop('top_work')]\n else:\n doc['works'] = []\n doc['subjects'] = doc.pop('top_subjects', [])\n\n\nclass subjects_autocomplete(autocomplete):\n # can't use /subjects/_autocomplete because the subjects endpoint = /subjects/[^/]+\n path = \"/subjects_autocomplete\"\n fq = ['type:subject']\n fl = 'key,name,work_count'\n query = 'name:({q}*)'\n sort = 'work_count desc'\n\n def GET(self):\n i = web.input(type=\"\")\n fq = self.fq\n if i.type:\n fq = fq + [f'subject_type:{i.type}']\n\n return super().direct_get(fq=fq)\n\n\ndef setup():\n \"\"\"Do required setup.\"\"\"\n pass\n", "path": "openlibrary/plugins/worksearch/autocomplete.py"}]} | 1,979 | 319 |
gh_patches_debug_11597 | rasdani/github-patches | git_diff | fedora-infra__bodhi-4037 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Side tag updates are not automatically pushed to testing
When I create a side tag update for a stable Fedora, it sits in pending until I manually click "push to testing". This is inconsistent with regular updates and a tad confusing.
See also https://pagure.io/fedora-infrastructure/issue/8930
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `bodhi/server/consumers/signed.py`
Content:
```
1 # Copyright © 2016-2019 Red Hat, Inc.
2 #
3 # This file is part of Bodhi.
4 #
5 # This program is free software; you can redistribute it and/or
6 # modify it under the terms of the GNU General Public License
7 # as published by the Free Software Foundation; either version 2
8 # of the License, or (at your option) any later version.
9 #
10 # This program is distributed in the hope that it will be useful,
11 # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 # GNU General Public License for more details.
14 #
15 # You should have received a copy of the GNU General Public License along with
16 # this program; if not, write to the Free Software Foundation, Inc., 51
17 # Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
18 """
19 The "signed handler".
20
21 This module is responsible for marking builds as "signed" when they get moved
22 from the pending-signing to pending-updates-testing tag by RoboSignatory.
23 """
24
25 import logging
26
27 import fedora_messaging
28 from sqlalchemy import func
29
30 from bodhi.server.config import config
31 from bodhi.server.models import Build, UpdateStatus, TestGatingStatus
32 from bodhi.server.util import transactional_session_maker
33
34 log = logging.getLogger('bodhi')
35
36
37 class SignedHandler(object):
38 """
39 The Bodhi Signed Handler.
40
41 A fedora-messaging listener waiting for messages from koji about builds being tagged.
42 """
43
44 def __init__(self):
45 """Initialize the SignedHandler."""
46 self.db_factory = transactional_session_maker()
47
48 def __call__(self, message: fedora_messaging.api.Message):
49 """
50 Handle messages arriving with the configured topic.
51
52 This marks a build as signed if it is assigned to the pending testing release tag.
53
54 Example message format::
55 {
56 'body': {
57 'build_id': 442562,
58 'name': 'colord',
59 'tag_id': 214,
60 'instance': 's390',
61 'tag': 'f26-updates-testing-pending',
62 'user': 'sharkcz',
63 'version': '1.3.4',
64 'owner': 'sharkcz',
65 'release': '1.fc26'
66 },
67 }
68
69 The message can contain additional keys.
70
71 Duplicate messages: this method is idempotent.
72
73 Args:
74 message: The incoming message in the format described above.
75 """
76 message = message.body
77 build_nvr = '%(name)s-%(version)s-%(release)s' % message
78 tag = message['tag']
79
80 log.info("%s tagged into %s" % (build_nvr, tag))
81
82 with self.db_factory() as dbsession:
83 build = Build.get(build_nvr)
84 if not build:
85 log.info("Build was not submitted, skipping")
86 return
87
88 if not build.release:
89 log.info('Build is not assigned to release, skipping')
90 return
91
92 if build.update and build.update.from_tag:
93 koji_testing_tag = build.release.get_testing_side_tag(build.update.from_tag)
94 if tag != koji_testing_tag:
95 log.info("Tag is not testing side tag, skipping")
96 return
97 else:
98 if build.release.pending_testing_tag != tag:
99 log.info("Tag is not pending_testing tag, skipping")
100 return
101
102 if build.signed:
103 log.info("Build was already marked as signed (maybe a duplicate message)")
104 return
105
106 # This build was moved into the pending_testing tag for the applicable release, which
107 # is done by RoboSignatory to indicate that the build has been correctly signed and
108 # written out. Mark it as such.
109 log.info("Build has been signed, marking")
110 build.signed = True
111 dbsession.flush()
112 log.info("Build %s has been marked as signed" % build_nvr)
113
114 # If every build in update is signed change status to testing
115 if build.update \
116 and not build.update.release.composed_by_bodhi \
117 and build.update.signed:
118 log.info("Every build in update is signed, set status to testing")
119
120 build.update.status = UpdateStatus.testing
121 build.update.date_testing = func.current_timestamp()
122 build.update.request = None
123 build.update.pushed = True
124
125 if config.get("test_gating.required"):
126 log.debug('Test gating is required, marking the update as waiting on test '
127 'gating and updating it from Greenwave to get the real status.')
128 build.update.test_gating_status = TestGatingStatus.waiting
129 build.update.update_test_gating_status()
130
131 log.info(f"Update {build.update.display_name} status has been set to testing")
132
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/bodhi/server/consumers/signed.py b/bodhi/server/consumers/signed.py
--- a/bodhi/server/consumers/signed.py
+++ b/bodhi/server/consumers/signed.py
@@ -89,7 +89,9 @@
log.info('Build is not assigned to release, skipping')
return
- if build.update and build.update.from_tag:
+ if build.update \
+ and build.update.from_tag \
+ and not build.update.release.composed_by_bodhi:
koji_testing_tag = build.release.get_testing_side_tag(build.update.from_tag)
if tag != koji_testing_tag:
log.info("Tag is not testing side tag, skipping")
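
A toy, self-contained restatement of the guard this hunk changes, useful for seeing which koji tag the signed handler accepts after the patch. It is not Bodhi's actual model code, and the tag names below are invented for illustration; only the boolean logic mirrors the patched condition.

```python
from typing import Optional


def expected_signed_tag(pending_testing_tag: str,
                        side_tag_testing_tag: str,
                        from_tag: Optional[str],
                        composed_by_bodhi: bool) -> str:
    """Return the koji tag the signed handler should accept for a build.

    After the patch, the side tag's testing tag is only expected for side-tag
    updates on releases that are *not* composed by Bodhi; every other update,
    including side-tag updates on stable releases, is checked against the
    release's normal pending_testing tag.
    """
    if from_tag and not composed_by_bodhi:
        return side_tag_testing_tag
    return pending_testing_tag


# Side-tag update on a release Bodhi does not compose: still uses the side tag's testing tag.
assert expected_signed_tag("f35-updates-testing-pending",
                           "f35-build-side-1234-testing-pending",
                           "f35-build-side-1234",
                           composed_by_bodhi=False) == "f35-build-side-1234-testing-pending"

# Side-tag update on a stable, Bodhi-composed release: the handler now checks the
# release's regular pending_testing tag, the same as any other update.
assert expected_signed_tag("f35-updates-testing-pending",
                           "f35-build-side-1234-testing-pending",
                           "f35-build-side-1234",
                           composed_by_bodhi=True) == "f35-updates-testing-pending"
```
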
| {"golden_diff": "diff --git a/bodhi/server/consumers/signed.py b/bodhi/server/consumers/signed.py\n--- a/bodhi/server/consumers/signed.py\n+++ b/bodhi/server/consumers/signed.py\n@@ -89,7 +89,9 @@\n log.info('Build is not assigned to release, skipping')\n return\n \n- if build.update and build.update.from_tag:\n+ if build.update \\\n+ and build.update.from_tag \\\n+ and not build.update.release.composed_by_bodhi:\n koji_testing_tag = build.release.get_testing_side_tag(build.update.from_tag)\n if tag != koji_testing_tag:\n log.info(\"Tag is not testing side tag, skipping\")\n", "issue": "Side tag updates are not automatically pushed to testing \nWhen I create a side tag update for a stable Fedora, it sits in pending until I manually click \"push to testing\". This is inconsistent with regular updates and a tad confusing.\r\n\r\nSee also https://pagure.io/fedora-infrastructure/issue/8930\n", "before_files": [{"content": "# Copyright \u00a9 2016-2019 Red Hat, Inc.\n#\n# This file is part of Bodhi.\n#\n# This program is free software; you can redistribute it and/or\n# modify it under the terms of the GNU General Public License\n# as published by the Free Software Foundation; either version 2\n# of the License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License along with\n# this program; if not, write to the Free Software Foundation, Inc., 51\n# Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.\n\"\"\"\nThe \"signed handler\".\n\nThis module is responsible for marking builds as \"signed\" when they get moved\nfrom the pending-signing to pending-updates-testing tag by RoboSignatory.\n\"\"\"\n\nimport logging\n\nimport fedora_messaging\nfrom sqlalchemy import func\n\nfrom bodhi.server.config import config\nfrom bodhi.server.models import Build, UpdateStatus, TestGatingStatus\nfrom bodhi.server.util import transactional_session_maker\n\nlog = logging.getLogger('bodhi')\n\n\nclass SignedHandler(object):\n \"\"\"\n The Bodhi Signed Handler.\n\n A fedora-messaging listener waiting for messages from koji about builds being tagged.\n \"\"\"\n\n def __init__(self):\n \"\"\"Initialize the SignedHandler.\"\"\"\n self.db_factory = transactional_session_maker()\n\n def __call__(self, message: fedora_messaging.api.Message):\n \"\"\"\n Handle messages arriving with the configured topic.\n\n This marks a build as signed if it is assigned to the pending testing release tag.\n\n Example message format::\n {\n 'body': {\n 'build_id': 442562,\n 'name': 'colord',\n 'tag_id': 214,\n 'instance': 's390',\n 'tag': 'f26-updates-testing-pending',\n 'user': 'sharkcz',\n 'version': '1.3.4',\n 'owner': 'sharkcz',\n 'release': '1.fc26'\n },\n }\n\n The message can contain additional keys.\n\n Duplicate messages: this method is idempotent.\n\n Args:\n message: The incoming message in the format described above.\n \"\"\"\n message = message.body\n build_nvr = '%(name)s-%(version)s-%(release)s' % message\n tag = message['tag']\n\n log.info(\"%s tagged into %s\" % (build_nvr, tag))\n\n with self.db_factory() as dbsession:\n build = Build.get(build_nvr)\n if not build:\n log.info(\"Build was not submitted, skipping\")\n return\n\n if not build.release:\n log.info('Build is not assigned to release, skipping')\n 
return\n\n if build.update and build.update.from_tag:\n koji_testing_tag = build.release.get_testing_side_tag(build.update.from_tag)\n if tag != koji_testing_tag:\n log.info(\"Tag is not testing side tag, skipping\")\n return\n else:\n if build.release.pending_testing_tag != tag:\n log.info(\"Tag is not pending_testing tag, skipping\")\n return\n\n if build.signed:\n log.info(\"Build was already marked as signed (maybe a duplicate message)\")\n return\n\n # This build was moved into the pending_testing tag for the applicable release, which\n # is done by RoboSignatory to indicate that the build has been correctly signed and\n # written out. Mark it as such.\n log.info(\"Build has been signed, marking\")\n build.signed = True\n dbsession.flush()\n log.info(\"Build %s has been marked as signed\" % build_nvr)\n\n # If every build in update is signed change status to testing\n if build.update \\\n and not build.update.release.composed_by_bodhi \\\n and build.update.signed:\n log.info(\"Every build in update is signed, set status to testing\")\n\n build.update.status = UpdateStatus.testing\n build.update.date_testing = func.current_timestamp()\n build.update.request = None\n build.update.pushed = True\n\n if config.get(\"test_gating.required\"):\n log.debug('Test gating is required, marking the update as waiting on test '\n 'gating and updating it from Greenwave to get the real status.')\n build.update.test_gating_status = TestGatingStatus.waiting\n build.update.update_test_gating_status()\n\n log.info(f\"Update {build.update.display_name} status has been set to testing\")\n", "path": "bodhi/server/consumers/signed.py"}], "after_files": [{"content": "# Copyright \u00a9 2016-2019 Red Hat, Inc.\n#\n# This file is part of Bodhi.\n#\n# This program is free software; you can redistribute it and/or\n# modify it under the terms of the GNU General Public License\n# as published by the Free Software Foundation; either version 2\n# of the License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License along with\n# this program; if not, write to the Free Software Foundation, Inc., 51\n# Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.\n\"\"\"\nThe \"signed handler\".\n\nThis module is responsible for marking builds as \"signed\" when they get moved\nfrom the pending-signing to pending-updates-testing tag by RoboSignatory.\n\"\"\"\n\nimport logging\n\nimport fedora_messaging\nfrom sqlalchemy import func\n\nfrom bodhi.server.config import config\nfrom bodhi.server.models import Build, UpdateStatus, TestGatingStatus\nfrom bodhi.server.util import transactional_session_maker\n\nlog = logging.getLogger('bodhi')\n\n\nclass SignedHandler(object):\n \"\"\"\n The Bodhi Signed Handler.\n\n A fedora-messaging listener waiting for messages from koji about builds being tagged.\n \"\"\"\n\n def __init__(self):\n \"\"\"Initialize the SignedHandler.\"\"\"\n self.db_factory = transactional_session_maker()\n\n def __call__(self, message: fedora_messaging.api.Message):\n \"\"\"\n Handle messages arriving with the configured topic.\n\n This marks a build as signed if it is assigned to the pending testing release tag.\n\n Example message format::\n {\n 'body': {\n 'build_id': 442562,\n 'name': 'colord',\n 'tag_id': 214,\n 'instance': 's390',\n 'tag': 'f26-updates-testing-pending',\n 'user': 'sharkcz',\n 'version': '1.3.4',\n 'owner': 'sharkcz',\n 'release': '1.fc26'\n },\n }\n\n The message can contain additional keys.\n\n Duplicate messages: this method is idempotent.\n\n Args:\n message: The incoming message in the format described above.\n \"\"\"\n message = message.body\n build_nvr = '%(name)s-%(version)s-%(release)s' % message\n tag = message['tag']\n\n log.info(\"%s tagged into %s\" % (build_nvr, tag))\n\n with self.db_factory() as dbsession:\n build = Build.get(build_nvr)\n if not build:\n log.info(\"Build was not submitted, skipping\")\n return\n\n if not build.release:\n log.info('Build is not assigned to release, skipping')\n return\n\n if build.update \\\n and build.update.from_tag \\\n and not build.update.release.composed_by_bodhi:\n koji_testing_tag = build.release.get_testing_side_tag(build.update.from_tag)\n if tag != koji_testing_tag:\n log.info(\"Tag is not testing side tag, skipping\")\n return\n else:\n if build.release.pending_testing_tag != tag:\n log.info(\"Tag is not pending_testing tag, skipping\")\n return\n\n if build.signed:\n log.info(\"Build was already marked as signed (maybe a duplicate message)\")\n return\n\n # This build was moved into the pending_testing tag for the applicable release, which\n # is done by RoboSignatory to indicate that the build has been correctly signed and\n # written out. 
Mark it as such.\n log.info(\"Build has been signed, marking\")\n build.signed = True\n dbsession.flush()\n log.info(\"Build %s has been marked as signed\" % build_nvr)\n\n # If every build in update is signed change status to testing\n if build.update \\\n and not build.update.release.composed_by_bodhi \\\n and build.update.signed:\n log.info(\"Every build in update is signed, set status to testing\")\n\n build.update.status = UpdateStatus.testing\n build.update.date_testing = func.current_timestamp()\n build.update.request = None\n build.update.pushed = True\n\n if config.get(\"test_gating.required\"):\n log.debug('Test gating is required, marking the update as waiting on test '\n 'gating and updating it from Greenwave to get the real status.')\n build.update.test_gating_status = TestGatingStatus.waiting\n build.update.update_test_gating_status()\n\n log.info(f\"Update {build.update.display_name} status has been set to testing\")\n", "path": "bodhi/server/consumers/signed.py"}]} | 1,666 | 156 |
gh_patches_debug_22750 | rasdani/github-patches | git_diff | mitmproxy__mitmproxy-4761 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Cannot inject websocket with binary or string content.
#### Problem Description
mitmproxy attempts to parse the injected websocket binary data as text, and replaces the binary data if it cannot be decoded into text, changing the injected websocket message. Also, when a string is given instead, there is an error:
File "\mitmproxy\proxy\layers\websocket.py", line 230, in msg
data_str = data.decode(errors="replace")
AttributeError: 'str' object has no attribute 'decode'
#### Steps to reproduce the behavior:
1. from mitmproxy.ctx import master
2. master.commands.call("inject.websocket", flow, False, bytes([i for i in range(256)])) # No error, but message contents are changed
3. master.commands.call("inject.websocket", flow, False, "string") # AttributeError
#### System Information
Mitmproxy: 7.0.2
Python: 3.9.6
OpenSSL: OpenSSL 1.1.1k 25 Mar 2021
Platform: Windows-10-10.0.19043-SP0
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `examples/addons/websocket-inject-message.py`
Content:
```
1 """
2 Inject a WebSocket message into a running connection.
3
4 This example shows how to inject a WebSocket message into a running connection.
5 """
6 import asyncio
7
8 from mitmproxy import ctx, http
9
10
11 # Simple example: Inject a message as a response to an event
12
13 def websocket_message(flow: http.HTTPFlow):
14 assert flow.websocket is not None # make type checker happy
15 last_message = flow.websocket.messages[-1]
16 if b"secret" in last_message.content:
17 last_message.drop()
18 ctx.master.commands.call("inject.websocket", flow, last_message.from_client, "ssssssh")
19
20
21 # Complex example: Schedule a periodic timer
22
23 async def inject_async(flow: http.HTTPFlow):
24 msg = "hello from mitmproxy! "
25 assert flow.websocket is not None # make type checker happy
26 while flow.websocket.timestamp_end is None:
27 ctx.master.commands.call("inject.websocket", flow, True, msg)
28 await asyncio.sleep(1)
29 msg = msg[1:] + msg[:1]
30
31
32 def websocket_start(flow: http.HTTPFlow):
33 asyncio.create_task(inject_async(flow))
34
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/examples/addons/websocket-inject-message.py b/examples/addons/websocket-inject-message.py
--- a/examples/addons/websocket-inject-message.py
+++ b/examples/addons/websocket-inject-message.py
@@ -13,9 +13,9 @@
def websocket_message(flow: http.HTTPFlow):
assert flow.websocket is not None # make type checker happy
last_message = flow.websocket.messages[-1]
- if b"secret" in last_message.content:
+ if last_message.is_text and "secret" in last_message.text:
last_message.drop()
- ctx.master.commands.call("inject.websocket", flow, last_message.from_client, "ssssssh")
+ ctx.master.commands.call("inject.websocket", flow, last_message.from_client, "ssssssh".encode())
# Complex example: Schedule a periodic timer
@@ -24,7 +24,7 @@
msg = "hello from mitmproxy! "
assert flow.websocket is not None # make type checker happy
while flow.websocket.timestamp_end is None:
- ctx.master.commands.call("inject.websocket", flow, True, msg)
+ ctx.master.commands.call("inject.websocket", flow, True, msg.encode())
await asyncio.sleep(1)
msg = msg[1:] + msg[:1]
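
A small sketch of how an addon can avoid handing `str` objects to `inject.websocket`, which is the change the patch above makes to the bundled example: normalize the payload to bytes before calling the command. The helper name, the "ping"/"pong" trigger text, and the injected reply are made up for illustration; the command call shape and the message attributes (`from_client`, `is_text`, `text`) are the ones used in the example file.

```python
"""Illustrative addon snippet (not part of mitmproxy): always pass bytes to inject.websocket."""
from typing import Union

from mitmproxy import ctx, http


def to_ws_payload(data: Union[str, bytes]) -> bytes:
    # The command expects raw bytes; encode text, leave binary payloads untouched.
    return data if isinstance(data, bytes) else data.encode("utf-8")


def websocket_message(flow: http.HTTPFlow) -> None:
    assert flow.websocket is not None  # make type checker happy
    last_message = flow.websocket.messages[-1]
    if last_message.from_client and last_message.is_text and "ping" in last_message.text:
        # Same call shape as the patched example above; the payload handed over is always bytes,
        # whether the reply is text or raw binary such as bytes(range(256)).
        ctx.master.commands.call(
            "inject.websocket", flow, True, to_ws_payload("pong from the addon")
        )
```
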
| {"golden_diff": "diff --git a/examples/addons/websocket-inject-message.py b/examples/addons/websocket-inject-message.py\n--- a/examples/addons/websocket-inject-message.py\n+++ b/examples/addons/websocket-inject-message.py\n@@ -13,9 +13,9 @@\n def websocket_message(flow: http.HTTPFlow):\n assert flow.websocket is not None # make type checker happy\n last_message = flow.websocket.messages[-1]\n- if b\"secret\" in last_message.content:\n+ if last_message.is_text and \"secret\" in last_message.text:\n last_message.drop()\n- ctx.master.commands.call(\"inject.websocket\", flow, last_message.from_client, \"ssssssh\")\n+ ctx.master.commands.call(\"inject.websocket\", flow, last_message.from_client, \"ssssssh\".encode())\n \n \n # Complex example: Schedule a periodic timer\n@@ -24,7 +24,7 @@\n msg = \"hello from mitmproxy! \"\n assert flow.websocket is not None # make type checker happy\n while flow.websocket.timestamp_end is None:\n- ctx.master.commands.call(\"inject.websocket\", flow, True, msg)\n+ ctx.master.commands.call(\"inject.websocket\", flow, True, msg.encode())\n await asyncio.sleep(1)\n msg = msg[1:] + msg[:1]\n", "issue": "Cannot inject websocket with binary or string content.\n#### Problem Description\r\nmitmproxy attempts to parse the injected websocket binary data as text, and replaces the binary data if it cannot be decoded into text, changing the injected websocket message. Also, when a string is given instead, there is error:\r\nFile \"\\mitmproxy\\proxy\\layers\\websocket.py\", line 230, in msg\r\n data_str = data.decode(errors=\"replace\")\r\nAttributeError: 'str' object has no attribute 'decode'\r\n\r\n#### Steps to reproduce the behavior:\r\n1. from mitmproxy.ctx import master\r\n2. master.commands.call(\"inject.websocket\", flow, False, bytes([i for i in range(256)])) # No error, but message contents are changed\r\n3. master.commands.call(\"inject.websocket\", flow, False, \"string\") # AttributeError\r\n\r\n#### System Information\r\nMitmproxy: 7.0.2\r\nPython: 3.9.6\r\nOpenSSL: OpenSSL 1.1.1k 25 Mar 2021\r\nPlatform: Windows-10-10.0.19043-SP0\r\n\n", "before_files": [{"content": "\"\"\"\nInject a WebSocket message into a running connection.\n\nThis example shows how to inject a WebSocket message into a running connection.\n\"\"\"\nimport asyncio\n\nfrom mitmproxy import ctx, http\n\n\n# Simple example: Inject a message as a response to an event\n\ndef websocket_message(flow: http.HTTPFlow):\n assert flow.websocket is not None # make type checker happy\n last_message = flow.websocket.messages[-1]\n if b\"secret\" in last_message.content:\n last_message.drop()\n ctx.master.commands.call(\"inject.websocket\", flow, last_message.from_client, \"ssssssh\")\n\n\n# Complex example: Schedule a periodic timer\n\nasync def inject_async(flow: http.HTTPFlow):\n msg = \"hello from mitmproxy! 
\"\n assert flow.websocket is not None # make type checker happy\n while flow.websocket.timestamp_end is None:\n ctx.master.commands.call(\"inject.websocket\", flow, True, msg)\n await asyncio.sleep(1)\n msg = msg[1:] + msg[:1]\n\n\ndef websocket_start(flow: http.HTTPFlow):\n asyncio.create_task(inject_async(flow))\n", "path": "examples/addons/websocket-inject-message.py"}], "after_files": [{"content": "\"\"\"\nInject a WebSocket message into a running connection.\n\nThis example shows how to inject a WebSocket message into a running connection.\n\"\"\"\nimport asyncio\n\nfrom mitmproxy import ctx, http\n\n\n# Simple example: Inject a message as a response to an event\n\ndef websocket_message(flow: http.HTTPFlow):\n assert flow.websocket is not None # make type checker happy\n last_message = flow.websocket.messages[-1]\n if last_message.is_text and \"secret\" in last_message.text:\n last_message.drop()\n ctx.master.commands.call(\"inject.websocket\", flow, last_message.from_client, \"ssssssh\".encode())\n\n\n# Complex example: Schedule a periodic timer\n\nasync def inject_async(flow: http.HTTPFlow):\n msg = \"hello from mitmproxy! \"\n assert flow.websocket is not None # make type checker happy\n while flow.websocket.timestamp_end is None:\n ctx.master.commands.call(\"inject.websocket\", flow, True, msg.encode())\n await asyncio.sleep(1)\n msg = msg[1:] + msg[:1]\n\n\ndef websocket_start(flow: http.HTTPFlow):\n asyncio.create_task(inject_async(flow))\n", "path": "examples/addons/websocket-inject-message.py"}]} | 805 | 280 |
gh_patches_debug_35044 | rasdani/github-patches | git_diff | readthedocs__readthedocs.org-4771 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Remove /embed endpoint
Similar to #4731
https://github.com/rtfd/readthedocs.org/blob/604e1fa0374a0ba1b242b88e6f242eff77b7b6c3/readthedocs/restapi/urls.py#L66-L66
Raised in https://github.com/rtfd/readthedocs.org/pull/4731/files#r222783446
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `readthedocs/restapi/urls.py`
Content:
```
1 # -*- coding: utf-8 -*-
2
3 """Define routes between URL paths and views/endpoints."""
4
5 from __future__ import (
6 absolute_import,
7 division,
8 print_function,
9 unicode_literals,
10 )
11
12 from django.conf import settings
13 from django.conf.urls import include, url
14 from rest_framework import routers
15
16 from readthedocs.constants import pattern_opts
17 from readthedocs.restapi import views
18 from readthedocs.restapi.views import (
19 core_views,
20 footer_views,
21 integrations,
22 search_views,
23 task_views,
24 )
25
26 from .views.model_views import (
27 BuildCommandViewSet,
28 BuildViewSet,
29 DomainViewSet,
30 NotificationViewSet,
31 ProjectViewSet,
32 RemoteOrganizationViewSet,
33 RemoteRepositoryViewSet,
34 SocialAccountViewSet,
35 VersionViewSet,
36 )
37
38 router = routers.DefaultRouter()
39 router.register(r'build', BuildViewSet, base_name='build')
40 router.register(r'command', BuildCommandViewSet, base_name='buildcommandresult')
41 router.register(r'version', VersionViewSet, base_name='version')
42 router.register(r'project', ProjectViewSet, base_name='project')
43 router.register(r'notification', NotificationViewSet, base_name='emailhook')
44 router.register(r'domain', DomainViewSet, base_name='domain')
45 router.register(
46 r'remote/org',
47 RemoteOrganizationViewSet,
48 base_name='remoteorganization',
49 )
50 router.register(
51 r'remote/repo',
52 RemoteRepositoryViewSet,
53 base_name='remoterepository',
54 )
55 router.register(
56 r'remote/account',
57 SocialAccountViewSet,
58 base_name='remoteaccount',
59 )
60
61 urlpatterns = [
62 url(r'^', include(router.urls)),
63 ]
64
65 function_urls = [
66 url(r'embed/', core_views.embed, name='embed'),
67 url(r'docurl/', core_views.docurl, name='docurl'),
68 url(r'footer_html/', footer_views.footer_html, name='footer_html'),
69 ]
70
71 search_urls = [
72 url(
73 r'index_search/',
74 search_views.index_search,
75 name='index_search',
76 ),
77 url(r'search/$', views.search_views.search, name='api_search'),
78 url(
79 r'search/project/$',
80 search_views.project_search,
81 name='api_project_search',
82 ),
83 url(
84 r'search/section/$',
85 search_views.section_search,
86 name='api_section_search',
87 ),
88 ]
89
90 task_urls = [
91 url(
92 r'jobs/status/(?P<task_id>[^/]+)/',
93 task_views.job_status,
94 name='api_job_status',
95 ),
96 url(
97 r'jobs/sync-remote-repositories/',
98 task_views.sync_remote_repositories,
99 name='api_sync_remote_repositories',
100 ),
101 ]
102
103 integration_urls = [
104 url(
105 r'webhook/github/(?P<project_slug>{project_slug})/$'
106 .format(**pattern_opts),
107 integrations.GitHubWebhookView.as_view(),
108 name='api_webhook_github',
109 ),
110 url(
111 r'webhook/gitlab/(?P<project_slug>{project_slug})/$'
112 .format(**pattern_opts),
113 integrations.GitLabWebhookView.as_view(),
114 name='api_webhook_gitlab',
115 ),
116 url(
117 r'webhook/bitbucket/(?P<project_slug>{project_slug})/$'
118 .format(**pattern_opts),
119 integrations.BitbucketWebhookView.as_view(),
120 name='api_webhook_bitbucket',
121 ),
122 url(
123 r'webhook/generic/(?P<project_slug>{project_slug})/$'
124 .format(**pattern_opts),
125 integrations.APIWebhookView.as_view(),
126 name='api_webhook_generic',
127 ),
128 url(
129 (
130 r'webhook/(?P<project_slug>{project_slug})/'
131 r'(?P<integration_pk>{integer_pk})/$'.format(**pattern_opts)
132 ),
133 integrations.WebhookView.as_view(),
134 name='api_webhook',
135 ),
136 ]
137
138 urlpatterns += function_urls
139 urlpatterns += search_urls
140 urlpatterns += task_urls
141 urlpatterns += integration_urls
142
143 if 'readthedocsext.search' in settings.INSTALLED_APPS:
144 # pylint: disable=import-error
145 from readthedocsext.search.docsearch import DocSearch
146 api_search_urls = [
147 url(r'^docsearch/$', DocSearch.as_view(), name='doc_search'),
148 ]
149 urlpatterns += api_search_urls
150
151 if 'readthedocsext.donate' in settings.INSTALLED_APPS:
152 # pylint: disable=import-error
153 from readthedocsext.donate.restapi.urls import urlpatterns \
154 as sustainability_urls
155
156 urlpatterns += [
157 url(r'^sustainability/', include(sustainability_urls)),
158 ]
159
```
Path: `readthedocs/restapi/views/core_views.py`
Content:
```
1 """Utility endpoints relating to canonical urls, embedded content, etc."""
2
3 from __future__ import absolute_import
4
5 from rest_framework import decorators, permissions, status
6 from rest_framework.renderers import JSONRenderer
7 from rest_framework.response import Response
8
9 import json
10 import requests
11
12 from django.conf import settings
13 from django.core.cache import cache
14 from django.shortcuts import get_object_or_404
15
16 from readthedocs.builds.constants import LATEST
17 from readthedocs.builds.models import Version
18 from readthedocs.projects.models import Project
19 from readthedocs.core.templatetags.core_tags import make_document_url
20
21
22 @decorators.api_view(['GET'])
23 @decorators.permission_classes((permissions.AllowAny,))
24 @decorators.renderer_classes((JSONRenderer,))
25 def docurl(request):
26 """
27 Get the url that a slug resolves to.
28
29 Example::
30
31 GET https://readthedocs.org/api/v2/docurl/?project=requests&version=latest&doc=index
32
33 """
34 project = request.GET.get('project')
35 version = request.GET.get('version', LATEST)
36 doc = request.GET.get('doc', 'index')
37 if project is None:
38 return Response({'error': 'Need project and doc'}, status=status.HTTP_400_BAD_REQUEST)
39
40 project = get_object_or_404(Project, slug=project)
41 version = get_object_or_404(
42 Version.objects.public(request.user, project=project, only_active=False),
43 slug=version)
44 return Response({
45 'url': make_document_url(project=project, version=version.slug, page=doc)
46 })
47
48
49 @decorators.api_view(['GET'])
50 @decorators.permission_classes((permissions.AllowAny,))
51 @decorators.renderer_classes((JSONRenderer,))
52 def embed(request):
53 """
54 Embed a section of content from any Read the Docs page.
55
56 Returns headers and content that matches the queried section.
57
58 ### Arguments
59
60 * project (required)
61 * doc (required)
62 * version (default latest)
63 * section
64
65 ### Example
66
67         GET https://readthedocs.org/api/v2/embed/?project=requests&doc=index&section=User%20Guide
68
69 # Current Request
70 """
71 project = request.GET.get('project')
72 version = request.GET.get('version', LATEST)
73 doc = request.GET.get('doc')
74 section = request.GET.get('section')
75
76 if project is None or doc is None:
77 return Response({'error': 'Need project and doc'}, status=status.HTTP_400_BAD_REQUEST)
78
79 embed_cache = cache.get('embed:%s' % project)
80 if embed_cache:
81 embed = json.loads(embed_cache)
82 else:
83 try:
84 resp = requests.get(
85 '{host}/api/v1/embed/'.format(host=settings.GROK_API_HOST),
86 params={'project': project, 'version': version, 'doc': doc, 'section': section}
87 )
88 embed = resp.json()
89 cache.set('embed:%s' % project, resp.content, 1800)
90 except Exception as e:
91 return Response({'error': '%s' % e.msg}, status=status.HTTP_400_BAD_REQUEST)
92
93 return Response(embed)
94
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/readthedocs/restapi/urls.py b/readthedocs/restapi/urls.py
--- a/readthedocs/restapi/urls.py
+++ b/readthedocs/restapi/urls.py
@@ -63,7 +63,6 @@
]
function_urls = [
- url(r'embed/', core_views.embed, name='embed'),
url(r'docurl/', core_views.docurl, name='docurl'),
url(r'footer_html/', footer_views.footer_html, name='footer_html'),
]
diff --git a/readthedocs/restapi/views/core_views.py b/readthedocs/restapi/views/core_views.py
--- a/readthedocs/restapi/views/core_views.py
+++ b/readthedocs/restapi/views/core_views.py
@@ -6,11 +6,6 @@
from rest_framework.renderers import JSONRenderer
from rest_framework.response import Response
-import json
-import requests
-
-from django.conf import settings
-from django.core.cache import cache
from django.shortcuts import get_object_or_404
from readthedocs.builds.constants import LATEST
@@ -44,50 +39,3 @@
return Response({
'url': make_document_url(project=project, version=version.slug, page=doc)
})
-
-
-@decorators.api_view(['GET'])
-@decorators.permission_classes((permissions.AllowAny,))
-@decorators.renderer_classes((JSONRenderer,))
-def embed(request):
- """
- Embed a section of content from any Read the Docs page.
-
- Returns headers and content that matches the queried section.
-
- ### Arguments
-
- * project (required)
- * doc (required)
- * version (default latest)
- * section
-
- ### Example
-
- GET https://readthedocs.org/api/v2/embed/?project=requests&doc=index&section=User%20Guide
-
- # Current Request
- """
- project = request.GET.get('project')
- version = request.GET.get('version', LATEST)
- doc = request.GET.get('doc')
- section = request.GET.get('section')
-
- if project is None or doc is None:
- return Response({'error': 'Need project and doc'}, status=status.HTTP_400_BAD_REQUEST)
-
- embed_cache = cache.get('embed:%s' % project)
- if embed_cache:
- embed = json.loads(embed_cache)
- else:
- try:
- resp = requests.get(
- '{host}/api/v1/embed/'.format(host=settings.GROK_API_HOST),
- params={'project': project, 'version': version, 'doc': doc, 'section': section}
- )
- embed = resp.json()
- cache.set('embed:%s' % project, resp.content, 1800)
- except Exception as e:
- return Response({'error': '%s' % e.msg}, status=status.HTTP_400_BAD_REQUEST)
-
- return Response(embed)
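The patch drops the `embed/` route together with the `embed` view and the imports (`json`, `requests`, `settings`, `cache`) that only that view used. A sketch of a possible regression check is below; it assumes the API URLconf is active and that the route names are not namespaced, so the test name and the bare `reverse()` calls are illustrative, not part of the patch:

```python
# Hypothetical sketch: after the patch, reversing the removed URL name
# should fail, while the surviving function routes still resolve.
from django.urls import NoReverseMatch, reverse


def test_embed_route_removed():
    try:
        reverse('embed')
    except NoReverseMatch:
        pass  # expected: /api/v2/embed/ is no longer registered
    else:
        raise AssertionError('embed route is still registered')

    # The remaining function URLs are untouched by the patch.
    assert reverse('docurl')
    assert reverse('footer_html')
```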
| {"golden_diff": "diff --git a/readthedocs/restapi/urls.py b/readthedocs/restapi/urls.py\n--- a/readthedocs/restapi/urls.py\n+++ b/readthedocs/restapi/urls.py\n@@ -63,7 +63,6 @@\n ]\n \n function_urls = [\n- url(r'embed/', core_views.embed, name='embed'),\n url(r'docurl/', core_views.docurl, name='docurl'),\n url(r'footer_html/', footer_views.footer_html, name='footer_html'),\n ]\ndiff --git a/readthedocs/restapi/views/core_views.py b/readthedocs/restapi/views/core_views.py\n--- a/readthedocs/restapi/views/core_views.py\n+++ b/readthedocs/restapi/views/core_views.py\n@@ -6,11 +6,6 @@\n from rest_framework.renderers import JSONRenderer\n from rest_framework.response import Response\n \n-import json\n-import requests\n-\n-from django.conf import settings\n-from django.core.cache import cache\n from django.shortcuts import get_object_or_404\n \n from readthedocs.builds.constants import LATEST\n@@ -44,50 +39,3 @@\n return Response({\n 'url': make_document_url(project=project, version=version.slug, page=doc)\n })\n-\n-\[email protected]_view(['GET'])\[email protected]_classes((permissions.AllowAny,))\[email protected]_classes((JSONRenderer,))\n-def embed(request):\n- \"\"\"\n- Embed a section of content from any Read the Docs page.\n-\n- Returns headers and content that matches the queried section.\n-\n- ### Arguments\n-\n- * project (required)\n- * doc (required)\n- * version (default latest)\n- * section\n-\n- ### Example\n-\n- GET https://readthedocs.org/api/v2/embed/?project=requests&doc=index§ion=User%20Guide\n-\n- # Current Request\n- \"\"\"\n- project = request.GET.get('project')\n- version = request.GET.get('version', LATEST)\n- doc = request.GET.get('doc')\n- section = request.GET.get('section')\n-\n- if project is None or doc is None:\n- return Response({'error': 'Need project and doc'}, status=status.HTTP_400_BAD_REQUEST)\n-\n- embed_cache = cache.get('embed:%s' % project)\n- if embed_cache:\n- embed = json.loads(embed_cache)\n- else:\n- try:\n- resp = requests.get(\n- '{host}/api/v1/embed/'.format(host=settings.GROK_API_HOST),\n- params={'project': project, 'version': version, 'doc': doc, 'section': section}\n- )\n- embed = resp.json()\n- cache.set('embed:%s' % project, resp.content, 1800)\n- except Exception as e:\n- return Response({'error': '%s' % e.msg}, status=status.HTTP_400_BAD_REQUEST)\n-\n- return Response(embed)\n", "issue": "Remove /embed endpoint\nSimilar to #4731\r\n\r\nhttps://github.com/rtfd/readthedocs.org/blob/604e1fa0374a0ba1b242b88e6f242eff77b7b6c3/readthedocs/restapi/urls.py#L66-L66\r\n\r\nRaised in https://github.com/rtfd/readthedocs.org/pull/4731/files#r222783446\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n\"\"\"Define routes between URL paths and views/endpoints.\"\"\"\n\nfrom __future__ import (\n absolute_import,\n division,\n print_function,\n unicode_literals,\n)\n\nfrom django.conf import settings\nfrom django.conf.urls import include, url\nfrom rest_framework import routers\n\nfrom readthedocs.constants import pattern_opts\nfrom readthedocs.restapi import views\nfrom readthedocs.restapi.views import (\n core_views,\n footer_views,\n integrations,\n search_views,\n task_views,\n)\n\nfrom .views.model_views import (\n BuildCommandViewSet,\n BuildViewSet,\n DomainViewSet,\n NotificationViewSet,\n ProjectViewSet,\n RemoteOrganizationViewSet,\n RemoteRepositoryViewSet,\n SocialAccountViewSet,\n VersionViewSet,\n)\n\nrouter = routers.DefaultRouter()\nrouter.register(r'build', BuildViewSet, base_name='build')\nrouter.register(r'command', 
BuildCommandViewSet, base_name='buildcommandresult')\nrouter.register(r'version', VersionViewSet, base_name='version')\nrouter.register(r'project', ProjectViewSet, base_name='project')\nrouter.register(r'notification', NotificationViewSet, base_name='emailhook')\nrouter.register(r'domain', DomainViewSet, base_name='domain')\nrouter.register(\n r'remote/org',\n RemoteOrganizationViewSet,\n base_name='remoteorganization',\n)\nrouter.register(\n r'remote/repo',\n RemoteRepositoryViewSet,\n base_name='remoterepository',\n)\nrouter.register(\n r'remote/account',\n SocialAccountViewSet,\n base_name='remoteaccount',\n)\n\nurlpatterns = [\n url(r'^', include(router.urls)),\n]\n\nfunction_urls = [\n url(r'embed/', core_views.embed, name='embed'),\n url(r'docurl/', core_views.docurl, name='docurl'),\n url(r'footer_html/', footer_views.footer_html, name='footer_html'),\n]\n\nsearch_urls = [\n url(\n r'index_search/',\n search_views.index_search,\n name='index_search',\n ),\n url(r'search/$', views.search_views.search, name='api_search'),\n url(\n r'search/project/$',\n search_views.project_search,\n name='api_project_search',\n ),\n url(\n r'search/section/$',\n search_views.section_search,\n name='api_section_search',\n ),\n]\n\ntask_urls = [\n url(\n r'jobs/status/(?P<task_id>[^/]+)/',\n task_views.job_status,\n name='api_job_status',\n ),\n url(\n r'jobs/sync-remote-repositories/',\n task_views.sync_remote_repositories,\n name='api_sync_remote_repositories',\n ),\n]\n\nintegration_urls = [\n url(\n r'webhook/github/(?P<project_slug>{project_slug})/$'\n .format(**pattern_opts),\n integrations.GitHubWebhookView.as_view(),\n name='api_webhook_github',\n ),\n url(\n r'webhook/gitlab/(?P<project_slug>{project_slug})/$'\n .format(**pattern_opts),\n integrations.GitLabWebhookView.as_view(),\n name='api_webhook_gitlab',\n ),\n url(\n r'webhook/bitbucket/(?P<project_slug>{project_slug})/$'\n .format(**pattern_opts),\n integrations.BitbucketWebhookView.as_view(),\n name='api_webhook_bitbucket',\n ),\n url(\n r'webhook/generic/(?P<project_slug>{project_slug})/$'\n .format(**pattern_opts),\n integrations.APIWebhookView.as_view(),\n name='api_webhook_generic',\n ),\n url(\n (\n r'webhook/(?P<project_slug>{project_slug})/'\n r'(?P<integration_pk>{integer_pk})/$'.format(**pattern_opts)\n ),\n integrations.WebhookView.as_view(),\n name='api_webhook',\n ),\n]\n\nurlpatterns += function_urls\nurlpatterns += search_urls\nurlpatterns += task_urls\nurlpatterns += integration_urls\n\nif 'readthedocsext.search' in settings.INSTALLED_APPS:\n # pylint: disable=import-error\n from readthedocsext.search.docsearch import DocSearch\n api_search_urls = [\n url(r'^docsearch/$', DocSearch.as_view(), name='doc_search'),\n ]\n urlpatterns += api_search_urls\n\nif 'readthedocsext.donate' in settings.INSTALLED_APPS:\n # pylint: disable=import-error\n from readthedocsext.donate.restapi.urls import urlpatterns \\\n as sustainability_urls\n\n urlpatterns += [\n url(r'^sustainability/', include(sustainability_urls)),\n ]\n", "path": "readthedocs/restapi/urls.py"}, {"content": "\"\"\"Utility endpoints relating to canonical urls, embedded content, etc.\"\"\"\n\nfrom __future__ import absolute_import\n\nfrom rest_framework import decorators, permissions, status\nfrom rest_framework.renderers import JSONRenderer\nfrom rest_framework.response import Response\n\nimport json\nimport requests\n\nfrom django.conf import settings\nfrom django.core.cache import cache\nfrom django.shortcuts import get_object_or_404\n\nfrom 
readthedocs.builds.constants import LATEST\nfrom readthedocs.builds.models import Version\nfrom readthedocs.projects.models import Project\nfrom readthedocs.core.templatetags.core_tags import make_document_url\n\n\[email protected]_view(['GET'])\[email protected]_classes((permissions.AllowAny,))\[email protected]_classes((JSONRenderer,))\ndef docurl(request):\n \"\"\"\n Get the url that a slug resolves to.\n\n Example::\n\n GET https://readthedocs.org/api/v2/docurl/?project=requests&version=latest&doc=index\n\n \"\"\"\n project = request.GET.get('project')\n version = request.GET.get('version', LATEST)\n doc = request.GET.get('doc', 'index')\n if project is None:\n return Response({'error': 'Need project and doc'}, status=status.HTTP_400_BAD_REQUEST)\n\n project = get_object_or_404(Project, slug=project)\n version = get_object_or_404(\n Version.objects.public(request.user, project=project, only_active=False),\n slug=version)\n return Response({\n 'url': make_document_url(project=project, version=version.slug, page=doc)\n })\n\n\[email protected]_view(['GET'])\[email protected]_classes((permissions.AllowAny,))\[email protected]_classes((JSONRenderer,))\ndef embed(request):\n \"\"\"\n Embed a section of content from any Read the Docs page.\n\n Returns headers and content that matches the queried section.\n\n ### Arguments\n\n * project (required)\n * doc (required)\n * version (default latest)\n * section\n\n ### Example\n\n GET https://readthedocs.org/api/v2/embed/?project=requests&doc=index§ion=User%20Guide\n\n # Current Request\n \"\"\"\n project = request.GET.get('project')\n version = request.GET.get('version', LATEST)\n doc = request.GET.get('doc')\n section = request.GET.get('section')\n\n if project is None or doc is None:\n return Response({'error': 'Need project and doc'}, status=status.HTTP_400_BAD_REQUEST)\n\n embed_cache = cache.get('embed:%s' % project)\n if embed_cache:\n embed = json.loads(embed_cache)\n else:\n try:\n resp = requests.get(\n '{host}/api/v1/embed/'.format(host=settings.GROK_API_HOST),\n params={'project': project, 'version': version, 'doc': doc, 'section': section}\n )\n embed = resp.json()\n cache.set('embed:%s' % project, resp.content, 1800)\n except Exception as e:\n return Response({'error': '%s' % e.msg}, status=status.HTTP_400_BAD_REQUEST)\n\n return Response(embed)\n", "path": "readthedocs/restapi/views/core_views.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\n\"\"\"Define routes between URL paths and views/endpoints.\"\"\"\n\nfrom __future__ import (\n absolute_import,\n division,\n print_function,\n unicode_literals,\n)\n\nfrom django.conf import settings\nfrom django.conf.urls import include, url\nfrom rest_framework import routers\n\nfrom readthedocs.constants import pattern_opts\nfrom readthedocs.restapi import views\nfrom readthedocs.restapi.views import (\n core_views,\n footer_views,\n integrations,\n search_views,\n task_views,\n)\n\nfrom .views.model_views import (\n BuildCommandViewSet,\n BuildViewSet,\n DomainViewSet,\n NotificationViewSet,\n ProjectViewSet,\n RemoteOrganizationViewSet,\n RemoteRepositoryViewSet,\n SocialAccountViewSet,\n VersionViewSet,\n)\n\nrouter = routers.DefaultRouter()\nrouter.register(r'build', BuildViewSet, base_name='build')\nrouter.register(r'command', BuildCommandViewSet, base_name='buildcommandresult')\nrouter.register(r'version', VersionViewSet, base_name='version')\nrouter.register(r'project', ProjectViewSet, base_name='project')\nrouter.register(r'notification', NotificationViewSet, 
base_name='emailhook')\nrouter.register(r'domain', DomainViewSet, base_name='domain')\nrouter.register(\n r'remote/org',\n RemoteOrganizationViewSet,\n base_name='remoteorganization',\n)\nrouter.register(\n r'remote/repo',\n RemoteRepositoryViewSet,\n base_name='remoterepository',\n)\nrouter.register(\n r'remote/account',\n SocialAccountViewSet,\n base_name='remoteaccount',\n)\n\nurlpatterns = [\n url(r'^', include(router.urls)),\n]\n\nfunction_urls = [\n url(r'docurl/', core_views.docurl, name='docurl'),\n url(r'footer_html/', footer_views.footer_html, name='footer_html'),\n]\n\nsearch_urls = [\n url(\n r'index_search/',\n search_views.index_search,\n name='index_search',\n ),\n url(r'search/$', views.search_views.search, name='api_search'),\n url(\n r'search/project/$',\n search_views.project_search,\n name='api_project_search',\n ),\n url(\n r'search/section/$',\n search_views.section_search,\n name='api_section_search',\n ),\n]\n\ntask_urls = [\n url(\n r'jobs/status/(?P<task_id>[^/]+)/',\n task_views.job_status,\n name='api_job_status',\n ),\n url(\n r'jobs/sync-remote-repositories/',\n task_views.sync_remote_repositories,\n name='api_sync_remote_repositories',\n ),\n]\n\nintegration_urls = [\n url(\n r'webhook/github/(?P<project_slug>{project_slug})/$'\n .format(**pattern_opts),\n integrations.GitHubWebhookView.as_view(),\n name='api_webhook_github',\n ),\n url(\n r'webhook/gitlab/(?P<project_slug>{project_slug})/$'\n .format(**pattern_opts),\n integrations.GitLabWebhookView.as_view(),\n name='api_webhook_gitlab',\n ),\n url(\n r'webhook/bitbucket/(?P<project_slug>{project_slug})/$'\n .format(**pattern_opts),\n integrations.BitbucketWebhookView.as_view(),\n name='api_webhook_bitbucket',\n ),\n url(\n r'webhook/generic/(?P<project_slug>{project_slug})/$'\n .format(**pattern_opts),\n integrations.APIWebhookView.as_view(),\n name='api_webhook_generic',\n ),\n url(\n (\n r'webhook/(?P<project_slug>{project_slug})/'\n r'(?P<integration_pk>{integer_pk})/$'.format(**pattern_opts)\n ),\n integrations.WebhookView.as_view(),\n name='api_webhook',\n ),\n]\n\nurlpatterns += function_urls\nurlpatterns += search_urls\nurlpatterns += task_urls\nurlpatterns += integration_urls\n\nif 'readthedocsext.search' in settings.INSTALLED_APPS:\n # pylint: disable=import-error\n from readthedocsext.search.docsearch import DocSearch\n api_search_urls = [\n url(r'^docsearch/$', DocSearch.as_view(), name='doc_search'),\n ]\n urlpatterns += api_search_urls\n\nif 'readthedocsext.donate' in settings.INSTALLED_APPS:\n # pylint: disable=import-error\n from readthedocsext.donate.restapi.urls import urlpatterns \\\n as sustainability_urls\n\n urlpatterns += [\n url(r'^sustainability/', include(sustainability_urls)),\n ]\n", "path": "readthedocs/restapi/urls.py"}, {"content": "\"\"\"Utility endpoints relating to canonical urls, embedded content, etc.\"\"\"\n\nfrom __future__ import absolute_import\n\nfrom rest_framework import decorators, permissions, status\nfrom rest_framework.renderers import JSONRenderer\nfrom rest_framework.response import Response\n\nfrom django.shortcuts import get_object_or_404\n\nfrom readthedocs.builds.constants import LATEST\nfrom readthedocs.builds.models import Version\nfrom readthedocs.projects.models import Project\nfrom readthedocs.core.templatetags.core_tags import make_document_url\n\n\[email protected]_view(['GET'])\[email protected]_classes((permissions.AllowAny,))\[email protected]_classes((JSONRenderer,))\ndef docurl(request):\n \"\"\"\n Get the url that a slug resolves to.\n\n 
Example::\n\n GET https://readthedocs.org/api/v2/docurl/?project=requests&version=latest&doc=index\n\n \"\"\"\n project = request.GET.get('project')\n version = request.GET.get('version', LATEST)\n doc = request.GET.get('doc', 'index')\n if project is None:\n return Response({'error': 'Need project and doc'}, status=status.HTTP_400_BAD_REQUEST)\n\n project = get_object_or_404(Project, slug=project)\n version = get_object_or_404(\n Version.objects.public(request.user, project=project, only_active=False),\n slug=version)\n return Response({\n 'url': make_document_url(project=project, version=version.slug, page=doc)\n })\n", "path": "readthedocs/restapi/views/core_views.py"}]} | 2,616 | 646 |
gh_patches_debug_1816 | rasdani/github-patches | git_diff | pymedusa__Medusa-9537 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Error message "Failed parsing provider" & "list index out of range"
**Describe the bug**
Error message:
`2020-12-05 07:52:25 ERROR SEARCHQUEUE-BACKLOG-260586 :: [Beyond-HD] :: [ce68da5] Failed parsing provider.
Traceback (most recent call last):
File "/Applications/Medusa/medusa/providers/torrent/html/beyondhd.py", line 128, in parse
download_url = urljoin(self.url, cells[2].find('a')['href'])
IndexError: list index out of range`
**Medusa (please complete the following information):**
 - OS: macOS Catalina 10.15.7
- Branch: master
- Commit: ce68da57b3878591f77c21bb2acf28e6a58269fa
- Python version: 3.8.5
- Database version: 44.17
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `medusa/providers/torrent/html/beyondhd.py`
Content:
```
1 # coding=utf-8
2
3 """Provider code for Beyond-hd."""
4
5 from __future__ import unicode_literals
6
7 import logging
8
9 from medusa import tv
10 from medusa.bs4_parser import BS4Parser
11 from medusa.helper.common import convert_size
12 from medusa.logger.adapters.style import BraceAdapter
13 from medusa.providers.torrent.torrent_provider import TorrentProvider
14
15 from requests.compat import urljoin
16
17 log = BraceAdapter(logging.getLogger(__name__))
18 log.logger.addHandler(logging.NullHandler())
19
20
21 class BeyondHDProvider(TorrentProvider):
22 """Beyond-hd Torrent provider."""
23
24 def __init__(self):
25 """Initialize the class."""
26 super(BeyondHDProvider, self).__init__('Beyond-HD')
27
28 self.enable_cookies = True
29 self.cookies = ''
30 self.required_cookies = ('remember_web_[**long_hash**]',)
31
32 self.url = 'https://beyond-hd.me'
33 self.urls = {
34 'login': urljoin(self.url, 'login'),
35 'search': urljoin(self.url, 'torrents'),
36 }
37
38 # Proper Strings
39 self.proper_strings = ['PROPER', 'REPACK', 'REAL', 'RERIP']
40
41 # Miscellaneous Options
42
43 # Cache
44 self.cache = tv.Cache(self)
45
46 def search(self, search_strings, *args, **kwargs):
47 """
48 Search a provider and parse the results.
49
50 :param search_strings: A dict with mode (key) and the search value (value)
51 :returns: A list of search results (structure)
52 """
53 results = []
54 if not self.login():
55 return results
56
57 for mode in search_strings:
58 log.debug('Search mode: {0}', mode)
59
60 for search_string in search_strings[mode]:
61
62 search_params = {
63 'categories[]': 2,
64 'sorting': 'created_at',
65 'qty': '100',
66 'direction': 'desc',
67 'doSearch': 'Search'
68 }
69
70 if mode != 'RSS':
71 log.debug('Search string: {search}',
72 {'search': search_string})
73 search_params['search'] = search_string
74
75 if mode == 'season':
76 search_params['pack'] = 1
77
78 response = self.session.get(self.urls['search'], params=search_params)
79 if not response or not response.text:
80 log.debug('No data returned from provider')
81 continue
82
83 results += self.parse(response.text, mode)
84
85 return results
86
87 def parse(self, data, mode):
88 """
89 Parse search results for items.
90
91 :param data: The raw response from a search
92 :param mode: The current mode used to search, e.g. RSS
93
94 :return: A list of items found
95 """
96 # Units
97 units = ['B', 'KIB', 'MIB', 'GIB', 'TIB', 'PIB']
98
99 items = []
100
101 with BS4Parser(data, 'html5lib') as html:
102 if html.find('div', class_='table-torrents'):
103 theme = 'modern'
104 torrent_table = html.find('div', class_='table-torrents').find('table')
105 else:
106 theme = 'classic'
107 torrent_table = html.find('div', class_='table-responsive').find('table')
108
109 torrent_rows = torrent_table('tr') if torrent_table else []
110 labels = [label.get_text(strip=True) for label in torrent_rows[0]('th')]
111 # For the classic theme, the tr don't match the td.
112 if theme == 'classic':
113 del labels[3]
114
115 # Continue only if one release is found
116 if len(torrent_rows) < 2:
117 log.debug('Data returned from provider does not contain any torrents')
118 return items
119
120 for result in torrent_rows[1:]:
121 cells = result('td')
122
123 try:
124 if len(cells) < 2:
125 continue
126
127 link = cells[1].find('a')
128 download_url = urljoin(self.url, cells[2].find('a')['href'])
129 title = link.get_text(strip=True)
130 if not all([title, download_url]):
131 continue
132
133 seeders = int(cells[labels.index('S')].find('span').get_text())
134 leechers = int(cells[labels.index('L')].find('span').get_text())
135
136 # Filter unseeded torrent
137 if seeders < self.minseed:
138 if mode != 'RSS':
139 log.debug("Discarding torrent because it doesn't meet the"
140 ' minimum seeders: {0}. Seeders: {1}',
141 title, seeders)
142 continue
143
144 torrent_size = cells[labels.index('Size')].find('span').get_text()
145 size = convert_size(torrent_size, units=units) or -1
146
147 pubdate_raw = cells[labels.index('Age')].find('span').get_text()
148 pubdate = self.parse_pubdate(pubdate_raw, human_time=True)
149
150 item = {
151 'title': title,
152 'link': download_url,
153 'size': size,
154 'seeders': seeders,
155 'leechers': leechers,
156 'pubdate': pubdate,
157 }
158 if mode != 'RSS':
159 log.debug('Found result: {0} with {1} seeders and {2} leechers',
160 title, seeders, leechers)
161
162 items.append(item)
163 except (AttributeError, TypeError, KeyError, ValueError, IndexError):
164 log.exception('Failed parsing provider.')
165
166 return items
167
168 def login(self):
169 """Login method used for logging in before doing search and torrent downloads."""
170 return self.cookie_login('Login now')
171
172 def check_required_cookies(self):
173 """
174 Check if we have the required cookies in the requests sessions object.
175
176 Meaning that we've already successfully authenticated once, and we don't need to go through this again.
177 Note! This doesn't mean the cookies are correct!
178 """
179 return False
180
181
182 provider = BeyondHDProvider()
183
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/medusa/providers/torrent/html/beyondhd.py b/medusa/providers/torrent/html/beyondhd.py
--- a/medusa/providers/torrent/html/beyondhd.py
+++ b/medusa/providers/torrent/html/beyondhd.py
@@ -121,7 +121,7 @@
cells = result('td')
try:
- if len(cells) < 2:
+ if len(cells) < 3:
continue
link = cells[1].find('a')
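The traceback in the issue comes from reading `cells[2]` on a row that only has two `<td>` cells (for example an ad or separator row), so the old `len(cells) < 2` guard still let it through; requiring at least three cells skips such rows before any indexing happens. A minimal sketch of the corrected guard, using plain lists in place of the parsed BeautifulSoup rows (the row contents here are made up for illustration):

```python
# Sketch with hypothetical row data: indexing cells[1] and cells[2] is only
# safe once the row is known to contain at least three cells.
def extract_links(cells):
    if len(cells) < 3:          # old guard was `< 2`, which admitted 2-cell rows
        return None
    return cells[1], cells[2]   # title link, download link


assert extract_links(['category', 'title-link']) is None   # short row skipped
assert extract_links(['category', 'title-link', 'dl-link']) == ('title-link', 'dl-link')
```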
| {"golden_diff": "diff --git a/medusa/providers/torrent/html/beyondhd.py b/medusa/providers/torrent/html/beyondhd.py\n--- a/medusa/providers/torrent/html/beyondhd.py\n+++ b/medusa/providers/torrent/html/beyondhd.py\n@@ -121,7 +121,7 @@\n cells = result('td')\n \n try:\n- if len(cells) < 2:\n+ if len(cells) < 3:\n continue\n \n link = cells[1].find('a')\n", "issue": "Error message \"Failed parsing provider\" & \"list index out of range\"\n**Describe the bug**\r\nError message:\r\n\r\n`2020-12-05 07:52:25 ERROR SEARCHQUEUE-BACKLOG-260586 :: [Beyond-HD] :: [ce68da5] Failed parsing provider.\r\nTraceback (most recent call last):\r\n File \"/Applications/Medusa/medusa/providers/torrent/html/beyondhd.py\", line 128, in parse\r\n download_url = urljoin(self.url, cells[2].find('a')['href'])\r\nIndexError: list index out of range`\r\n\r\n**Medusa (please complete the following information):**\r\n - OS: macOS Catalina 10.15,.7\r\n - Branch: master\r\n - Commit: ce68da57b3878591f77c21bb2acf28e6a58269fa\r\n - Python version: 3.8.5\r\n - Database version: 44.17\r\n\r\n\r\n\n", "before_files": [{"content": "# coding=utf-8\n\n\"\"\"Provider code for Beyond-hd.\"\"\"\n\nfrom __future__ import unicode_literals\n\nimport logging\n\nfrom medusa import tv\nfrom medusa.bs4_parser import BS4Parser\nfrom medusa.helper.common import convert_size\nfrom medusa.logger.adapters.style import BraceAdapter\nfrom medusa.providers.torrent.torrent_provider import TorrentProvider\n\nfrom requests.compat import urljoin\n\nlog = BraceAdapter(logging.getLogger(__name__))\nlog.logger.addHandler(logging.NullHandler())\n\n\nclass BeyondHDProvider(TorrentProvider):\n \"\"\"Beyond-hd Torrent provider.\"\"\"\n\n def __init__(self):\n \"\"\"Initialize the class.\"\"\"\n super(BeyondHDProvider, self).__init__('Beyond-HD')\n\n self.enable_cookies = True\n self.cookies = ''\n self.required_cookies = ('remember_web_[**long_hash**]',)\n\n self.url = 'https://beyond-hd.me'\n self.urls = {\n 'login': urljoin(self.url, 'login'),\n 'search': urljoin(self.url, 'torrents'),\n }\n\n # Proper Strings\n self.proper_strings = ['PROPER', 'REPACK', 'REAL', 'RERIP']\n\n # Miscellaneous Options\n\n # Cache\n self.cache = tv.Cache(self)\n\n def search(self, search_strings, *args, **kwargs):\n \"\"\"\n Search a provider and parse the results.\n\n :param search_strings: A dict with mode (key) and the search value (value)\n :returns: A list of search results (structure)\n \"\"\"\n results = []\n if not self.login():\n return results\n\n for mode in search_strings:\n log.debug('Search mode: {0}', mode)\n\n for search_string in search_strings[mode]:\n\n search_params = {\n 'categories[]': 2,\n 'sorting': 'created_at',\n 'qty': '100',\n 'direction': 'desc',\n 'doSearch': 'Search'\n }\n\n if mode != 'RSS':\n log.debug('Search string: {search}',\n {'search': search_string})\n search_params['search'] = search_string\n\n if mode == 'season':\n search_params['pack'] = 1\n\n response = self.session.get(self.urls['search'], params=search_params)\n if not response or not response.text:\n log.debug('No data returned from provider')\n continue\n\n results += self.parse(response.text, mode)\n\n return results\n\n def parse(self, data, mode):\n \"\"\"\n Parse search results for items.\n\n :param data: The raw response from a search\n :param mode: The current mode used to search, e.g. 
RSS\n\n :return: A list of items found\n \"\"\"\n # Units\n units = ['B', 'KIB', 'MIB', 'GIB', 'TIB', 'PIB']\n\n items = []\n\n with BS4Parser(data, 'html5lib') as html:\n if html.find('div', class_='table-torrents'):\n theme = 'modern'\n torrent_table = html.find('div', class_='table-torrents').find('table')\n else:\n theme = 'classic'\n torrent_table = html.find('div', class_='table-responsive').find('table')\n\n torrent_rows = torrent_table('tr') if torrent_table else []\n labels = [label.get_text(strip=True) for label in torrent_rows[0]('th')]\n # For the classic theme, the tr don't match the td.\n if theme == 'classic':\n del labels[3]\n\n # Continue only if one release is found\n if len(torrent_rows) < 2:\n log.debug('Data returned from provider does not contain any torrents')\n return items\n\n for result in torrent_rows[1:]:\n cells = result('td')\n\n try:\n if len(cells) < 2:\n continue\n\n link = cells[1].find('a')\n download_url = urljoin(self.url, cells[2].find('a')['href'])\n title = link.get_text(strip=True)\n if not all([title, download_url]):\n continue\n\n seeders = int(cells[labels.index('S')].find('span').get_text())\n leechers = int(cells[labels.index('L')].find('span').get_text())\n\n # Filter unseeded torrent\n if seeders < self.minseed:\n if mode != 'RSS':\n log.debug(\"Discarding torrent because it doesn't meet the\"\n ' minimum seeders: {0}. Seeders: {1}',\n title, seeders)\n continue\n\n torrent_size = cells[labels.index('Size')].find('span').get_text()\n size = convert_size(torrent_size, units=units) or -1\n\n pubdate_raw = cells[labels.index('Age')].find('span').get_text()\n pubdate = self.parse_pubdate(pubdate_raw, human_time=True)\n\n item = {\n 'title': title,\n 'link': download_url,\n 'size': size,\n 'seeders': seeders,\n 'leechers': leechers,\n 'pubdate': pubdate,\n }\n if mode != 'RSS':\n log.debug('Found result: {0} with {1} seeders and {2} leechers',\n title, seeders, leechers)\n\n items.append(item)\n except (AttributeError, TypeError, KeyError, ValueError, IndexError):\n log.exception('Failed parsing provider.')\n\n return items\n\n def login(self):\n \"\"\"Login method used for logging in before doing search and torrent downloads.\"\"\"\n return self.cookie_login('Login now')\n\n def check_required_cookies(self):\n \"\"\"\n Check if we have the required cookies in the requests sessions object.\n\n Meaning that we've already successfully authenticated once, and we don't need to go through this again.\n Note! 
This doesn't mean the cookies are correct!\n \"\"\"\n return False\n\n\nprovider = BeyondHDProvider()\n", "path": "medusa/providers/torrent/html/beyondhd.py"}], "after_files": [{"content": "# coding=utf-8\n\n\"\"\"Provider code for Beyond-hd.\"\"\"\n\nfrom __future__ import unicode_literals\n\nimport logging\n\nfrom medusa import tv\nfrom medusa.bs4_parser import BS4Parser\nfrom medusa.helper.common import convert_size\nfrom medusa.logger.adapters.style import BraceAdapter\nfrom medusa.providers.torrent.torrent_provider import TorrentProvider\n\nfrom requests.compat import urljoin\n\nlog = BraceAdapter(logging.getLogger(__name__))\nlog.logger.addHandler(logging.NullHandler())\n\n\nclass BeyondHDProvider(TorrentProvider):\n \"\"\"Beyond-hd Torrent provider.\"\"\"\n\n def __init__(self):\n \"\"\"Initialize the class.\"\"\"\n super(BeyondHDProvider, self).__init__('Beyond-HD')\n\n self.enable_cookies = True\n self.cookies = ''\n self.required_cookies = ('remember_web_[**long_hash**]',)\n\n self.url = 'https://beyond-hd.me'\n self.urls = {\n 'login': urljoin(self.url, 'login'),\n 'search': urljoin(self.url, 'torrents'),\n }\n\n # Proper Strings\n self.proper_strings = ['PROPER', 'REPACK', 'REAL', 'RERIP']\n\n # Miscellaneous Options\n\n # Cache\n self.cache = tv.Cache(self)\n\n def search(self, search_strings, *args, **kwargs):\n \"\"\"\n Search a provider and parse the results.\n\n :param search_strings: A dict with mode (key) and the search value (value)\n :returns: A list of search results (structure)\n \"\"\"\n results = []\n if not self.login():\n return results\n\n for mode in search_strings:\n log.debug('Search mode: {0}', mode)\n\n for search_string in search_strings[mode]:\n\n search_params = {\n 'categories[]': 2,\n 'sorting': 'created_at',\n 'qty': '100',\n 'direction': 'desc',\n 'doSearch': 'Search'\n }\n\n if mode != 'RSS':\n log.debug('Search string: {search}',\n {'search': search_string})\n search_params['search'] = search_string\n\n if mode == 'season':\n search_params['pack'] = 1\n\n response = self.session.get(self.urls['search'], params=search_params)\n if not response or not response.text:\n log.debug('No data returned from provider')\n continue\n\n results += self.parse(response.text, mode)\n\n return results\n\n def parse(self, data, mode):\n \"\"\"\n Parse search results for items.\n\n :param data: The raw response from a search\n :param mode: The current mode used to search, e.g. 
RSS\n\n :return: A list of items found\n \"\"\"\n # Units\n units = ['B', 'KIB', 'MIB', 'GIB', 'TIB', 'PIB']\n\n items = []\n\n with BS4Parser(data, 'html5lib') as html:\n if html.find('div', class_='table-torrents'):\n theme = 'modern'\n torrent_table = html.find('div', class_='table-torrents').find('table')\n else:\n theme = 'classic'\n torrent_table = html.find('div', class_='table-responsive').find('table')\n\n torrent_rows = torrent_table('tr') if torrent_table else []\n labels = [label.get_text(strip=True) for label in torrent_rows[0]('th')]\n # For the classic theme, the tr don't match the td.\n if theme == 'classic':\n del labels[3]\n\n # Continue only if one release is found\n if len(torrent_rows) < 2:\n log.debug('Data returned from provider does not contain any torrents')\n return items\n\n for result in torrent_rows[1:]:\n cells = result('td')\n\n try:\n if len(cells) < 3:\n continue\n\n link = cells[1].find('a')\n download_url = urljoin(self.url, cells[2].find('a')['href'])\n title = link.get_text(strip=True)\n if not all([title, download_url]):\n continue\n\n seeders = int(cells[labels.index('S')].find('span').get_text())\n leechers = int(cells[labels.index('L')].find('span').get_text())\n\n # Filter unseeded torrent\n if seeders < self.minseed:\n if mode != 'RSS':\n log.debug(\"Discarding torrent because it doesn't meet the\"\n ' minimum seeders: {0}. Seeders: {1}',\n title, seeders)\n continue\n\n torrent_size = cells[labels.index('Size')].find('span').get_text()\n size = convert_size(torrent_size, units=units) or -1\n\n pubdate_raw = cells[labels.index('Age')].find('span').get_text()\n pubdate = self.parse_pubdate(pubdate_raw, human_time=True)\n\n item = {\n 'title': title,\n 'link': download_url,\n 'size': size,\n 'seeders': seeders,\n 'leechers': leechers,\n 'pubdate': pubdate,\n }\n if mode != 'RSS':\n log.debug('Found result: {0} with {1} seeders and {2} leechers',\n title, seeders, leechers)\n\n items.append(item)\n except (AttributeError, TypeError, KeyError, ValueError, IndexError):\n log.exception('Failed parsing provider.')\n\n return items\n\n def login(self):\n \"\"\"Login method used for logging in before doing search and torrent downloads.\"\"\"\n return self.cookie_login('Login now')\n\n def check_required_cookies(self):\n \"\"\"\n Check if we have the required cookies in the requests sessions object.\n\n Meaning that we've already successfully authenticated once, and we don't need to go through this again.\n Note! This doesn't mean the cookies are correct!\n \"\"\"\n return False\n\n\nprovider = BeyondHDProvider()\n", "path": "medusa/providers/torrent/html/beyondhd.py"}]} | 2,247 | 115 |
gh_patches_debug_385 | rasdani/github-patches | git_diff | searx__searx-1477 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
New release?
Hi,
I'm the Debian maintainer of searx and we are still shipping version 0.14.0 of searx because there has not been a more recent release since February.
Unfortunately, we see a number of services that do not work anymore with 0.14.0 but which are fixed in git. Would it be possible to make a release soon?
The last commit to the repository was back in August. Is the project still alive? Debian plans to release its next stable version soon and we should not include searx if it's not maintained anymore.
Thanks!
cheers, josch
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `searx/version.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 '''
3 searx is free software: you can redistribute it and/or modify
4 it under the terms of the GNU Affero General Public License as published by
5 the Free Software Foundation, either version 3 of the License, or
6 (at your option) any later version.
7
8 searx is distributed in the hope that it will be useful,
9 but WITHOUT ANY WARRANTY; without even the implied warranty of
10 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 GNU Affero General Public License for more details.
12
13 You should have received a copy of the GNU Affero General Public License
14 along with searx. If not, see < http://www.gnu.org/licenses/ >.
15
16 (C) 2013- by Adam Tauber, <[email protected]>
17 '''
18
19 # version of searx
20 VERSION_MAJOR = 0
21 VERSION_MINOR = 14
22 VERSION_BUILD = 0
23
24 VERSION_STRING = "{0}.{1}.{2}".format(VERSION_MAJOR,
25 VERSION_MINOR,
26 VERSION_BUILD)
27
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/searx/version.py b/searx/version.py
--- a/searx/version.py
+++ b/searx/version.py
@@ -18,7 +18,7 @@
# version of searx
VERSION_MAJOR = 0
-VERSION_MINOR = 14
+VERSION_MINOR = 15
VERSION_BUILD = 0
VERSION_STRING = "{0}.{1}.{2}".format(VERSION_MAJOR,
| {"golden_diff": "diff --git a/searx/version.py b/searx/version.py\n--- a/searx/version.py\n+++ b/searx/version.py\n@@ -18,7 +18,7 @@\n \n # version of searx\n VERSION_MAJOR = 0\n-VERSION_MINOR = 14\n+VERSION_MINOR = 15\n VERSION_BUILD = 0\n \n VERSION_STRING = \"{0}.{1}.{2}\".format(VERSION_MAJOR,\n", "issue": "New release?\nHi,\r\n\r\nI'm the Debian maintainer of searx and we are still shipping version 0.14.0 of searx because there has not been a more recent release since February.\r\n\r\nUnfortunately, we see a number of services that do not work anymore with 0.14.0 but which are fixed in git. Would it be possible to make a release soon?\r\n\r\nThe last commit to the repository was back in August. Is the project still alive? Debian plans to release its next stable version soon and we should not include searx if it's not maintained anymore.\r\n\r\nThanks!\r\ncheers, josch\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n'''\nsearx is free software: you can redistribute it and/or modify\nit under the terms of the GNU Affero General Public License as published by\nthe Free Software Foundation, either version 3 of the License, or\n(at your option) any later version.\n\nsearx is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\nGNU Affero General Public License for more details.\n\nYou should have received a copy of the GNU Affero General Public License\nalong with searx. If not, see < http://www.gnu.org/licenses/ >.\n\n(C) 2013- by Adam Tauber, <[email protected]>\n'''\n\n# version of searx\nVERSION_MAJOR = 0\nVERSION_MINOR = 14\nVERSION_BUILD = 0\n\nVERSION_STRING = \"{0}.{1}.{2}\".format(VERSION_MAJOR,\n VERSION_MINOR,\n VERSION_BUILD)\n", "path": "searx/version.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n'''\nsearx is free software: you can redistribute it and/or modify\nit under the terms of the GNU Affero General Public License as published by\nthe Free Software Foundation, either version 3 of the License, or\n(at your option) any later version.\n\nsearx is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\nGNU Affero General Public License for more details.\n\nYou should have received a copy of the GNU Affero General Public License\nalong with searx. If not, see < http://www.gnu.org/licenses/ >.\n\n(C) 2013- by Adam Tauber, <[email protected]>\n'''\n\n# version of searx\nVERSION_MAJOR = 0\nVERSION_MINOR = 15\nVERSION_BUILD = 0\n\nVERSION_STRING = \"{0}.{1}.{2}\".format(VERSION_MAJOR,\n VERSION_MINOR,\n VERSION_BUILD)\n", "path": "searx/version.py"}]} | 657 | 95 |
gh_patches_debug_25557 | rasdani/github-patches | git_diff | aws__aws-cli-2562 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
ImportError on any CLI command on Windows
Hi there,
I just ran `pip install awscli` in a new Python 2.7 virtual environment on Windows. It installed successfully, but any command I run gives output like this:
```
$ aws ecr get-login --region us-east-1
Traceback (most recent call last):
File "...\.tox\tools\Scripts\aws.cmd", line 50, in <module>
import awscli.clidriver
ImportError: No module named awscli.clidriver
```
The package in the virtual environment seems fine and I can run this command successfully (no `ImportError`):
```
$ python -c "from awscli.clidriver import main; main()" ecr get-login --region us-east-1
Unable to locate credentials. You can configure credentials by running "aws configure".
```
Any idea what's wrong?
I'm using this version:
```
$ pip freeze | grep awscli
awscli==1.11.36
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 #!/usr/bin/env python
2 import codecs
3 import os.path
4 import re
5 import sys
6
7 from setuptools import setup, find_packages
8
9
10 here = os.path.abspath(os.path.dirname(__file__))
11
12
13 def read(*parts):
14 return codecs.open(os.path.join(here, *parts), 'r').read()
15
16
17 def find_version(*file_paths):
18 version_file = read(*file_paths)
19 version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]",
20 version_file, re.M)
21 if version_match:
22 return version_match.group(1)
23 raise RuntimeError("Unable to find version string.")
24
25
26 requires = ['botocore==1.5.43',
27 'colorama>=0.2.5,<=0.3.7',
28 'docutils>=0.10',
29 'rsa>=3.1.2,<=3.5.0',
30 's3transfer>=0.1.9,<0.2.0',
31 'PyYAML>=3.10,<=3.12']
32
33
34 if sys.version_info[:2] == (2, 6):
35 # For python2.6 we have to require argparse since it
36 # was not in stdlib until 2.7.
37 requires.append('argparse>=1.1')
38
39
40 setup_options = dict(
41 name='awscli',
42 version=find_version("awscli", "__init__.py"),
43 description='Universal Command Line Environment for AWS.',
44 long_description=open('README.rst').read(),
45 author='Amazon Web Services',
46 url='http://aws.amazon.com/cli/',
47 scripts=['bin/aws', 'bin/aws.cmd',
48 'bin/aws_completer', 'bin/aws_zsh_completer.sh',
49 'bin/aws_bash_completer'],
50 packages=find_packages(exclude=['tests*']),
51 package_data={'awscli': ['data/*.json', 'examples/*/*.rst',
52 'examples/*/*/*.rst', 'topics/*.rst',
53 'topics/*.json']},
54 install_requires=requires,
55 extras_require={
56 ':python_version=="2.6"': [
57 'argparse>=1.1',
58 ]
59 },
60 license="Apache License 2.0",
61 classifiers=(
62 'Development Status :: 5 - Production/Stable',
63 'Intended Audience :: Developers',
64 'Intended Audience :: System Administrators',
65 'Natural Language :: English',
66 'License :: OSI Approved :: Apache Software License',
67 'Programming Language :: Python',
68 'Programming Language :: Python :: 2.6',
69 'Programming Language :: Python :: 2.7',
70 'Programming Language :: Python :: 3',
71 'Programming Language :: Python :: 3.3',
72 'Programming Language :: Python :: 3.4',
73 'Programming Language :: Python :: 3.5',
74 'Programming Language :: Python :: 3.6',
75 ),
76 )
77
78 if 'py2exe' in sys.argv:
79 # This will actually give us a py2exe command.
80 import py2exe
81 # And we have some py2exe specific options.
82 setup_options['options'] = {
83 'py2exe': {
84 'optimize': 0,
85 'skip_archive': True,
86 'dll_excludes': ['crypt32.dll'],
87 'packages': ['docutils', 'urllib', 'httplib', 'HTMLParser',
88 'awscli', 'ConfigParser', 'xml.etree', 'pipes'],
89 }
90 }
91 setup_options['console'] = ['bin/aws']
92
93
94 setup(**setup_options)
95
```
Path: `awscli/__init__.py`
Content:
```
1 # Copyright 2012-2013 Amazon.com, Inc. or its affiliates. All Rights Reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License"). You
4 # may not use this file except in compliance with the License. A copy of
5 # the License is located at
6 #
7 # http://aws.amazon.com/apache2.0/
8 #
9 # or in the "license" file accompanying this file. This file is
10 # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
11 # ANY KIND, either express or implied. See the License for the specific
12 # language governing permissions and limitations under the License.
13 """
14 AWSCLI
15 ----
16 A Universal Command Line Environment for Amazon Web Services.
17 """
18 import os
19
20 __version__ = '1.11.80'
21
22 #
23 # Get our data path to be added to botocore's search path
24 #
25 _awscli_data_path = []
26 if 'AWS_DATA_PATH' in os.environ:
27 for path in os.environ['AWS_DATA_PATH'].split(os.pathsep):
28 path = os.path.expandvars(path)
29 path = os.path.expanduser(path)
30 _awscli_data_path.append(path)
31 _awscli_data_path.append(
32 os.path.join(os.path.dirname(os.path.abspath(__file__)), 'data')
33 )
34 os.environ['AWS_DATA_PATH'] = os.pathsep.join(_awscli_data_path)
35
36
37 EnvironmentVariables = {
38 'ca_bundle': ('ca_bundle', 'AWS_CA_BUNDLE', None, None),
39 'output': ('output', 'AWS_DEFAULT_OUTPUT', 'json', None),
40 }
41
42
43 SCALAR_TYPES = set([
44 'string', 'float', 'integer', 'long', 'boolean', 'double',
45 'blob', 'timestamp'
46 ])
47 COMPLEX_TYPES = set(['structure', 'map', 'list'])
48
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/awscli/__init__.py b/awscli/__init__.py
--- a/awscli/__init__.py
+++ b/awscli/__init__.py
@@ -19,6 +19,16 @@
__version__ = '1.11.80'
+
+#
+# Account for LC_CTYPE of UTF-8
+# For more information see:
+# * http://bugs.python.org/issue18378
+# * #945
+#
+if os.environ.get('LC_CTYPE', '') == 'UTF-8':
+ os.environ['LC_CTYPE'] = 'en_US.UTF-8'
+
#
# Get our data path to be added to botocore's search path
#
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -44,13 +44,13 @@
long_description=open('README.rst').read(),
author='Amazon Web Services',
url='http://aws.amazon.com/cli/',
- scripts=['bin/aws', 'bin/aws.cmd',
- 'bin/aws_completer', 'bin/aws_zsh_completer.sh',
+ scripts=['bin/aws_completer', 'bin/aws_zsh_completer.sh',
'bin/aws_bash_completer'],
packages=find_packages(exclude=['tests*']),
package_data={'awscli': ['data/*.json', 'examples/*/*.rst',
'examples/*/*/*.rst', 'topics/*.rst',
'topics/*.json']},
+ entry_points={"console_scripts": ["aws = awscli.clidriver:main"]},
install_requires=requires,
extras_require={
':python_version=="2.6"': [
| {"golden_diff": "diff --git a/awscli/__init__.py b/awscli/__init__.py\n--- a/awscli/__init__.py\n+++ b/awscli/__init__.py\n@@ -19,6 +19,16 @@\n \n __version__ = '1.11.80'\n \n+\n+#\n+# Account for LC_CTYPE of UTF-8\n+# For more information see:\n+# * http://bugs.python.org/issue18378\n+# * #945\n+#\n+if os.environ.get('LC_CTYPE', '') == 'UTF-8':\n+ os.environ['LC_CTYPE'] = 'en_US.UTF-8'\n+\n #\n # Get our data path to be added to botocore's search path\n #\ndiff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -44,13 +44,13 @@\n long_description=open('README.rst').read(),\n author='Amazon Web Services',\n url='http://aws.amazon.com/cli/',\n- scripts=['bin/aws', 'bin/aws.cmd',\n- 'bin/aws_completer', 'bin/aws_zsh_completer.sh',\n+ scripts=['bin/aws_completer', 'bin/aws_zsh_completer.sh',\n 'bin/aws_bash_completer'],\n packages=find_packages(exclude=['tests*']),\n package_data={'awscli': ['data/*.json', 'examples/*/*.rst',\n 'examples/*/*/*.rst', 'topics/*.rst',\n 'topics/*.json']},\n+ entry_points={\"console_scripts\": [\"aws = awscli.clidriver:main\"]},\n install_requires=requires,\n extras_require={\n ':python_version==\"2.6\"': [\n", "issue": "ImportError on any CLI command on Windows\nHi there,\r\n\r\nI just ran `pip install awscli` in a new Python 2.7 virtual environment on Windows. It installed successfully, but any command I run gives output like this:\r\n\r\n```\r\n$ aws ecr get-login --region us-east-1\r\nTraceback (most recent call last):\r\n File \"...\\.tox\\tools\\Scripts\\aws.cmd\", line 50, in <module>\r\n import awscli.clidriver\r\nImportError: No module named awscli.clidriver\r\n```\r\n\r\nThe package in the virtual environment seems fine and I can run this command successfully (no `ImportError`):\r\n\r\n```\r\n$ python -c \"from awscli.clidriver import main; main()\" ecr get-login --region us-east-1\r\nUnable to locate credentials. You can configure credentials by running \"aws configure\".\r\n```\r\n\r\nAny idea what's wrong?\r\n\r\nI'm using this version:\r\n\r\n```\r\n$ pip freeze | grep awscli\r\nawscli==1.11.36\r\n```\nImportError on any CLI command on Windows\nHi there,\r\n\r\nI just ran `pip install awscli` in a new Python 2.7 virtual environment on Windows. It installed successfully, but any command I run gives output like this:\r\n\r\n```\r\n$ aws ecr get-login --region us-east-1\r\nTraceback (most recent call last):\r\n File \"...\\.tox\\tools\\Scripts\\aws.cmd\", line 50, in <module>\r\n import awscli.clidriver\r\nImportError: No module named awscli.clidriver\r\n```\r\n\r\nThe package in the virtual environment seems fine and I can run this command successfully (no `ImportError`):\r\n\r\n```\r\n$ python -c \"from awscli.clidriver import main; main()\" ecr get-login --region us-east-1\r\nUnable to locate credentials. 
You can configure credentials by running \"aws configure\".\r\n```\r\n\r\nAny idea what's wrong?\r\n\r\nI'm using this version:\r\n\r\n```\r\n$ pip freeze | grep awscli\r\nawscli==1.11.36\r\n```\n", "before_files": [{"content": "#!/usr/bin/env python\nimport codecs\nimport os.path\nimport re\nimport sys\n\nfrom setuptools import setup, find_packages\n\n\nhere = os.path.abspath(os.path.dirname(__file__))\n\n\ndef read(*parts):\n return codecs.open(os.path.join(here, *parts), 'r').read()\n\n\ndef find_version(*file_paths):\n version_file = read(*file_paths)\n version_match = re.search(r\"^__version__ = ['\\\"]([^'\\\"]*)['\\\"]\",\n version_file, re.M)\n if version_match:\n return version_match.group(1)\n raise RuntimeError(\"Unable to find version string.\")\n\n\nrequires = ['botocore==1.5.43',\n 'colorama>=0.2.5,<=0.3.7',\n 'docutils>=0.10',\n 'rsa>=3.1.2,<=3.5.0',\n 's3transfer>=0.1.9,<0.2.0',\n 'PyYAML>=3.10,<=3.12']\n\n\nif sys.version_info[:2] == (2, 6):\n # For python2.6 we have to require argparse since it\n # was not in stdlib until 2.7.\n requires.append('argparse>=1.1')\n\n\nsetup_options = dict(\n name='awscli',\n version=find_version(\"awscli\", \"__init__.py\"),\n description='Universal Command Line Environment for AWS.',\n long_description=open('README.rst').read(),\n author='Amazon Web Services',\n url='http://aws.amazon.com/cli/',\n scripts=['bin/aws', 'bin/aws.cmd',\n 'bin/aws_completer', 'bin/aws_zsh_completer.sh',\n 'bin/aws_bash_completer'],\n packages=find_packages(exclude=['tests*']),\n package_data={'awscli': ['data/*.json', 'examples/*/*.rst',\n 'examples/*/*/*.rst', 'topics/*.rst',\n 'topics/*.json']},\n install_requires=requires,\n extras_require={\n ':python_version==\"2.6\"': [\n 'argparse>=1.1',\n ]\n },\n license=\"Apache License 2.0\",\n classifiers=(\n 'Development Status :: 5 - Production/Stable',\n 'Intended Audience :: Developers',\n 'Intended Audience :: System Administrators',\n 'Natural Language :: English',\n 'License :: OSI Approved :: Apache Software License',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2.6',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n ),\n)\n\nif 'py2exe' in sys.argv:\n # This will actually give us a py2exe command.\n import py2exe\n # And we have some py2exe specific options.\n setup_options['options'] = {\n 'py2exe': {\n 'optimize': 0,\n 'skip_archive': True,\n 'dll_excludes': ['crypt32.dll'],\n 'packages': ['docutils', 'urllib', 'httplib', 'HTMLParser',\n 'awscli', 'ConfigParser', 'xml.etree', 'pipes'],\n }\n }\n setup_options['console'] = ['bin/aws']\n\n\nsetup(**setup_options)\n", "path": "setup.py"}, {"content": "# Copyright 2012-2013 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"). You\n# may not use this file except in compliance with the License. A copy of\n# the License is located at\n#\n# http://aws.amazon.com/apache2.0/\n#\n# or in the \"license\" file accompanying this file. This file is\n# distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n# ANY KIND, either express or implied. 
See the License for the specific\n# language governing permissions and limitations under the License.\n\"\"\"\nAWSCLI\n----\nA Universal Command Line Environment for Amazon Web Services.\n\"\"\"\nimport os\n\n__version__ = '1.11.80'\n\n#\n# Get our data path to be added to botocore's search path\n#\n_awscli_data_path = []\nif 'AWS_DATA_PATH' in os.environ:\n for path in os.environ['AWS_DATA_PATH'].split(os.pathsep):\n path = os.path.expandvars(path)\n path = os.path.expanduser(path)\n _awscli_data_path.append(path)\n_awscli_data_path.append(\n os.path.join(os.path.dirname(os.path.abspath(__file__)), 'data')\n)\nos.environ['AWS_DATA_PATH'] = os.pathsep.join(_awscli_data_path)\n\n\nEnvironmentVariables = {\n 'ca_bundle': ('ca_bundle', 'AWS_CA_BUNDLE', None, None),\n 'output': ('output', 'AWS_DEFAULT_OUTPUT', 'json', None),\n}\n\n\nSCALAR_TYPES = set([\n 'string', 'float', 'integer', 'long', 'boolean', 'double',\n 'blob', 'timestamp'\n])\nCOMPLEX_TYPES = set(['structure', 'map', 'list'])\n", "path": "awscli/__init__.py"}], "after_files": [{"content": "#!/usr/bin/env python\nimport codecs\nimport os.path\nimport re\nimport sys\n\nfrom setuptools import setup, find_packages\n\n\nhere = os.path.abspath(os.path.dirname(__file__))\n\n\ndef read(*parts):\n return codecs.open(os.path.join(here, *parts), 'r').read()\n\n\ndef find_version(*file_paths):\n version_file = read(*file_paths)\n version_match = re.search(r\"^__version__ = ['\\\"]([^'\\\"]*)['\\\"]\",\n version_file, re.M)\n if version_match:\n return version_match.group(1)\n raise RuntimeError(\"Unable to find version string.\")\n\n\nrequires = ['botocore==1.5.43',\n 'colorama>=0.2.5,<=0.3.7',\n 'docutils>=0.10',\n 'rsa>=3.1.2,<=3.5.0',\n 's3transfer>=0.1.9,<0.2.0',\n 'PyYAML>=3.10,<=3.12']\n\n\nif sys.version_info[:2] == (2, 6):\n # For python2.6 we have to require argparse since it\n # was not in stdlib until 2.7.\n requires.append('argparse>=1.1')\n\n\nsetup_options = dict(\n name='awscli',\n version=find_version(\"awscli\", \"__init__.py\"),\n description='Universal Command Line Environment for AWS.',\n long_description=open('README.rst').read(),\n author='Amazon Web Services',\n url='http://aws.amazon.com/cli/',\n scripts=['bin/aws_completer', 'bin/aws_zsh_completer.sh',\n 'bin/aws_bash_completer'],\n packages=find_packages(exclude=['tests*']),\n package_data={'awscli': ['data/*.json', 'examples/*/*.rst',\n 'examples/*/*/*.rst', 'topics/*.rst',\n 'topics/*.json']},\n entry_points={\"console_scripts\": [\"aws = awscli.clidriver:main\"]},\n install_requires=requires,\n extras_require={\n ':python_version==\"2.6\"': [\n 'argparse>=1.1',\n ]\n },\n license=\"Apache License 2.0\",\n classifiers=(\n 'Development Status :: 5 - Production/Stable',\n 'Intended Audience :: Developers',\n 'Intended Audience :: System Administrators',\n 'Natural Language :: English',\n 'License :: OSI Approved :: Apache Software License',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2.6',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n ),\n)\n\nif 'py2exe' in sys.argv:\n # This will actually give us a py2exe command.\n import py2exe\n # And we have some py2exe specific options.\n setup_options['options'] = {\n 'py2exe': {\n 'optimize': 0,\n 'skip_archive': True,\n 'dll_excludes': ['crypt32.dll'],\n 'packages': ['docutils', 
'urllib', 'httplib', 'HTMLParser',\n 'awscli', 'ConfigParser', 'xml.etree', 'pipes'],\n }\n }\n setup_options['console'] = ['bin/aws']\n\n\nsetup(**setup_options)\n", "path": "setup.py"}, {"content": "# Copyright 2012-2013 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"). You\n# may not use this file except in compliance with the License. A copy of\n# the License is located at\n#\n# http://aws.amazon.com/apache2.0/\n#\n# or in the \"license\" file accompanying this file. This file is\n# distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n# ANY KIND, either express or implied. See the License for the specific\n# language governing permissions and limitations under the License.\n\"\"\"\nAWSCLI\n----\nA Universal Command Line Environment for Amazon Web Services.\n\"\"\"\nimport os\n\n__version__ = '1.11.80'\n\n\n#\n# Account for LC_CTYPE of UTF-8\n# For more information see:\n# * http://bugs.python.org/issue18378\n# * #945\n#\nif os.environ.get('LC_CTYPE', '') == 'UTF-8':\n os.environ['LC_CTYPE'] = 'en_US.UTF-8'\n\n#\n# Get our data path to be added to botocore's search path\n#\n_awscli_data_path = []\nif 'AWS_DATA_PATH' in os.environ:\n for path in os.environ['AWS_DATA_PATH'].split(os.pathsep):\n path = os.path.expandvars(path)\n path = os.path.expanduser(path)\n _awscli_data_path.append(path)\n_awscli_data_path.append(\n os.path.join(os.path.dirname(os.path.abspath(__file__)), 'data')\n)\nos.environ['AWS_DATA_PATH'] = os.pathsep.join(_awscli_data_path)\n\n\nEnvironmentVariables = {\n 'ca_bundle': ('ca_bundle', 'AWS_CA_BUNDLE', None, None),\n 'output': ('output', 'AWS_DEFAULT_OUTPUT', 'json', None),\n}\n\n\nSCALAR_TYPES = set([\n 'string', 'float', 'integer', 'long', 'boolean', 'double',\n 'blob', 'timestamp'\n])\nCOMPLEX_TYPES = set(['structure', 'map', 'list'])\n", "path": "awscli/__init__.py"}]} | 2,140 | 374 |
gh_patches_debug_2628 | rasdani/github-patches | git_diff | praw-dev__praw-1441 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
PRAW 6.5.1 and 7.0.0 require Python versions above 3.5.2
**Describe the bug**
At https://praw.readthedocs.io/en/latest/getting_started/installation.html, it says:
> PRAW supports Python 3.5+
3.5.2 seems to be insufficient for PRAW versions after 6.4.0. I *think* 3.5.3 is probably sufficient based on what I have read searching for information on this error message, but I am skipping that version on this particular system so I haven't confirmed.
**To Reproduce**
Steps to reproduce the behavior:
1. Upgrade PRAW to either version 6.5.1 or 7.0.0
2. Run a simple PRAW script
3. Get this error:
```
$ python3 ~/test.py
Traceback (most recent call last):
File "/home/myusername/test.py", line 5, in <module>
import praw
File "/home/myusername/.local/lib/python3.5/site-packages/praw/__init__.py", line 14, in <module>
from .reddit import Reddit # NOQA
File "/home/myusername/.local/lib/python3.5/site-packages/praw/reddit.py", line 50, in <module>
class Reddit:
File "/home/myusername/.local/lib/python3.5/site-packages/praw/reddit.py", line 128, in Reddit
requestor_kwargs: Dict[str, Any] = None,
File "/usr/lib/python3.5/typing.py", line 649, in __getitem__
return Union[arg, type(None)]
File "/usr/lib/python3.5/typing.py", line 552, in __getitem__
dict(self.__dict__), parameters, _root=True)
File "/usr/lib/python3.5/typing.py", line 512, in __new__
for t2 in all_params - {t1} if not isinstance(t2, TypeVar)):
File "/usr/lib/python3.5/typing.py", line 512, in <genexpr>
for t2 in all_params - {t1} if not isinstance(t2, TypeVar)):
File "/usr/lib/python3.5/typing.py", line 1077, in __subclasscheck__
if super().__subclasscheck__(cls):
File "/usr/lib/python3.5/abc.py", line 225, in __subclasscheck__
for scls in cls.__subclasses__():
TypeError: descriptor '__subclasses__' of 'type' object needs an argument
```
**Expected behavior**
Python 3.5.2 works fine with PRAW 6.4.0 and earlier.
**Code/Logs**
`import praw` will do the trick.
**System Info**
- OS: Linux
- Python: 3.5.2
- PRAW Version: 6.5.1 or 7.0.0
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 """praw setup.py"""
2
3 import re
4 from codecs import open
5 from os import path
6
7 from setuptools import find_packages, setup
8
9 PACKAGE_NAME = "praw"
10 HERE = path.abspath(path.dirname(__file__))
11 with open(path.join(HERE, "README.rst"), encoding="utf-8") as fp:
12 README = fp.read()
13 with open(path.join(HERE, PACKAGE_NAME, "const.py"), encoding="utf-8") as fp:
14 VERSION = re.search('__version__ = "([^"]+)"', fp.read()).group(1)
15
16 extras = {
17 "ci": ["coveralls"],
18 "dev": ["pre-commit"],
19 "lint": [
20 "black",
21 "flake8",
22 "pydocstyle",
23 "sphinx<3.0",
24 "sphinx_rtd_theme",
25 ],
26 "test": [
27 "betamax >=0.8, <0.9",
28 "betamax-matchers >=0.3.0, <0.5",
29 "pytest >=2.7.3",
30 ],
31 }
32 extras["dev"] += extras["lint"] + extras["test"]
33
34 setup(
35 name=PACKAGE_NAME,
36 author="Bryce Boe",
37 author_email="[email protected]",
38 python_requires=">=3.5",
39 classifiers=[
40 "Development Status :: 5 - Production/Stable",
41 "Environment :: Console",
42 "Intended Audience :: Developers",
43 "License :: OSI Approved :: BSD License",
44 "Natural Language :: English",
45 "Operating System :: OS Independent",
46 "Programming Language :: Python",
47 "Programming Language :: Python :: 3",
48 "Programming Language :: Python :: 3.5",
49 "Programming Language :: Python :: 3.6",
50 "Programming Language :: Python :: 3.7",
51 "Programming Language :: Python :: 3.8",
52 "Topic :: Utilities",
53 ],
54 description=(
55 "PRAW, an acronym for `Python Reddit API Wrapper`, is a "
56 "python package that allows for simple access to "
57 "reddit's API."
58 ),
59 extras_require=extras,
60 install_requires=[
61 "prawcore >=1.3.0, <2.0",
62 "update_checker >=0.16",
63 "websocket-client >=0.54.0",
64 ],
65 keywords="reddit api wrapper",
66 license="Simplified BSD License",
67 long_description=README,
68 package_data={
69 "": ["LICENSE.txt"],
70 PACKAGE_NAME: ["*.ini", "images/*.jpg"],
71 },
72 packages=find_packages(exclude=["tests", "tests.*", "tools", "tools.*"]),
73 url="https://praw.readthedocs.org/",
74 version=VERSION,
75 )
76
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -35,7 +35,7 @@
name=PACKAGE_NAME,
author="Bryce Boe",
author_email="[email protected]",
- python_requires=">=3.5",
+ python_requires=">3.5.3",
classifiers=[
"Development Status :: 5 - Production/Stable",
"Environment :: Console",
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -35,7 +35,7 @@\n name=PACKAGE_NAME,\n author=\"Bryce Boe\",\n author_email=\"[email protected]\",\n- python_requires=\">=3.5\",\n+ python_requires=\">3.5.3\",\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Environment :: Console\",\n", "issue": "PRAW 6.5.1 and 7.0.0 require Python versions above 3.5.2\n**Describe the bug**\r\n\r\nAt https://praw.readthedocs.io/en/latest/getting_started/installation.html, it says:\r\n\r\n> PRAW supports Python 3.5+\r\n\r\n3.5.2 seems to be insufficient for PRAW versions after 6.4.0. I *think* 3.5.3 is probably sufficient based on what I have read searching for information on this error message, but I am skipping that version on this particular system so I haven't confirmed.\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n1. Upgrade PRAW to either version 6.5.1 or 7.0.0\r\n\r\n2. Run a simple PRAW script\r\n\r\n3. Get this error:\r\n\r\n```\r\n$ python3 ~/test.py\r\nTraceback (most recent call last):\r\n File \"/home/myusername/test.py\", line 5, in <module>\r\n import praw\r\n File \"/home/myusername/.local/lib/python3.5/site-packages/praw/__init__.py\", line 14, in <module>\r\n from .reddit import Reddit # NOQA\r\n File \"/home/myusername/.local/lib/python3.5/site-packages/praw/reddit.py\", line 50, in <module>\r\n class Reddit:\r\n File \"/home/myusername/.local/lib/python3.5/site-packages/praw/reddit.py\", line 128, in Reddit\r\n requestor_kwargs: Dict[str, Any] = None,\r\n File \"/usr/lib/python3.5/typing.py\", line 649, in __getitem__\r\n return Union[arg, type(None)]\r\n File \"/usr/lib/python3.5/typing.py\", line 552, in __getitem__\r\n dict(self.__dict__), parameters, _root=True)\r\n File \"/usr/lib/python3.5/typing.py\", line 512, in __new__\r\n for t2 in all_params - {t1} if not isinstance(t2, TypeVar)):\r\n File \"/usr/lib/python3.5/typing.py\", line 512, in <genexpr>\r\n for t2 in all_params - {t1} if not isinstance(t2, TypeVar)):\r\n File \"/usr/lib/python3.5/typing.py\", line 1077, in __subclasscheck__\r\n if super().__subclasscheck__(cls):\r\n File \"/usr/lib/python3.5/abc.py\", line 225, in __subclasscheck__\r\n for scls in cls.__subclasses__():\r\nTypeError: descriptor '__subclasses__' of 'type' object needs an argument\r\n```\r\n\r\n**Expected behavior**\r\n\r\nPython 3.5.2 works fine with PRAW 6.4.0 and earlier.\r\n\r\n**Code/Logs**\r\n\r\n`import praw` will do the trick.\r\n\r\n**System Info**\r\n - OS: Linux\r\n - Python: 3.5.2\r\n - PRAW Version: 6.5.1 or 7.0.0\r\n\r\n\n", "before_files": [{"content": "\"\"\"praw setup.py\"\"\"\n\nimport re\nfrom codecs import open\nfrom os import path\n\nfrom setuptools import find_packages, setup\n\nPACKAGE_NAME = \"praw\"\nHERE = path.abspath(path.dirname(__file__))\nwith open(path.join(HERE, \"README.rst\"), encoding=\"utf-8\") as fp:\n README = fp.read()\nwith open(path.join(HERE, PACKAGE_NAME, \"const.py\"), encoding=\"utf-8\") as fp:\n VERSION = re.search('__version__ = \"([^\"]+)\"', fp.read()).group(1)\n\nextras = {\n \"ci\": [\"coveralls\"],\n \"dev\": [\"pre-commit\"],\n \"lint\": [\n \"black\",\n \"flake8\",\n \"pydocstyle\",\n \"sphinx<3.0\",\n \"sphinx_rtd_theme\",\n ],\n \"test\": [\n \"betamax >=0.8, <0.9\",\n \"betamax-matchers >=0.3.0, <0.5\",\n \"pytest >=2.7.3\",\n ],\n}\nextras[\"dev\"] += extras[\"lint\"] + extras[\"test\"]\n\nsetup(\n name=PACKAGE_NAME,\n author=\"Bryce Boe\",\n author_email=\"[email protected]\",\n python_requires=\">=3.5\",\n 
classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Environment :: Console\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: BSD License\",\n \"Natural Language :: English\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Topic :: Utilities\",\n ],\n description=(\n \"PRAW, an acronym for `Python Reddit API Wrapper`, is a \"\n \"python package that allows for simple access to \"\n \"reddit's API.\"\n ),\n extras_require=extras,\n install_requires=[\n \"prawcore >=1.3.0, <2.0\",\n \"update_checker >=0.16\",\n \"websocket-client >=0.54.0\",\n ],\n keywords=\"reddit api wrapper\",\n license=\"Simplified BSD License\",\n long_description=README,\n package_data={\n \"\": [\"LICENSE.txt\"],\n PACKAGE_NAME: [\"*.ini\", \"images/*.jpg\"],\n },\n packages=find_packages(exclude=[\"tests\", \"tests.*\", \"tools\", \"tools.*\"]),\n url=\"https://praw.readthedocs.org/\",\n version=VERSION,\n)\n", "path": "setup.py"}], "after_files": [{"content": "\"\"\"praw setup.py\"\"\"\n\nimport re\nfrom codecs import open\nfrom os import path\n\nfrom setuptools import find_packages, setup\n\nPACKAGE_NAME = \"praw\"\nHERE = path.abspath(path.dirname(__file__))\nwith open(path.join(HERE, \"README.rst\"), encoding=\"utf-8\") as fp:\n README = fp.read()\nwith open(path.join(HERE, PACKAGE_NAME, \"const.py\"), encoding=\"utf-8\") as fp:\n VERSION = re.search('__version__ = \"([^\"]+)\"', fp.read()).group(1)\n\nextras = {\n \"ci\": [\"coveralls\"],\n \"dev\": [\"pre-commit\"],\n \"lint\": [\n \"black\",\n \"flake8\",\n \"pydocstyle\",\n \"sphinx<3.0\",\n \"sphinx_rtd_theme\",\n ],\n \"test\": [\n \"betamax >=0.8, <0.9\",\n \"betamax-matchers >=0.3.0, <0.5\",\n \"pytest >=2.7.3\",\n ],\n}\nextras[\"dev\"] += extras[\"lint\"] + extras[\"test\"]\n\nsetup(\n name=PACKAGE_NAME,\n author=\"Bryce Boe\",\n author_email=\"[email protected]\",\n python_requires=\">3.5.3\",\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Environment :: Console\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: BSD License\",\n \"Natural Language :: English\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Topic :: Utilities\",\n ],\n description=(\n \"PRAW, an acronym for `Python Reddit API Wrapper`, is a \"\n \"python package that allows for simple access to \"\n \"reddit's API.\"\n ),\n extras_require=extras,\n install_requires=[\n \"prawcore >=1.3.0, <2.0\",\n \"update_checker >=0.16\",\n \"websocket-client >=0.54.0\",\n ],\n keywords=\"reddit api wrapper\",\n license=\"Simplified BSD License\",\n long_description=README,\n package_data={\n \"\": [\"LICENSE.txt\"],\n PACKAGE_NAME: [\"*.ini\", \"images/*.jpg\"],\n },\n packages=find_packages(exclude=[\"tests\", \"tests.*\", \"tools\", \"tools.*\"]),\n url=\"https://praw.readthedocs.org/\",\n version=VERSION,\n)\n", "path": "setup.py"}]} | 1,637 | 101 |
gh_patches_debug_39983 | rasdani/github-patches | git_diff | elastic__apm-agent-python-1613 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Remove py2 leftovers
This is an issue to track all of the py2 stuff we can remove once a sufficient waiting period after 6.0 has passed.
- [x] Remove `zerorpc` (py2 only)
- [x] Remove `compat.py`
- [x] Run `pyupgrade`?
Ref #928
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `elasticapm/contrib/zerorpc/__init__.py`
Content:
```
1 # BSD 3-Clause License
2 #
3 # Copyright (c) 2012, the Sentry Team, see AUTHORS for more details
4 # Copyright (c) 2019, Elasticsearch BV
5 # All rights reserved.
6 #
7 # Redistribution and use in source and binary forms, with or without
8 # modification, are permitted provided that the following conditions are met:
9 #
10 # * Redistributions of source code must retain the above copyright notice, this
11 # list of conditions and the following disclaimer.
12 #
13 # * Redistributions in binary form must reproduce the above copyright notice,
14 # this list of conditions and the following disclaimer in the documentation
15 # and/or other materials provided with the distribution.
16 #
17 # * Neither the name of the copyright holder nor the names of its
18 # contributors may be used to endorse or promote products derived from
19 # this software without specific prior written permission.
20 #
21 # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
22 # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
24 # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
25 # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
26 # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
27 # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
28 # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
29 # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
30
31
32 import inspect
33
34 from elasticapm.base import Client
35
36
37 class Middleware(object):
38 """ElasticAPM middleware for ZeroRPC.
39
40 >>> elasticapm = Middleware(service_name='..', secret_token='...')
41 >>> zerorpc.Context.get_instance().register_middleware(elasticapm)
42
43 Exceptions detected server-side in ZeroRPC will be submitted to the apm server (and
44 propagated to the client as well).
45 """
46
47 def __init__(self, hide_zerorpc_frames=True, client=None, **kwargs):
48 """Create a middleware object that can be injected in a ZeroRPC server.
49
50 - hide_zerorpc_frames: modify the exception stacktrace to remove the
51 internal zerorpc frames (True by default to make
52 the stacktrace as readable as possible);
53 - client: use an existing raven.Client object, otherwise one will be
54 instantiated from the keyword arguments.
55
56 """
57 self._elasticapm_client = client or Client(**kwargs)
58 self._hide_zerorpc_frames = hide_zerorpc_frames
59
60 def server_inspect_exception(self, req_event, rep_event, task_ctx, exc_info):
61 """Called when an exception has been raised in the code run by ZeroRPC"""
62
63 # Hide the zerorpc internal frames for readability, for a REQ/REP or
64 # REQ/STREAM server the frames to hide are:
65 # - core.ServerBase._async_task
66 # - core.Pattern*.process_call
67 # - core.DecoratorBase.__call__
68 #
69 # For a PUSH/PULL or PUB/SUB server the frame to hide is:
70 # - core.Puller._receiver
71 if self._hide_zerorpc_frames:
72 traceback = exc_info[2]
73 while traceback:
74 zerorpc_frame = traceback.tb_frame
75 zerorpc_frame.f_locals["__traceback_hide__"] = True
76 frame_info = inspect.getframeinfo(zerorpc_frame)
77 # Is there a better way than this (or looking up the filenames
78 # or hardcoding the number of frames to skip) to know when we
79 # are out of zerorpc?
80 if frame_info.function == "__call__" or frame_info.function == "_receiver":
81 break
82 traceback = traceback.tb_next
83
84 self._elasticapm_client.capture_exception(exc_info, extra=task_ctx, handled=False)
85
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/elasticapm/contrib/zerorpc/__init__.py b/elasticapm/contrib/zerorpc/__init__.py
deleted file mode 100644
--- a/elasticapm/contrib/zerorpc/__init__.py
+++ /dev/null
@@ -1,84 +0,0 @@
-# BSD 3-Clause License
-#
-# Copyright (c) 2012, the Sentry Team, see AUTHORS for more details
-# Copyright (c) 2019, Elasticsearch BV
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are met:
-#
-# * Redistributions of source code must retain the above copyright notice, this
-# list of conditions and the following disclaimer.
-#
-# * Redistributions in binary form must reproduce the above copyright notice,
-# this list of conditions and the following disclaimer in the documentation
-# and/or other materials provided with the distribution.
-#
-# * Neither the name of the copyright holder nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
-# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
-# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
-# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
-# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
-# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-
-
-import inspect
-
-from elasticapm.base import Client
-
-
-class Middleware(object):
- """ElasticAPM middleware for ZeroRPC.
-
- >>> elasticapm = Middleware(service_name='..', secret_token='...')
- >>> zerorpc.Context.get_instance().register_middleware(elasticapm)
-
- Exceptions detected server-side in ZeroRPC will be submitted to the apm server (and
- propagated to the client as well).
- """
-
- def __init__(self, hide_zerorpc_frames=True, client=None, **kwargs):
- """Create a middleware object that can be injected in a ZeroRPC server.
-
- - hide_zerorpc_frames: modify the exception stacktrace to remove the
- internal zerorpc frames (True by default to make
- the stacktrace as readable as possible);
- - client: use an existing raven.Client object, otherwise one will be
- instantiated from the keyword arguments.
-
- """
- self._elasticapm_client = client or Client(**kwargs)
- self._hide_zerorpc_frames = hide_zerorpc_frames
-
- def server_inspect_exception(self, req_event, rep_event, task_ctx, exc_info):
- """Called when an exception has been raised in the code run by ZeroRPC"""
-
- # Hide the zerorpc internal frames for readability, for a REQ/REP or
- # REQ/STREAM server the frames to hide are:
- # - core.ServerBase._async_task
- # - core.Pattern*.process_call
- # - core.DecoratorBase.__call__
- #
- # For a PUSH/PULL or PUB/SUB server the frame to hide is:
- # - core.Puller._receiver
- if self._hide_zerorpc_frames:
- traceback = exc_info[2]
- while traceback:
- zerorpc_frame = traceback.tb_frame
- zerorpc_frame.f_locals["__traceback_hide__"] = True
- frame_info = inspect.getframeinfo(zerorpc_frame)
- # Is there a better way than this (or looking up the filenames
- # or hardcoding the number of frames to skip) to know when we
- # are out of zerorpc?
- if frame_info.function == "__call__" or frame_info.function == "_receiver":
- break
- traceback = traceback.tb_next
-
- self._elasticapm_client.capture_exception(exc_info, extra=task_ctx, handled=False)
| {"golden_diff": "diff --git a/elasticapm/contrib/zerorpc/__init__.py b/elasticapm/contrib/zerorpc/__init__.py\ndeleted file mode 100644\n--- a/elasticapm/contrib/zerorpc/__init__.py\n+++ /dev/null\n@@ -1,84 +0,0 @@\n-# BSD 3-Clause License\n-#\n-# Copyright (c) 2012, the Sentry Team, see AUTHORS for more details\n-# Copyright (c) 2019, Elasticsearch BV\n-# All rights reserved.\n-#\n-# Redistribution and use in source and binary forms, with or without\n-# modification, are permitted provided that the following conditions are met:\n-#\n-# * Redistributions of source code must retain the above copyright notice, this\n-# list of conditions and the following disclaimer.\n-#\n-# * Redistributions in binary form must reproduce the above copyright notice,\n-# this list of conditions and the following disclaimer in the documentation\n-# and/or other materials provided with the distribution.\n-#\n-# * Neither the name of the copyright holder nor the names of its\n-# contributors may be used to endorse or promote products derived from\n-# this software without specific prior written permission.\n-#\n-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n-# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n-# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n-# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n-# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n-# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n-# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n-# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n-\n-\n-import inspect\n-\n-from elasticapm.base import Client\n-\n-\n-class Middleware(object):\n- \"\"\"ElasticAPM middleware for ZeroRPC.\n-\n- >>> elasticapm = Middleware(service_name='..', secret_token='...')\n- >>> zerorpc.Context.get_instance().register_middleware(elasticapm)\n-\n- Exceptions detected server-side in ZeroRPC will be submitted to the apm server (and\n- propagated to the client as well).\n- \"\"\"\n-\n- def __init__(self, hide_zerorpc_frames=True, client=None, **kwargs):\n- \"\"\"Create a middleware object that can be injected in a ZeroRPC server.\n-\n- - hide_zerorpc_frames: modify the exception stacktrace to remove the\n- internal zerorpc frames (True by default to make\n- the stacktrace as readable as possible);\n- - client: use an existing raven.Client object, otherwise one will be\n- instantiated from the keyword arguments.\n-\n- \"\"\"\n- self._elasticapm_client = client or Client(**kwargs)\n- self._hide_zerorpc_frames = hide_zerorpc_frames\n-\n- def server_inspect_exception(self, req_event, rep_event, task_ctx, exc_info):\n- \"\"\"Called when an exception has been raised in the code run by ZeroRPC\"\"\"\n-\n- # Hide the zerorpc internal frames for readability, for a REQ/REP or\n- # REQ/STREAM server the frames to hide are:\n- # - core.ServerBase._async_task\n- # - core.Pattern*.process_call\n- # - core.DecoratorBase.__call__\n- #\n- # For a PUSH/PULL or PUB/SUB server the frame to hide is:\n- # - core.Puller._receiver\n- if self._hide_zerorpc_frames:\n- traceback = exc_info[2]\n- while traceback:\n- zerorpc_frame = traceback.tb_frame\n- zerorpc_frame.f_locals[\"__traceback_hide__\"] = True\n- frame_info = inspect.getframeinfo(zerorpc_frame)\n- # Is there a better 
way than this (or looking up the filenames\n- # or hardcoding the number of frames to skip) to know when we\n- # are out of zerorpc?\n- if frame_info.function == \"__call__\" or frame_info.function == \"_receiver\":\n- break\n- traceback = traceback.tb_next\n-\n- self._elasticapm_client.capture_exception(exc_info, extra=task_ctx, handled=False)\n", "issue": "Remove py2 leftovers\nThis is an issue to track all of the py2 stuff we can remove once a sufficient waiting period after 6.0 has passed.\r\n\r\n- [x] Remove `zerorpc` (py2 only)\r\n- [x] Remove `compat.py`\r\n- [x] Run `pyupgrade`?\r\n\r\nRef #928 \n", "before_files": [{"content": "# BSD 3-Clause License\n#\n# Copyright (c) 2012, the Sentry Team, see AUTHORS for more details\n# Copyright (c) 2019, Elasticsearch BV\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# * Neither the name of the copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n\n\nimport inspect\n\nfrom elasticapm.base import Client\n\n\nclass Middleware(object):\n \"\"\"ElasticAPM middleware for ZeroRPC.\n\n >>> elasticapm = Middleware(service_name='..', secret_token='...')\n >>> zerorpc.Context.get_instance().register_middleware(elasticapm)\n\n Exceptions detected server-side in ZeroRPC will be submitted to the apm server (and\n propagated to the client as well).\n \"\"\"\n\n def __init__(self, hide_zerorpc_frames=True, client=None, **kwargs):\n \"\"\"Create a middleware object that can be injected in a ZeroRPC server.\n\n - hide_zerorpc_frames: modify the exception stacktrace to remove the\n internal zerorpc frames (True by default to make\n the stacktrace as readable as possible);\n - client: use an existing raven.Client object, otherwise one will be\n instantiated from the keyword arguments.\n\n \"\"\"\n self._elasticapm_client = client or Client(**kwargs)\n self._hide_zerorpc_frames = hide_zerorpc_frames\n\n def server_inspect_exception(self, req_event, rep_event, task_ctx, exc_info):\n \"\"\"Called when an exception has been raised in the code run by ZeroRPC\"\"\"\n\n # Hide the zerorpc internal frames for readability, for a REQ/REP or\n # REQ/STREAM server the frames to hide are:\n # - core.ServerBase._async_task\n # - core.Pattern*.process_call\n # - 
core.DecoratorBase.__call__\n #\n # For a PUSH/PULL or PUB/SUB server the frame to hide is:\n # - core.Puller._receiver\n if self._hide_zerorpc_frames:\n traceback = exc_info[2]\n while traceback:\n zerorpc_frame = traceback.tb_frame\n zerorpc_frame.f_locals[\"__traceback_hide__\"] = True\n frame_info = inspect.getframeinfo(zerorpc_frame)\n # Is there a better way than this (or looking up the filenames\n # or hardcoding the number of frames to skip) to know when we\n # are out of zerorpc?\n if frame_info.function == \"__call__\" or frame_info.function == \"_receiver\":\n break\n traceback = traceback.tb_next\n\n self._elasticapm_client.capture_exception(exc_info, extra=task_ctx, handled=False)\n", "path": "elasticapm/contrib/zerorpc/__init__.py"}], "after_files": [{"content": null, "path": "elasticapm/contrib/zerorpc/__init__.py"}]} | 1,362 | 1,006 |
gh_patches_debug_9845 | rasdani/github-patches | git_diff | python-poetry__poetry-1458 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Poetry can't resolve with git dependency
I have `werkzeug` installed as a git dependency; adding `flask-jwt-extended` then fails to resolve, even though the installed `werkzeug` is 1.0.0.dev0.
```bash
$ poetry --version
Poetry 0.12.11
```
```bash
$ cat pyproject.toml
[tool.poetry]
name = "test-poetry-git-deps"
version = "0.1.0"
description = ""
authors = []
[tool.poetry.dependencies]
python = "^3.6"
werkzeug = {git = "https://github.com/pallets/werkzeug.git"}
[tool.poetry.dev-dependencies]
[build-system]
requires = ["poetry>=0.12"]
build-backend = "poetry.masonry.api"
```
```bash
$ cat poetry.lock
[[package]]
category = "main"
description = "The comprehensive WSGI web application library."
name = "Werkzeug"
optional = false
python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*"
version = "1.0.0.dev0"
[package.source]
reference = "86f7bdf6d7c005d3ab8be50226fe42edc1ce4f7c"
type = "git"
url = "https://github.com/pallets/werkzeug.git"
[metadata]
content-hash = "7d88d5e01ecd59f104fee20ece9f80aa3b98bc2a281afc0975c36010d6af2648"
python-versions = "^3.6"
[metadata.hashes]
Werkzeug = []
```
```bash
$ poetry add flask-jwt-extended
Using version ^3.18 for flask-jwt-extended
Updating dependencies
Resolving dependencies... (6.9s)
[SolverProblemError]
Because no versions of flask-jwt-extended match >3.18,<4.0
and flask-jwt-extended (3.18.0) depends on Werkzeug (>=0.14), flask-jwt-extended (>=3.18,<4.0) requires Werkzeug (>=0.14).
So, because no versions of werkzeug match >=0.14
and test-poetry-git-deps depends on flask-jwt-extended (^3.18), version solving failed.
add [-D|--dev] [--git GIT] [--path PATH] [-E|--extras EXTRAS] [--optional] [--python PYTHON] [--platform PLATFORM] [--allow-prereleases] [--dry-run] [--] <name> (<name>)...
```
```bash
$ poetry show
Werkzeug 1.0.0.dev0 86f7bdf The comprehensive WSGI web application library.
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `poetry/repositories/repository.py`
Content:
```
1 from poetry.semver import parse_constraint
2 from poetry.semver import VersionConstraint
3 from poetry.semver import VersionRange
4
5 from .base_repository import BaseRepository
6
7
8 class Repository(BaseRepository):
9 def __init__(self, packages=None):
10 super(Repository, self).__init__()
11
12 self._name = None
13
14 if packages is None:
15 packages = []
16
17 for package in packages:
18 self.add_package(package)
19
20 @property
21 def name(self):
22 return self._name
23
24 def package(self, name, version, extras=None):
25 name = name.lower()
26
27 if extras is None:
28 extras = []
29
30 for package in self.packages:
31 if name == package.name and package.version.text == version:
32 # Activate extra dependencies
33 for extra in extras:
34 if extra in package.extras:
35 for extra_dep in package.extras[extra]:
36 for dep in package.requires:
37 if dep.name == extra_dep.name:
38 dep.activate()
39
40 return package.clone()
41
42 def find_packages(
43 self, name, constraint=None, extras=None, allow_prereleases=False
44 ):
45 name = name.lower()
46 packages = []
47 if extras is None:
48 extras = []
49
50 if constraint is None:
51 constraint = "*"
52
53 if not isinstance(constraint, VersionConstraint):
54 constraint = parse_constraint(constraint)
55
56 if isinstance(constraint, VersionRange):
57 if (
58 constraint.max is not None
59 and constraint.max.is_prerelease()
60 or constraint.min is not None
61 and constraint.min.is_prerelease()
62 ):
63 allow_prereleases = True
64
65 for package in self.packages:
66 if name == package.name:
67 if package.is_prerelease() and not allow_prereleases:
68 continue
69
70 if constraint.allows(package.version):
71 for dep in package.requires:
72 for extra in extras:
73 if extra not in package.extras:
74 continue
75
76 reqs = package.extras[extra]
77 for req in reqs:
78 if req.name == dep.name:
79 dep.activate()
80
81 if extras:
82 package.requires_extras = extras
83
84 packages.append(package)
85
86 return packages
87
88 def has_package(self, package):
89 package_id = package.unique_name
90
91 for repo_package in self.packages:
92 if package_id == repo_package.unique_name:
93 return True
94
95 return False
96
97 def add_package(self, package):
98 self._packages.append(package)
99
100 def remove_package(self, package):
101 package_id = package.unique_name
102
103 index = None
104 for i, repo_package in enumerate(self.packages):
105 if package_id == repo_package.unique_name:
106 index = i
107 break
108
109 if index is not None:
110 del self._packages[index]
111
112 def search(self, query, mode=0):
113 results = []
114
115 for package in self.packages:
116 if query in package.name:
117 results.append(package)
118
119 return results
120
121 def __len__(self):
122 return len(self._packages)
123
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/poetry/repositories/repository.py b/poetry/repositories/repository.py
--- a/poetry/repositories/repository.py
+++ b/poetry/repositories/repository.py
@@ -64,7 +64,13 @@
for package in self.packages:
if name == package.name:
- if package.is_prerelease() and not allow_prereleases:
+ if (
+ package.is_prerelease()
+ and not allow_prereleases
+ and not package.source_type
+ ):
+ # If prereleases are not allowed and the package is a prerelease
+ # and is a standard package then we skip it
continue
if constraint.allows(package.version):
| {"golden_diff": "diff --git a/poetry/repositories/repository.py b/poetry/repositories/repository.py\n--- a/poetry/repositories/repository.py\n+++ b/poetry/repositories/repository.py\n@@ -64,7 +64,13 @@\n \n for package in self.packages:\n if name == package.name:\n- if package.is_prerelease() and not allow_prereleases:\n+ if (\n+ package.is_prerelease()\n+ and not allow_prereleases\n+ and not package.source_type\n+ ):\n+ # If prereleases are not allowed and the package is a prerelease\n+ # and is a standard package then we skip it\n continue\n \n if constraint.allows(package.version):\n", "issue": "Poetry cant resolve with git dependency\nI have `werkzeug` installed with git version and then add `flask-jwt-extended` shows error though `werkzeug` is 1.0.0.dev0\r\n\r\n```bash\r\n$ poetry --version\r\nPoetry 0.12.11\r\n```\r\n\r\n```bash\r\n$ cat pyproject.toml\r\n[tool.poetry]\r\nname = \"test-poetry-git-deps\"\r\nversion = \"0.1.0\"\r\ndescription = \"\"\r\nauthors = []\r\n\r\n[tool.poetry.dependencies]\r\npython = \"^3.6\"\r\nwerkzeug = {git = \"https://github.com/pallets/werkzeug.git\"}\r\n\r\n[tool.poetry.dev-dependencies]\r\n\r\n[build-system]\r\nrequires = [\"poetry>=0.12\"]\r\nbuild-backend = \"poetry.masonry.api\"\r\n```\r\n\r\n```bash\r\n$ cat poetry.lock\r\n[[package]]\r\ncategory = \"main\"\r\ndescription = \"The comprehensive WSGI web application library.\"\r\nname = \"Werkzeug\"\r\noptional = false\r\npython-versions = \">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*\"\r\nversion = \"1.0.0.dev0\"\r\n\r\n[package.source]\r\nreference = \"86f7bdf6d7c005d3ab8be50226fe42edc1ce4f7c\"\r\ntype = \"git\"\r\nurl = \"https://github.com/pallets/werkzeug.git\"\r\n[metadata]\r\ncontent-hash = \"7d88d5e01ecd59f104fee20ece9f80aa3b98bc2a281afc0975c36010d6af2648\"\r\npython-versions = \"^3.6\"\r\n\r\n[metadata.hashes]\r\nWerkzeug = []\r\n```\r\n\r\n```bash\r\n$ poetry add flask-jwt-extended\r\nUsing version ^3.18 for flask-jwt-extended\r\n\r\nUpdating dependencies\r\nResolving dependencies... (6.9s)\r\n \r\n[SolverProblemError] \r\nBecause no versions of flask-jwt-extended match >3.18,<4.0 \r\n and flask-jwt-extended (3.18.0) depends on Werkzeug (>=0.14), flask-jwt-extended (>=3.18,<4.0) requires Werkzeug (>=0.14). \r\nSo, because no versions of werkzeug match >=0.14 \r\n and test-poetry-git-deps depends on flask-jwt-extended (^3.18), version solving failed. 
\r\n \r\nadd [-D|--dev] [--git GIT] [--path PATH] [-E|--extras EXTRAS] [--optional] [--python PYTHON] [--platform PLATFORM] [--allow-prereleases] [--dry-run] [--] <name> (<name>)...\r\n```\r\n\r\n```bash\r\n$ poetry show \r\nWerkzeug 1.0.0.dev0 86f7bdf The comprehensive WSGI web application library.\r\n```\n", "before_files": [{"content": "from poetry.semver import parse_constraint\nfrom poetry.semver import VersionConstraint\nfrom poetry.semver import VersionRange\n\nfrom .base_repository import BaseRepository\n\n\nclass Repository(BaseRepository):\n def __init__(self, packages=None):\n super(Repository, self).__init__()\n\n self._name = None\n\n if packages is None:\n packages = []\n\n for package in packages:\n self.add_package(package)\n\n @property\n def name(self):\n return self._name\n\n def package(self, name, version, extras=None):\n name = name.lower()\n\n if extras is None:\n extras = []\n\n for package in self.packages:\n if name == package.name and package.version.text == version:\n # Activate extra dependencies\n for extra in extras:\n if extra in package.extras:\n for extra_dep in package.extras[extra]:\n for dep in package.requires:\n if dep.name == extra_dep.name:\n dep.activate()\n\n return package.clone()\n\n def find_packages(\n self, name, constraint=None, extras=None, allow_prereleases=False\n ):\n name = name.lower()\n packages = []\n if extras is None:\n extras = []\n\n if constraint is None:\n constraint = \"*\"\n\n if not isinstance(constraint, VersionConstraint):\n constraint = parse_constraint(constraint)\n\n if isinstance(constraint, VersionRange):\n if (\n constraint.max is not None\n and constraint.max.is_prerelease()\n or constraint.min is not None\n and constraint.min.is_prerelease()\n ):\n allow_prereleases = True\n\n for package in self.packages:\n if name == package.name:\n if package.is_prerelease() and not allow_prereleases:\n continue\n\n if constraint.allows(package.version):\n for dep in package.requires:\n for extra in extras:\n if extra not in package.extras:\n continue\n\n reqs = package.extras[extra]\n for req in reqs:\n if req.name == dep.name:\n dep.activate()\n\n if extras:\n package.requires_extras = extras\n\n packages.append(package)\n\n return packages\n\n def has_package(self, package):\n package_id = package.unique_name\n\n for repo_package in self.packages:\n if package_id == repo_package.unique_name:\n return True\n\n return False\n\n def add_package(self, package):\n self._packages.append(package)\n\n def remove_package(self, package):\n package_id = package.unique_name\n\n index = None\n for i, repo_package in enumerate(self.packages):\n if package_id == repo_package.unique_name:\n index = i\n break\n\n if index is not None:\n del self._packages[index]\n\n def search(self, query, mode=0):\n results = []\n\n for package in self.packages:\n if query in package.name:\n results.append(package)\n\n return results\n\n def __len__(self):\n return len(self._packages)\n", "path": "poetry/repositories/repository.py"}], "after_files": [{"content": "from poetry.semver import parse_constraint\nfrom poetry.semver import VersionConstraint\nfrom poetry.semver import VersionRange\n\nfrom .base_repository import BaseRepository\n\n\nclass Repository(BaseRepository):\n def __init__(self, packages=None):\n super(Repository, self).__init__()\n\n self._name = None\n\n if packages is None:\n packages = []\n\n for package in packages:\n self.add_package(package)\n\n @property\n def name(self):\n return self._name\n\n def package(self, name, version, 
extras=None):\n name = name.lower()\n\n if extras is None:\n extras = []\n\n for package in self.packages:\n if name == package.name and package.version.text == version:\n # Activate extra dependencies\n for extra in extras:\n if extra in package.extras:\n for extra_dep in package.extras[extra]:\n for dep in package.requires:\n if dep.name == extra_dep.name:\n dep.activate()\n\n return package.clone()\n\n def find_packages(\n self, name, constraint=None, extras=None, allow_prereleases=False\n ):\n name = name.lower()\n packages = []\n if extras is None:\n extras = []\n\n if constraint is None:\n constraint = \"*\"\n\n if not isinstance(constraint, VersionConstraint):\n constraint = parse_constraint(constraint)\n\n if isinstance(constraint, VersionRange):\n if (\n constraint.max is not None\n and constraint.max.is_prerelease()\n or constraint.min is not None\n and constraint.min.is_prerelease()\n ):\n allow_prereleases = True\n\n for package in self.packages:\n if name == package.name:\n if (\n package.is_prerelease()\n and not allow_prereleases\n and not package.source_type\n ):\n # If prereleases are not allowed and the package is a prerelease\n # and is a standard package then we skip it\n continue\n\n if constraint.allows(package.version):\n for dep in package.requires:\n for extra in extras:\n if extra not in package.extras:\n continue\n\n reqs = package.extras[extra]\n for req in reqs:\n if req.name == dep.name:\n dep.activate()\n\n if extras:\n package.requires_extras = extras\n\n packages.append(package)\n\n return packages\n\n def has_package(self, package):\n package_id = package.unique_name\n\n for repo_package in self.packages:\n if package_id == repo_package.unique_name:\n return True\n\n return False\n\n def add_package(self, package):\n self._packages.append(package)\n\n def remove_package(self, package):\n package_id = package.unique_name\n\n index = None\n for i, repo_package in enumerate(self.packages):\n if package_id == repo_package.unique_name:\n index = i\n break\n\n if index is not None:\n del self._packages[index]\n\n def search(self, query, mode=0):\n results = []\n\n for package in self.packages:\n if query in package.name:\n results.append(package)\n\n return results\n\n def __len__(self):\n return len(self._packages)\n", "path": "poetry/repositories/repository.py"}]} | 1,839 | 158 |
gh_patches_debug_7407 | rasdani/github-patches | git_diff | modin-project__modin-472 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Modin not working on latest ray master
With
```python
import ray
import modin.pandas as pd
frame_data = [1, 2, 3, 4, 5, 6, 7, 8]
frame = pd.DataFrame(frame_data)
assert frame.sum().squeeze() == sum(frame_data)
```
we get the following error:
```
Traceback (most recent call last):
File "python/ray/test/test_modin.py", line 16, in <module>
test_modin_import()
File "python/ray/test/test_modin.py", line 9, in test_modin_import
import modin.pandas as pd
File "/home/travis/.local/lib/python3.6/site-packages/ray-0.7.0.dev0-py3.6-linux-x86_64.egg/ray/modin/modin/pandas/__init__.py", line 134, in <module>
initialize_ray()
File "/home/travis/.local/lib/python3.6/site-packages/ray-0.7.0.dev0-py3.6-linux-x86_64.egg/ray/modin/modin/pandas/__init__.py", line 126, in initialize_ray
object_store_memory=object_store_memory,
File "/home/travis/.local/lib/python3.6/site-packages/ray-0.7.0.dev0-py3.6-linux-x86_64.egg/ray/worker.py", line 1427, in init
_internal_config=_internal_config,
File "/home/travis/.local/lib/python3.6/site-packages/ray-0.7.0.dev0-py3.6-linux-x86_64.egg/ray/parameter.py", line 145, in __init__
self._check_usage()
File "/home/travis/.local/lib/python3.6/site-packages/ray-0.7.0.dev0-py3.6-linux-x86_64.egg/ray/parameter.py", line 189, in _check_usage
"The redirect_worker_output argument is deprecated. To "
DeprecationWarning: The redirect_worker_output argument is deprecated. To control logging to the driver, use the 'log_to_driver' argument to 'ray.init()'
```
see also https://github.com/ray-project/ray/pull/4069 and https://github.com/ray-project/ray/commit/5f717518914d303126ed1bac3752150c0284fc3e.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `modin/pandas/__init__.py`
Content:
```
1 from __future__ import absolute_import
2 from __future__ import division
3 from __future__ import print_function
4
5 # TODO: In the future `set_option` or similar needs to run on every node
6 # in order to keep all pandas instances across nodes consistent
7 import pandas
8 from pandas import (
9 eval,
10 unique,
11 value_counts,
12 cut,
13 to_numeric,
14 factorize,
15 test,
16 qcut,
17 match,
18 Panel,
19 date_range,
20 period_range,
21 Index,
22 MultiIndex,
23 CategoricalIndex,
24 Series,
25 bdate_range,
26 DatetimeIndex,
27 Timedelta,
28 Timestamp,
29 to_timedelta,
30 set_eng_float_format,
31 set_option,
32 NaT,
33 PeriodIndex,
34 Categorical,
35 )
36 import threading
37 import os
38 import ray
39 import types
40
41 from .. import __version__
42 from .concat import concat
43 from .dataframe import DataFrame
44 from .datetimes import to_datetime
45 from .io import (
46 read_csv,
47 read_parquet,
48 read_json,
49 read_html,
50 read_clipboard,
51 read_excel,
52 read_hdf,
53 read_feather,
54 read_msgpack,
55 read_stata,
56 read_sas,
57 read_pickle,
58 read_sql,
59 read_gbq,
60 read_table,
61 )
62 from .reshape import get_dummies, melt, crosstab
63 from .general import isna, isnull, merge, pivot_table
64 from .plotting import Plotting as plotting
65 from .. import __execution_engine__ as execution_engine
66
67 __pandas_version__ = "0.23.4"
68
69 if pandas.__version__ != __pandas_version__:
70 raise ImportError(
71 "The pandas version installed does not match the required pandas "
72 "version in Modin. Please install pandas {} to use "
73 "Modin.".format(__pandas_version__)
74 )
75
76 # Set this so that Pandas doesn't try to multithread by itself
77 os.environ["OMP_NUM_THREADS"] = "1"
78 num_cpus = 1
79
80
81 def initialize_ray():
82 """Initializes ray based on environment variables and internal defaults."""
83 if threading.current_thread().name == "MainThread":
84 plasma_directory = None
85 object_store_memory = None
86 if "MODIN_MEMORY" in os.environ:
87 object_store_memory = os.environ["MODIN_MEMORY"]
88 if (
89 "MODIN_OUT_OF_CORE" in os.environ
90 and os.environ["MODIN_OUT_OF_CORE"].title() == "True"
91 ):
92 from tempfile import gettempdir
93
94 plasma_directory = gettempdir()
95 # We may have already set the memory from the environment variable, we don't
96 # want to overwrite that value if we have.
97 if object_store_memory is None:
98 try:
99 from psutil import virtual_memory
100 except ImportError:
101 raise ImportError(
102 "To use Modin out of core, please install modin[out_of_core]: "
103 '`pip install "modin[out_of_core]"`'
104 )
105 # Round down to the nearest Gigabyte.
106 mem_bytes = virtual_memory().total // 10 ** 9 * 10 ** 9
107 # Default to 8x memory for out of core
108 object_store_memory = 8 * mem_bytes
109 elif "MODIN_MEMORY" in os.environ:
110 object_store_memory = os.environ["MODIN_MEMORY"]
111 # In case anything failed above, we can still improve the memory for Modin.
112 if object_store_memory is None:
113 # Round down to the nearest Gigabyte.
114 object_store_memory = int(
115 0.6 * ray.utils.get_system_memory() // 10 ** 9 * 10 ** 9
116 )
117 # If the memory pool is smaller than 2GB, just use the default in ray.
118 if object_store_memory == 0:
119 object_store_memory = None
120 ray.init(
121 redirect_output=True,
122 include_webui=False,
123 redirect_worker_output=True,
124 ignore_reinit_error=True,
125 plasma_directory=plasma_directory,
126 object_store_memory=object_store_memory,
127 )
128 # Register custom serializer for method objects to avoid warning message.
129 # We serialize `MethodType` objects when we use AxisPartition operations.
130 ray.register_custom_serializer(types.MethodType, use_pickle=True)
131
132
133 if execution_engine == "Ray":
134 initialize_ray()
135 num_cpus = ray.global_state.cluster_resources()["CPU"]
136 elif execution_engine == "Dask":
137 from distributed.client import _get_global_client
138
139 if threading.current_thread().name == "MainThread":
140 # initialize the dask client
141 client = _get_global_client()
142 if client is None:
143 from distributed import Client
144
145 client = Client()
146 num_cpus = sum(client.ncores().values())
147 elif execution_engine != "Python":
148 raise ImportError("Unrecognized execution engine: {}.".format(execution_engine))
149
150 DEFAULT_NPARTITIONS = max(4, int(num_cpus))
151
152 __all__ = [
153 "DataFrame",
154 "Series",
155 "read_csv",
156 "read_parquet",
157 "read_json",
158 "read_html",
159 "read_clipboard",
160 "read_excel",
161 "read_hdf",
162 "read_feather",
163 "read_msgpack",
164 "read_stata",
165 "read_sas",
166 "read_pickle",
167 "read_sql",
168 "read_gbq",
169 "read_table",
170 "concat",
171 "eval",
172 "unique",
173 "value_counts",
174 "cut",
175 "to_numeric",
176 "factorize",
177 "test",
178 "qcut",
179 "match",
180 "to_datetime",
181 "get_dummies",
182 "isna",
183 "isnull",
184 "merge",
185 "pivot_table",
186 "Panel",
187 "date_range",
188 "Index",
189 "MultiIndex",
190 "Series",
191 "bdate_range",
192 "period_range",
193 "DatetimeIndex",
194 "to_timedelta",
195 "set_eng_float_format",
196 "set_option",
197 "CategoricalIndex",
198 "Timedelta",
199 "Timestamp",
200 "NaT",
201 "PeriodIndex",
202 "Categorical",
203 "__version__",
204 "melt",
205 "crosstab",
206 "plotting",
207 ]
208
209 del pandas
210
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/modin/pandas/__init__.py b/modin/pandas/__init__.py
--- a/modin/pandas/__init__.py
+++ b/modin/pandas/__init__.py
@@ -118,9 +118,7 @@
if object_store_memory == 0:
object_store_memory = None
ray.init(
- redirect_output=True,
include_webui=False,
- redirect_worker_output=True,
ignore_reinit_error=True,
plasma_directory=plasma_directory,
object_store_memory=object_store_memory,
| {"golden_diff": "diff --git a/modin/pandas/__init__.py b/modin/pandas/__init__.py\n--- a/modin/pandas/__init__.py\n+++ b/modin/pandas/__init__.py\n@@ -118,9 +118,7 @@\n if object_store_memory == 0:\n object_store_memory = None\n ray.init(\n- redirect_output=True,\n include_webui=False,\n- redirect_worker_output=True,\n ignore_reinit_error=True,\n plasma_directory=plasma_directory,\n object_store_memory=object_store_memory,\n", "issue": "Modin not working on latest ray master\nWith\r\n\r\n```python\r\nimport ray\r\nimport modin.pandas as pd\r\nframe_data = [1, 2, 3, 4, 5, 6, 7, 8]\r\nframe = pd.DataFrame(frame_data)\r\nassert frame.sum().squeeze() == sum(frame_data)\r\n```\r\n\r\nwe get the following error:\r\n\r\n```\r\nTraceback (most recent call last):\r\n File \"python/ray/test/test_modin.py\", line 16, in <module>\r\n test_modin_import()\r\n File \"python/ray/test/test_modin.py\", line 9, in test_modin_import\r\n import modin.pandas as pd\r\n File \"/home/travis/.local/lib/python3.6/site-packages/ray-0.7.0.dev0-py3.6-linux-x86_64.egg/ray/modin/modin/pandas/__init__.py\", line 134, in <module>\r\n initialize_ray()\r\n File \"/home/travis/.local/lib/python3.6/site-packages/ray-0.7.0.dev0-py3.6-linux-x86_64.egg/ray/modin/modin/pandas/__init__.py\", line 126, in initialize_ray\r\n object_store_memory=object_store_memory,\r\n File \"/home/travis/.local/lib/python3.6/site-packages/ray-0.7.0.dev0-py3.6-linux-x86_64.egg/ray/worker.py\", line 1427, in init\r\n _internal_config=_internal_config,\r\n File \"/home/travis/.local/lib/python3.6/site-packages/ray-0.7.0.dev0-py3.6-linux-x86_64.egg/ray/parameter.py\", line 145, in __init__\r\n self._check_usage()\r\n File \"/home/travis/.local/lib/python3.6/site-packages/ray-0.7.0.dev0-py3.6-linux-x86_64.egg/ray/parameter.py\", line 189, in _check_usage\r\n \"The redirect_worker_output argument is deprecated. To \"\r\nDeprecationWarning: The redirect_worker_output argument is deprecated. To control logging to the driver, use the 'log_to_driver' argument to 'ray.init()'\r\n```\r\n\r\nsee also https://github.com/ray-project/ray/pull/4069 and https://github.com/ray-project/ray/commit/5f717518914d303126ed1bac3752150c0284fc3e.\n", "before_files": [{"content": "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\n# TODO: In the future `set_option` or similar needs to run on every node\n# in order to keep all pandas instances across nodes consistent\nimport pandas\nfrom pandas import (\n eval,\n unique,\n value_counts,\n cut,\n to_numeric,\n factorize,\n test,\n qcut,\n match,\n Panel,\n date_range,\n period_range,\n Index,\n MultiIndex,\n CategoricalIndex,\n Series,\n bdate_range,\n DatetimeIndex,\n Timedelta,\n Timestamp,\n to_timedelta,\n set_eng_float_format,\n set_option,\n NaT,\n PeriodIndex,\n Categorical,\n)\nimport threading\nimport os\nimport ray\nimport types\n\nfrom .. import __version__\nfrom .concat import concat\nfrom .dataframe import DataFrame\nfrom .datetimes import to_datetime\nfrom .io import (\n read_csv,\n read_parquet,\n read_json,\n read_html,\n read_clipboard,\n read_excel,\n read_hdf,\n read_feather,\n read_msgpack,\n read_stata,\n read_sas,\n read_pickle,\n read_sql,\n read_gbq,\n read_table,\n)\nfrom .reshape import get_dummies, melt, crosstab\nfrom .general import isna, isnull, merge, pivot_table\nfrom .plotting import Plotting as plotting\nfrom .. 
import __execution_engine__ as execution_engine\n\n__pandas_version__ = \"0.23.4\"\n\nif pandas.__version__ != __pandas_version__:\n raise ImportError(\n \"The pandas version installed does not match the required pandas \"\n \"version in Modin. Please install pandas {} to use \"\n \"Modin.\".format(__pandas_version__)\n )\n\n# Set this so that Pandas doesn't try to multithread by itself\nos.environ[\"OMP_NUM_THREADS\"] = \"1\"\nnum_cpus = 1\n\n\ndef initialize_ray():\n \"\"\"Initializes ray based on environment variables and internal defaults.\"\"\"\n if threading.current_thread().name == \"MainThread\":\n plasma_directory = None\n object_store_memory = None\n if \"MODIN_MEMORY\" in os.environ:\n object_store_memory = os.environ[\"MODIN_MEMORY\"]\n if (\n \"MODIN_OUT_OF_CORE\" in os.environ\n and os.environ[\"MODIN_OUT_OF_CORE\"].title() == \"True\"\n ):\n from tempfile import gettempdir\n\n plasma_directory = gettempdir()\n # We may have already set the memory from the environment variable, we don't\n # want to overwrite that value if we have.\n if object_store_memory is None:\n try:\n from psutil import virtual_memory\n except ImportError:\n raise ImportError(\n \"To use Modin out of core, please install modin[out_of_core]: \"\n '`pip install \"modin[out_of_core]\"`'\n )\n # Round down to the nearest Gigabyte.\n mem_bytes = virtual_memory().total // 10 ** 9 * 10 ** 9\n # Default to 8x memory for out of core\n object_store_memory = 8 * mem_bytes\n elif \"MODIN_MEMORY\" in os.environ:\n object_store_memory = os.environ[\"MODIN_MEMORY\"]\n # In case anything failed above, we can still improve the memory for Modin.\n if object_store_memory is None:\n # Round down to the nearest Gigabyte.\n object_store_memory = int(\n 0.6 * ray.utils.get_system_memory() // 10 ** 9 * 10 ** 9\n )\n # If the memory pool is smaller than 2GB, just use the default in ray.\n if object_store_memory == 0:\n object_store_memory = None\n ray.init(\n redirect_output=True,\n include_webui=False,\n redirect_worker_output=True,\n ignore_reinit_error=True,\n plasma_directory=plasma_directory,\n object_store_memory=object_store_memory,\n )\n # Register custom serializer for method objects to avoid warning message.\n # We serialize `MethodType` objects when we use AxisPartition operations.\n ray.register_custom_serializer(types.MethodType, use_pickle=True)\n\n\nif execution_engine == \"Ray\":\n initialize_ray()\n num_cpus = ray.global_state.cluster_resources()[\"CPU\"]\nelif execution_engine == \"Dask\":\n from distributed.client import _get_global_client\n\n if threading.current_thread().name == \"MainThread\":\n # initialize the dask client\n client = _get_global_client()\n if client is None:\n from distributed import Client\n\n client = Client()\n num_cpus = sum(client.ncores().values())\nelif execution_engine != \"Python\":\n raise ImportError(\"Unrecognized execution engine: {}.\".format(execution_engine))\n\nDEFAULT_NPARTITIONS = max(4, int(num_cpus))\n\n__all__ = [\n \"DataFrame\",\n \"Series\",\n \"read_csv\",\n \"read_parquet\",\n \"read_json\",\n \"read_html\",\n \"read_clipboard\",\n \"read_excel\",\n \"read_hdf\",\n \"read_feather\",\n \"read_msgpack\",\n \"read_stata\",\n \"read_sas\",\n \"read_pickle\",\n \"read_sql\",\n \"read_gbq\",\n \"read_table\",\n \"concat\",\n \"eval\",\n \"unique\",\n \"value_counts\",\n \"cut\",\n \"to_numeric\",\n \"factorize\",\n \"test\",\n \"qcut\",\n \"match\",\n \"to_datetime\",\n \"get_dummies\",\n \"isna\",\n \"isnull\",\n \"merge\",\n \"pivot_table\",\n \"Panel\",\n 
\"date_range\",\n \"Index\",\n \"MultiIndex\",\n \"Series\",\n \"bdate_range\",\n \"period_range\",\n \"DatetimeIndex\",\n \"to_timedelta\",\n \"set_eng_float_format\",\n \"set_option\",\n \"CategoricalIndex\",\n \"Timedelta\",\n \"Timestamp\",\n \"NaT\",\n \"PeriodIndex\",\n \"Categorical\",\n \"__version__\",\n \"melt\",\n \"crosstab\",\n \"plotting\",\n]\n\ndel pandas\n", "path": "modin/pandas/__init__.py"}], "after_files": [{"content": "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\n# TODO: In the future `set_option` or similar needs to run on every node\n# in order to keep all pandas instances across nodes consistent\nimport pandas\nfrom pandas import (\n eval,\n unique,\n value_counts,\n cut,\n to_numeric,\n factorize,\n test,\n qcut,\n match,\n Panel,\n date_range,\n period_range,\n Index,\n MultiIndex,\n CategoricalIndex,\n Series,\n bdate_range,\n DatetimeIndex,\n Timedelta,\n Timestamp,\n to_timedelta,\n set_eng_float_format,\n set_option,\n NaT,\n PeriodIndex,\n Categorical,\n)\nimport threading\nimport os\nimport ray\nimport types\n\nfrom .. import __version__\nfrom .concat import concat\nfrom .dataframe import DataFrame\nfrom .datetimes import to_datetime\nfrom .io import (\n read_csv,\n read_parquet,\n read_json,\n read_html,\n read_clipboard,\n read_excel,\n read_hdf,\n read_feather,\n read_msgpack,\n read_stata,\n read_sas,\n read_pickle,\n read_sql,\n read_gbq,\n read_table,\n)\nfrom .reshape import get_dummies, melt, crosstab\nfrom .general import isna, isnull, merge, pivot_table\nfrom .plotting import Plotting as plotting\nfrom .. import __execution_engine__ as execution_engine\n\n__pandas_version__ = \"0.23.4\"\n\nif pandas.__version__ != __pandas_version__:\n raise ImportError(\n \"The pandas version installed does not match the required pandas \"\n \"version in Modin. 
Please install pandas {} to use \"\n \"Modin.\".format(__pandas_version__)\n )\n\n# Set this so that Pandas doesn't try to multithread by itself\nos.environ[\"OMP_NUM_THREADS\"] = \"1\"\nnum_cpus = 1\n\n\ndef initialize_ray():\n \"\"\"Initializes ray based on environment variables and internal defaults.\"\"\"\n if threading.current_thread().name == \"MainThread\":\n plasma_directory = None\n object_store_memory = None\n if \"MODIN_MEMORY\" in os.environ:\n object_store_memory = os.environ[\"MODIN_MEMORY\"]\n if (\n \"MODIN_OUT_OF_CORE\" in os.environ\n and os.environ[\"MODIN_OUT_OF_CORE\"].title() == \"True\"\n ):\n from tempfile import gettempdir\n\n plasma_directory = gettempdir()\n # We may have already set the memory from the environment variable, we don't\n # want to overwrite that value if we have.\n if object_store_memory is None:\n try:\n from psutil import virtual_memory\n except ImportError:\n raise ImportError(\n \"To use Modin out of core, please install modin[out_of_core]: \"\n '`pip install \"modin[out_of_core]\"`'\n )\n # Round down to the nearest Gigabyte.\n mem_bytes = virtual_memory().total // 10 ** 9 * 10 ** 9\n # Default to 8x memory for out of core\n object_store_memory = 8 * mem_bytes\n elif \"MODIN_MEMORY\" in os.environ:\n object_store_memory = os.environ[\"MODIN_MEMORY\"]\n # In case anything failed above, we can still improve the memory for Modin.\n if object_store_memory is None:\n # Round down to the nearest Gigabyte.\n object_store_memory = int(\n 0.6 * ray.utils.get_system_memory() // 10 ** 9 * 10 ** 9\n )\n # If the memory pool is smaller than 2GB, just use the default in ray.\n if object_store_memory == 0:\n object_store_memory = None\n ray.init(\n include_webui=False,\n ignore_reinit_error=True,\n plasma_directory=plasma_directory,\n object_store_memory=object_store_memory,\n )\n # Register custom serializer for method objects to avoid warning message.\n # We serialize `MethodType` objects when we use AxisPartition operations.\n ray.register_custom_serializer(types.MethodType, use_pickle=True)\n\n\nif execution_engine == \"Ray\":\n initialize_ray()\n num_cpus = ray.global_state.cluster_resources()[\"CPU\"]\nelif execution_engine == \"Dask\":\n from distributed.client import _get_global_client\n\n if threading.current_thread().name == \"MainThread\":\n # initialize the dask client\n client = _get_global_client()\n if client is None:\n from distributed import Client\n\n client = Client()\n num_cpus = sum(client.ncores().values())\nelif execution_engine != \"Python\":\n raise ImportError(\"Unrecognized execution engine: {}.\".format(execution_engine))\n\nDEFAULT_NPARTITIONS = max(4, int(num_cpus))\n\n__all__ = [\n \"DataFrame\",\n \"Series\",\n \"read_csv\",\n \"read_parquet\",\n \"read_json\",\n \"read_html\",\n \"read_clipboard\",\n \"read_excel\",\n \"read_hdf\",\n \"read_feather\",\n \"read_msgpack\",\n \"read_stata\",\n \"read_sas\",\n \"read_pickle\",\n \"read_sql\",\n \"read_gbq\",\n \"read_table\",\n \"concat\",\n \"eval\",\n \"unique\",\n \"value_counts\",\n \"cut\",\n \"to_numeric\",\n \"factorize\",\n \"test\",\n \"qcut\",\n \"match\",\n \"to_datetime\",\n \"get_dummies\",\n \"isna\",\n \"isnull\",\n \"merge\",\n \"pivot_table\",\n \"Panel\",\n \"date_range\",\n \"Index\",\n \"MultiIndex\",\n \"Series\",\n \"bdate_range\",\n \"period_range\",\n \"DatetimeIndex\",\n \"to_timedelta\",\n \"set_eng_float_format\",\n \"set_option\",\n \"CategoricalIndex\",\n \"Timedelta\",\n \"Timestamp\",\n \"NaT\",\n \"PeriodIndex\",\n \"Categorical\",\n 
\"__version__\",\n \"melt\",\n \"crosstab\",\n \"plotting\",\n]\n\ndel pandas\n", "path": "modin/pandas/__init__.py"}]} | 2,702 | 121 |
gh_patches_debug_4266 | rasdani/github-patches | git_diff | getredash__redash-732 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Migration Error (0017_add_organization.py)
Migration Error with current master branch.
```
$ vagrant@vagrant-ubuntu-trusty-64:/opt/redash/current$ export PYTHONPATH=. && bin/run python migrations/0017_add_organization.py
```
### output
```
[2016-01-05 09:38:42,746][PID:1822][WARNING][redash.query_runner] Oracle query runner enabled but not supported, not registering. Either disable or install missing dependencies.
Traceback (most recent call last):
File "migrations/0017_add_organization.py", line 12, in <module>
Organization.SETTING_GOOGLE_APPS_DOMAINS: settings.GOOGLE_APPS_DOMAIN
File "/usr/local/lib/python2.7/dist-packages/peewee.py", line 4001, in create
inst.save(force_insert=True)
File "/opt/redash/current/redash/models.py", line 93, in save
super(BaseModel, self).save(*args, **kwargs)
File "/usr/local/lib/python2.7/dist-packages/peewee.py", line 4148, in save
pk_from_cursor = self.insert(**field_dict).execute()
File "/usr/local/lib/python2.7/dist-packages/peewee.py", line 2858, in execute
cursor = self._execute()
File "/opt/redash/current/redash/metrics/database.py", line 50, in metered_execute
result = real_execute(self, *args, **kwargs)
File "/usr/local/lib/python2.7/dist-packages/peewee.py", line 2370, in _execute
sql, params = self.sql()
File "/usr/local/lib/python2.7/dist-packages/peewee.py", line 2832, in sql
return self.compiler().generate_insert(self)
File "/usr/local/lib/python2.7/dist-packages/peewee.py", line 1733, in generate_insert
return self.build_query(clauses, alias_map)
File "/usr/local/lib/python2.7/dist-packages/peewee.py", line 1542, in build_query
return self.parse_node(Clause(*clauses), alias_map)
File "/usr/local/lib/python2.7/dist-packages/peewee.py", line 1503, in parse_node
sql, params, unknown = self._parse(node, alias_map, conv)
File "/usr/local/lib/python2.7/dist-packages/peewee.py", line 1478, in _parse
sql, params = self._parse_map[node_type](node, alias_map, conv)
File "/usr/local/lib/python2.7/dist-packages/peewee.py", line 1406, in _parse_clause
node.nodes, alias_map, conv, node.glue)
File "/usr/local/lib/python2.7/dist-packages/peewee.py", line 1520, in parse_node_list
node_sql, node_params = self.parse_node(node, alias_map, conv)
File "/usr/local/lib/python2.7/dist-packages/peewee.py", line 1503, in parse_node
sql, params, unknown = self._parse(node, alias_map, conv)
File "/usr/local/lib/python2.7/dist-packages/peewee.py", line 1478, in _parse
sql, params = self._parse_map[node_type](node, alias_map, conv)
File "/usr/local/lib/python2.7/dist-packages/peewee.py", line 1406, in _parse_clause
node.nodes, alias_map, conv, node.glue)
File "/usr/local/lib/python2.7/dist-packages/peewee.py", line 1520, in parse_node_list
node_sql, node_params = self.parse_node(node, alias_map, conv)
File "/usr/local/lib/python2.7/dist-packages/peewee.py", line 1503, in parse_node
sql, params, unknown = self._parse(node, alias_map, conv)
File "/usr/local/lib/python2.7/dist-packages/peewee.py", line 1478, in _parse
sql, params = self._parse_map[node_type](node, alias_map, conv)
File "/usr/local/lib/python2.7/dist-packages/peewee.py", line 1406, in _parse_clause
node.nodes, alias_map, conv, node.glue)
File "/usr/local/lib/python2.7/dist-packages/peewee.py", line 1520, in parse_node_list
node_sql, node_params = self.parse_node(node, alias_map, conv)
File "/usr/local/lib/python2.7/dist-packages/peewee.py", line 1503, in parse_node
sql, params, unknown = self._parse(node, alias_map, conv)
File "/usr/local/lib/python2.7/dist-packages/peewee.py", line 1478, in _parse
sql, params = self._parse_map[node_type](node, alias_map, conv)
File "/usr/local/lib/python2.7/dist-packages/peewee.py", line 1394, in _parse_param
params = [node.conv(node.value)]
File "/opt/redash/current/redash/models.py", line 68, in db_value
return json.dumps(value)
File "/usr/lib/python2.7/json/__init__.py", line 243, in dumps
return _default_encoder.encode(obj)
File "/usr/lib/python2.7/json/encoder.py", line 207, in encode
chunks = self.iterencode(o, _one_shot=True)
File "/usr/lib/python2.7/json/encoder.py", line 270, in iterencode
return _iterencode(o, 0)
File "/usr/lib/python2.7/json/encoder.py", line 184, in default
raise TypeError(repr(o) + " is not JSON serializable")
TypeError: set([]) is not JSON serializable
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `migrations/0017_add_organization.py`
Content:
```
1 from redash.models import db, Organization, Group
2 from redash import settings
3 from playhouse.migrate import PostgresqlMigrator, migrate
4
5 if __name__ == '__main__':
6 migrator = PostgresqlMigrator(db.database)
7
8 with db.database.transaction():
9 Organization.create_table()
10
11 default_org = Organization.create(name="Default", slug='default', settings={
12 Organization.SETTING_GOOGLE_APPS_DOMAINS: settings.GOOGLE_APPS_DOMAIN
13 })
14
15 column = Group.org
16 column.default = default_org
17
18 migrate(
19 migrator.add_column('groups', 'org_id', column),
20 migrator.add_column('events', 'org_id', column),
21 migrator.add_column('data_sources', 'org_id', column),
22 migrator.add_column('users', 'org_id', column),
23 migrator.add_column('dashboards', 'org_id', column),
24 migrator.add_column('queries', 'org_id', column),
25 migrator.add_column('query_results', 'org_id', column),
26 )
27
28 # Change the uniqueness constraint on user email to be (org, email):
29 migrate(
30 migrator.drop_index('users', 'users_email'),
31 migrator.add_index('users', ('org_id', 'email'), unique=True)
32 )
33
34 db.close_db(None)
35
36
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/migrations/0017_add_organization.py b/migrations/0017_add_organization.py
--- a/migrations/0017_add_organization.py
+++ b/migrations/0017_add_organization.py
@@ -9,7 +9,7 @@
Organization.create_table()
default_org = Organization.create(name="Default", slug='default', settings={
- Organization.SETTING_GOOGLE_APPS_DOMAINS: settings.GOOGLE_APPS_DOMAIN
+ Organization.SETTING_GOOGLE_APPS_DOMAINS: list(settings.GOOGLE_APPS_DOMAIN)
})
column = Group.org
| {"golden_diff": "diff --git a/migrations/0017_add_organization.py b/migrations/0017_add_organization.py\n--- a/migrations/0017_add_organization.py\n+++ b/migrations/0017_add_organization.py\n@@ -9,7 +9,7 @@\n Organization.create_table()\n \n default_org = Organization.create(name=\"Default\", slug='default', settings={\n- Organization.SETTING_GOOGLE_APPS_DOMAINS: settings.GOOGLE_APPS_DOMAIN\n+ Organization.SETTING_GOOGLE_APPS_DOMAINS: list(settings.GOOGLE_APPS_DOMAIN)\n })\n \n column = Group.org\n", "issue": "Migration Error(0017_add_organization.py)\nMigration Error with current master branch.\n\n```\n$ vagrant@vagrant-ubuntu-trusty-64:/opt/redash/current$ export PYTHONPATH=. && bin/run python migrations/0017_add_organization.py\n```\n### output\n\n```\n[2016-01-05 09:38:42,746][PID:1822][WARNING][redash.query_runner] Oracle query runner enabled but not supported, not registering. Either disable or install missing dependencies.\nTraceback (most recent call last):\n File \"migrations/0017_add_organization.py\", line 12, in <module>\n Organization.SETTING_GOOGLE_APPS_DOMAINS: settings.GOOGLE_APPS_DOMAIN\n File \"/usr/local/lib/python2.7/dist-packages/peewee.py\", line 4001, in create\n inst.save(force_insert=True)\n File \"/opt/redash/current/redash/models.py\", line 93, in save\n super(BaseModel, self).save(*args, **kwargs)\n File \"/usr/local/lib/python2.7/dist-packages/peewee.py\", line 4148, in save\n pk_from_cursor = self.insert(**field_dict).execute()\n File \"/usr/local/lib/python2.7/dist-packages/peewee.py\", line 2858, in execute\n cursor = self._execute()\n File \"/opt/redash/current/redash/metrics/database.py\", line 50, in metered_execute\n result = real_execute(self, *args, **kwargs)\n File \"/usr/local/lib/python2.7/dist-packages/peewee.py\", line 2370, in _execute\n sql, params = self.sql()\n File \"/usr/local/lib/python2.7/dist-packages/peewee.py\", line 2832, in sql\n return self.compiler().generate_insert(self)\n File \"/usr/local/lib/python2.7/dist-packages/peewee.py\", line 1733, in generate_insert\n return self.build_query(clauses, alias_map)\n File \"/usr/local/lib/python2.7/dist-packages/peewee.py\", line 1542, in build_query\n return self.parse_node(Clause(*clauses), alias_map)\n File \"/usr/local/lib/python2.7/dist-packages/peewee.py\", line 1503, in parse_node\n sql, params, unknown = self._parse(node, alias_map, conv)\n File \"/usr/local/lib/python2.7/dist-packages/peewee.py\", line 1478, in _parse\n sql, params = self._parse_map[node_type](node, alias_map, conv)\n File \"/usr/local/lib/python2.7/dist-packages/peewee.py\", line 1406, in _parse_clause\n node.nodes, alias_map, conv, node.glue)\n File \"/usr/local/lib/python2.7/dist-packages/peewee.py\", line 1520, in parse_node_list\n node_sql, node_params = self.parse_node(node, alias_map, conv)\n File \"/usr/local/lib/python2.7/dist-packages/peewee.py\", line 1503, in parse_node\n sql, params, unknown = self._parse(node, alias_map, conv)\n File \"/usr/local/lib/python2.7/dist-packages/peewee.py\", line 1478, in _parse\n sql, params = self._parse_map[node_type](node, alias_map, conv)\n File \"/usr/local/lib/python2.7/dist-packages/peewee.py\", line 1406, in _parse_clause\n node.nodes, alias_map, conv, node.glue)\n File \"/usr/local/lib/python2.7/dist-packages/peewee.py\", line 1520, in parse_node_list\n node_sql, node_params = self.parse_node(node, alias_map, conv)\n File \"/usr/local/lib/python2.7/dist-packages/peewee.py\", line 1503, in parse_node\n sql, params, unknown = self._parse(node, alias_map, conv)\n 
File \"/usr/local/lib/python2.7/dist-packages/peewee.py\", line 1478, in _parse\n sql, params = self._parse_map[node_type](node, alias_map, conv)\n File \"/usr/local/lib/python2.7/dist-packages/peewee.py\", line 1406, in _parse_clause\n node.nodes, alias_map, conv, node.glue)\n File \"/usr/local/lib/python2.7/dist-packages/peewee.py\", line 1520, in parse_node_list\n node_sql, node_params = self.parse_node(node, alias_map, conv)\n File \"/usr/local/lib/python2.7/dist-packages/peewee.py\", line 1503, in parse_node\n sql, params, unknown = self._parse(node, alias_map, conv)\n File \"/usr/local/lib/python2.7/dist-packages/peewee.py\", line 1478, in _parse\n sql, params = self._parse_map[node_type](node, alias_map, conv)\n File \"/usr/local/lib/python2.7/dist-packages/peewee.py\", line 1394, in _parse_param\n params = [node.conv(node.value)]\n File \"/opt/redash/current/redash/models.py\", line 68, in db_value\n return json.dumps(value)\n File \"/usr/lib/python2.7/json/__init__.py\", line 243, in dumps\n return _default_encoder.encode(obj)\n File \"/usr/lib/python2.7/json/encoder.py\", line 207, in encode\n chunks = self.iterencode(o, _one_shot=True)\n File \"/usr/lib/python2.7/json/encoder.py\", line 270, in iterencode\n return _iterencode(o, 0)\n File \"/usr/lib/python2.7/json/encoder.py\", line 184, in default\n raise TypeError(repr(o) + \" is not JSON serializable\")\nTypeError: set([]) is not JSON serializable\n```\n\n", "before_files": [{"content": "from redash.models import db, Organization, Group\nfrom redash import settings\nfrom playhouse.migrate import PostgresqlMigrator, migrate\n\nif __name__ == '__main__':\n migrator = PostgresqlMigrator(db.database)\n\n with db.database.transaction():\n Organization.create_table()\n\n default_org = Organization.create(name=\"Default\", slug='default', settings={\n Organization.SETTING_GOOGLE_APPS_DOMAINS: settings.GOOGLE_APPS_DOMAIN\n })\n\n column = Group.org\n column.default = default_org\n\n migrate(\n migrator.add_column('groups', 'org_id', column),\n migrator.add_column('events', 'org_id', column),\n migrator.add_column('data_sources', 'org_id', column),\n migrator.add_column('users', 'org_id', column),\n migrator.add_column('dashboards', 'org_id', column),\n migrator.add_column('queries', 'org_id', column),\n migrator.add_column('query_results', 'org_id', column),\n )\n\n # Change the uniqueness constraint on user email to be (org, email):\n migrate(\n migrator.drop_index('users', 'users_email'),\n migrator.add_index('users', ('org_id', 'email'), unique=True)\n )\n\n db.close_db(None)\n\n", "path": "migrations/0017_add_organization.py"}], "after_files": [{"content": "from redash.models import db, Organization, Group\nfrom redash import settings\nfrom playhouse.migrate import PostgresqlMigrator, migrate\n\nif __name__ == '__main__':\n migrator = PostgresqlMigrator(db.database)\n\n with db.database.transaction():\n Organization.create_table()\n\n default_org = Organization.create(name=\"Default\", slug='default', settings={\n Organization.SETTING_GOOGLE_APPS_DOMAINS: list(settings.GOOGLE_APPS_DOMAIN)\n })\n\n column = Group.org\n column.default = default_org\n\n migrate(\n migrator.add_column('groups', 'org_id', column),\n migrator.add_column('events', 'org_id', column),\n migrator.add_column('data_sources', 'org_id', column),\n migrator.add_column('users', 'org_id', column),\n migrator.add_column('dashboards', 'org_id', column),\n migrator.add_column('queries', 'org_id', column),\n migrator.add_column('query_results', 'org_id', column),\n 
)\n\n # Change the uniqueness constraint on user email to be (org, email):\n migrate(\n migrator.drop_index('users', 'users_email'),\n migrator.add_index('users', ('org_id', 'email'), unique=True)\n )\n\n db.close_db(None)\n\n", "path": "migrations/0017_add_organization.py"}]} | 1,979 | 128 |
gh_patches_debug_13435 | rasdani/github-patches | git_diff | fal-ai__dbt-fal-685 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Fal reads only dbt *.yml files and ignores *.yaml
<!-- *** Make sure you have searched for an existing bug report for this issue *** -->
**Describe the bug**
In dbt models you can have schemas in either .yml or .yaml extensions
also see: https://www.w3schools.io/file/yaml-vs-yml/
**Your environment**
- OS: MacOS
- Paste the following commands output:
```
dbt-fal==1.3.7
fal==0.7.3
dbt-core==1.3.1
```
- Adapter being used: <!-- (if more than one dbt plugin installed) -->
**How to reproduce**
If you create schema.yaml with a fal section, the schema will be ignored
**Expected behavior**
schema.yaml should be detected and parsed correctly
**Actual behavior**
Tell us what actually happened.
**Screenshots**
If applicable, add screenshots to help explain your problem.
**Additional context**
Add any other context about the problem here.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/faldbt/parse.py`
Content:
```
1 import os
2 from dataclasses import dataclass
3 import glob
4 from pathlib import Path
5 from typing import Any, List, Dict, Optional, Union, TYPE_CHECKING
6
7 from dbt.contracts.project import Project as ProjectContract
8 from dbt.config import RuntimeConfig, Project
9 from dbt.config.utils import parse_cli_vars
10 from dbt.contracts.graph.manifest import Manifest
11 from dbt.contracts.results import RunResultsArtifact, FreshnessExecutionResultArtifact
12 from dbt.contracts.project import UserConfig
13 from dbt.config.profile import read_user_config
14 from dbt.exceptions import IncompatibleSchemaException, RuntimeException
15 from fal.utils import cache_static
16
17 from faldbt.logger import LOGGER
18 from faldbt.utils.yaml_helper import load_yaml
19 from fal.telemetry import telemetry
20
21 if TYPE_CHECKING:
22 from fal.packages.environments import BaseEnvironment
23
24 FAL_SCRIPTS_PATH = "fal-scripts-path"
25 FAL_MODELS_PATHS = "fal-models-paths"
26
27
28 class FalParseError(Exception):
29 pass
30
31
32 def get_dbt_user_config(profiles_dir: str) -> UserConfig:
33 return read_user_config(profiles_dir)
34
35
36 @dataclass
37 class RuntimeArgs:
38 project_dir: str
39 profiles_dir: str
40 threads: Optional[int]
41 single_threaded: bool
42 profile: Optional[str]
43 target: Optional[str]
44
45
46 def load_dbt_project_contract(project_dir: str) -> ProjectContract:
47 partial_project = Project.partial_load(project_dir)
48 contract = ProjectContract.from_dict(partial_project.project_dict)
49 if not hasattr(contract, "model_paths") or contract.model_paths is None:
50 setattr(contract, "model_paths", contract.source_paths)
51 if not hasattr(contract, "seed_paths") or contract.seed_paths is None:
52 setattr(contract, "seed_paths", contract.data_paths)
53 return contract
54
55
56 def get_dbt_config(
57 *,
58 project_dir: str,
59 profiles_dir: str,
60 profile_target: Optional[str] = None,
61 threads: Optional[int] = None,
62 profile: Optional[str] = None,
63 ) -> RuntimeConfig:
64 # Construct a phony config
65 import os
66 args = RuntimeArgs(
67 project_dir=project_dir,
68 profiles_dir=profiles_dir,
69 threads=threads,
70 single_threaded=False,
71 profile=profile,
72 target=profile_target,
73 )
74
75 if project_dir and not "PYTEST_CURRENT_TEST" in os.environ:
76 # HACK: initializing dbt-fal requires cwd to be project_dir
77 # TODO: this doesn't work in pytest + Github Actions
78 owd = os.getcwd()
79 os.chdir(project_dir)
80 config = RuntimeConfig.from_args(args)
81 os.chdir(owd)
82 else:
83 config = RuntimeConfig.from_args(args)
84 return config
85
86
87 def get_el_configs(
88 profiles_dir: str, profile_name: str, target_name: str
89 ) -> Dict[str, Dict]:
90 path = os.path.join(profiles_dir, "profiles.yml")
91 yml = load_yaml(path)
92 sync_configs = (
93 yml.get(profile_name, {}).get("fal_extract_load", {}).get(target_name, {})
94 )
95 return sync_configs
96
97
98 def get_vars_dict(project_dir: str, args_vars: str) -> Dict[str, Any]:
99 project_contract = load_dbt_project_contract(project_dir)
100
101 # NOTE: This happens usually inside unit tests
102 vars = (project_contract is not None and project_contract.vars) or {}
103 cli_vars = parse_cli_vars(args_vars)
104
105 # cli_vars have higher priority
106 return {**vars, **cli_vars}
107
108
109 @cache_static
110 def get_fal_models_dirs(project_dir: str, args_vars: str) -> List[str]:
111 vars = get_vars_dict(project_dir, args_vars)
112 model_paths = vars.get(FAL_MODELS_PATHS) or []
113 if not model_paths:
114 # None or empty list
115 LOGGER.warn(
116 f"Variable '{FAL_MODELS_PATHS}' not defined. Locate fal-format "
117 "Python models in a separate model directory and set it as the variable. "
118 "e.g. {FAL_MODELS_PATHS}: ['fal_models']"
119 )
120
121 telemetry.log_api(action="fal_models_paths_not_set")
122
123 if not isinstance(model_paths, list):
124 raise FalParseError(
125 f"Error parsing '{FAL_MODELS_PATHS}'. Expected list of strings and got '{type(model_paths)}'"
126 )
127
128 return model_paths
129
130
131 def get_scripts_dir(project_dir: str, args_vars: str) -> str:
132 vars = get_vars_dict(project_dir, args_vars)
133 scripts_dir = vars.get(FAL_SCRIPTS_PATH, project_dir)
134
135 if not isinstance(scripts_dir, str):
136 raise FalParseError(
137 f"Error parsing '{FAL_SCRIPTS_PATH}'. Expected string and got '{type(scripts_dir)}'"
138 )
139
140 return os.path.join(project_dir, scripts_dir)
141
142
143 def get_dbt_manifest(config) -> Manifest:
144 from dbt.parser.manifest import ManifestLoader
145
146 return ManifestLoader.get_full_manifest(config)
147
148
149 def get_dbt_sources_artifact(project_dir: str, config: RuntimeConfig):
150 sources_path = os.path.join(project_dir, config.target_path, "sources.json")
151 try:
152 # BACKWARDS: Change intorduced in 1.0.0
153 if hasattr(FreshnessExecutionResultArtifact, "read_and_check_versions"):
154 return FreshnessExecutionResultArtifact.read_and_check_versions(
155 sources_path
156 )
157 else:
158 return FreshnessExecutionResultArtifact.read(sources_path)
159
160 except IncompatibleSchemaException as exc:
161 # TODO: add test for this case
162 exc.add_filename(sources_path)
163 raise
164 except RuntimeException as exc:
165 LOGGER.warn("Could not read dbt sources artifact")
166 return None
167
168
169 def get_dbt_results(
170 project_dir: str, config: RuntimeConfig
171 ) -> Optional[RunResultsArtifact]:
172 results_path = os.path.join(project_dir, config.target_path, "run_results.json")
173 try:
174 # BACKWARDS: Change intorduced in 1.0.0
175 if hasattr(RunResultsArtifact, "read_and_check_versions"):
176 return RunResultsArtifact.read_and_check_versions(results_path)
177 else:
178 return RunResultsArtifact.read(results_path)
179
180 except IncompatibleSchemaException as exc:
181 # TODO: add test for this case
182 exc.add_filename(results_path)
183 raise
184 except RuntimeException as exc:
185 LOGGER.warn("Could not read dbt run_results artifact")
186 return None
187
188
189 def get_scripts_list(scripts_dir: str) -> List[str]:
190 scripts_path = Path(scripts_dir)
191 return list(map(str, [*scripts_path.rglob("*.py"), *scripts_path.rglob("*.ipynb")]))
192
193
194 def get_global_script_configs(source_dirs: List[Path]) -> Dict[str, List[str]]:
195 global_scripts = {"before": [], "after": []}
196 for source_dir in source_dirs:
197 schema_files = glob.glob(os.path.join(source_dir, "**.yml"), recursive=True)
198 for file in schema_files:
199 schema_yml = load_yaml(file)
200 if schema_yml is not None:
201 fal_config = schema_yml.get("fal", None)
202 if fal_config is not None:
203 # sometimes `scripts` can *be* there and still be None
204 script_paths = fal_config.get("scripts") or []
205 if isinstance(script_paths, list):
206 global_scripts["after"] += script_paths
207 else:
208 global_scripts["before"] += script_paths.get("before") or []
209 global_scripts["after"] += script_paths.get("after") or []
210 else:
211 raise FalParseError("Error parsing the schema file " + file)
212
213 return global_scripts
214
215
216 def _get_required_key(data: Dict[str, Any], name: str) -> Any:
217 if name not in data:
218 raise FalParseError("Missing required key: " + name)
219 return data[name]
220
221
222 def load_environments(base_dir: str) -> Dict[str, "BaseEnvironment"]:
223 from fal.packages.environments import create_environment
224 from fal.fal_script import _is_local_environment
225
226 try:
227 fal_project_path = os.path.join(base_dir, "fal_project.yml")
228 if not os.path.exists(fal_project_path):
229 raise FalParseError(f"{fal_project_path} must exist to define environments")
230
231 fal_project = load_yaml(fal_project_path)
232
233 environments = {}
234 for environment in fal_project.get("environments", []):
235 env_name = _get_required_key(environment, "name")
236 if _is_local_environment(env_name):
237 raise FalParseError(
238 f"Environment name conflicts with a reserved name: {env_name}."
239 )
240
241 env_kind = _get_required_key(environment, "type")
242 environments[env_name] = create_environment(env_name, env_kind, environment)
243 return environments
244 except FalParseError as e:
245 raise RuntimeError("Error loading environments from fal_project.yml") from e
246
247 def normalize_path(base: str, path: Union[Path, str]):
248 real_base = os.path.realpath(os.path.normpath(base))
249 return Path(os.path.realpath(os.path.join(real_base, path)))
250
251
252 def normalize_paths(
253 base: str, paths: Union[List[Path], List[str], List[Union[Path, str]]]
254 ):
255 return list(map(lambda path: normalize_path(base, path), paths))
256
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/faldbt/parse.py b/src/faldbt/parse.py
--- a/src/faldbt/parse.py
+++ b/src/faldbt/parse.py
@@ -194,7 +194,10 @@
def get_global_script_configs(source_dirs: List[Path]) -> Dict[str, List[str]]:
global_scripts = {"before": [], "after": []}
for source_dir in source_dirs:
+ # Scan directories for .yml files
schema_files = glob.glob(os.path.join(source_dir, "**.yml"), recursive=True)
+ # Scan directories for .yaml files
+ schema_files += glob.glob(os.path.join(source_dir, "**.yaml"), recursive=True)
for file in schema_files:
schema_yml = load_yaml(file)
if schema_yml is not None:
| {"golden_diff": "diff --git a/src/faldbt/parse.py b/src/faldbt/parse.py\n--- a/src/faldbt/parse.py\n+++ b/src/faldbt/parse.py\n@@ -194,7 +194,10 @@\n def get_global_script_configs(source_dirs: List[Path]) -> Dict[str, List[str]]:\n global_scripts = {\"before\": [], \"after\": []}\n for source_dir in source_dirs:\n+ # Scan directories for .yml files\n schema_files = glob.glob(os.path.join(source_dir, \"**.yml\"), recursive=True)\n+ # Scan directories for .yaml files\n+ schema_files += glob.glob(os.path.join(source_dir, \"**.yaml\"), recursive=True)\n for file in schema_files:\n schema_yml = load_yaml(file)\n if schema_yml is not None:\n", "issue": "Fal read only dbt *.yml files and ignore *.yaml \n<!-- *** Make sure you have searched for an existing bug report for this issue *** -->\r\n\r\n**Describe the bug**\r\nIn dbt models you can have schems in either .yml or yaml extentions \r\nalso see : https://www.w3schools.io/file/yaml-vs-yml/\r\n\r\n**Your environment**\r\n- OS: MacOS\r\n- Paste the following commands output:\r\n```\r\ndbt-fal==1.3.7\r\nfal==0.7.3\r\ndbt-core==1.3.1\r\n```\r\n- Adapter being used: <!-- (if more than one dbt plugin installed) --> \r\n\r\n**How to reproduce**\r\nIf you create schema.yaml with fal section the schema will be ignored\r\n\r\n**Expected behavior**\r\nschema.yaml should be detected and parsed correctly\r\n\r\n**Actual behavior**\r\nTell us what actually happened.\r\n\r\n**Screenshots**\r\nIf applicable, add screenshots to help explain your problem.\r\n\r\n**Additional context**\r\nAdd any other context about the problem here.\r\n\n", "before_files": [{"content": "import os\nfrom dataclasses import dataclass\nimport glob\nfrom pathlib import Path\nfrom typing import Any, List, Dict, Optional, Union, TYPE_CHECKING\n\nfrom dbt.contracts.project import Project as ProjectContract\nfrom dbt.config import RuntimeConfig, Project\nfrom dbt.config.utils import parse_cli_vars\nfrom dbt.contracts.graph.manifest import Manifest\nfrom dbt.contracts.results import RunResultsArtifact, FreshnessExecutionResultArtifact\nfrom dbt.contracts.project import UserConfig\nfrom dbt.config.profile import read_user_config\nfrom dbt.exceptions import IncompatibleSchemaException, RuntimeException\nfrom fal.utils import cache_static\n\nfrom faldbt.logger import LOGGER\nfrom faldbt.utils.yaml_helper import load_yaml\nfrom fal.telemetry import telemetry\n\nif TYPE_CHECKING:\n from fal.packages.environments import BaseEnvironment\n\nFAL_SCRIPTS_PATH = \"fal-scripts-path\"\nFAL_MODELS_PATHS = \"fal-models-paths\"\n\n\nclass FalParseError(Exception):\n pass\n\n\ndef get_dbt_user_config(profiles_dir: str) -> UserConfig:\n return read_user_config(profiles_dir)\n\n\n@dataclass\nclass RuntimeArgs:\n project_dir: str\n profiles_dir: str\n threads: Optional[int]\n single_threaded: bool\n profile: Optional[str]\n target: Optional[str]\n\n\ndef load_dbt_project_contract(project_dir: str) -> ProjectContract:\n partial_project = Project.partial_load(project_dir)\n contract = ProjectContract.from_dict(partial_project.project_dict)\n if not hasattr(contract, \"model_paths\") or contract.model_paths is None:\n setattr(contract, \"model_paths\", contract.source_paths)\n if not hasattr(contract, \"seed_paths\") or contract.seed_paths is None:\n setattr(contract, \"seed_paths\", contract.data_paths)\n return contract\n\n\ndef get_dbt_config(\n *,\n project_dir: str,\n profiles_dir: str,\n profile_target: Optional[str] = None,\n threads: Optional[int] = None,\n profile: Optional[str] = None,\n) -> 
RuntimeConfig:\n # Construct a phony config\n import os\n args = RuntimeArgs(\n project_dir=project_dir,\n profiles_dir=profiles_dir,\n threads=threads,\n single_threaded=False,\n profile=profile,\n target=profile_target,\n )\n\n if project_dir and not \"PYTEST_CURRENT_TEST\" in os.environ:\n # HACK: initializing dbt-fal requires cwd to be project_dir\n # TODO: this doesn't work in pytest + Github Actions\n owd = os.getcwd()\n os.chdir(project_dir)\n config = RuntimeConfig.from_args(args)\n os.chdir(owd)\n else:\n config = RuntimeConfig.from_args(args)\n return config\n\n\ndef get_el_configs(\n profiles_dir: str, profile_name: str, target_name: str\n) -> Dict[str, Dict]:\n path = os.path.join(profiles_dir, \"profiles.yml\")\n yml = load_yaml(path)\n sync_configs = (\n yml.get(profile_name, {}).get(\"fal_extract_load\", {}).get(target_name, {})\n )\n return sync_configs\n\n\ndef get_vars_dict(project_dir: str, args_vars: str) -> Dict[str, Any]:\n project_contract = load_dbt_project_contract(project_dir)\n\n # NOTE: This happens usually inside unit tests\n vars = (project_contract is not None and project_contract.vars) or {}\n cli_vars = parse_cli_vars(args_vars)\n\n # cli_vars have higher priority\n return {**vars, **cli_vars}\n\n\n@cache_static\ndef get_fal_models_dirs(project_dir: str, args_vars: str) -> List[str]:\n vars = get_vars_dict(project_dir, args_vars)\n model_paths = vars.get(FAL_MODELS_PATHS) or []\n if not model_paths:\n # None or empty list\n LOGGER.warn(\n f\"Variable '{FAL_MODELS_PATHS}' not defined. Locate fal-format \"\n \"Python models in a separate model directory and set it as the variable. \"\n \"e.g. {FAL_MODELS_PATHS}: ['fal_models']\"\n )\n\n telemetry.log_api(action=\"fal_models_paths_not_set\")\n\n if not isinstance(model_paths, list):\n raise FalParseError(\n f\"Error parsing '{FAL_MODELS_PATHS}'. Expected list of strings and got '{type(model_paths)}'\"\n )\n\n return model_paths\n\n\ndef get_scripts_dir(project_dir: str, args_vars: str) -> str:\n vars = get_vars_dict(project_dir, args_vars)\n scripts_dir = vars.get(FAL_SCRIPTS_PATH, project_dir)\n\n if not isinstance(scripts_dir, str):\n raise FalParseError(\n f\"Error parsing '{FAL_SCRIPTS_PATH}'. 
Expected string and got '{type(scripts_dir)}'\"\n )\n\n return os.path.join(project_dir, scripts_dir)\n\n\ndef get_dbt_manifest(config) -> Manifest:\n from dbt.parser.manifest import ManifestLoader\n\n return ManifestLoader.get_full_manifest(config)\n\n\ndef get_dbt_sources_artifact(project_dir: str, config: RuntimeConfig):\n sources_path = os.path.join(project_dir, config.target_path, \"sources.json\")\n try:\n # BACKWARDS: Change intorduced in 1.0.0\n if hasattr(FreshnessExecutionResultArtifact, \"read_and_check_versions\"):\n return FreshnessExecutionResultArtifact.read_and_check_versions(\n sources_path\n )\n else:\n return FreshnessExecutionResultArtifact.read(sources_path)\n\n except IncompatibleSchemaException as exc:\n # TODO: add test for this case\n exc.add_filename(sources_path)\n raise\n except RuntimeException as exc:\n LOGGER.warn(\"Could not read dbt sources artifact\")\n return None\n\n\ndef get_dbt_results(\n project_dir: str, config: RuntimeConfig\n) -> Optional[RunResultsArtifact]:\n results_path = os.path.join(project_dir, config.target_path, \"run_results.json\")\n try:\n # BACKWARDS: Change intorduced in 1.0.0\n if hasattr(RunResultsArtifact, \"read_and_check_versions\"):\n return RunResultsArtifact.read_and_check_versions(results_path)\n else:\n return RunResultsArtifact.read(results_path)\n\n except IncompatibleSchemaException as exc:\n # TODO: add test for this case\n exc.add_filename(results_path)\n raise\n except RuntimeException as exc:\n LOGGER.warn(\"Could not read dbt run_results artifact\")\n return None\n\n\ndef get_scripts_list(scripts_dir: str) -> List[str]:\n scripts_path = Path(scripts_dir)\n return list(map(str, [*scripts_path.rglob(\"*.py\"), *scripts_path.rglob(\"*.ipynb\")]))\n\n\ndef get_global_script_configs(source_dirs: List[Path]) -> Dict[str, List[str]]:\n global_scripts = {\"before\": [], \"after\": []}\n for source_dir in source_dirs:\n schema_files = glob.glob(os.path.join(source_dir, \"**.yml\"), recursive=True)\n for file in schema_files:\n schema_yml = load_yaml(file)\n if schema_yml is not None:\n fal_config = schema_yml.get(\"fal\", None)\n if fal_config is not None:\n # sometimes `scripts` can *be* there and still be None\n script_paths = fal_config.get(\"scripts\") or []\n if isinstance(script_paths, list):\n global_scripts[\"after\"] += script_paths\n else:\n global_scripts[\"before\"] += script_paths.get(\"before\") or []\n global_scripts[\"after\"] += script_paths.get(\"after\") or []\n else:\n raise FalParseError(\"Error parsing the schema file \" + file)\n\n return global_scripts\n\n\ndef _get_required_key(data: Dict[str, Any], name: str) -> Any:\n if name not in data:\n raise FalParseError(\"Missing required key: \" + name)\n return data[name]\n\n\ndef load_environments(base_dir: str) -> Dict[str, \"BaseEnvironment\"]:\n from fal.packages.environments import create_environment\n from fal.fal_script import _is_local_environment\n\n try:\n fal_project_path = os.path.join(base_dir, \"fal_project.yml\")\n if not os.path.exists(fal_project_path):\n raise FalParseError(f\"{fal_project_path} must exist to define environments\")\n\n fal_project = load_yaml(fal_project_path)\n\n environments = {}\n for environment in fal_project.get(\"environments\", []):\n env_name = _get_required_key(environment, \"name\")\n if _is_local_environment(env_name):\n raise FalParseError(\n f\"Environment name conflicts with a reserved name: {env_name}.\"\n )\n\n env_kind = _get_required_key(environment, \"type\")\n environments[env_name] = 
create_environment(env_name, env_kind, environment)\n return environments\n except FalParseError as e:\n raise RuntimeError(\"Error loading environments from fal_project.yml\") from e\n\ndef normalize_path(base: str, path: Union[Path, str]):\n real_base = os.path.realpath(os.path.normpath(base))\n return Path(os.path.realpath(os.path.join(real_base, path)))\n\n\ndef normalize_paths(\n base: str, paths: Union[List[Path], List[str], List[Union[Path, str]]]\n):\n return list(map(lambda path: normalize_path(base, path), paths))\n", "path": "src/faldbt/parse.py"}], "after_files": [{"content": "import os\nfrom dataclasses import dataclass\nimport glob\nfrom pathlib import Path\nfrom typing import Any, List, Dict, Optional, Union, TYPE_CHECKING\n\nfrom dbt.contracts.project import Project as ProjectContract\nfrom dbt.config import RuntimeConfig, Project\nfrom dbt.config.utils import parse_cli_vars\nfrom dbt.contracts.graph.manifest import Manifest\nfrom dbt.contracts.results import RunResultsArtifact, FreshnessExecutionResultArtifact\nfrom dbt.contracts.project import UserConfig\nfrom dbt.config.profile import read_user_config\nfrom dbt.exceptions import IncompatibleSchemaException, RuntimeException\nfrom fal.utils import cache_static\n\nfrom faldbt.logger import LOGGER\nfrom faldbt.utils.yaml_helper import load_yaml\nfrom fal.telemetry import telemetry\n\nif TYPE_CHECKING:\n from fal.packages.environments import BaseEnvironment\n\nFAL_SCRIPTS_PATH = \"fal-scripts-path\"\nFAL_MODELS_PATHS = \"fal-models-paths\"\n\n\nclass FalParseError(Exception):\n pass\n\n\ndef get_dbt_user_config(profiles_dir: str) -> UserConfig:\n return read_user_config(profiles_dir)\n\n\n@dataclass\nclass RuntimeArgs:\n project_dir: str\n profiles_dir: str\n threads: Optional[int]\n single_threaded: bool\n profile: Optional[str]\n target: Optional[str]\n\n\ndef load_dbt_project_contract(project_dir: str) -> ProjectContract:\n partial_project = Project.partial_load(project_dir)\n contract = ProjectContract.from_dict(partial_project.project_dict)\n if not hasattr(contract, \"model_paths\") or contract.model_paths is None:\n setattr(contract, \"model_paths\", contract.source_paths)\n if not hasattr(contract, \"seed_paths\") or contract.seed_paths is None:\n setattr(contract, \"seed_paths\", contract.data_paths)\n return contract\n\n\ndef get_dbt_config(\n *,\n project_dir: str,\n profiles_dir: str,\n profile_target: Optional[str] = None,\n threads: Optional[int] = None,\n profile: Optional[str] = None,\n) -> RuntimeConfig:\n # Construct a phony config\n import os\n args = RuntimeArgs(\n project_dir=project_dir,\n profiles_dir=profiles_dir,\n threads=threads,\n single_threaded=False,\n profile=profile,\n target=profile_target,\n )\n\n if project_dir and not \"PYTEST_CURRENT_TEST\" in os.environ:\n # HACK: initializing dbt-fal requires cwd to be project_dir\n # TODO: this doesn't work in pytest + Github Actions\n owd = os.getcwd()\n os.chdir(project_dir)\n config = RuntimeConfig.from_args(args)\n os.chdir(owd)\n else:\n config = RuntimeConfig.from_args(args)\n return config\n\n\ndef get_el_configs(\n profiles_dir: str, profile_name: str, target_name: str\n) -> Dict[str, Dict]:\n path = os.path.join(profiles_dir, \"profiles.yml\")\n yml = load_yaml(path)\n sync_configs = (\n yml.get(profile_name, {}).get(\"fal_extract_load\", {}).get(target_name, {})\n )\n return sync_configs\n\n\ndef get_vars_dict(project_dir: str, args_vars: str) -> Dict[str, Any]:\n project_contract = load_dbt_project_contract(project_dir)\n\n # NOTE: This 
happens usually inside unit tests\n vars = (project_contract is not None and project_contract.vars) or {}\n cli_vars = parse_cli_vars(args_vars)\n\n # cli_vars have higher priority\n return {**vars, **cli_vars}\n\n\n@cache_static\ndef get_fal_models_dirs(project_dir: str, args_vars: str) -> List[str]:\n vars = get_vars_dict(project_dir, args_vars)\n model_paths = vars.get(FAL_MODELS_PATHS) or []\n if not model_paths:\n # None or empty list\n LOGGER.warn(\n f\"Variable '{FAL_MODELS_PATHS}' not defined. Locate fal-format \"\n \"Python models in a separate model directory and set it as the variable. \"\n \"e.g. {FAL_MODELS_PATHS}: ['fal_models']\"\n )\n\n telemetry.log_api(action=\"fal_models_paths_not_set\")\n\n if not isinstance(model_paths, list):\n raise FalParseError(\n f\"Error parsing '{FAL_MODELS_PATHS}'. Expected list of strings and got '{type(model_paths)}'\"\n )\n\n return model_paths\n\n\ndef get_scripts_dir(project_dir: str, args_vars: str) -> str:\n vars = get_vars_dict(project_dir, args_vars)\n scripts_dir = vars.get(FAL_SCRIPTS_PATH, project_dir)\n\n if not isinstance(scripts_dir, str):\n raise FalParseError(\n f\"Error parsing '{FAL_SCRIPTS_PATH}'. Expected string and got '{type(scripts_dir)}'\"\n )\n\n return os.path.join(project_dir, scripts_dir)\n\n\ndef get_dbt_manifest(config) -> Manifest:\n from dbt.parser.manifest import ManifestLoader\n\n return ManifestLoader.get_full_manifest(config)\n\n\ndef get_dbt_sources_artifact(project_dir: str, config: RuntimeConfig):\n sources_path = os.path.join(project_dir, config.target_path, \"sources.json\")\n try:\n # BACKWARDS: Change intorduced in 1.0.0\n if hasattr(FreshnessExecutionResultArtifact, \"read_and_check_versions\"):\n return FreshnessExecutionResultArtifact.read_and_check_versions(\n sources_path\n )\n else:\n return FreshnessExecutionResultArtifact.read(sources_path)\n\n except IncompatibleSchemaException as exc:\n # TODO: add test for this case\n exc.add_filename(sources_path)\n raise\n except RuntimeException as exc:\n LOGGER.warn(\"Could not read dbt sources artifact\")\n return None\n\n\ndef get_dbt_results(\n project_dir: str, config: RuntimeConfig\n) -> Optional[RunResultsArtifact]:\n results_path = os.path.join(project_dir, config.target_path, \"run_results.json\")\n try:\n # BACKWARDS: Change intorduced in 1.0.0\n if hasattr(RunResultsArtifact, \"read_and_check_versions\"):\n return RunResultsArtifact.read_and_check_versions(results_path)\n else:\n return RunResultsArtifact.read(results_path)\n\n except IncompatibleSchemaException as exc:\n # TODO: add test for this case\n exc.add_filename(results_path)\n raise\n except RuntimeException as exc:\n LOGGER.warn(\"Could not read dbt run_results artifact\")\n return None\n\n\ndef get_scripts_list(scripts_dir: str) -> List[str]:\n scripts_path = Path(scripts_dir)\n return list(map(str, [*scripts_path.rglob(\"*.py\"), *scripts_path.rglob(\"*.ipynb\")]))\n\n\ndef get_global_script_configs(source_dirs: List[Path]) -> Dict[str, List[str]]:\n global_scripts = {\"before\": [], \"after\": []}\n for source_dir in source_dirs:\n # Scan directories for .yml files\n schema_files = glob.glob(os.path.join(source_dir, \"**.yml\"), recursive=True)\n # Scan directories for .yaml files\n schema_files += glob.glob(os.path.join(source_dir, \"**.yaml\"), recursive=True)\n for file in schema_files:\n schema_yml = load_yaml(file)\n if schema_yml is not None:\n fal_config = schema_yml.get(\"fal\", None)\n if fal_config is not None:\n # sometimes `scripts` can *be* there and still be None\n 
script_paths = fal_config.get(\"scripts\") or []\n if isinstance(script_paths, list):\n global_scripts[\"after\"] += script_paths\n else:\n global_scripts[\"before\"] += script_paths.get(\"before\") or []\n global_scripts[\"after\"] += script_paths.get(\"after\") or []\n else:\n raise FalParseError(\"Error parsing the schema file \" + file)\n\n return global_scripts\n\n\ndef _get_required_key(data: Dict[str, Any], name: str) -> Any:\n if name not in data:\n raise FalParseError(\"Missing required key: \" + name)\n return data[name]\n\n\ndef load_environments(base_dir: str) -> Dict[str, \"BaseEnvironment\"]:\n from fal.packages.environments import create_environment\n from fal.fal_script import _is_local_environment\n\n try:\n fal_project_path = os.path.join(base_dir, \"fal_project.yml\")\n if not os.path.exists(fal_project_path):\n raise FalParseError(f\"{fal_project_path} must exist to define environments\")\n\n fal_project = load_yaml(fal_project_path)\n\n environments = {}\n for environment in fal_project.get(\"environments\", []):\n env_name = _get_required_key(environment, \"name\")\n if _is_local_environment(env_name):\n raise FalParseError(\n f\"Environment name conflicts with a reserved name: {env_name}.\"\n )\n\n env_kind = _get_required_key(environment, \"type\")\n environments[env_name] = create_environment(env_name, env_kind, environment)\n return environments\n except FalParseError as e:\n raise RuntimeError(\"Error loading environments from fal_project.yml\") from e\n\ndef normalize_path(base: str, path: Union[Path, str]):\n real_base = os.path.realpath(os.path.normpath(base))\n return Path(os.path.realpath(os.path.join(real_base, path)))\n\n\ndef normalize_paths(\n base: str, paths: Union[List[Path], List[str], List[Union[Path, str]]]\n):\n return list(map(lambda path: normalize_path(base, path), paths))\n", "path": "src/faldbt/parse.py"}]} | 3,162 | 185 |
gh_patches_debug_24645 | rasdani/github-patches | git_diff | getnikola__nikola-3455 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
reST doc directive: slugs with slashes in no longer work
At some point in the previous few versions, slugs with slashes in no longer work - links pointing to them have their slashes removed before the link is resolved, leading to an error like `"swowndata" slug doesn't exist` for a slug called `sw/owndata`. I can't find this being mentioned in the release notes or the other issues here, so I assume this was done by accident, is a bug, and should be fixed.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `nikola/plugins/compile/rest/doc.py`
Content:
```
1 # -*- coding: utf-8 -*-
2
3 # Copyright © 2012-2020 Roberto Alsina and others.
4
5 # Permission is hereby granted, free of charge, to any
6 # person obtaining a copy of this software and associated
7 # documentation files (the "Software"), to deal in the
8 # Software without restriction, including without limitation
9 # the rights to use, copy, modify, merge, publish,
10 # distribute, sublicense, and/or sell copies of the
11 # Software, and to permit persons to whom the Software is
12 # furnished to do so, subject to the following conditions:
13 #
14 # The above copyright notice and this permission notice
15 # shall be included in all copies or substantial portions of
16 # the Software.
17 #
18 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
19 # KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
20 # WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
21 # PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS
22 # OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
23 # OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
24 # OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
25 # SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26
27 """reST role for linking to other documents."""
28
29 from docutils import nodes
30 from docutils.parsers.rst import roles
31
32 from nikola.utils import split_explicit_title, LOGGER, slugify
33 from nikola.plugin_categories import RestExtension
34
35
36 class Plugin(RestExtension):
37 """Plugin for doc role."""
38
39 name = 'rest_doc'
40
41 def set_site(self, site):
42 """Set Nikola site."""
43 self.site = site
44 roles.register_canonical_role('doc', doc_role)
45 self.site.register_shortcode('doc', doc_shortcode)
46 doc_role.site = site
47 return super().set_site(site)
48
49
50 def _doc_link(rawtext, text, options={}, content=[]):
51 """Handle the doc role."""
52 # split link's text and post's slug in role content
53 has_explicit_title, title, slug = split_explicit_title(text)
54 if '#' in slug:
55 slug, fragment = slug.split('#', 1)
56 else:
57 fragment = None
58 slug = slugify(slug)
59 # check if the slug given is part of our blog posts/pages
60 twin_slugs = False
61 post = None
62 for p in doc_role.site.timeline:
63 if p.meta('slug') == slug:
64 if post is None:
65 post = p
66 else:
67 twin_slugs = True
68 break
69
70 try:
71 if post is None:
72 raise ValueError("No post with matching slug found.")
73 except ValueError:
74 return False, False, None, None, slug
75
76 if not has_explicit_title:
77 # use post's title as link's text
78 title = post.title()
79 permalink = post.permalink()
80 if fragment:
81 permalink += '#' + fragment
82
83 return True, twin_slugs, title, permalink, slug
84
85
86 def doc_role(name, rawtext, text, lineno, inliner, options={}, content=[]):
87 """Handle the doc role."""
88 success, twin_slugs, title, permalink, slug = _doc_link(rawtext, text, options, content)
89 if success:
90 if twin_slugs:
91 inliner.reporter.warning(
92 'More than one post with the same slug. Using "{0}"'.format(permalink))
93 LOGGER.warning(
94 'More than one post with the same slug. Using "{0}" for doc role'.format(permalink))
95 node = make_link_node(rawtext, title, permalink, options)
96 return [node], []
97 else:
98 msg = inliner.reporter.error(
99 '"{0}" slug doesn\'t exist.'.format(slug),
100 line=lineno)
101 prb = inliner.problematic(rawtext, rawtext, msg)
102 return [prb], [msg]
103
104
105 def doc_shortcode(*args, **kwargs):
106 """Implement the doc shortcode."""
107 text = kwargs['data']
108 success, twin_slugs, title, permalink, slug = _doc_link(text, text, LOGGER)
109 if success:
110 if twin_slugs:
111 LOGGER.warning(
112 'More than one post with the same slug. Using "{0}" for doc shortcode'.format(permalink))
113 return '<a href="{0}">{1}</a>'.format(permalink, title)
114 else:
115 LOGGER.error(
116 '"{0}" slug doesn\'t exist.'.format(slug))
117 return '<span class="error text-error" style="color: red;">Invalid link: {0}</span>'.format(text)
118
119
120 def make_link_node(rawtext, text, url, options):
121 """Make a reST link node."""
122 node = nodes.reference(rawtext, text, refuri=url, *options)
123 return node
124
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/nikola/plugins/compile/rest/doc.py b/nikola/plugins/compile/rest/doc.py
--- a/nikola/plugins/compile/rest/doc.py
+++ b/nikola/plugins/compile/rest/doc.py
@@ -47,16 +47,8 @@
return super().set_site(site)
-def _doc_link(rawtext, text, options={}, content=[]):
- """Handle the doc role."""
- # split link's text and post's slug in role content
- has_explicit_title, title, slug = split_explicit_title(text)
- if '#' in slug:
- slug, fragment = slug.split('#', 1)
- else:
- fragment = None
- slug = slugify(slug)
- # check if the slug given is part of our blog posts/pages
+def _find_post(slug):
+ """Find a post with the given slug in posts or pages."""
twin_slugs = False
post = None
for p in doc_role.site.timeline:
@@ -66,6 +58,23 @@
else:
twin_slugs = True
break
+ return post, twin_slugs
+
+
+def _doc_link(rawtext, text, options={}, content=[]):
+ """Handle the doc role."""
+ # split link's text and post's slug in role content
+ has_explicit_title, title, slug = split_explicit_title(text)
+ if '#' in slug:
+ slug, fragment = slug.split('#', 1)
+ else:
+ fragment = None
+
+ # Look for the unslugified input first, then try to slugify (Issue #3450)
+ post, twin_slugs = _find_post(slug)
+ if post is None:
+ slug = slugify(slug)
+ post, twin_slugs = _find_post(slug)
try:
if post is None:
| {"golden_diff": "diff --git a/nikola/plugins/compile/rest/doc.py b/nikola/plugins/compile/rest/doc.py\n--- a/nikola/plugins/compile/rest/doc.py\n+++ b/nikola/plugins/compile/rest/doc.py\n@@ -47,16 +47,8 @@\n return super().set_site(site)\n \n \n-def _doc_link(rawtext, text, options={}, content=[]):\n- \"\"\"Handle the doc role.\"\"\"\n- # split link's text and post's slug in role content\n- has_explicit_title, title, slug = split_explicit_title(text)\n- if '#' in slug:\n- slug, fragment = slug.split('#', 1)\n- else:\n- fragment = None\n- slug = slugify(slug)\n- # check if the slug given is part of our blog posts/pages\n+def _find_post(slug):\n+ \"\"\"Find a post with the given slug in posts or pages.\"\"\"\n twin_slugs = False\n post = None\n for p in doc_role.site.timeline:\n@@ -66,6 +58,23 @@\n else:\n twin_slugs = True\n break\n+ return post, twin_slugs\n+\n+\n+def _doc_link(rawtext, text, options={}, content=[]):\n+ \"\"\"Handle the doc role.\"\"\"\n+ # split link's text and post's slug in role content\n+ has_explicit_title, title, slug = split_explicit_title(text)\n+ if '#' in slug:\n+ slug, fragment = slug.split('#', 1)\n+ else:\n+ fragment = None\n+\n+ # Look for the unslugified input first, then try to slugify (Issue #3450)\n+ post, twin_slugs = _find_post(slug)\n+ if post is None:\n+ slug = slugify(slug)\n+ post, twin_slugs = _find_post(slug)\n \n try:\n if post is None:\n", "issue": "reST doc directive: slugs with slashes in no longer work\nAt some point in the previous few versions, slugs with slashes in no longer work - links pointing to them have their slashes removed before the link is resolved, leading to an error like `\"swowndata\" slug doesn't exist` for a slug called `sw/owndata`. I can't find this being mentioned in the release notes or the other issues here, so I assume this was done by accident, is a bug, and should be fixed.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Copyright \u00a9 2012-2020 Roberto Alsina and others.\n\n# Permission is hereby granted, free of charge, to any\n# person obtaining a copy of this software and associated\n# documentation files (the \"Software\"), to deal in the\n# Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the\n# Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice\n# shall be included in all copies or substantial portions of\n# the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY\n# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE\n# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR\n# PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS\n# OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR\n# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR\n# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\n\"\"\"reST role for linking to other documents.\"\"\"\n\nfrom docutils import nodes\nfrom docutils.parsers.rst import roles\n\nfrom nikola.utils import split_explicit_title, LOGGER, slugify\nfrom nikola.plugin_categories import RestExtension\n\n\nclass Plugin(RestExtension):\n \"\"\"Plugin for doc role.\"\"\"\n\n name = 'rest_doc'\n\n def set_site(self, site):\n \"\"\"Set Nikola site.\"\"\"\n self.site = site\n roles.register_canonical_role('doc', doc_role)\n self.site.register_shortcode('doc', doc_shortcode)\n doc_role.site = site\n return super().set_site(site)\n\n\ndef _doc_link(rawtext, text, options={}, content=[]):\n \"\"\"Handle the doc role.\"\"\"\n # split link's text and post's slug in role content\n has_explicit_title, title, slug = split_explicit_title(text)\n if '#' in slug:\n slug, fragment = slug.split('#', 1)\n else:\n fragment = None\n slug = slugify(slug)\n # check if the slug given is part of our blog posts/pages\n twin_slugs = False\n post = None\n for p in doc_role.site.timeline:\n if p.meta('slug') == slug:\n if post is None:\n post = p\n else:\n twin_slugs = True\n break\n\n try:\n if post is None:\n raise ValueError(\"No post with matching slug found.\")\n except ValueError:\n return False, False, None, None, slug\n\n if not has_explicit_title:\n # use post's title as link's text\n title = post.title()\n permalink = post.permalink()\n if fragment:\n permalink += '#' + fragment\n\n return True, twin_slugs, title, permalink, slug\n\n\ndef doc_role(name, rawtext, text, lineno, inliner, options={}, content=[]):\n \"\"\"Handle the doc role.\"\"\"\n success, twin_slugs, title, permalink, slug = _doc_link(rawtext, text, options, content)\n if success:\n if twin_slugs:\n inliner.reporter.warning(\n 'More than one post with the same slug. Using \"{0}\"'.format(permalink))\n LOGGER.warning(\n 'More than one post with the same slug. Using \"{0}\" for doc role'.format(permalink))\n node = make_link_node(rawtext, title, permalink, options)\n return [node], []\n else:\n msg = inliner.reporter.error(\n '\"{0}\" slug doesn\\'t exist.'.format(slug),\n line=lineno)\n prb = inliner.problematic(rawtext, rawtext, msg)\n return [prb], [msg]\n\n\ndef doc_shortcode(*args, **kwargs):\n \"\"\"Implement the doc shortcode.\"\"\"\n text = kwargs['data']\n success, twin_slugs, title, permalink, slug = _doc_link(text, text, LOGGER)\n if success:\n if twin_slugs:\n LOGGER.warning(\n 'More than one post with the same slug. 
Using \"{0}\" for doc shortcode'.format(permalink))\n return '<a href=\"{0}\">{1}</a>'.format(permalink, title)\n else:\n LOGGER.error(\n '\"{0}\" slug doesn\\'t exist.'.format(slug))\n return '<span class=\"error text-error\" style=\"color: red;\">Invalid link: {0}</span>'.format(text)\n\n\ndef make_link_node(rawtext, text, url, options):\n \"\"\"Make a reST link node.\"\"\"\n node = nodes.reference(rawtext, text, refuri=url, *options)\n return node\n", "path": "nikola/plugins/compile/rest/doc.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Copyright \u00a9 2012-2020 Roberto Alsina and others.\n\n# Permission is hereby granted, free of charge, to any\n# person obtaining a copy of this software and associated\n# documentation files (the \"Software\"), to deal in the\n# Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the\n# Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice\n# shall be included in all copies or substantial portions of\n# the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY\n# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE\n# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR\n# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS\n# OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR\n# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR\n# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\n\"\"\"reST role for linking to other documents.\"\"\"\n\nfrom docutils import nodes\nfrom docutils.parsers.rst import roles\n\nfrom nikola.utils import split_explicit_title, LOGGER, slugify\nfrom nikola.plugin_categories import RestExtension\n\n\nclass Plugin(RestExtension):\n \"\"\"Plugin for doc role.\"\"\"\n\n name = 'rest_doc'\n\n def set_site(self, site):\n \"\"\"Set Nikola site.\"\"\"\n self.site = site\n roles.register_canonical_role('doc', doc_role)\n self.site.register_shortcode('doc', doc_shortcode)\n doc_role.site = site\n return super().set_site(site)\n\n\ndef _find_post(slug):\n \"\"\"Find a post with the given slug in posts or pages.\"\"\"\n twin_slugs = False\n post = None\n for p in doc_role.site.timeline:\n if p.meta('slug') == slug:\n if post is None:\n post = p\n else:\n twin_slugs = True\n break\n return post, twin_slugs\n\n\ndef _doc_link(rawtext, text, options={}, content=[]):\n \"\"\"Handle the doc role.\"\"\"\n # split link's text and post's slug in role content\n has_explicit_title, title, slug = split_explicit_title(text)\n if '#' in slug:\n slug, fragment = slug.split('#', 1)\n else:\n fragment = None\n\n # Look for the unslugified input first, then try to slugify (Issue #3450)\n post, twin_slugs = _find_post(slug)\n if post is None:\n slug = slugify(slug)\n post, twin_slugs = _find_post(slug)\n\n try:\n if post is None:\n raise ValueError(\"No post with matching slug found.\")\n except ValueError:\n return False, False, None, None, slug\n\n if not has_explicit_title:\n # use post's title as link's text\n title = post.title()\n permalink = post.permalink()\n if fragment:\n permalink += '#' + fragment\n\n return True, twin_slugs, title, permalink, slug\n\n\ndef doc_role(name, rawtext, text, lineno, inliner, options={}, content=[]):\n \"\"\"Handle the doc role.\"\"\"\n success, 
twin_slugs, title, permalink, slug = _doc_link(rawtext, text, options, content)\n if success:\n if twin_slugs:\n inliner.reporter.warning(\n 'More than one post with the same slug. Using \"{0}\"'.format(permalink))\n LOGGER.warning(\n 'More than one post with the same slug. Using \"{0}\" for doc role'.format(permalink))\n node = make_link_node(rawtext, title, permalink, options)\n return [node], []\n else:\n msg = inliner.reporter.error(\n '\"{0}\" slug doesn\\'t exist.'.format(slug),\n line=lineno)\n prb = inliner.problematic(rawtext, rawtext, msg)\n return [prb], [msg]\n\n\ndef doc_shortcode(*args, **kwargs):\n \"\"\"Implement the doc shortcode.\"\"\"\n text = kwargs['data']\n success, twin_slugs, title, permalink, slug = _doc_link(text, text, LOGGER)\n if success:\n if twin_slugs:\n LOGGER.warning(\n 'More than one post with the same slug. Using \"{0}\" for doc shortcode'.format(permalink))\n return '<a href=\"{0}\">{1}</a>'.format(permalink, title)\n else:\n LOGGER.error(\n '\"{0}\" slug doesn\\'t exist.'.format(slug))\n return '<span class=\"error text-error\" style=\"color: red;\">Invalid link: {0}</span>'.format(text)\n\n\ndef make_link_node(rawtext, text, url, options):\n \"\"\"Make a reST link node.\"\"\"\n node = nodes.reference(rawtext, text, refuri=url, *options)\n return node\n", "path": "nikola/plugins/compile/rest/doc.py"}]} | 1,666 | 420 |
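For readers skimming this record, the patch above reduces to a two-pass lookup: try the slug exactly as written (so values like `sw/owndata` can match), and only fall back to the slugified form if nothing is found. A minimal sketch of that order, where `find_post` and `slugify` are placeholder assumptions rather than nikola's real helpers:

```python
# Hedged sketch of the lookup order introduced by the patch; helper names are
# placeholders, not nikola's actual API.
def resolve_slug(slug, find_post, slugify):
    # First try the slug verbatim, so "sw/owndata" resolves as-is.
    post, twin_slugs = find_post(slug)
    if post is None:
        # Fall back to the slugified form (e.g. "swowndata") for older links.
        post, twin_slugs = find_post(slugify(slug))
    return post, twin_slugs
```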
gh_patches_debug_1018 | rasdani/github-patches | git_diff | opsdroid__opsdroid-1504 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Cisco WebEx Teams connector doesn't start
# Error
```
INFO opsdroid.logging: ========================================
INFO opsdroid.logging: Started opsdroid 0+unknown.
WARNING opsdroid: 'welcome-message: true/false' is missing in configuration.yaml
WARNING opsdroid.loader: No databases in configuration. This will cause skills which store things in memory to lose data when opsdroid is restarted.
INFO opsdroid.loader: Cloning hello from remote repository.
Traceback (most recent call last):
File "/usr/local/bin/opsdroid", line 8, in <module>
sys.exit(cli())
File "/usr/local/lib/python3.7/site-packages/click/core.py", line 829, in __call__
return self.main(*args, **kwargs)
File "/usr/local/lib/python3.7/site-packages/click/core.py", line 782, in main
rv = self.invoke(ctx)
File "/usr/local/lib/python3.7/site-packages/click/core.py", line 1259, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/usr/local/lib/python3.7/site-packages/click/core.py", line 1066, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/usr/local/lib/python3.7/site-packages/click/core.py", line 610, in invoke
return callback(*args, **kwargs)
File "/usr/local/lib/python3.7/site-packages/opsdroid/cli/start.py", line 42, in start
opsdroid.run()
File "/usr/local/lib/python3.7/site-packages/opsdroid/core.py", line 158, in run
self.sync_load()
File "/usr/local/lib/python3.7/site-packages/opsdroid/core.py", line 177, in sync_load
self.eventloop.run_until_complete(self.load())
File "/usr/local/lib/python3.7/asyncio/base_events.py", line 587, in run_until_complete
return future.result()
File "/usr/local/lib/python3.7/site-packages/opsdroid/core.py", line 185, in load
await self.start_connectors(self.modules["connectors"])
File "/usr/local/lib/python3.7/site-packages/opsdroid/core.py", line 319, in start_connectors
await self.eventloop.create_task(connector.connect())
File "/usr/local/lib/python3.7/site-packages/opsdroid/connector/webexteams/__init__.py", line 53, in connect
await self.subscribe_to_rooms()
File "/usr/local/lib/python3.7/site-packages/opsdroid/connector/webexteams/__init__.py", line 99, in subscribe_to_rooms
secret=self.secret,
File "/usr/local/lib/python3.7/site-packages/webexteamssdk/api/webhooks.py", line 159, in create
json_data = self._session.post(API_ENDPOINT, json=post_data)
File "/usr/local/lib/python3.7/site-packages/webexteamssdk/restsession.py", line 401, in post
**kwargs)
File "/usr/local/lib/python3.7/site-packages/webexteamssdk/restsession.py", line 258, in request
check_response_code(response, erc)
File "/usr/local/lib/python3.7/site-packages/webexteamssdk/utils.py", line 220, in check_response_code
raise ApiError(response)
webexteamssdk.exceptions.ApiError: [400] Bad Request - POST failed: HTTP/1.1 400 Bad Request (url = https://webhook-engine-a.wbx2.com/webhook-engine/api/v1/webhooks, request/response TrackingId = ROUTER_5ECD21B0-63B3-01BB-00D6-B2CAA80F00D6, error = 'Invalid targetUrl: Illegal character in path at index 0: <function Url at 0x7fd36ce31f80>/connector/webexteams')
```
# How to reproduce
* Create `configuration.yaml` with the following content:
```
connectors:
webexteams:
token: MYBOTACCESSTOKEN
webhook-url: https://my-webhook-url.com
# Seem that webhook-url is not relevant for the error message
skills:
hello:
```
* Create `debug.sh` with the following content:
```
docker run --rm -ti -p 8080:8080 \
-v `pwd`/configuration.yaml:/root/.config/opsdroid/configuration.yaml:ro \
opsdroid/opsdroid:v0.18.0 sh
```
* `chmod +x debug.sh`
* `./debug.sh`
* (in the container) `opsdroid start`
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `opsdroid/connector/webexteams/__init__.py`
Content:
```
1 """A connector for Webex Teams."""
2 import json
3 import logging
4 import uuid
5 import os
6
7 import aiohttp
8
9 from webexteamssdk import WebexTeamsAPI
10 from voluptuous import Required, Url
11
12 from opsdroid.connector import Connector, register_event
13 from opsdroid.events import Message
14
15
16 _LOGGER = logging.getLogger(__name__)
17 CONFIG_SCHEMA = {Required("webhook-url"): Url, Required("token"): str}
18
19
20 class ConnectorWebexTeams(Connector):
21 """A connector for Webex Teams."""
22
23 def __init__(self, config, opsdroid=None):
24 """Create a connector."""
25 _LOGGER.debug(_("Loaded WebEx Teams Connector."))
26 super().__init__(config, opsdroid=opsdroid)
27 self.name = "webexteams"
28 self.config = config
29 self.opsdroid = opsdroid
30 self.default_target = None
31 self.bot_name = config.get("bot-name", "opsdroid")
32 self.bot_webex_id = None
33 self.secret = uuid.uuid4().hex
34 self.people = {}
35
36 async def connect(self):
37 """Connect to the chat service."""
38 try:
39 self.api = WebexTeamsAPI(
40 access_token=self.config["token"],
41 proxies={
42 "http": os.environ.get("HTTP_PROXY"),
43 "https": os.environ.get("HTTPS_PROXY"),
44 },
45 )
46 except KeyError:
47
48 _LOGGER.error(_("Must set access-token for WebEx Teams Connector."))
49
50 return
51
52 await self.clean_up_webhooks()
53 await self.subscribe_to_rooms()
54 await self.set_own_id()
55
56 async def webexteams_message_handler(self, request):
57 """Handle webhooks from the Webex Teams api."""
58 _LOGGER.debug(_("Handling message from WebEx Teams."))
59 req_data = await request.json()
60
61 _LOGGER.debug(req_data)
62
63 msg = self.api.messages.get(req_data["data"]["id"])
64
65 if req_data["data"]["personId"] != self.bot_webex_id:
66 person = await self.get_person(req_data["data"]["personId"])
67
68 try:
69 message = Message(
70 text=msg.text,
71 user=person.displayName,
72 target={"id": msg.roomId, "type": msg.roomType},
73 connector=self,
74 )
75 await self.opsdroid.parse(message)
76 except KeyError as error:
77 _LOGGER.error(error)
78
79 return aiohttp.web.Response(text=json.dumps("Received"), status=201)
80
81 async def clean_up_webhooks(self):
82 """Remove all existing webhooks."""
83 for webhook in self.api.webhooks.list():
84 self.api.webhooks.delete(webhook.id)
85
86 async def subscribe_to_rooms(self):
87 """Create webhooks for all rooms."""
88 _LOGGER.debug(_("Creating Webex Teams webhook."))
89 webhook_endpoint = "/connector/webexteams"
90 self.opsdroid.web_server.web_app.router.add_post(
91 webhook_endpoint, self.webexteams_message_handler
92 )
93
94 self.api.webhooks.create(
95 name="opsdroid",
96 targetUrl="{}{}".format(self.config.get("webhook-url"), webhook_endpoint),
97 resource="messages",
98 event="created",
99 secret=self.secret,
100 )
101
102 async def get_person(self, personId):
103 """Get a person's info from the api or cache."""
104 if personId not in self.people:
105 self.people[personId] = self.api.people.get(personId)
106 return self.people[personId]
107
108 async def set_own_id(self):
109 """Get the bot id and set it in the class."""
110 self.bot_webex_id = self.api.people.me().id
111
112 async def listen(self):
113 """Listen for and parse new messages."""
114 pass # Listening is handled by the aiohttp web server
115
116 @register_event(Message)
117 async def send_message(self, message):
118 """Respond with a message."""
119 self.api.messages.create(message.target["id"], text=message.text)
120
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/opsdroid/connector/webexteams/__init__.py b/opsdroid/connector/webexteams/__init__.py
--- a/opsdroid/connector/webexteams/__init__.py
+++ b/opsdroid/connector/webexteams/__init__.py
@@ -14,7 +14,7 @@
_LOGGER = logging.getLogger(__name__)
-CONFIG_SCHEMA = {Required("webhook-url"): Url, Required("token"): str}
+CONFIG_SCHEMA = {Required("webhook-url"): Url(), Required("token"): str}
class ConnectorWebexTeams(Connector):
| {"golden_diff": "diff --git a/opsdroid/connector/webexteams/__init__.py b/opsdroid/connector/webexteams/__init__.py\n--- a/opsdroid/connector/webexteams/__init__.py\n+++ b/opsdroid/connector/webexteams/__init__.py\n@@ -14,7 +14,7 @@\n \n \n _LOGGER = logging.getLogger(__name__)\n-CONFIG_SCHEMA = {Required(\"webhook-url\"): Url, Required(\"token\"): str}\n+CONFIG_SCHEMA = {Required(\"webhook-url\"): Url(), Required(\"token\"): str}\n \n \n class ConnectorWebexTeams(Connector):\n", "issue": "Cisco WebEx Teams connector doesn't start\n# Error\r\n\r\n```\r\nINFO opsdroid.logging: ========================================\r\nINFO opsdroid.logging: Started opsdroid 0+unknown.\r\nWARNING opsdroid: 'welcome-message: true/false' is missing in configuration.yaml\r\nWARNING opsdroid.loader: No databases in configuration. This will cause skills which store things in memory to lose data when opsdroid is restarted.\r\nINFO opsdroid.loader: Cloning hello from remote repository.\r\nTraceback (most recent call last):\r\n File \"/usr/local/bin/opsdroid\", line 8, in <module>\r\n sys.exit(cli())\r\n File \"/usr/local/lib/python3.7/site-packages/click/core.py\", line 829, in __call__\r\n return self.main(*args, **kwargs)\r\n File \"/usr/local/lib/python3.7/site-packages/click/core.py\", line 782, in main\r\n rv = self.invoke(ctx)\r\n File \"/usr/local/lib/python3.7/site-packages/click/core.py\", line 1259, in invoke\r\n return _process_result(sub_ctx.command.invoke(sub_ctx))\r\n File \"/usr/local/lib/python3.7/site-packages/click/core.py\", line 1066, in invoke\r\n return ctx.invoke(self.callback, **ctx.params)\r\n File \"/usr/local/lib/python3.7/site-packages/click/core.py\", line 610, in invoke\r\n return callback(*args, **kwargs)\r\n File \"/usr/local/lib/python3.7/site-packages/opsdroid/cli/start.py\", line 42, in start\r\n opsdroid.run()\r\n File \"/usr/local/lib/python3.7/site-packages/opsdroid/core.py\", line 158, in run\r\n self.sync_load()\r\n File \"/usr/local/lib/python3.7/site-packages/opsdroid/core.py\", line 177, in sync_load\r\n self.eventloop.run_until_complete(self.load())\r\n File \"/usr/local/lib/python3.7/asyncio/base_events.py\", line 587, in run_until_complete\r\n return future.result()\r\n File \"/usr/local/lib/python3.7/site-packages/opsdroid/core.py\", line 185, in load\r\n await self.start_connectors(self.modules[\"connectors\"])\r\n File \"/usr/local/lib/python3.7/site-packages/opsdroid/core.py\", line 319, in start_connectors\r\n await self.eventloop.create_task(connector.connect())\r\n File \"/usr/local/lib/python3.7/site-packages/opsdroid/connector/webexteams/__init__.py\", line 53, in connect\r\n await self.subscribe_to_rooms()\r\n File \"/usr/local/lib/python3.7/site-packages/opsdroid/connector/webexteams/__init__.py\", line 99, in subscribe_to_rooms\r\n secret=self.secret,\r\n File \"/usr/local/lib/python3.7/site-packages/webexteamssdk/api/webhooks.py\", line 159, in create\r\n json_data = self._session.post(API_ENDPOINT, json=post_data)\r\n File \"/usr/local/lib/python3.7/site-packages/webexteamssdk/restsession.py\", line 401, in post\r\n **kwargs)\r\n File \"/usr/local/lib/python3.7/site-packages/webexteamssdk/restsession.py\", line 258, in request\r\n check_response_code(response, erc)\r\n File \"/usr/local/lib/python3.7/site-packages/webexteamssdk/utils.py\", line 220, in check_response_code\r\n raise ApiError(response)\r\nwebexteamssdk.exceptions.ApiError: [400] Bad Request - POST failed: HTTP/1.1 400 Bad Request (url = 
https://webhook-engine-a.wbx2.com/webhook-engine/api/v1/webhooks, request/response TrackingId = ROUTER_5ECD21B0-63B3-01BB-00D6-B2CAA80F00D6, error = 'Invalid targetUrl: Illegal character in path at index 0: <function Url at 0x7fd36ce31f80>/connector/webexteams')\r\n```\r\n# How to reproduce\r\n\r\n* Create `configuration.yaml` with the following content:\r\n\r\n```\r\nconnectors:\r\n webexteams:\r\n token: MYBOTACCESSTOKEN\r\n webhook-url: https://my-webhook-url.com\r\n # Seem that webhook-url is not relevant for the error message\r\nskills:\r\n hello:\r\n```\r\n\r\n* Create `debug.sh` with the following content:\r\n\r\n```\r\ndocker run --rm -ti -p 8080:8080 \\\r\n\t-v `pwd`/configuration.yaml:/root/.config/opsdroid/configuration.yaml:ro \\\r\n\topsdroid/opsdroid:v0.18.0 sh\r\n```\r\n\r\n* `chmod +x debug.sh`\r\n* `./debug.sh`\r\n* (in the container) `opsdroid start`\r\n\r\n\n", "before_files": [{"content": "\"\"\"A connector for Webex Teams.\"\"\"\nimport json\nimport logging\nimport uuid\nimport os\n\nimport aiohttp\n\nfrom webexteamssdk import WebexTeamsAPI\nfrom voluptuous import Required, Url\n\nfrom opsdroid.connector import Connector, register_event\nfrom opsdroid.events import Message\n\n\n_LOGGER = logging.getLogger(__name__)\nCONFIG_SCHEMA = {Required(\"webhook-url\"): Url, Required(\"token\"): str}\n\n\nclass ConnectorWebexTeams(Connector):\n \"\"\"A connector for Webex Teams.\"\"\"\n\n def __init__(self, config, opsdroid=None):\n \"\"\"Create a connector.\"\"\"\n _LOGGER.debug(_(\"Loaded WebEx Teams Connector.\"))\n super().__init__(config, opsdroid=opsdroid)\n self.name = \"webexteams\"\n self.config = config\n self.opsdroid = opsdroid\n self.default_target = None\n self.bot_name = config.get(\"bot-name\", \"opsdroid\")\n self.bot_webex_id = None\n self.secret = uuid.uuid4().hex\n self.people = {}\n\n async def connect(self):\n \"\"\"Connect to the chat service.\"\"\"\n try:\n self.api = WebexTeamsAPI(\n access_token=self.config[\"token\"],\n proxies={\n \"http\": os.environ.get(\"HTTP_PROXY\"),\n \"https\": os.environ.get(\"HTTPS_PROXY\"),\n },\n )\n except KeyError:\n\n _LOGGER.error(_(\"Must set access-token for WebEx Teams Connector.\"))\n\n return\n\n await self.clean_up_webhooks()\n await self.subscribe_to_rooms()\n await self.set_own_id()\n\n async def webexteams_message_handler(self, request):\n \"\"\"Handle webhooks from the Webex Teams api.\"\"\"\n _LOGGER.debug(_(\"Handling message from WebEx Teams.\"))\n req_data = await request.json()\n\n _LOGGER.debug(req_data)\n\n msg = self.api.messages.get(req_data[\"data\"][\"id\"])\n\n if req_data[\"data\"][\"personId\"] != self.bot_webex_id:\n person = await self.get_person(req_data[\"data\"][\"personId\"])\n\n try:\n message = Message(\n text=msg.text,\n user=person.displayName,\n target={\"id\": msg.roomId, \"type\": msg.roomType},\n connector=self,\n )\n await self.opsdroid.parse(message)\n except KeyError as error:\n _LOGGER.error(error)\n\n return aiohttp.web.Response(text=json.dumps(\"Received\"), status=201)\n\n async def clean_up_webhooks(self):\n \"\"\"Remove all existing webhooks.\"\"\"\n for webhook in self.api.webhooks.list():\n self.api.webhooks.delete(webhook.id)\n\n async def subscribe_to_rooms(self):\n \"\"\"Create webhooks for all rooms.\"\"\"\n _LOGGER.debug(_(\"Creating Webex Teams webhook.\"))\n webhook_endpoint = \"/connector/webexteams\"\n self.opsdroid.web_server.web_app.router.add_post(\n webhook_endpoint, self.webexteams_message_handler\n )\n\n self.api.webhooks.create(\n name=\"opsdroid\",\n 
targetUrl=\"{}{}\".format(self.config.get(\"webhook-url\"), webhook_endpoint),\n resource=\"messages\",\n event=\"created\",\n secret=self.secret,\n )\n\n async def get_person(self, personId):\n \"\"\"Get a person's info from the api or cache.\"\"\"\n if personId not in self.people:\n self.people[personId] = self.api.people.get(personId)\n return self.people[personId]\n\n async def set_own_id(self):\n \"\"\"Get the bot id and set it in the class.\"\"\"\n self.bot_webex_id = self.api.people.me().id\n\n async def listen(self):\n \"\"\"Listen for and parse new messages.\"\"\"\n pass # Listening is handled by the aiohttp web server\n\n @register_event(Message)\n async def send_message(self, message):\n \"\"\"Respond with a message.\"\"\"\n self.api.messages.create(message.target[\"id\"], text=message.text)\n", "path": "opsdroid/connector/webexteams/__init__.py"}], "after_files": [{"content": "\"\"\"A connector for Webex Teams.\"\"\"\nimport json\nimport logging\nimport uuid\nimport os\n\nimport aiohttp\n\nfrom webexteamssdk import WebexTeamsAPI\nfrom voluptuous import Required, Url\n\nfrom opsdroid.connector import Connector, register_event\nfrom opsdroid.events import Message\n\n\n_LOGGER = logging.getLogger(__name__)\nCONFIG_SCHEMA = {Required(\"webhook-url\"): Url(), Required(\"token\"): str}\n\n\nclass ConnectorWebexTeams(Connector):\n \"\"\"A connector for Webex Teams.\"\"\"\n\n def __init__(self, config, opsdroid=None):\n \"\"\"Create a connector.\"\"\"\n _LOGGER.debug(_(\"Loaded WebEx Teams Connector.\"))\n super().__init__(config, opsdroid=opsdroid)\n self.name = \"webexteams\"\n self.config = config\n self.opsdroid = opsdroid\n self.default_target = None\n self.bot_name = config.get(\"bot-name\", \"opsdroid\")\n self.bot_webex_id = None\n self.secret = uuid.uuid4().hex\n self.people = {}\n\n async def connect(self):\n \"\"\"Connect to the chat service.\"\"\"\n try:\n self.api = WebexTeamsAPI(\n access_token=self.config[\"token\"],\n proxies={\n \"http\": os.environ.get(\"HTTP_PROXY\"),\n \"https\": os.environ.get(\"HTTPS_PROXY\"),\n },\n )\n except KeyError:\n\n _LOGGER.error(_(\"Must set access-token for WebEx Teams Connector.\"))\n\n return\n\n await self.clean_up_webhooks()\n await self.subscribe_to_rooms()\n await self.set_own_id()\n\n async def webexteams_message_handler(self, request):\n \"\"\"Handle webhooks from the Webex Teams api.\"\"\"\n _LOGGER.debug(_(\"Handling message from WebEx Teams.\"))\n req_data = await request.json()\n\n _LOGGER.debug(req_data)\n\n msg = self.api.messages.get(req_data[\"data\"][\"id\"])\n\n if req_data[\"data\"][\"personId\"] != self.bot_webex_id:\n person = await self.get_person(req_data[\"data\"][\"personId\"])\n\n try:\n message = Message(\n text=msg.text,\n user=person.displayName,\n target={\"id\": msg.roomId, \"type\": msg.roomType},\n connector=self,\n )\n await self.opsdroid.parse(message)\n except KeyError as error:\n _LOGGER.error(error)\n\n return aiohttp.web.Response(text=json.dumps(\"Received\"), status=201)\n\n async def clean_up_webhooks(self):\n \"\"\"Remove all existing webhooks.\"\"\"\n for webhook in self.api.webhooks.list():\n self.api.webhooks.delete(webhook.id)\n\n async def subscribe_to_rooms(self):\n \"\"\"Create webhooks for all rooms.\"\"\"\n _LOGGER.debug(_(\"Creating Webex Teams webhook.\"))\n webhook_endpoint = \"/connector/webexteams\"\n self.opsdroid.web_server.web_app.router.add_post(\n webhook_endpoint, self.webexteams_message_handler\n )\n\n self.api.webhooks.create(\n name=\"opsdroid\",\n 
targetUrl=\"{}{}\".format(self.config.get(\"webhook-url\"), webhook_endpoint),\n resource=\"messages\",\n event=\"created\",\n secret=self.secret,\n )\n\n async def get_person(self, personId):\n \"\"\"Get a person's info from the api or cache.\"\"\"\n if personId not in self.people:\n self.people[personId] = self.api.people.get(personId)\n return self.people[personId]\n\n async def set_own_id(self):\n \"\"\"Get the bot id and set it in the class.\"\"\"\n self.bot_webex_id = self.api.people.me().id\n\n async def listen(self):\n \"\"\"Listen for and parse new messages.\"\"\"\n pass # Listening is handled by the aiohttp web server\n\n @register_event(Message)\n async def send_message(self, message):\n \"\"\"Respond with a message.\"\"\"\n self.api.messages.create(message.target[\"id\"], text=message.text)\n", "path": "opsdroid/connector/webexteams/__init__.py"}]} | 2,444 | 136 |
gh_patches_debug_4308 | rasdani/github-patches | git_diff | tiangolo__fastapi-320 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
eta on pydantic update to 0.28
really need your latest PR
it should solve recurring issues with sqlalchemy to pydantic mapping
thank you !
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `fastapi/utils.py`
Content:
```
1 import re
2 from typing import Any, Dict, List, Sequence, Set, Type
3
4 from fastapi import routing
5 from fastapi.openapi.constants import REF_PREFIX
6 from pydantic import BaseModel
7 from pydantic.fields import Field
8 from pydantic.schema import get_flat_models_from_fields, model_process_schema
9 from starlette.routing import BaseRoute
10
11
12 def get_flat_models_from_routes(
13 routes: Sequence[Type[BaseRoute]]
14 ) -> Set[Type[BaseModel]]:
15 body_fields_from_routes: List[Field] = []
16 responses_from_routes: List[Field] = []
17 for route in routes:
18 if getattr(route, "include_in_schema", None) and isinstance(
19 route, routing.APIRoute
20 ):
21 if route.body_field:
22 assert isinstance(
23 route.body_field, Field
24 ), "A request body must be a Pydantic Field"
25 body_fields_from_routes.append(route.body_field)
26 if route.response_field:
27 responses_from_routes.append(route.response_field)
28 if route.response_fields:
29 responses_from_routes.extend(route.response_fields.values())
30 flat_models = get_flat_models_from_fields(
31 body_fields_from_routes + responses_from_routes
32 )
33 return flat_models
34
35
36 def get_model_definitions(
37 *, flat_models: Set[Type[BaseModel]], model_name_map: Dict[Type[BaseModel], str]
38 ) -> Dict[str, Any]:
39 definitions: Dict[str, Dict] = {}
40 for model in flat_models:
41 m_schema, m_definitions = model_process_schema(
42 model, model_name_map=model_name_map, ref_prefix=REF_PREFIX
43 )
44 definitions.update(m_definitions)
45 model_name = model_name_map[model]
46 definitions[model_name] = m_schema
47 return definitions
48
49
50 def get_path_param_names(path: str) -> Set[str]:
51 return {item.strip("{}") for item in re.findall("{[^}]*}", path)}
52
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/fastapi/utils.py b/fastapi/utils.py
--- a/fastapi/utils.py
+++ b/fastapi/utils.py
@@ -28,7 +28,7 @@
if route.response_fields:
responses_from_routes.extend(route.response_fields.values())
flat_models = get_flat_models_from_fields(
- body_fields_from_routes + responses_from_routes
+ body_fields_from_routes + responses_from_routes, known_models=set()
)
return flat_models
| {"golden_diff": "diff --git a/fastapi/utils.py b/fastapi/utils.py\n--- a/fastapi/utils.py\n+++ b/fastapi/utils.py\n@@ -28,7 +28,7 @@\n if route.response_fields:\n responses_from_routes.extend(route.response_fields.values())\n flat_models = get_flat_models_from_fields(\n- body_fields_from_routes + responses_from_routes\n+ body_fields_from_routes + responses_from_routes, known_models=set()\n )\n return flat_models\n", "issue": "eta on pydantic update to 0.28\nreally need your latest PR \r\nit shoud solve recurring issues with sqlalchemy to pydantic mapping\r\nthank you !\n", "before_files": [{"content": "import re\nfrom typing import Any, Dict, List, Sequence, Set, Type\n\nfrom fastapi import routing\nfrom fastapi.openapi.constants import REF_PREFIX\nfrom pydantic import BaseModel\nfrom pydantic.fields import Field\nfrom pydantic.schema import get_flat_models_from_fields, model_process_schema\nfrom starlette.routing import BaseRoute\n\n\ndef get_flat_models_from_routes(\n routes: Sequence[Type[BaseRoute]]\n) -> Set[Type[BaseModel]]:\n body_fields_from_routes: List[Field] = []\n responses_from_routes: List[Field] = []\n for route in routes:\n if getattr(route, \"include_in_schema\", None) and isinstance(\n route, routing.APIRoute\n ):\n if route.body_field:\n assert isinstance(\n route.body_field, Field\n ), \"A request body must be a Pydantic Field\"\n body_fields_from_routes.append(route.body_field)\n if route.response_field:\n responses_from_routes.append(route.response_field)\n if route.response_fields:\n responses_from_routes.extend(route.response_fields.values())\n flat_models = get_flat_models_from_fields(\n body_fields_from_routes + responses_from_routes\n )\n return flat_models\n\n\ndef get_model_definitions(\n *, flat_models: Set[Type[BaseModel]], model_name_map: Dict[Type[BaseModel], str]\n) -> Dict[str, Any]:\n definitions: Dict[str, Dict] = {}\n for model in flat_models:\n m_schema, m_definitions = model_process_schema(\n model, model_name_map=model_name_map, ref_prefix=REF_PREFIX\n )\n definitions.update(m_definitions)\n model_name = model_name_map[model]\n definitions[model_name] = m_schema\n return definitions\n\n\ndef get_path_param_names(path: str) -> Set[str]:\n return {item.strip(\"{}\") for item in re.findall(\"{[^}]*}\", path)}\n", "path": "fastapi/utils.py"}], "after_files": [{"content": "import re\nfrom typing import Any, Dict, List, Sequence, Set, Type\n\nfrom fastapi import routing\nfrom fastapi.openapi.constants import REF_PREFIX\nfrom pydantic import BaseModel\nfrom pydantic.fields import Field\nfrom pydantic.schema import get_flat_models_from_fields, model_process_schema\nfrom starlette.routing import BaseRoute\n\n\ndef get_flat_models_from_routes(\n routes: Sequence[Type[BaseRoute]]\n) -> Set[Type[BaseModel]]:\n body_fields_from_routes: List[Field] = []\n responses_from_routes: List[Field] = []\n for route in routes:\n if getattr(route, \"include_in_schema\", None) and isinstance(\n route, routing.APIRoute\n ):\n if route.body_field:\n assert isinstance(\n route.body_field, Field\n ), \"A request body must be a Pydantic Field\"\n body_fields_from_routes.append(route.body_field)\n if route.response_field:\n responses_from_routes.append(route.response_field)\n if route.response_fields:\n responses_from_routes.extend(route.response_fields.values())\n flat_models = get_flat_models_from_fields(\n body_fields_from_routes + responses_from_routes, known_models=set()\n )\n return flat_models\n\n\ndef get_model_definitions(\n *, flat_models: Set[Type[BaseModel]], 
model_name_map: Dict[Type[BaseModel], str]\n) -> Dict[str, Any]:\n definitions: Dict[str, Dict] = {}\n for model in flat_models:\n m_schema, m_definitions = model_process_schema(\n model, model_name_map=model_name_map, ref_prefix=REF_PREFIX\n )\n definitions.update(m_definitions)\n model_name = model_name_map[model]\n definitions[model_name] = m_schema\n return definitions\n\n\ndef get_path_param_names(path: str) -> Set[str]:\n return {item.strip(\"{}\") for item in re.findall(\"{[^}]*}\", path)}\n", "path": "fastapi/utils.py"}]} | 789 | 99 |
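The fix in this record simply threads an explicit `known_models=set()` into pydantic's helper, matching the signature that the pydantic 0.28 line requested in the issue appears to expect; passing a fresh set per call also keeps state from leaking between invocations. A hedged sketch of the call pattern, assuming pydantic v1-era `pydantic.schema`:

```python
# Sketch only: assumes pydantic v1's schema helpers, where
# get_flat_models_from_fields(fields, known_models) extends the given set of
# already-seen models while walking nested fields.
from pydantic.schema import get_flat_models_from_fields


def collect_flat_models(fields):
    return get_flat_models_from_fields(fields, known_models=set())
```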
gh_patches_debug_16320 | rasdani/github-patches | git_diff | googleapis__python-bigquery-564 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
unit-3.6 session is failing on Mac
```
nox > Running session unit-3.6
nox > Re-using existing virtual environment at .nox/unit-3-6.
nox > pip install mock pytest google-cloud-testutils pytest-cov freezegun -c /Users/swast/src/python-bigquery/testing/constraints-3.6.txt
nox > pip install -e .[all] -c /Users/swast/src/python-bigquery/testing/constraints-3.6.txt
nox > Command pip install -e .[all] -c /Users/swast/src/python-bigquery/testing/constraints-3.6.txt failed with exit code 1:
Obtaining file:///Users/swast/src/python-bigquery
Collecting google-api-core[grpc]<2.0.0dev,>=1.23.0
Using cached google_api_core-1.23.0-py2.py3-none-any.whl (91 kB)
Collecting proto-plus>=1.10.0
Using cached proto_plus-1.10.0-py3-none-any.whl
Collecting google-cloud-core<2.0dev,>=1.4.1
Using cached google_cloud_core-1.4.1-py2.py3-none-any.whl (26 kB)
Collecting google-resumable-media<2.0dev,>=0.6.0
Using cached google_resumable_media-0.6.0-py2.py3-none-any.whl (41 kB)
Requirement already satisfied: packaging>=14.3 in ./.nox/unit-3-6/lib/python3.6/site-packages (from google-cloud-bigquery==2.12.0) (20.9)
Collecting protobuf>=3.12.0
Using cached protobuf-3.12.0-cp36-cp36m-macosx_10_9_x86_64.whl (1.3 MB)
Collecting google-cloud-bigquery-storage<3.0.0dev,>=2.0.0
Using cached google_cloud_bigquery_storage-2.0.0-py2.py3-none-any.whl (60 kB)
Collecting grpcio<2.0dev,>=1.32.0
Using cached grpcio-1.32.0-cp36-cp36m-macosx_10_9_x86_64.whl (3.3 MB)
Collecting pyarrow<4.0dev,>=1.0.0
Using cached pyarrow-1.0.0-cp36-cp36m-macosx_10_9_intel.whl (11.1 MB)
Collecting pandas>=0.23.0
Using cached pandas-0.23.0-cp36-cp36m-macosx_10_6_intel.macosx_10_9_intel.macosx_10_9_x86_64.macosx_10_10_intel.macosx_10_10_x86_64.whl (16.8 MB)
ERROR: Could not find a version that satisfies the requirement pyarrow<4.0dev,>=3.0.0 (from google-cloud-bigquery[all])
ERROR: No matching distribution found for pyarrow<4.0dev,>=3.0.0
nox > Session unit-3.6 failed.
```
I think the `bignumeric` extra needs to be excluded from `all` or we need to add some logic to avoid installing that extra from Python 3.6.
Not sure why the Kokoro jobs aren't failing, though.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 # Copyright 2018 Google LLC
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import io
16 import os
17
18 import setuptools
19
20
21 # Package metadata.
22
23 name = "google-cloud-bigquery"
24 description = "Google BigQuery API client library"
25
26 # Should be one of:
27 # 'Development Status :: 3 - Alpha'
28 # 'Development Status :: 4 - Beta'
29 # 'Development Status :: 5 - Production/Stable'
30 release_status = "Development Status :: 5 - Production/Stable"
31 dependencies = [
32 "google-api-core[grpc] >= 1.23.0, < 2.0.0dev",
33 "proto-plus >= 1.10.0",
34 "google-cloud-core >= 1.4.1, < 2.0dev",
35 "google-resumable-media >= 0.6.0, < 2.0dev",
36 "packaging >= 14.3",
37 "protobuf >= 3.12.0",
38 ]
39 extras = {
40 "bqstorage": [
41 "google-cloud-bigquery-storage >= 2.0.0, <3.0.0dev",
42 # Due to an issue in pip's dependency resolver, the `grpc` extra is not
43 # installed, even though `google-cloud-bigquery-storage` specifies it
44 # as `google-api-core[grpc]`. We thus need to explicitly specify it here.
45 # See: https://github.com/googleapis/python-bigquery/issues/83 The
46 # grpc.Channel.close() method isn't added until 1.32.0.
47 # https://github.com/grpc/grpc/pull/15254
48 "grpcio >= 1.32.0, < 2.0dev",
49 "pyarrow >= 1.0.0, < 4.0dev",
50 ],
51 "pandas": ["pandas>=0.23.0", "pyarrow >= 1.0.0, < 4.0dev",],
52 "bignumeric_type": ["pyarrow >= 3.0.0, < 4.0dev"],
53 "tqdm": ["tqdm >= 4.7.4, <5.0.0dev"],
54 "opentelemetry": [
55 "opentelemetry-api==0.11b0",
56 "opentelemetry-sdk==0.11b0",
57 "opentelemetry-instrumentation==0.11b0",
58 ],
59 }
60
61 all_extras = []
62
63 for extra in extras:
64 all_extras.extend(extras[extra])
65
66 extras["all"] = all_extras
67
68 # Setup boilerplate below this line.
69
70 package_root = os.path.abspath(os.path.dirname(__file__))
71
72 readme_filename = os.path.join(package_root, "README.rst")
73 with io.open(readme_filename, encoding="utf-8") as readme_file:
74 readme = readme_file.read()
75
76 version = {}
77 with open(os.path.join(package_root, "google/cloud/bigquery/version.py")) as fp:
78 exec(fp.read(), version)
79 version = version["__version__"]
80
81 # Only include packages under the 'google' namespace. Do not include tests,
82 # benchmarks, etc.
83 packages = [
84 package
85 for package in setuptools.PEP420PackageFinder.find()
86 if package.startswith("google")
87 ]
88
89 # Determine which namespaces are needed.
90 namespaces = ["google"]
91 if "google.cloud" in packages:
92 namespaces.append("google.cloud")
93
94
95 setuptools.setup(
96 name=name,
97 version=version,
98 description=description,
99 long_description=readme,
100 author="Google LLC",
101 author_email="[email protected]",
102 license="Apache 2.0",
103 url="https://github.com/googleapis/python-bigquery",
104 classifiers=[
105 release_status,
106 "Intended Audience :: Developers",
107 "License :: OSI Approved :: Apache Software License",
108 "Programming Language :: Python",
109 "Programming Language :: Python :: 3",
110 "Programming Language :: Python :: 3.6",
111 "Programming Language :: Python :: 3.7",
112 "Programming Language :: Python :: 3.8",
113 "Programming Language :: Python :: 3.9",
114 "Operating System :: OS Independent",
115 "Topic :: Internet",
116 ],
117 platforms="Posix; MacOS X; Windows",
118 packages=packages,
119 namespace_packages=namespaces,
120 install_requires=dependencies,
121 extras_require=extras,
122 python_requires=">=3.6, <3.10",
123 include_package_data=True,
124 zip_safe=False,
125 )
126
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -48,7 +48,7 @@
"grpcio >= 1.32.0, < 2.0dev",
"pyarrow >= 1.0.0, < 4.0dev",
],
- "pandas": ["pandas>=0.23.0", "pyarrow >= 1.0.0, < 4.0dev",],
+ "pandas": ["pandas>=0.23.0", "pyarrow >= 1.0.0, < 4.0dev"],
"bignumeric_type": ["pyarrow >= 3.0.0, < 4.0dev"],
"tqdm": ["tqdm >= 4.7.4, <5.0.0dev"],
"opentelemetry": [
@@ -61,6 +61,11 @@
all_extras = []
for extra in extras:
+ # Exclude this extra from all to avoid overly strict dependencies on core
+ # libraries such as pyarrow.
+ # https://github.com/googleapis/python-bigquery/issues/563
+ if extra in {"bignumeric_type"}:
+ continue
all_extras.extend(extras[extra])
extras["all"] = all_extras
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -48,7 +48,7 @@\n \"grpcio >= 1.32.0, < 2.0dev\",\n \"pyarrow >= 1.0.0, < 4.0dev\",\n ],\n- \"pandas\": [\"pandas>=0.23.0\", \"pyarrow >= 1.0.0, < 4.0dev\",],\n+ \"pandas\": [\"pandas>=0.23.0\", \"pyarrow >= 1.0.0, < 4.0dev\"],\n \"bignumeric_type\": [\"pyarrow >= 3.0.0, < 4.0dev\"],\n \"tqdm\": [\"tqdm >= 4.7.4, <5.0.0dev\"],\n \"opentelemetry\": [\n@@ -61,6 +61,11 @@\n all_extras = []\n \n for extra in extras:\n+ # Exclude this extra from all to avoid overly strict dependencies on core\n+ # libraries such as pyarrow.\n+ # https://github.com/googleapis/python-bigquery/issues/563\n+ if extra in {\"bignumeric_type\"}:\n+ continue\n all_extras.extend(extras[extra])\n \n extras[\"all\"] = all_extras\n", "issue": "unit-3.6 session is failing on Mac\n```\r\nnox > Running session unit-3.6\r\nnox > Re-using existing virtual environment at .nox/unit-3-6.\r\nnox > pip install mock pytest google-cloud-testutils pytest-cov freezegun -c /Users/swast/src/python-bigquery/testing/constraints-3.6.txt\r\nnox > pip install -e .[all] -c /Users/swast/src/python-bigquery/testing/constraints-3.6.txt\r\nnox > Command pip install -e .[all] -c /Users/swast/src/python-bigquery/testing/constraints-3.6.txt failed with exit code 1:\r\nObtaining file:///Users/swast/src/python-bigquery\r\nCollecting google-api-core[grpc]<2.0.0dev,>=1.23.0\r\n Using cached google_api_core-1.23.0-py2.py3-none-any.whl (91 kB)\r\nCollecting proto-plus>=1.10.0\r\n Using cached proto_plus-1.10.0-py3-none-any.whl\r\nCollecting google-cloud-core<2.0dev,>=1.4.1\r\n Using cached google_cloud_core-1.4.1-py2.py3-none-any.whl (26 kB)\r\nCollecting google-resumable-media<2.0dev,>=0.6.0\r\n Using cached google_resumable_media-0.6.0-py2.py3-none-any.whl (41 kB)\r\nRequirement already satisfied: packaging>=14.3 in ./.nox/unit-3-6/lib/python3.6/site-packages (from google-cloud-bigquery==2.12.0) (20.9)\r\nCollecting protobuf>=3.12.0\r\n Using cached protobuf-3.12.0-cp36-cp36m-macosx_10_9_x86_64.whl (1.3 MB)\r\nCollecting google-cloud-bigquery-storage<3.0.0dev,>=2.0.0\r\n Using cached google_cloud_bigquery_storage-2.0.0-py2.py3-none-any.whl (60 kB)\r\nCollecting grpcio<2.0dev,>=1.32.0\r\n Using cached grpcio-1.32.0-cp36-cp36m-macosx_10_9_x86_64.whl (3.3 MB)\r\nCollecting pyarrow<4.0dev,>=1.0.0\r\n Using cached pyarrow-1.0.0-cp36-cp36m-macosx_10_9_intel.whl (11.1 MB)\r\nCollecting pandas>=0.23.0\r\n Using cached pandas-0.23.0-cp36-cp36m-macosx_10_6_intel.macosx_10_9_intel.macosx_10_9_x86_64.macosx_10_10_intel.macosx_10_10_x86_64.whl (16.8 MB)\r\nERROR: Could not find a version that satisfies the requirement pyarrow<4.0dev,>=3.0.0 (from google-cloud-bigquery[all])\r\nERROR: No matching distribution found for pyarrow<4.0dev,>=3.0.0\r\nnox > Session unit-3.6 failed.\r\n```\r\n\r\nI think the `bignumeric` extra needs to be excluded from `all` or we need to add some logic to avoid installing that extra from Python 3.6.\r\n\r\nNot sure why the Kokoro jobs aren't failing, though.\n", "before_files": [{"content": "# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 
implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport io\nimport os\n\nimport setuptools\n\n\n# Package metadata.\n\nname = \"google-cloud-bigquery\"\ndescription = \"Google BigQuery API client library\"\n\n# Should be one of:\n# 'Development Status :: 3 - Alpha'\n# 'Development Status :: 4 - Beta'\n# 'Development Status :: 5 - Production/Stable'\nrelease_status = \"Development Status :: 5 - Production/Stable\"\ndependencies = [\n \"google-api-core[grpc] >= 1.23.0, < 2.0.0dev\",\n \"proto-plus >= 1.10.0\",\n \"google-cloud-core >= 1.4.1, < 2.0dev\",\n \"google-resumable-media >= 0.6.0, < 2.0dev\",\n \"packaging >= 14.3\",\n \"protobuf >= 3.12.0\",\n]\nextras = {\n \"bqstorage\": [\n \"google-cloud-bigquery-storage >= 2.0.0, <3.0.0dev\",\n # Due to an issue in pip's dependency resolver, the `grpc` extra is not\n # installed, even though `google-cloud-bigquery-storage` specifies it\n # as `google-api-core[grpc]`. We thus need to explicitly specify it here.\n # See: https://github.com/googleapis/python-bigquery/issues/83 The\n # grpc.Channel.close() method isn't added until 1.32.0.\n # https://github.com/grpc/grpc/pull/15254\n \"grpcio >= 1.32.0, < 2.0dev\",\n \"pyarrow >= 1.0.0, < 4.0dev\",\n ],\n \"pandas\": [\"pandas>=0.23.0\", \"pyarrow >= 1.0.0, < 4.0dev\",],\n \"bignumeric_type\": [\"pyarrow >= 3.0.0, < 4.0dev\"],\n \"tqdm\": [\"tqdm >= 4.7.4, <5.0.0dev\"],\n \"opentelemetry\": [\n \"opentelemetry-api==0.11b0\",\n \"opentelemetry-sdk==0.11b0\",\n \"opentelemetry-instrumentation==0.11b0\",\n ],\n}\n\nall_extras = []\n\nfor extra in extras:\n all_extras.extend(extras[extra])\n\nextras[\"all\"] = all_extras\n\n# Setup boilerplate below this line.\n\npackage_root = os.path.abspath(os.path.dirname(__file__))\n\nreadme_filename = os.path.join(package_root, \"README.rst\")\nwith io.open(readme_filename, encoding=\"utf-8\") as readme_file:\n readme = readme_file.read()\n\nversion = {}\nwith open(os.path.join(package_root, \"google/cloud/bigquery/version.py\")) as fp:\n exec(fp.read(), version)\nversion = version[\"__version__\"]\n\n# Only include packages under the 'google' namespace. 
Do not include tests,\n# benchmarks, etc.\npackages = [\n package\n for package in setuptools.PEP420PackageFinder.find()\n if package.startswith(\"google\")\n]\n\n# Determine which namespaces are needed.\nnamespaces = [\"google\"]\nif \"google.cloud\" in packages:\n namespaces.append(\"google.cloud\")\n\n\nsetuptools.setup(\n name=name,\n version=version,\n description=description,\n long_description=readme,\n author=\"Google LLC\",\n author_email=\"[email protected]\",\n license=\"Apache 2.0\",\n url=\"https://github.com/googleapis/python-bigquery\",\n classifiers=[\n release_status,\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Operating System :: OS Independent\",\n \"Topic :: Internet\",\n ],\n platforms=\"Posix; MacOS X; Windows\",\n packages=packages,\n namespace_packages=namespaces,\n install_requires=dependencies,\n extras_require=extras,\n python_requires=\">=3.6, <3.10\",\n include_package_data=True,\n zip_safe=False,\n)\n", "path": "setup.py"}], "after_files": [{"content": "# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport io\nimport os\n\nimport setuptools\n\n\n# Package metadata.\n\nname = \"google-cloud-bigquery\"\ndescription = \"Google BigQuery API client library\"\n\n# Should be one of:\n# 'Development Status :: 3 - Alpha'\n# 'Development Status :: 4 - Beta'\n# 'Development Status :: 5 - Production/Stable'\nrelease_status = \"Development Status :: 5 - Production/Stable\"\ndependencies = [\n \"google-api-core[grpc] >= 1.23.0, < 2.0.0dev\",\n \"proto-plus >= 1.10.0\",\n \"google-cloud-core >= 1.4.1, < 2.0dev\",\n \"google-resumable-media >= 0.6.0, < 2.0dev\",\n \"packaging >= 14.3\",\n \"protobuf >= 3.12.0\",\n]\nextras = {\n \"bqstorage\": [\n \"google-cloud-bigquery-storage >= 2.0.0, <3.0.0dev\",\n # Due to an issue in pip's dependency resolver, the `grpc` extra is not\n # installed, even though `google-cloud-bigquery-storage` specifies it\n # as `google-api-core[grpc]`. 
We thus need to explicitly specify it here.\n # See: https://github.com/googleapis/python-bigquery/issues/83 The\n # grpc.Channel.close() method isn't added until 1.32.0.\n # https://github.com/grpc/grpc/pull/15254\n \"grpcio >= 1.32.0, < 2.0dev\",\n \"pyarrow >= 1.0.0, < 4.0dev\",\n ],\n \"pandas\": [\"pandas>=0.23.0\", \"pyarrow >= 1.0.0, < 4.0dev\"],\n \"bignumeric_type\": [\"pyarrow >= 3.0.0, < 4.0dev\"],\n \"tqdm\": [\"tqdm >= 4.7.4, <5.0.0dev\"],\n \"opentelemetry\": [\n \"opentelemetry-api==0.11b0\",\n \"opentelemetry-sdk==0.11b0\",\n \"opentelemetry-instrumentation==0.11b0\",\n ],\n}\n\nall_extras = []\n\nfor extra in extras:\n # Exclude this extra from all to avoid overly strict dependencies on core\n # libraries such as pyarrow.\n # https://github.com/googleapis/python-bigquery/issues/563\n if extra in {\"bignumeric_type\"}:\n continue\n all_extras.extend(extras[extra])\n\nextras[\"all\"] = all_extras\n\n# Setup boilerplate below this line.\n\npackage_root = os.path.abspath(os.path.dirname(__file__))\n\nreadme_filename = os.path.join(package_root, \"README.rst\")\nwith io.open(readme_filename, encoding=\"utf-8\") as readme_file:\n readme = readme_file.read()\n\nversion = {}\nwith open(os.path.join(package_root, \"google/cloud/bigquery/version.py\")) as fp:\n exec(fp.read(), version)\nversion = version[\"__version__\"]\n\n# Only include packages under the 'google' namespace. Do not include tests,\n# benchmarks, etc.\npackages = [\n package\n for package in setuptools.PEP420PackageFinder.find()\n if package.startswith(\"google\")\n]\n\n# Determine which namespaces are needed.\nnamespaces = [\"google\"]\nif \"google.cloud\" in packages:\n namespaces.append(\"google.cloud\")\n\n\nsetuptools.setup(\n name=name,\n version=version,\n description=description,\n long_description=readme,\n author=\"Google LLC\",\n author_email=\"[email protected]\",\n license=\"Apache 2.0\",\n url=\"https://github.com/googleapis/python-bigquery\",\n classifiers=[\n release_status,\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Operating System :: OS Independent\",\n \"Topic :: Internet\",\n ],\n platforms=\"Posix; MacOS X; Windows\",\n packages=packages,\n namespace_packages=namespaces,\n install_requires=dependencies,\n extras_require=extras,\n python_requires=\">=3.6, <3.10\",\n include_package_data=True,\n zip_safe=False,\n)\n", "path": "setup.py"}]} | 2,430 | 305 |
gh_patches_debug_2391 | rasdani/github-patches | git_diff | scrapy__scrapy-2929 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
LinkExtractor is not ignoring .m4v extension (video) by default
By chance I found out that LinkExtractor is not ignoring the video extension m4v in the same way it is ignoring other video formats.
https://en.wikipedia.org/wiki/M4V
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `scrapy/linkextractors/__init__.py`
Content:
```
1 """
2 scrapy.linkextractors
3
4 This package contains a collection of Link Extractors.
5
6 For more info see docs/topics/link-extractors.rst
7 """
8 import re
9
10 from six.moves.urllib.parse import urlparse
11 from parsel.csstranslator import HTMLTranslator
12 from w3lib.url import canonicalize_url
13
14 from scrapy.utils.misc import arg_to_iter
15 from scrapy.utils.url import (
16 url_is_from_any_domain, url_has_any_extension,
17 )
18
19
20 # common file extensions that are not followed if they occur in links
21 IGNORED_EXTENSIONS = [
22 # images
23 'mng', 'pct', 'bmp', 'gif', 'jpg', 'jpeg', 'png', 'pst', 'psp', 'tif',
24 'tiff', 'ai', 'drw', 'dxf', 'eps', 'ps', 'svg',
25
26 # audio
27 'mp3', 'wma', 'ogg', 'wav', 'ra', 'aac', 'mid', 'au', 'aiff',
28
29 # video
30 '3gp', 'asf', 'asx', 'avi', 'mov', 'mp4', 'mpg', 'qt', 'rm', 'swf', 'wmv',
31 'm4a',
32
33 # office suites
34 'xls', 'xlsx', 'ppt', 'pptx', 'pps', 'doc', 'docx', 'odt', 'ods', 'odg',
35 'odp',
36
37 # other
38 'css', 'pdf', 'exe', 'bin', 'rss', 'zip', 'rar',
39 ]
40
41
42 _re_type = type(re.compile("", 0))
43 _matches = lambda url, regexs: any(r.search(url) for r in regexs)
44 _is_valid_url = lambda url: url.split('://', 1)[0] in {'http', 'https', 'file'}
45
46
47 class FilteringLinkExtractor(object):
48
49 _csstranslator = HTMLTranslator()
50
51 def __init__(self, link_extractor, allow, deny, allow_domains, deny_domains,
52 restrict_xpaths, canonicalize, deny_extensions, restrict_css):
53
54 self.link_extractor = link_extractor
55
56 self.allow_res = [x if isinstance(x, _re_type) else re.compile(x)
57 for x in arg_to_iter(allow)]
58 self.deny_res = [x if isinstance(x, _re_type) else re.compile(x)
59 for x in arg_to_iter(deny)]
60
61 self.allow_domains = set(arg_to_iter(allow_domains))
62 self.deny_domains = set(arg_to_iter(deny_domains))
63
64 self.restrict_xpaths = tuple(arg_to_iter(restrict_xpaths))
65 self.restrict_xpaths += tuple(map(self._csstranslator.css_to_xpath,
66 arg_to_iter(restrict_css)))
67
68 self.canonicalize = canonicalize
69 if deny_extensions is None:
70 deny_extensions = IGNORED_EXTENSIONS
71 self.deny_extensions = {'.' + e for e in arg_to_iter(deny_extensions)}
72
73 def _link_allowed(self, link):
74 if not _is_valid_url(link.url):
75 return False
76 if self.allow_res and not _matches(link.url, self.allow_res):
77 return False
78 if self.deny_res and _matches(link.url, self.deny_res):
79 return False
80 parsed_url = urlparse(link.url)
81 if self.allow_domains and not url_is_from_any_domain(parsed_url, self.allow_domains):
82 return False
83 if self.deny_domains and url_is_from_any_domain(parsed_url, self.deny_domains):
84 return False
85 if self.deny_extensions and url_has_any_extension(parsed_url, self.deny_extensions):
86 return False
87 return True
88
89 def matches(self, url):
90
91 if self.allow_domains and not url_is_from_any_domain(url, self.allow_domains):
92 return False
93 if self.deny_domains and url_is_from_any_domain(url, self.deny_domains):
94 return False
95
96 allowed = (regex.search(url) for regex in self.allow_res) if self.allow_res else [True]
97 denied = (regex.search(url) for regex in self.deny_res) if self.deny_res else []
98 return any(allowed) and not any(denied)
99
100 def _process_links(self, links):
101 links = [x for x in links if self._link_allowed(x)]
102 if self.canonicalize:
103 for link in links:
104 link.url = canonicalize_url(link.url)
105 links = self.link_extractor._process_links(links)
106 return links
107
108 def _extract_links(self, *args, **kwargs):
109 return self.link_extractor._extract_links(*args, **kwargs)
110
111
112 # Top-level imports
113 from .lxmlhtml import LxmlLinkExtractor as LinkExtractor
114
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/scrapy/linkextractors/__init__.py b/scrapy/linkextractors/__init__.py
--- a/scrapy/linkextractors/__init__.py
+++ b/scrapy/linkextractors/__init__.py
@@ -28,7 +28,7 @@
# video
'3gp', 'asf', 'asx', 'avi', 'mov', 'mp4', 'mpg', 'qt', 'rm', 'swf', 'wmv',
- 'm4a',
+ 'm4a', 'm4v',
# office suites
'xls', 'xlsx', 'ppt', 'pptx', 'pps', 'doc', 'docx', 'odt', 'ods', 'odg',
| {"golden_diff": "diff --git a/scrapy/linkextractors/__init__.py b/scrapy/linkextractors/__init__.py\n--- a/scrapy/linkextractors/__init__.py\n+++ b/scrapy/linkextractors/__init__.py\n@@ -28,7 +28,7 @@\n \n # video\n '3gp', 'asf', 'asx', 'avi', 'mov', 'mp4', 'mpg', 'qt', 'rm', 'swf', 'wmv',\n- 'm4a',\n+ 'm4a', 'm4v',\n \n # office suites\n 'xls', 'xlsx', 'ppt', 'pptx', 'pps', 'doc', 'docx', 'odt', 'ods', 'odg',\n", "issue": "LinkExtractor is not ignoring .m4v extension (video) by default\nBy chance I found out that LinkExtractor is not ignoring the video extension m4v in the same way it is ignoring other video formats.\r\n\r\nhttps://en.wikipedia.org/wiki/M4V\r\n\n", "before_files": [{"content": "\"\"\"\nscrapy.linkextractors\n\nThis package contains a collection of Link Extractors.\n\nFor more info see docs/topics/link-extractors.rst\n\"\"\"\nimport re\n\nfrom six.moves.urllib.parse import urlparse\nfrom parsel.csstranslator import HTMLTranslator\nfrom w3lib.url import canonicalize_url\n\nfrom scrapy.utils.misc import arg_to_iter\nfrom scrapy.utils.url import (\n url_is_from_any_domain, url_has_any_extension,\n)\n\n\n# common file extensions that are not followed if they occur in links\nIGNORED_EXTENSIONS = [\n # images\n 'mng', 'pct', 'bmp', 'gif', 'jpg', 'jpeg', 'png', 'pst', 'psp', 'tif',\n 'tiff', 'ai', 'drw', 'dxf', 'eps', 'ps', 'svg',\n\n # audio\n 'mp3', 'wma', 'ogg', 'wav', 'ra', 'aac', 'mid', 'au', 'aiff',\n\n # video\n '3gp', 'asf', 'asx', 'avi', 'mov', 'mp4', 'mpg', 'qt', 'rm', 'swf', 'wmv',\n 'm4a',\n\n # office suites\n 'xls', 'xlsx', 'ppt', 'pptx', 'pps', 'doc', 'docx', 'odt', 'ods', 'odg',\n 'odp',\n\n # other\n 'css', 'pdf', 'exe', 'bin', 'rss', 'zip', 'rar',\n]\n\n\n_re_type = type(re.compile(\"\", 0))\n_matches = lambda url, regexs: any(r.search(url) for r in regexs)\n_is_valid_url = lambda url: url.split('://', 1)[0] in {'http', 'https', 'file'}\n\n\nclass FilteringLinkExtractor(object):\n\n _csstranslator = HTMLTranslator()\n\n def __init__(self, link_extractor, allow, deny, allow_domains, deny_domains,\n restrict_xpaths, canonicalize, deny_extensions, restrict_css):\n\n self.link_extractor = link_extractor\n\n self.allow_res = [x if isinstance(x, _re_type) else re.compile(x)\n for x in arg_to_iter(allow)]\n self.deny_res = [x if isinstance(x, _re_type) else re.compile(x)\n for x in arg_to_iter(deny)]\n\n self.allow_domains = set(arg_to_iter(allow_domains))\n self.deny_domains = set(arg_to_iter(deny_domains))\n\n self.restrict_xpaths = tuple(arg_to_iter(restrict_xpaths))\n self.restrict_xpaths += tuple(map(self._csstranslator.css_to_xpath,\n arg_to_iter(restrict_css)))\n\n self.canonicalize = canonicalize\n if deny_extensions is None:\n deny_extensions = IGNORED_EXTENSIONS\n self.deny_extensions = {'.' 
+ e for e in arg_to_iter(deny_extensions)}\n\n def _link_allowed(self, link):\n if not _is_valid_url(link.url):\n return False\n if self.allow_res and not _matches(link.url, self.allow_res):\n return False\n if self.deny_res and _matches(link.url, self.deny_res):\n return False\n parsed_url = urlparse(link.url)\n if self.allow_domains and not url_is_from_any_domain(parsed_url, self.allow_domains):\n return False\n if self.deny_domains and url_is_from_any_domain(parsed_url, self.deny_domains):\n return False\n if self.deny_extensions and url_has_any_extension(parsed_url, self.deny_extensions):\n return False\n return True\n\n def matches(self, url):\n\n if self.allow_domains and not url_is_from_any_domain(url, self.allow_domains):\n return False\n if self.deny_domains and url_is_from_any_domain(url, self.deny_domains):\n return False\n\n allowed = (regex.search(url) for regex in self.allow_res) if self.allow_res else [True]\n denied = (regex.search(url) for regex in self.deny_res) if self.deny_res else []\n return any(allowed) and not any(denied)\n\n def _process_links(self, links):\n links = [x for x in links if self._link_allowed(x)]\n if self.canonicalize:\n for link in links:\n link.url = canonicalize_url(link.url)\n links = self.link_extractor._process_links(links)\n return links\n\n def _extract_links(self, *args, **kwargs):\n return self.link_extractor._extract_links(*args, **kwargs)\n\n\n# Top-level imports\nfrom .lxmlhtml import LxmlLinkExtractor as LinkExtractor\n", "path": "scrapy/linkextractors/__init__.py"}], "after_files": [{"content": "\"\"\"\nscrapy.linkextractors\n\nThis package contains a collection of Link Extractors.\n\nFor more info see docs/topics/link-extractors.rst\n\"\"\"\nimport re\n\nfrom six.moves.urllib.parse import urlparse\nfrom parsel.csstranslator import HTMLTranslator\nfrom w3lib.url import canonicalize_url\n\nfrom scrapy.utils.misc import arg_to_iter\nfrom scrapy.utils.url import (\n url_is_from_any_domain, url_has_any_extension,\n)\n\n\n# common file extensions that are not followed if they occur in links\nIGNORED_EXTENSIONS = [\n # images\n 'mng', 'pct', 'bmp', 'gif', 'jpg', 'jpeg', 'png', 'pst', 'psp', 'tif',\n 'tiff', 'ai', 'drw', 'dxf', 'eps', 'ps', 'svg',\n\n # audio\n 'mp3', 'wma', 'ogg', 'wav', 'ra', 'aac', 'mid', 'au', 'aiff',\n\n # video\n '3gp', 'asf', 'asx', 'avi', 'mov', 'mp4', 'mpg', 'qt', 'rm', 'swf', 'wmv',\n 'm4a', 'm4v',\n\n # office suites\n 'xls', 'xlsx', 'ppt', 'pptx', 'pps', 'doc', 'docx', 'odt', 'ods', 'odg',\n 'odp',\n\n # other\n 'css', 'pdf', 'exe', 'bin', 'rss', 'zip', 'rar',\n]\n\n\n_re_type = type(re.compile(\"\", 0))\n_matches = lambda url, regexs: any(r.search(url) for r in regexs)\n_is_valid_url = lambda url: url.split('://', 1)[0] in {'http', 'https', 'file'}\n\n\nclass FilteringLinkExtractor(object):\n\n _csstranslator = HTMLTranslator()\n\n def __init__(self, link_extractor, allow, deny, allow_domains, deny_domains,\n restrict_xpaths, canonicalize, deny_extensions, restrict_css):\n\n self.link_extractor = link_extractor\n\n self.allow_res = [x if isinstance(x, _re_type) else re.compile(x)\n for x in arg_to_iter(allow)]\n self.deny_res = [x if isinstance(x, _re_type) else re.compile(x)\n for x in arg_to_iter(deny)]\n\n self.allow_domains = set(arg_to_iter(allow_domains))\n self.deny_domains = set(arg_to_iter(deny_domains))\n\n self.restrict_xpaths = tuple(arg_to_iter(restrict_xpaths))\n self.restrict_xpaths += tuple(map(self._csstranslator.css_to_xpath,\n arg_to_iter(restrict_css)))\n\n self.canonicalize = canonicalize\n 
if deny_extensions is None:\n deny_extensions = IGNORED_EXTENSIONS\n self.deny_extensions = {'.' + e for e in arg_to_iter(deny_extensions)}\n\n def _link_allowed(self, link):\n if not _is_valid_url(link.url):\n return False\n if self.allow_res and not _matches(link.url, self.allow_res):\n return False\n if self.deny_res and _matches(link.url, self.deny_res):\n return False\n parsed_url = urlparse(link.url)\n if self.allow_domains and not url_is_from_any_domain(parsed_url, self.allow_domains):\n return False\n if self.deny_domains and url_is_from_any_domain(parsed_url, self.deny_domains):\n return False\n if self.deny_extensions and url_has_any_extension(parsed_url, self.deny_extensions):\n return False\n return True\n\n def matches(self, url):\n\n if self.allow_domains and not url_is_from_any_domain(url, self.allow_domains):\n return False\n if self.deny_domains and url_is_from_any_domain(url, self.deny_domains):\n return False\n\n allowed = (regex.search(url) for regex in self.allow_res) if self.allow_res else [True]\n denied = (regex.search(url) for regex in self.deny_res) if self.deny_res else []\n return any(allowed) and not any(denied)\n\n def _process_links(self, links):\n links = [x for x in links if self._link_allowed(x)]\n if self.canonicalize:\n for link in links:\n link.url = canonicalize_url(link.url)\n links = self.link_extractor._process_links(links)\n return links\n\n def _extract_links(self, *args, **kwargs):\n return self.link_extractor._extract_links(*args, **kwargs)\n\n\n# Top-level imports\nfrom .lxmlhtml import LxmlLinkExtractor as LinkExtractor\n", "path": "scrapy/linkextractors/__init__.py"}]} | 1,570 | 166 |
gh_patches_debug_2150 | rasdani/github-patches | git_diff | sql-machine-learning__elasticdl-532 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Fix logging issue
Currently, the logging is not working as intended. E.g. the formatting was ignored for WARNING and INFO log are not printed out.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `elasticdl/python/elasticdl/master/main.py`
Content:
```
1 import logging
2 import time
3 import argparse
4 import os
5 import recordio
6
7 import grpc
8 import tensorflow as tf
9
10 tf.enable_eager_execution()
11
12 from contextlib import closing
13 from concurrent import futures
14 from elasticdl.proto import elasticdl_pb2_grpc
15 from elasticdl.python.elasticdl.master.servicer import MasterServicer
16 from elasticdl.python.elasticdl.master.task_queue import _TaskQueue
17 from elasticdl.python.elasticdl.master.k8s_worker_manager import WorkerManager
18 from elasticdl.python.elasticdl.common.model_helper import load_user_model, build_model
19
20
21 def _make_task_queue(data_dir, record_per_task, num_epoch):
22 f_records = {}
23 for f in os.listdir(data_dir):
24 p = os.path.join(data_dir, f)
25 with closing(recordio.Index(p)) as rio:
26 f_records[p] = rio.num_records()
27 return _TaskQueue(f_records, record_per_task, num_epoch)
28
29
30 def _parse_args():
31 parser = argparse.ArgumentParser(description="ElasticDL Master")
32 parser.add_argument(
33 "--model_file",
34 help="Full file path of user defined neural model",
35 required=True,
36 )
37 parser.add_argument(
38 "--train_data_dir",
39 help="Training data directory. Files should be in RecordIO format",
40 required=True,
41 )
42 parser.add_argument("--record_per_task", type=int, required=True)
43 parser.add_argument("--num_epoch", type=int, required=True)
44 parser.add_argument(
45 "--grads_to_wait",
46 type=int,
47 help="Number of gradients to wait before updating model",
48 required=True,
49 )
50 parser.add_argument(
51 "--minibatch_size",
52 type=int,
53 help="Minibatch size used by workers to compute gradients",
54 required=True,
55 )
56 parser.add_argument(
57 "--num_worker",
58 type=int,
59 help="the number of workers used in training",
60 default=0,
61 )
62 parser.add_argument(
63 "--worker_cpu_request",
64 help="the minimal cpu required by worker in training",
65 default="1000m",
66 )
67 parser.add_argument(
68 "--worker_cpu_limit",
69 help="the maximal cpu used by worker in training",
70 default="1000m",
71 )
72 parser.add_argument(
73 "--worker_memory_request",
74 help="the minimal memory required by worker in training",
75 default="4096Mi",
76 )
77 parser.add_argument(
78 "--worker_memory_limit",
79 help="the maximal memory used by worker in training",
80 default="4096Mi",
81 )
82 parser.add_argument(
83 "--worker_pod_priority",
84 help="the requested priority of worker pod")
85 parser.add_argument(
86 "--worker_image", help="docker image for worker", default=None
87 )
88 parser.add_argument("--job_name", help="job name", required=True)
89 parser.add_argument(
90 "--codec_type",
91 default="bytes",
92 choices=["tf_example", "bytes"],
93 help="Type of codec(tf_example or bytes)",
94 )
95 parser.add_argument("--volume_name",
96 help="the volume name of network filesytem")
97 parser.add_argument("--mount_path",
98 help="the mount path in the docker container")
99 parser.add_argument(
100 "--log_level",
101 choices=['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'],
102 type=str.upper,
103 default='WARNING',
104 help="the logging level",
105 )
106 parser.add_argument("--image_pull_policy",
107 help="the image pull policy of master and worker")
108 return parser.parse_args()
109
110
111 def main():
112 args = _parse_args()
113
114 # TODO: pass port via flags.
115 PORT = 50001
116
117 # Initialize logger
118 logging.basicConfig(
119 format='%(asctime)s %(name)s %(levelname)-8s '
120 '[%(filename)s:%(lineno)d] %(message)s',
121 )
122 # Set level for ROOT logger.
123 logging.getLogger().setLevel(args.log_level)
124 logger = logging.getLogger(__name__)
125
126 task_q = _make_task_queue(
127 args.train_data_dir, args.record_per_task, args.num_epoch
128 )
129 model_module = load_user_model(args.model_file)
130 model_inst = model_module.model
131 build_model(model_inst, model_module.feature_columns())
132 optimizer = model_module.optimizer()
133
134 server = grpc.server(futures.ThreadPoolExecutor(max_workers=64))
135 elasticdl_pb2_grpc.add_MasterServicer_to_server(
136 MasterServicer(
137 args.grads_to_wait,
138 args.minibatch_size,
139 optimizer,
140 task_q,
141 init_var=model_inst.trainable_variables,
142 ),
143 server,
144 )
145 server.add_insecure_port("[::]:{}".format(PORT))
146 server.start()
147 logger.info("Server started at port: %d", PORT)
148
149 if args.num_worker:
150 master_addr = "%s:%d" % (os.getenv("MY_POD_IP", "localhost"), PORT)
151 worker_command = ["python"]
152 worker_args = [
153 "-m",
154 "elasticdl.python.elasticdl.worker.main",
155 "--model_file",
156 args.model_file,
157 "--master_addr",
158 master_addr,
159 "--codec_type",
160 args.codec_type
161 ]
162
163 worker_manager = WorkerManager(
164 task_q,
165 job_name=args.job_name,
166 worker_image=args.worker_image,
167 command=worker_command,
168 args=worker_args,
169 namespace="default",
170 num_worker=args.num_worker,
171 cpu_request=args.worker_cpu_request,
172 cpu_limit=args.worker_cpu_limit,
173 memory_request=args.worker_memory_request,
174 memory_limit=args.worker_memory_limit,
175 pod_priority=args.worker_pod_priority,
176 mount_path=args.mount_path,
177 volume_name=args.volume_name,
178 image_pull_policy=args.image_pull_policy,
179 restart_policy="Never",
180 )
181 worker_manager.start_workers()
182
183 try:
184 while True:
185 if task_q.finished():
186 break
187 time.sleep(30)
188 except KeyboardInterrupt:
189 logger.warning("Server stopping")
190
191 server.stop(0)
192
193
194 if __name__ == "__main__":
195 main()
196
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/elasticdl/python/elasticdl/master/main.py b/elasticdl/python/elasticdl/master/main.py
--- a/elasticdl/python/elasticdl/master/main.py
+++ b/elasticdl/python/elasticdl/master/main.py
@@ -157,7 +157,9 @@
"--master_addr",
master_addr,
"--codec_type",
- args.codec_type
+ args.codec_type,
+ "--log_level",
+ args.log_level
]
worker_manager = WorkerManager(
| {"golden_diff": "diff --git a/elasticdl/python/elasticdl/master/main.py b/elasticdl/python/elasticdl/master/main.py\n--- a/elasticdl/python/elasticdl/master/main.py\n+++ b/elasticdl/python/elasticdl/master/main.py\n@@ -157,7 +157,9 @@\n \"--master_addr\",\n master_addr,\n \"--codec_type\",\n- args.codec_type\n+ args.codec_type,\n+ \"--log_level\",\n+ args.log_level\n ]\n \n worker_manager = WorkerManager(\n", "issue": "Fix logging issue\nCurrently, the logging is not working as intended. E.g. the formatting was ignored for WARNING and INFO log are not printed out.\n", "before_files": [{"content": "import logging\nimport time\nimport argparse\nimport os\nimport recordio\n\nimport grpc\nimport tensorflow as tf\n\ntf.enable_eager_execution()\n\nfrom contextlib import closing\nfrom concurrent import futures\nfrom elasticdl.proto import elasticdl_pb2_grpc\nfrom elasticdl.python.elasticdl.master.servicer import MasterServicer\nfrom elasticdl.python.elasticdl.master.task_queue import _TaskQueue\nfrom elasticdl.python.elasticdl.master.k8s_worker_manager import WorkerManager\nfrom elasticdl.python.elasticdl.common.model_helper import load_user_model, build_model\n\n\ndef _make_task_queue(data_dir, record_per_task, num_epoch):\n f_records = {}\n for f in os.listdir(data_dir):\n p = os.path.join(data_dir, f)\n with closing(recordio.Index(p)) as rio:\n f_records[p] = rio.num_records()\n return _TaskQueue(f_records, record_per_task, num_epoch)\n\n\ndef _parse_args():\n parser = argparse.ArgumentParser(description=\"ElasticDL Master\")\n parser.add_argument(\n \"--model_file\",\n help=\"Full file path of user defined neural model\",\n required=True,\n )\n parser.add_argument(\n \"--train_data_dir\",\n help=\"Training data directory. Files should be in RecordIO format\",\n required=True,\n )\n parser.add_argument(\"--record_per_task\", type=int, required=True)\n parser.add_argument(\"--num_epoch\", type=int, required=True)\n parser.add_argument(\n \"--grads_to_wait\",\n type=int,\n help=\"Number of gradients to wait before updating model\",\n required=True,\n )\n parser.add_argument(\n \"--minibatch_size\",\n type=int,\n help=\"Minibatch size used by workers to compute gradients\",\n required=True,\n )\n parser.add_argument(\n \"--num_worker\",\n type=int,\n help=\"the number of workers used in training\",\n default=0,\n )\n parser.add_argument(\n \"--worker_cpu_request\",\n help=\"the minimal cpu required by worker in training\",\n default=\"1000m\",\n )\n parser.add_argument(\n \"--worker_cpu_limit\",\n help=\"the maximal cpu used by worker in training\",\n default=\"1000m\",\n )\n parser.add_argument(\n \"--worker_memory_request\",\n help=\"the minimal memory required by worker in training\",\n default=\"4096Mi\",\n )\n parser.add_argument(\n \"--worker_memory_limit\",\n help=\"the maximal memory used by worker in training\",\n default=\"4096Mi\",\n )\n parser.add_argument(\n \"--worker_pod_priority\",\n help=\"the requested priority of worker pod\")\n parser.add_argument(\n \"--worker_image\", help=\"docker image for worker\", default=None\n )\n parser.add_argument(\"--job_name\", help=\"job name\", required=True)\n parser.add_argument(\n \"--codec_type\",\n default=\"bytes\",\n choices=[\"tf_example\", \"bytes\"],\n help=\"Type of codec(tf_example or bytes)\",\n )\n parser.add_argument(\"--volume_name\",\n help=\"the volume name of network filesytem\")\n parser.add_argument(\"--mount_path\",\n help=\"the mount path in the docker container\")\n parser.add_argument(\n \"--log_level\",\n 
choices=['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'],\n type=str.upper,\n default='WARNING',\n help=\"the logging level\",\n )\n parser.add_argument(\"--image_pull_policy\",\n help=\"the image pull policy of master and worker\")\n return parser.parse_args()\n\n\ndef main():\n args = _parse_args()\n\n # TODO: pass port via flags.\n PORT = 50001\n\n # Initialize logger\n logging.basicConfig(\n format='%(asctime)s %(name)s %(levelname)-8s '\n '[%(filename)s:%(lineno)d] %(message)s',\n )\n # Set level for ROOT logger.\n logging.getLogger().setLevel(args.log_level)\n logger = logging.getLogger(__name__)\n\n task_q = _make_task_queue(\n args.train_data_dir, args.record_per_task, args.num_epoch\n )\n model_module = load_user_model(args.model_file)\n model_inst = model_module.model\n build_model(model_inst, model_module.feature_columns())\n optimizer = model_module.optimizer()\n\n server = grpc.server(futures.ThreadPoolExecutor(max_workers=64))\n elasticdl_pb2_grpc.add_MasterServicer_to_server(\n MasterServicer(\n args.grads_to_wait,\n args.minibatch_size,\n optimizer,\n task_q,\n init_var=model_inst.trainable_variables,\n ),\n server,\n )\n server.add_insecure_port(\"[::]:{}\".format(PORT))\n server.start()\n logger.info(\"Server started at port: %d\", PORT)\n\n if args.num_worker:\n master_addr = \"%s:%d\" % (os.getenv(\"MY_POD_IP\", \"localhost\"), PORT)\n worker_command = [\"python\"]\n worker_args = [\n \"-m\",\n \"elasticdl.python.elasticdl.worker.main\",\n \"--model_file\",\n args.model_file,\n \"--master_addr\",\n master_addr,\n \"--codec_type\",\n args.codec_type\n ]\n\n worker_manager = WorkerManager(\n task_q,\n job_name=args.job_name,\n worker_image=args.worker_image,\n command=worker_command,\n args=worker_args,\n namespace=\"default\",\n num_worker=args.num_worker,\n cpu_request=args.worker_cpu_request,\n cpu_limit=args.worker_cpu_limit,\n memory_request=args.worker_memory_request,\n memory_limit=args.worker_memory_limit,\n pod_priority=args.worker_pod_priority,\n mount_path=args.mount_path,\n volume_name=args.volume_name,\n image_pull_policy=args.image_pull_policy,\n restart_policy=\"Never\",\n )\n worker_manager.start_workers()\n\n try:\n while True:\n if task_q.finished():\n break\n time.sleep(30)\n except KeyboardInterrupt:\n logger.warning(\"Server stopping\")\n\n server.stop(0)\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "elasticdl/python/elasticdl/master/main.py"}], "after_files": [{"content": "import logging\nimport time\nimport argparse\nimport os\nimport recordio\n\nimport grpc\nimport tensorflow as tf\n\ntf.enable_eager_execution()\n\nfrom contextlib import closing\nfrom concurrent import futures\nfrom elasticdl.proto import elasticdl_pb2_grpc\nfrom elasticdl.python.elasticdl.master.servicer import MasterServicer\nfrom elasticdl.python.elasticdl.master.task_queue import _TaskQueue\nfrom elasticdl.python.elasticdl.master.k8s_worker_manager import WorkerManager\nfrom elasticdl.python.elasticdl.common.model_helper import load_user_model, build_model\n\n\ndef _make_task_queue(data_dir, record_per_task, num_epoch):\n f_records = {}\n for f in os.listdir(data_dir):\n p = os.path.join(data_dir, f)\n with closing(recordio.Index(p)) as rio:\n f_records[p] = rio.num_records()\n return _TaskQueue(f_records, record_per_task, num_epoch)\n\n\ndef _parse_args():\n parser = argparse.ArgumentParser(description=\"ElasticDL Master\")\n parser.add_argument(\n \"--model_file\",\n help=\"Full file path of user defined neural model\",\n required=True,\n )\n parser.add_argument(\n 
\"--train_data_dir\",\n help=\"Training data directory. Files should be in RecordIO format\",\n required=True,\n )\n parser.add_argument(\"--record_per_task\", type=int, required=True)\n parser.add_argument(\"--num_epoch\", type=int, required=True)\n parser.add_argument(\n \"--grads_to_wait\",\n type=int,\n help=\"Number of gradients to wait before updating model\",\n required=True,\n )\n parser.add_argument(\n \"--minibatch_size\",\n type=int,\n help=\"Minibatch size used by workers to compute gradients\",\n required=True,\n )\n parser.add_argument(\n \"--num_worker\",\n type=int,\n help=\"the number of workers used in training\",\n default=0,\n )\n parser.add_argument(\n \"--worker_cpu_request\",\n help=\"the minimal cpu required by worker in training\",\n default=\"1000m\",\n )\n parser.add_argument(\n \"--worker_cpu_limit\",\n help=\"the maximal cpu used by worker in training\",\n default=\"1000m\",\n )\n parser.add_argument(\n \"--worker_memory_request\",\n help=\"the minimal memory required by worker in training\",\n default=\"4096Mi\",\n )\n parser.add_argument(\n \"--worker_memory_limit\",\n help=\"the maximal memory used by worker in training\",\n default=\"4096Mi\",\n )\n parser.add_argument(\n \"--worker_pod_priority\",\n help=\"the requested priority of worker pod\")\n parser.add_argument(\n \"--worker_image\", help=\"docker image for worker\", default=None\n )\n parser.add_argument(\"--job_name\", help=\"job name\", required=True)\n parser.add_argument(\n \"--codec_type\",\n default=\"bytes\",\n choices=[\"tf_example\", \"bytes\"],\n help=\"Type of codec(tf_example or bytes)\",\n )\n parser.add_argument(\"--volume_name\",\n help=\"the volume name of network filesytem\")\n parser.add_argument(\"--mount_path\",\n help=\"the mount path in the docker container\")\n parser.add_argument(\n \"--log_level\",\n choices=['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'],\n type=str.upper,\n default='WARNING',\n help=\"the logging level\",\n )\n parser.add_argument(\"--image_pull_policy\",\n help=\"the image pull policy of master and worker\")\n return parser.parse_args()\n\n\ndef main():\n args = _parse_args()\n\n # TODO: pass port via flags.\n PORT = 50001\n\n # Initialize logger\n logging.basicConfig(\n format='%(asctime)s %(name)s %(levelname)-8s '\n '[%(filename)s:%(lineno)d] %(message)s',\n )\n # Set level for ROOT logger.\n logging.getLogger().setLevel(args.log_level)\n logger = logging.getLogger(__name__)\n\n task_q = _make_task_queue(\n args.train_data_dir, args.record_per_task, args.num_epoch\n )\n model_module = load_user_model(args.model_file)\n model_inst = model_module.model\n build_model(model_inst, model_module.feature_columns())\n optimizer = model_module.optimizer()\n\n server = grpc.server(futures.ThreadPoolExecutor(max_workers=64))\n elasticdl_pb2_grpc.add_MasterServicer_to_server(\n MasterServicer(\n args.grads_to_wait,\n args.minibatch_size,\n optimizer,\n task_q,\n init_var=model_inst.trainable_variables,\n ),\n server,\n )\n server.add_insecure_port(\"[::]:{}\".format(PORT))\n server.start()\n logger.info(\"Server started at port: %d\", PORT)\n\n if args.num_worker:\n master_addr = \"%s:%d\" % (os.getenv(\"MY_POD_IP\", \"localhost\"), PORT)\n worker_command = [\"python\"]\n worker_args = [\n \"-m\",\n \"elasticdl.python.elasticdl.worker.main\",\n \"--model_file\",\n args.model_file,\n \"--master_addr\",\n master_addr,\n \"--codec_type\",\n args.codec_type,\n \"--log_level\",\n args.log_level\n ]\n\n worker_manager = WorkerManager(\n task_q,\n 
job_name=args.job_name,\n worker_image=args.worker_image,\n command=worker_command,\n args=worker_args,\n namespace=\"default\",\n num_worker=args.num_worker,\n cpu_request=args.worker_cpu_request,\n cpu_limit=args.worker_cpu_limit,\n memory_request=args.worker_memory_request,\n memory_limit=args.worker_memory_limit,\n pod_priority=args.worker_pod_priority,\n mount_path=args.mount_path,\n volume_name=args.volume_name,\n image_pull_policy=args.image_pull_policy,\n restart_policy=\"Never\",\n )\n worker_manager.start_workers()\n\n try:\n while True:\n if task_q.finished():\n break\n time.sleep(30)\n except KeyboardInterrupt:\n logger.warning(\"Server stopping\")\n\n server.stop(0)\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "elasticdl/python/elasticdl/master/main.py"}]} | 2,066 | 114 |
gh_patches_debug_31844 | rasdani/github-patches | git_diff | elastic__apm-agent-python-1664 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[META 689] Ignore *principal* headers by default
See meta issue for the description and details:
- Meta issue: https://github.com/elastic/apm/issues/689
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `elasticapm/conf/constants.py`
Content:
```
1 # BSD 3-Clause License
2 #
3 # Copyright (c) 2019, Elasticsearch BV
4 # All rights reserved.
5 #
6 # Redistribution and use in source and binary forms, with or without
7 # modification, are permitted provided that the following conditions are met:
8 #
9 # * Redistributions of source code must retain the above copyright notice, this
10 # list of conditions and the following disclaimer.
11 #
12 # * Redistributions in binary form must reproduce the above copyright notice,
13 # this list of conditions and the following disclaimer in the documentation
14 # and/or other materials provided with the distribution.
15 #
16 # * Neither the name of the copyright holder nor the names of its
17 # contributors may be used to endorse or promote products derived from
18 # this software without specific prior written permission.
19 #
20 # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
23 # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
24 # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
26 # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
27 # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
28 # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
29 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30
31 import decimal
32 import re
33 from collections import namedtuple
34
35
36 def _starmatch_to_regex(pattern):
37 """
38 This is a duplicate of starmatch_to_regex() in utils/__init__.py
39
40 Duplication to avoid circular imports
41 """
42 options = re.DOTALL
43 # check if we are case sensitive
44 if pattern.startswith("(?-i)"):
45 pattern = pattern[5:]
46 else:
47 options |= re.IGNORECASE
48 i, n = 0, len(pattern)
49 res = []
50 while i < n:
51 c = pattern[i]
52 i = i + 1
53 if c == "*":
54 res.append(".*")
55 else:
56 res.append(re.escape(c))
57 return re.compile(r"(?:%s)\Z" % "".join(res), options)
58
59
60 EVENTS_API_PATH = "intake/v2/events"
61 AGENT_CONFIG_PATH = "config/v1/agents"
62 SERVER_INFO_PATH = ""
63
64 TRACE_CONTEXT_VERSION = 0
65 TRACEPARENT_HEADER_NAME = "traceparent"
66 TRACEPARENT_LEGACY_HEADER_NAME = "elastic-apm-traceparent"
67 TRACEPARENT_BINARY_HEADER_NAME = "elasticapmtraceparent"
68 TRACESTATE_HEADER_NAME = "tracestate"
69
70 TIMESTAMP_FORMAT = "%Y-%m-%dT%H:%M:%S.%fZ"
71
72 KEYWORD_MAX_LENGTH = 1024
73 LONG_FIELD_MAX_LENGTH = 10000
74
75 HTTP_WITH_BODY = {"POST", "PUT", "PATCH", "DELETE"}
76
77 MASK = "[REDACTED]"
78
79 EXCEPTION_CHAIN_MAX_DEPTH = 50
80
81 ERROR = "error"
82 TRANSACTION = "transaction"
83 SPAN = "span"
84 METRICSET = "metricset"
85
86 LABEL_RE = re.compile('[.*"]')
87
88 HARDCODED_PROCESSORS = ["elasticapm.processors.add_context_lines_to_frames"]
89
90 BASE_SANITIZE_FIELD_NAMES_UNPROCESSED = [
91 "password",
92 "passwd",
93 "pwd",
94 "secret",
95 "*key",
96 "*token*",
97 "*session*",
98 "*credit*",
99 "*card*",
100 "*auth*",
101 "set-cookie",
102 ]
103
104 BASE_SANITIZE_FIELD_NAMES = [_starmatch_to_regex(x) for x in BASE_SANITIZE_FIELD_NAMES_UNPROCESSED]
105
106 OUTCOME = namedtuple("OUTCOME", ["SUCCESS", "FAILURE", "UNKNOWN"])(
107 SUCCESS="success", FAILURE="failure", UNKNOWN="unknown"
108 )
109
110 try:
111 # Python 2
112 LABEL_TYPES = (bool, int, long, float, decimal.Decimal)
113 except NameError:
114 # Python 3
115 LABEL_TYPES = (bool, int, float, decimal.Decimal)
116
117 TRACESTATE = namedtuple("TRACESTATE", ["SAMPLE_RATE"])(SAMPLE_RATE="s")
118 TRACE_CONTINUATION_STRATEGY = namedtuple("TRACE_CONTINUATION_STRATEGY", ["CONTINUE", "RESTART", "RESTART_EXTERNAL"])(
119 CONTINUE="continue",
120 RESTART="restart",
121 RESTART_EXTERNAL="restart_external",
122 )
123
```
Path: `elasticapm/utils/__init__.py`
Content:
```
1 # BSD 3-Clause License
2 #
3 # Copyright (c) 2012, the Sentry Team, see AUTHORS for more details
4 # Copyright (c) 2019, Elasticsearch BV
5 # All rights reserved.
6 #
7 # Redistribution and use in source and binary forms, with or without
8 # modification, are permitted provided that the following conditions are met:
9 #
10 # * Redistributions of source code must retain the above copyright notice, this
11 # list of conditions and the following disclaimer.
12 #
13 # * Redistributions in binary form must reproduce the above copyright notice,
14 # this list of conditions and the following disclaimer in the documentation
15 # and/or other materials provided with the distribution.
16 #
17 # * Neither the name of the copyright holder nor the names of its
18 # contributors may be used to endorse or promote products derived from
19 # this software without specific prior written permission.
20 #
21 # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
22 # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
24 # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
25 # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
26 # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
27 # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
28 # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
29 # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
30
31 import base64
32 import os
33 import re
34 import urllib.parse
35 from functools import partial
36 from types import FunctionType
37 from typing import Pattern
38
39 from elasticapm.conf import constants
40 from elasticapm.utils import encoding
41
42 try:
43 from functools import partialmethod
44
45 partial_types = (partial, partialmethod)
46 except ImportError:
47 # Python 2
48 partial_types = (partial,)
49
50
51 default_ports = {"https": 443, "http": 80, "postgresql": 5432, "mysql": 3306, "mssql": 1433}
52
53
54 def varmap(func, var, context=None, name=None, **kwargs):
55 """
56 Executes ``func(key_name, value)`` on all values,
57 recursively discovering dict and list scoped
58 values.
59 """
60 if context is None:
61 context = set()
62 objid = id(var)
63 if objid in context:
64 return func(name, "<...>", **kwargs)
65 context.add(objid)
66
67 # Apply func() before recursion, so that `shorten()` doesn't have to iterate over all the trimmed values
68 ret = func(name, var, **kwargs)
69 if isinstance(ret, dict):
70 # iterate over a copy of the dictionary to avoid "dictionary changed size during iteration" issues
71 ret = dict((k, varmap(func, v, context, k, **kwargs)) for k, v in ret.copy().items())
72 elif isinstance(ret, (list, tuple)):
73 # Apply func() before recursion, so that `shorten()` doesn't have to iterate over all the trimmed values
74 ret = [varmap(func, f, context, name, **kwargs) for f in ret]
75 context.remove(objid)
76 return ret
77
78
79 def get_name_from_func(func: FunctionType) -> str:
80 # partials don't have `__module__` or `__name__`, so we use the values from the "inner" function
81 if isinstance(func, partial_types):
82 return "partial({})".format(get_name_from_func(func.func))
83 elif hasattr(func, "_partialmethod") and hasattr(func._partialmethod, "func"):
84 return "partial({})".format(get_name_from_func(func._partialmethod.func))
85
86 module = func.__module__
87
88 if hasattr(func, "view_class"):
89 view_name = func.view_class.__name__
90 elif hasattr(func, "__name__"):
91 view_name = func.__name__
92 else: # Fall back if there's no __name__
93 view_name = func.__class__.__name__
94
95 return "{0}.{1}".format(module, view_name)
96
97
98 def build_name_with_http_method_prefix(name, request):
99 return " ".join((request.method, name)) if name else name
100
101
102 def is_master_process() -> bool:
103 # currently only recognizes uwsgi master process
104 try:
105 import uwsgi
106
107 return os.getpid() == uwsgi.masterpid()
108 except ImportError:
109 return False
110
111
112 def get_url_dict(url: str) -> dict:
113 parse_result = urllib.parse.urlparse(url)
114
115 url_dict = {
116 "full": encoding.keyword_field(url),
117 "protocol": parse_result.scheme + ":",
118 "hostname": encoding.keyword_field(parse_result.hostname),
119 "pathname": encoding.keyword_field(parse_result.path),
120 }
121
122 port = None if parse_result.port is None else str(parse_result.port)
123
124 if port:
125 url_dict["port"] = port
126 if parse_result.query:
127 url_dict["search"] = encoding.keyword_field("?" + parse_result.query)
128 return url_dict
129
130
131 def sanitize_url(url: str) -> str:
132 if "@" not in url:
133 return url
134 parts = urllib.parse.urlparse(url)
135 return url.replace("%s:%s" % (parts.username, parts.password), "%s:%s" % (parts.username, constants.MASK))
136
137
138 def get_host_from_url(url: str) -> str:
139 parsed_url = urllib.parse.urlparse(url)
140 host = parsed_url.hostname or " "
141
142 if parsed_url.port and default_ports.get(parsed_url.scheme) != parsed_url.port:
143 host += ":" + str(parsed_url.port)
144
145 return host
146
147
148 def url_to_destination_resource(url: str) -> str:
149 parts = urllib.parse.urlsplit(url)
150 hostname = parts.hostname if parts.hostname else ""
151 # preserve brackets for IPv6 URLs
152 if "://[" in url:
153 hostname = "[%s]" % hostname
154 try:
155 port = parts.port
156 except ValueError:
157 # Malformed port, just use None rather than raising an exception
158 port = None
159 default_port = default_ports.get(parts.scheme, None)
160 name = "%s://%s" % (parts.scheme, hostname)
161 resource = hostname
162 if not port and parts.scheme in default_ports:
163 port = default_ports[parts.scheme]
164 if port:
165 if port != default_port:
166 name += ":%d" % port
167 resource += ":%d" % port
168 return resource
169
170
171 def read_pem_file(file_obj) -> bytes:
172 cert = b""
173 for line in file_obj:
174 if line.startswith(b"-----BEGIN CERTIFICATE-----"):
175 break
176 # scan until we find the first END CERTIFICATE marker
177 for line in file_obj:
178 if line.startswith(b"-----END CERTIFICATE-----"):
179 break
180 cert += line.strip()
181 return base64.b64decode(cert)
182
183
184 def starmatch_to_regex(pattern: str) -> Pattern:
185 options = re.DOTALL
186 # check if we are case sensitive
187 if pattern.startswith("(?-i)"):
188 pattern = pattern[5:]
189 else:
190 options |= re.IGNORECASE
191 i, n = 0, len(pattern)
192 res = []
193 while i < n:
194 c = pattern[i]
195 i = i + 1
196 if c == "*":
197 res.append(".*")
198 else:
199 res.append(re.escape(c))
200 return re.compile(r"(?:%s)\Z" % "".join(res), options)
201
202
203 def nested_key(d: dict, *args):
204 """
205 Traverses a dictionary for nested keys. Returns `None` if the at any point
206 in the traversal a key cannot be found.
207
208 Example:
209
210 >>> from elasticapm.utils import nested_key
211 >>> d = {"a": {"b": {"c": 0}}}
212 >>> nested_key(d, "a", "b", "c")
213 0
214 >>> nested_key(d, "a", "b", "d")
215 None
216 """
217 for arg in args:
218 try:
219 d = d[arg]
220 except (TypeError, KeyError):
221 d = None
222 break
223 return d
224
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/elasticapm/conf/constants.py b/elasticapm/conf/constants.py
--- a/elasticapm/conf/constants.py
+++ b/elasticapm/conf/constants.py
@@ -31,16 +31,17 @@
import decimal
import re
from collections import namedtuple
+from typing import Pattern
-def _starmatch_to_regex(pattern):
+def _starmatch_to_regex(pattern: str) -> Pattern:
"""
This is a duplicate of starmatch_to_regex() in utils/__init__.py
Duplication to avoid circular imports
"""
options = re.DOTALL
- # check if we are case sensitive
+ # check if we are case-sensitive
if pattern.startswith("(?-i)"):
pattern = pattern[5:]
else:
@@ -98,6 +99,7 @@
"*credit*",
"*card*",
"*auth*",
+ "*principal*",
"set-cookie",
]
@@ -107,12 +109,7 @@
SUCCESS="success", FAILURE="failure", UNKNOWN="unknown"
)
-try:
- # Python 2
- LABEL_TYPES = (bool, int, long, float, decimal.Decimal)
-except NameError:
- # Python 3
- LABEL_TYPES = (bool, int, float, decimal.Decimal)
+LABEL_TYPES = (bool, int, float, decimal.Decimal)
TRACESTATE = namedtuple("TRACESTATE", ["SAMPLE_RATE"])(SAMPLE_RATE="s")
TRACE_CONTINUATION_STRATEGY = namedtuple("TRACE_CONTINUATION_STRATEGY", ["CONTINUE", "RESTART", "RESTART_EXTERNAL"])(
diff --git a/elasticapm/utils/__init__.py b/elasticapm/utils/__init__.py
--- a/elasticapm/utils/__init__.py
+++ b/elasticapm/utils/__init__.py
@@ -183,7 +183,7 @@
def starmatch_to_regex(pattern: str) -> Pattern:
options = re.DOTALL
- # check if we are case sensitive
+ # check if we are case-sensitive
if pattern.startswith("(?-i)"):
pattern = pattern[5:]
else:
| {"golden_diff": "diff --git a/elasticapm/conf/constants.py b/elasticapm/conf/constants.py\n--- a/elasticapm/conf/constants.py\n+++ b/elasticapm/conf/constants.py\n@@ -31,16 +31,17 @@\n import decimal\n import re\n from collections import namedtuple\n+from typing import Pattern\n \n \n-def _starmatch_to_regex(pattern):\n+def _starmatch_to_regex(pattern: str) -> Pattern:\n \"\"\"\n This is a duplicate of starmatch_to_regex() in utils/__init__.py\n \n Duplication to avoid circular imports\n \"\"\"\n options = re.DOTALL\n- # check if we are case sensitive\n+ # check if we are case-sensitive\n if pattern.startswith(\"(?-i)\"):\n pattern = pattern[5:]\n else:\n@@ -98,6 +99,7 @@\n \"*credit*\",\n \"*card*\",\n \"*auth*\",\n+ \"*principal*\",\n \"set-cookie\",\n ]\n \n@@ -107,12 +109,7 @@\n SUCCESS=\"success\", FAILURE=\"failure\", UNKNOWN=\"unknown\"\n )\n \n-try:\n- # Python 2\n- LABEL_TYPES = (bool, int, long, float, decimal.Decimal)\n-except NameError:\n- # Python 3\n- LABEL_TYPES = (bool, int, float, decimal.Decimal)\n+LABEL_TYPES = (bool, int, float, decimal.Decimal)\n \n TRACESTATE = namedtuple(\"TRACESTATE\", [\"SAMPLE_RATE\"])(SAMPLE_RATE=\"s\")\n TRACE_CONTINUATION_STRATEGY = namedtuple(\"TRACE_CONTINUATION_STRATEGY\", [\"CONTINUE\", \"RESTART\", \"RESTART_EXTERNAL\"])(\ndiff --git a/elasticapm/utils/__init__.py b/elasticapm/utils/__init__.py\n--- a/elasticapm/utils/__init__.py\n+++ b/elasticapm/utils/__init__.py\n@@ -183,7 +183,7 @@\n \n def starmatch_to_regex(pattern: str) -> Pattern:\n options = re.DOTALL\n- # check if we are case sensitive\n+ # check if we are case-sensitive\n if pattern.startswith(\"(?-i)\"):\n pattern = pattern[5:]\n else:\n", "issue": "[META 689] Ignore *principal* headers by default\nSee meta issue for the description and details:\r\n- Meta issue: https://github.com/elastic/apm/issues/689\r\n\n", "before_files": [{"content": "# BSD 3-Clause License\n#\n# Copyright (c) 2019, Elasticsearch BV\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# * Neither the name of the copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nimport decimal\nimport re\nfrom collections import namedtuple\n\n\ndef _starmatch_to_regex(pattern):\n \"\"\"\n This is a duplicate of starmatch_to_regex() in utils/__init__.py\n\n Duplication to avoid circular imports\n \"\"\"\n options = re.DOTALL\n # check if we are case sensitive\n if pattern.startswith(\"(?-i)\"):\n pattern = pattern[5:]\n else:\n options |= re.IGNORECASE\n i, n = 0, len(pattern)\n res = []\n while i < n:\n c = pattern[i]\n i = i + 1\n if c == \"*\":\n res.append(\".*\")\n else:\n res.append(re.escape(c))\n return re.compile(r\"(?:%s)\\Z\" % \"\".join(res), options)\n\n\nEVENTS_API_PATH = \"intake/v2/events\"\nAGENT_CONFIG_PATH = \"config/v1/agents\"\nSERVER_INFO_PATH = \"\"\n\nTRACE_CONTEXT_VERSION = 0\nTRACEPARENT_HEADER_NAME = \"traceparent\"\nTRACEPARENT_LEGACY_HEADER_NAME = \"elastic-apm-traceparent\"\nTRACEPARENT_BINARY_HEADER_NAME = \"elasticapmtraceparent\"\nTRACESTATE_HEADER_NAME = \"tracestate\"\n\nTIMESTAMP_FORMAT = \"%Y-%m-%dT%H:%M:%S.%fZ\"\n\nKEYWORD_MAX_LENGTH = 1024\nLONG_FIELD_MAX_LENGTH = 10000\n\nHTTP_WITH_BODY = {\"POST\", \"PUT\", \"PATCH\", \"DELETE\"}\n\nMASK = \"[REDACTED]\"\n\nEXCEPTION_CHAIN_MAX_DEPTH = 50\n\nERROR = \"error\"\nTRANSACTION = \"transaction\"\nSPAN = \"span\"\nMETRICSET = \"metricset\"\n\nLABEL_RE = re.compile('[.*\"]')\n\nHARDCODED_PROCESSORS = [\"elasticapm.processors.add_context_lines_to_frames\"]\n\nBASE_SANITIZE_FIELD_NAMES_UNPROCESSED = [\n \"password\",\n \"passwd\",\n \"pwd\",\n \"secret\",\n \"*key\",\n \"*token*\",\n \"*session*\",\n \"*credit*\",\n \"*card*\",\n \"*auth*\",\n \"set-cookie\",\n]\n\nBASE_SANITIZE_FIELD_NAMES = [_starmatch_to_regex(x) for x in BASE_SANITIZE_FIELD_NAMES_UNPROCESSED]\n\nOUTCOME = namedtuple(\"OUTCOME\", [\"SUCCESS\", \"FAILURE\", \"UNKNOWN\"])(\n SUCCESS=\"success\", FAILURE=\"failure\", UNKNOWN=\"unknown\"\n)\n\ntry:\n # Python 2\n LABEL_TYPES = (bool, int, long, float, decimal.Decimal)\nexcept NameError:\n # Python 3\n LABEL_TYPES = (bool, int, float, decimal.Decimal)\n\nTRACESTATE = namedtuple(\"TRACESTATE\", [\"SAMPLE_RATE\"])(SAMPLE_RATE=\"s\")\nTRACE_CONTINUATION_STRATEGY = namedtuple(\"TRACE_CONTINUATION_STRATEGY\", [\"CONTINUE\", \"RESTART\", \"RESTART_EXTERNAL\"])(\n CONTINUE=\"continue\",\n RESTART=\"restart\",\n RESTART_EXTERNAL=\"restart_external\",\n)\n", "path": "elasticapm/conf/constants.py"}, {"content": "# BSD 3-Clause License\n#\n# Copyright (c) 2012, the Sentry Team, see AUTHORS for more details\n# Copyright (c) 2019, Elasticsearch BV\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the 
distribution.\n#\n# * Neither the name of the copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n\nimport base64\nimport os\nimport re\nimport urllib.parse\nfrom functools import partial\nfrom types import FunctionType\nfrom typing import Pattern\n\nfrom elasticapm.conf import constants\nfrom elasticapm.utils import encoding\n\ntry:\n from functools import partialmethod\n\n partial_types = (partial, partialmethod)\nexcept ImportError:\n # Python 2\n partial_types = (partial,)\n\n\ndefault_ports = {\"https\": 443, \"http\": 80, \"postgresql\": 5432, \"mysql\": 3306, \"mssql\": 1433}\n\n\ndef varmap(func, var, context=None, name=None, **kwargs):\n \"\"\"\n Executes ``func(key_name, value)`` on all values,\n recursively discovering dict and list scoped\n values.\n \"\"\"\n if context is None:\n context = set()\n objid = id(var)\n if objid in context:\n return func(name, \"<...>\", **kwargs)\n context.add(objid)\n\n # Apply func() before recursion, so that `shorten()` doesn't have to iterate over all the trimmed values\n ret = func(name, var, **kwargs)\n if isinstance(ret, dict):\n # iterate over a copy of the dictionary to avoid \"dictionary changed size during iteration\" issues\n ret = dict((k, varmap(func, v, context, k, **kwargs)) for k, v in ret.copy().items())\n elif isinstance(ret, (list, tuple)):\n # Apply func() before recursion, so that `shorten()` doesn't have to iterate over all the trimmed values\n ret = [varmap(func, f, context, name, **kwargs) for f in ret]\n context.remove(objid)\n return ret\n\n\ndef get_name_from_func(func: FunctionType) -> str:\n # partials don't have `__module__` or `__name__`, so we use the values from the \"inner\" function\n if isinstance(func, partial_types):\n return \"partial({})\".format(get_name_from_func(func.func))\n elif hasattr(func, \"_partialmethod\") and hasattr(func._partialmethod, \"func\"):\n return \"partial({})\".format(get_name_from_func(func._partialmethod.func))\n\n module = func.__module__\n\n if hasattr(func, \"view_class\"):\n view_name = func.view_class.__name__\n elif hasattr(func, \"__name__\"):\n view_name = func.__name__\n else: # Fall back if there's no __name__\n view_name = func.__class__.__name__\n\n return \"{0}.{1}\".format(module, view_name)\n\n\ndef build_name_with_http_method_prefix(name, request):\n return \" \".join((request.method, name)) if name else name\n\n\ndef is_master_process() -> bool:\n # currently only recognizes uwsgi master process\n try:\n import uwsgi\n\n return os.getpid() == uwsgi.masterpid()\n except ImportError:\n return False\n\n\ndef get_url_dict(url: str) -> dict:\n parse_result = urllib.parse.urlparse(url)\n\n url_dict = {\n \"full\": encoding.keyword_field(url),\n 
\"protocol\": parse_result.scheme + \":\",\n \"hostname\": encoding.keyword_field(parse_result.hostname),\n \"pathname\": encoding.keyword_field(parse_result.path),\n }\n\n port = None if parse_result.port is None else str(parse_result.port)\n\n if port:\n url_dict[\"port\"] = port\n if parse_result.query:\n url_dict[\"search\"] = encoding.keyword_field(\"?\" + parse_result.query)\n return url_dict\n\n\ndef sanitize_url(url: str) -> str:\n if \"@\" not in url:\n return url\n parts = urllib.parse.urlparse(url)\n return url.replace(\"%s:%s\" % (parts.username, parts.password), \"%s:%s\" % (parts.username, constants.MASK))\n\n\ndef get_host_from_url(url: str) -> str:\n parsed_url = urllib.parse.urlparse(url)\n host = parsed_url.hostname or \" \"\n\n if parsed_url.port and default_ports.get(parsed_url.scheme) != parsed_url.port:\n host += \":\" + str(parsed_url.port)\n\n return host\n\n\ndef url_to_destination_resource(url: str) -> str:\n parts = urllib.parse.urlsplit(url)\n hostname = parts.hostname if parts.hostname else \"\"\n # preserve brackets for IPv6 URLs\n if \"://[\" in url:\n hostname = \"[%s]\" % hostname\n try:\n port = parts.port\n except ValueError:\n # Malformed port, just use None rather than raising an exception\n port = None\n default_port = default_ports.get(parts.scheme, None)\n name = \"%s://%s\" % (parts.scheme, hostname)\n resource = hostname\n if not port and parts.scheme in default_ports:\n port = default_ports[parts.scheme]\n if port:\n if port != default_port:\n name += \":%d\" % port\n resource += \":%d\" % port\n return resource\n\n\ndef read_pem_file(file_obj) -> bytes:\n cert = b\"\"\n for line in file_obj:\n if line.startswith(b\"-----BEGIN CERTIFICATE-----\"):\n break\n # scan until we find the first END CERTIFICATE marker\n for line in file_obj:\n if line.startswith(b\"-----END CERTIFICATE-----\"):\n break\n cert += line.strip()\n return base64.b64decode(cert)\n\n\ndef starmatch_to_regex(pattern: str) -> Pattern:\n options = re.DOTALL\n # check if we are case sensitive\n if pattern.startswith(\"(?-i)\"):\n pattern = pattern[5:]\n else:\n options |= re.IGNORECASE\n i, n = 0, len(pattern)\n res = []\n while i < n:\n c = pattern[i]\n i = i + 1\n if c == \"*\":\n res.append(\".*\")\n else:\n res.append(re.escape(c))\n return re.compile(r\"(?:%s)\\Z\" % \"\".join(res), options)\n\n\ndef nested_key(d: dict, *args):\n \"\"\"\n Traverses a dictionary for nested keys. 
Returns `None` if the at any point\n in the traversal a key cannot be found.\n\n Example:\n\n >>> from elasticapm.utils import nested_key\n >>> d = {\"a\": {\"b\": {\"c\": 0}}}\n >>> nested_key(d, \"a\", \"b\", \"c\")\n 0\n >>> nested_key(d, \"a\", \"b\", \"d\")\n None\n \"\"\"\n for arg in args:\n try:\n d = d[arg]\n except (TypeError, KeyError):\n d = None\n break\n return d\n", "path": "elasticapm/utils/__init__.py"}], "after_files": [{"content": "# BSD 3-Clause License\n#\n# Copyright (c) 2019, Elasticsearch BV\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# * Neither the name of the copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nimport decimal\nimport re\nfrom collections import namedtuple\nfrom typing import Pattern\n\n\ndef _starmatch_to_regex(pattern: str) -> Pattern:\n \"\"\"\n This is a duplicate of starmatch_to_regex() in utils/__init__.py\n\n Duplication to avoid circular imports\n \"\"\"\n options = re.DOTALL\n # check if we are case-sensitive\n if pattern.startswith(\"(?-i)\"):\n pattern = pattern[5:]\n else:\n options |= re.IGNORECASE\n i, n = 0, len(pattern)\n res = []\n while i < n:\n c = pattern[i]\n i = i + 1\n if c == \"*\":\n res.append(\".*\")\n else:\n res.append(re.escape(c))\n return re.compile(r\"(?:%s)\\Z\" % \"\".join(res), options)\n\n\nEVENTS_API_PATH = \"intake/v2/events\"\nAGENT_CONFIG_PATH = \"config/v1/agents\"\nSERVER_INFO_PATH = \"\"\n\nTRACE_CONTEXT_VERSION = 0\nTRACEPARENT_HEADER_NAME = \"traceparent\"\nTRACEPARENT_LEGACY_HEADER_NAME = \"elastic-apm-traceparent\"\nTRACEPARENT_BINARY_HEADER_NAME = \"elasticapmtraceparent\"\nTRACESTATE_HEADER_NAME = \"tracestate\"\n\nTIMESTAMP_FORMAT = \"%Y-%m-%dT%H:%M:%S.%fZ\"\n\nKEYWORD_MAX_LENGTH = 1024\nLONG_FIELD_MAX_LENGTH = 10000\n\nHTTP_WITH_BODY = {\"POST\", \"PUT\", \"PATCH\", \"DELETE\"}\n\nMASK = \"[REDACTED]\"\n\nEXCEPTION_CHAIN_MAX_DEPTH = 50\n\nERROR = \"error\"\nTRANSACTION = \"transaction\"\nSPAN = \"span\"\nMETRICSET = \"metricset\"\n\nLABEL_RE = re.compile('[.*\"]')\n\nHARDCODED_PROCESSORS = [\"elasticapm.processors.add_context_lines_to_frames\"]\n\nBASE_SANITIZE_FIELD_NAMES_UNPROCESSED = [\n \"password\",\n \"passwd\",\n 
\"pwd\",\n \"secret\",\n \"*key\",\n \"*token*\",\n \"*session*\",\n \"*credit*\",\n \"*card*\",\n \"*auth*\",\n \"*principal*\",\n \"set-cookie\",\n]\n\nBASE_SANITIZE_FIELD_NAMES = [_starmatch_to_regex(x) for x in BASE_SANITIZE_FIELD_NAMES_UNPROCESSED]\n\nOUTCOME = namedtuple(\"OUTCOME\", [\"SUCCESS\", \"FAILURE\", \"UNKNOWN\"])(\n SUCCESS=\"success\", FAILURE=\"failure\", UNKNOWN=\"unknown\"\n)\n\nLABEL_TYPES = (bool, int, float, decimal.Decimal)\n\nTRACESTATE = namedtuple(\"TRACESTATE\", [\"SAMPLE_RATE\"])(SAMPLE_RATE=\"s\")\nTRACE_CONTINUATION_STRATEGY = namedtuple(\"TRACE_CONTINUATION_STRATEGY\", [\"CONTINUE\", \"RESTART\", \"RESTART_EXTERNAL\"])(\n CONTINUE=\"continue\",\n RESTART=\"restart\",\n RESTART_EXTERNAL=\"restart_external\",\n)\n", "path": "elasticapm/conf/constants.py"}, {"content": "# BSD 3-Clause License\n#\n# Copyright (c) 2012, the Sentry Team, see AUTHORS for more details\n# Copyright (c) 2019, Elasticsearch BV\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# * Neither the name of the copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n\nimport base64\nimport os\nimport re\nimport urllib.parse\nfrom functools import partial\nfrom types import FunctionType\nfrom typing import Pattern\n\nfrom elasticapm.conf import constants\nfrom elasticapm.utils import encoding\n\ntry:\n from functools import partialmethod\n\n partial_types = (partial, partialmethod)\nexcept ImportError:\n # Python 2\n partial_types = (partial,)\n\n\ndefault_ports = {\"https\": 443, \"http\": 80, \"postgresql\": 5432, \"mysql\": 3306, \"mssql\": 1433}\n\n\ndef varmap(func, var, context=None, name=None, **kwargs):\n \"\"\"\n Executes ``func(key_name, value)`` on all values,\n recursively discovering dict and list scoped\n values.\n \"\"\"\n if context is None:\n context = set()\n objid = id(var)\n if objid in context:\n return func(name, \"<...>\", **kwargs)\n context.add(objid)\n\n # Apply func() before recursion, so that `shorten()` doesn't have to iterate over all the trimmed values\n ret = func(name, var, **kwargs)\n if isinstance(ret, dict):\n # iterate over a copy of the dictionary to avoid \"dictionary changed size during iteration\" issues\n ret = dict((k, varmap(func, v, context, k, **kwargs)) for k, v in ret.copy().items())\n elif isinstance(ret, (list, tuple)):\n # Apply func() before recursion, so that `shorten()` doesn't have to iterate over all the trimmed values\n ret = [varmap(func, f, context, name, **kwargs) for f in ret]\n context.remove(objid)\n return ret\n\n\ndef get_name_from_func(func: FunctionType) -> str:\n # partials don't have `__module__` or `__name__`, so we use the values from the \"inner\" function\n if isinstance(func, partial_types):\n return \"partial({})\".format(get_name_from_func(func.func))\n elif hasattr(func, \"_partialmethod\") and hasattr(func._partialmethod, \"func\"):\n return \"partial({})\".format(get_name_from_func(func._partialmethod.func))\n\n module = func.__module__\n\n if hasattr(func, \"view_class\"):\n view_name = func.view_class.__name__\n elif hasattr(func, \"__name__\"):\n view_name = func.__name__\n else: # Fall back if there's no __name__\n view_name = func.__class__.__name__\n\n return \"{0}.{1}\".format(module, view_name)\n\n\ndef build_name_with_http_method_prefix(name, request):\n return \" \".join((request.method, name)) if name else name\n\n\ndef is_master_process() -> bool:\n # currently only recognizes uwsgi master process\n try:\n import uwsgi\n\n return os.getpid() == uwsgi.masterpid()\n except ImportError:\n return False\n\n\ndef get_url_dict(url: str) -> dict:\n parse_result = urllib.parse.urlparse(url)\n\n url_dict = {\n \"full\": encoding.keyword_field(url),\n \"protocol\": parse_result.scheme + \":\",\n \"hostname\": encoding.keyword_field(parse_result.hostname),\n \"pathname\": encoding.keyword_field(parse_result.path),\n }\n\n port = None if parse_result.port is None else str(parse_result.port)\n\n if port:\n url_dict[\"port\"] = port\n if parse_result.query:\n url_dict[\"search\"] = encoding.keyword_field(\"?\" + parse_result.query)\n return url_dict\n\n\ndef sanitize_url(url: str) -> str:\n if \"@\" not in url:\n return 
url\n parts = urllib.parse.urlparse(url)\n return url.replace(\"%s:%s\" % (parts.username, parts.password), \"%s:%s\" % (parts.username, constants.MASK))\n\n\ndef get_host_from_url(url: str) -> str:\n parsed_url = urllib.parse.urlparse(url)\n host = parsed_url.hostname or \" \"\n\n if parsed_url.port and default_ports.get(parsed_url.scheme) != parsed_url.port:\n host += \":\" + str(parsed_url.port)\n\n return host\n\n\ndef url_to_destination_resource(url: str) -> str:\n parts = urllib.parse.urlsplit(url)\n hostname = parts.hostname if parts.hostname else \"\"\n # preserve brackets for IPv6 URLs\n if \"://[\" in url:\n hostname = \"[%s]\" % hostname\n try:\n port = parts.port\n except ValueError:\n # Malformed port, just use None rather than raising an exception\n port = None\n default_port = default_ports.get(parts.scheme, None)\n name = \"%s://%s\" % (parts.scheme, hostname)\n resource = hostname\n if not port and parts.scheme in default_ports:\n port = default_ports[parts.scheme]\n if port:\n if port != default_port:\n name += \":%d\" % port\n resource += \":%d\" % port\n return resource\n\n\ndef read_pem_file(file_obj) -> bytes:\n cert = b\"\"\n for line in file_obj:\n if line.startswith(b\"-----BEGIN CERTIFICATE-----\"):\n break\n # scan until we find the first END CERTIFICATE marker\n for line in file_obj:\n if line.startswith(b\"-----END CERTIFICATE-----\"):\n break\n cert += line.strip()\n return base64.b64decode(cert)\n\n\ndef starmatch_to_regex(pattern: str) -> Pattern:\n options = re.DOTALL\n # check if we are case-sensitive\n if pattern.startswith(\"(?-i)\"):\n pattern = pattern[5:]\n else:\n options |= re.IGNORECASE\n i, n = 0, len(pattern)\n res = []\n while i < n:\n c = pattern[i]\n i = i + 1\n if c == \"*\":\n res.append(\".*\")\n else:\n res.append(re.escape(c))\n return re.compile(r\"(?:%s)\\Z\" % \"\".join(res), options)\n\n\ndef nested_key(d: dict, *args):\n \"\"\"\n Traverses a dictionary for nested keys. Returns `None` if the at any point\n in the traversal a key cannot be found.\n\n Example:\n\n >>> from elasticapm.utils import nested_key\n >>> d = {\"a\": {\"b\": {\"c\": 0}}}\n >>> nested_key(d, \"a\", \"b\", \"c\")\n 0\n >>> nested_key(d, \"a\", \"b\", \"d\")\n None\n \"\"\"\n for arg in args:\n try:\n d = d[arg]\n except (TypeError, KeyError):\n d = None\n break\n return d\n", "path": "elasticapm/utils/__init__.py"}]} | 3,936 | 488 |
gh_patches_debug_17330 | rasdani/github-patches | git_diff | bornhack__bornhack-website-187 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
include iban/swift/bic info on custominvoice pdfs
include iban/swift/bic info on custominvoice pdfs
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/shop/invoiceworker.py`
Content:
```
1 from django.core.files import File
2 from utils.pdf import generate_pdf_letter
3 from shop.email import add_invoice_email, add_creditnote_email
4 from shop.models import Order, CustomOrder, Invoice, CreditNote
5 import logging
6 logging.basicConfig(level=logging.INFO)
7 logger = logging.getLogger('bornhack.%s' % __name__)
8
9
10 def do_work():
11 """
12 The invoice worker creates Invoice objects for shop orders and
13 for custom orders. It also generates PDF files for Invoice objects
14 that have no PDF. It also emails invoices for shop orders.
15 """
16
17 # check if we need to generate any invoices for shop orders
18 for order in Order.objects.filter(paid=True, invoice__isnull=True):
19 # generate invoice for this Order
20 Invoice.objects.create(order=order)
21 logger.info('Generated Invoice object for %s' % order)
22
23 # check if we need to generate any invoices for custom orders
24 for customorder in CustomOrder.objects.filter(invoice__isnull=True):
25 # generate invoice for this CustomOrder
26 Invoice.objects.create(customorder=customorder)
27 logger.info('Generated Invoice object for %s' % customorder)
28
29 # check if we need to generate any pdf invoices
30 for invoice in Invoice.objects.filter(pdf=''):
31 # generate the pdf
32 try:
33 if invoice.customorder:
34 template = 'pdf/custominvoice.html'
35 else:
36 template = 'pdf/invoice.html'
37 pdffile = generate_pdf_letter(
38 filename=invoice.filename,
39 template=template,
40 formatdict={
41 'invoice': invoice,
42 },
43 )
44 logger.info('Generated pdf for invoice %s' % invoice)
45 except Exception as E:
46 logger.exception('Unable to generate PDF file for invoice #%s. Error: %s' % (invoice.pk, E))
47 continue
48
49 # update invoice object with the file
50 invoice.pdf.save(invoice.filename, File(pdffile))
51 invoice.save()
52
53 # check if we need to send out any invoices (only for shop orders, and only where pdf has been generated)
54 for invoice in Invoice.objects.filter(order__isnull=False, sent_to_customer=False).exclude(pdf=''):
55 logger.info("found unmailed Invoice object: %s" % invoice)
56 # add email to the outgoing email queue
57 if add_invoice_email(invoice=invoice):
58 invoice.sent_to_customer = True
59 invoice.save()
60 logger.info('OK: Invoice email to {} added to queue.'.format(
61 invoice.order.user.email)
62 )
63 else:
64 logger.error('Unable to add email for invoice {} to {}'.format(
65 invoice.pk,
66 invoice.order.user.email
67 )
68 )
69
70 # check if we need to generate any pdf creditnotes?
71 for creditnote in CreditNote.objects.filter(pdf=''):
72 # generate the pdf
73 try:
74 pdffile = generate_pdf_letter(
75 filename=creditnote.filename,
76 template='pdf/creditnote.html',
77 formatdict={
78 'creditnote': creditnote,
79 },
80 )
81 logger.info('Generated pdf for creditnote %s' % creditnote)
82 except Exception as E:
83 logger.exception('Unable to generate PDF file for creditnote #%s. Error: %s' % (creditnote.pk, E))
84 continue
85
86 # update creditnote object with the file
87 creditnote.pdf.save(creditnote.filename, File(pdffile))
88 creditnote.save()
89
90 # check if we need to send out any creditnotes (only where pdf has been generated and only for creditnotes linked to a user)
91 for creditnote in CreditNote.objects.filter(sent_to_customer=False).exclude(pdf='').exclude(user=None):
92 # send the email
93 if add_creditnote_email(creditnote=creditnote):
94 logger.info('OK: Creditnote email to %s added' % creditnote.user.email)
95 creditnote.sent_to_customer = True
96 creditnote.save()
97 else:
98 logger.error('Unable to add creditnote email for creditnote %s to %s' % (creditnote.pk, creditnote.user.email))
99
100
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/shop/invoiceworker.py b/src/shop/invoiceworker.py
--- a/src/shop/invoiceworker.py
+++ b/src/shop/invoiceworker.py
@@ -1,3 +1,4 @@
+from django.conf import settings
from django.core.files import File
from utils.pdf import generate_pdf_letter
from shop.email import add_invoice_email, add_creditnote_email
@@ -39,6 +40,11 @@
template=template,
formatdict={
'invoice': invoice,
+ 'bank': settings.BANKACCOUNT_BANK,
+ 'bank_iban': settings.BANKACCOUNT_IBAN,
+ 'bank_bic': settings.BANKACCOUNT_SWIFTBIC,
+ 'bank_dk_reg': settings.BANKACCOUNT_REG,
+ 'bank_dk_accno': settings.BANKACCOUNT_ACCOUNT,
},
)
logger.info('Generated pdf for invoice %s' % invoice)
| {"golden_diff": "diff --git a/src/shop/invoiceworker.py b/src/shop/invoiceworker.py\n--- a/src/shop/invoiceworker.py\n+++ b/src/shop/invoiceworker.py\n@@ -1,3 +1,4 @@\n+from django.conf import settings\n from django.core.files import File\n from utils.pdf import generate_pdf_letter\n from shop.email import add_invoice_email, add_creditnote_email\n@@ -39,6 +40,11 @@\n template=template,\n formatdict={\n 'invoice': invoice,\n+ 'bank': settings.BANKACCOUNT_BANK,\n+ 'bank_iban': settings.BANKACCOUNT_IBAN,\n+ 'bank_bic': settings.BANKACCOUNT_SWIFTBIC,\n+ 'bank_dk_reg': settings.BANKACCOUNT_REG,\n+ 'bank_dk_accno': settings.BANKACCOUNT_ACCOUNT,\n },\n )\n logger.info('Generated pdf for invoice %s' % invoice)\n", "issue": "include iban/swift/bic info on custominvoice pdfs\n\ninclude iban/swift/bic info on custominvoice pdfs\n\n", "before_files": [{"content": "from django.core.files import File\nfrom utils.pdf import generate_pdf_letter\nfrom shop.email import add_invoice_email, add_creditnote_email\nfrom shop.models import Order, CustomOrder, Invoice, CreditNote\nimport logging\nlogging.basicConfig(level=logging.INFO)\nlogger = logging.getLogger('bornhack.%s' % __name__)\n\n\ndef do_work():\n \"\"\"\n The invoice worker creates Invoice objects for shop orders and\n for custom orders. It also generates PDF files for Invoice objects\n that have no PDF. It also emails invoices for shop orders.\n \"\"\"\n\n # check if we need to generate any invoices for shop orders\n for order in Order.objects.filter(paid=True, invoice__isnull=True):\n # generate invoice for this Order\n Invoice.objects.create(order=order)\n logger.info('Generated Invoice object for %s' % order)\n\n # check if we need to generate any invoices for custom orders\n for customorder in CustomOrder.objects.filter(invoice__isnull=True):\n # generate invoice for this CustomOrder\n Invoice.objects.create(customorder=customorder)\n logger.info('Generated Invoice object for %s' % customorder)\n\n # check if we need to generate any pdf invoices\n for invoice in Invoice.objects.filter(pdf=''):\n # generate the pdf\n try:\n if invoice.customorder:\n template = 'pdf/custominvoice.html'\n else:\n template = 'pdf/invoice.html'\n pdffile = generate_pdf_letter(\n filename=invoice.filename,\n template=template,\n formatdict={\n 'invoice': invoice,\n },\n )\n logger.info('Generated pdf for invoice %s' % invoice)\n except Exception as E:\n logger.exception('Unable to generate PDF file for invoice #%s. 
Error: %s' % (invoice.pk, E))\n continue\n\n # update invoice object with the file\n invoice.pdf.save(invoice.filename, File(pdffile))\n invoice.save()\n\n # check if we need to send out any invoices (only for shop orders, and only where pdf has been generated)\n for invoice in Invoice.objects.filter(order__isnull=False, sent_to_customer=False).exclude(pdf=''):\n logger.info(\"found unmailed Invoice object: %s\" % invoice)\n # add email to the outgoing email queue\n if add_invoice_email(invoice=invoice):\n invoice.sent_to_customer = True\n invoice.save()\n logger.info('OK: Invoice email to {} added to queue.'.format(\n invoice.order.user.email)\n )\n else:\n logger.error('Unable to add email for invoice {} to {}'.format(\n invoice.pk,\n invoice.order.user.email\n )\n )\n\n # check if we need to generate any pdf creditnotes?\n for creditnote in CreditNote.objects.filter(pdf=''):\n # generate the pdf\n try:\n pdffile = generate_pdf_letter(\n filename=creditnote.filename,\n template='pdf/creditnote.html',\n formatdict={\n 'creditnote': creditnote,\n },\n )\n logger.info('Generated pdf for creditnote %s' % creditnote)\n except Exception as E:\n logger.exception('Unable to generate PDF file for creditnote #%s. Error: %s' % (creditnote.pk, E))\n continue\n\n # update creditnote object with the file\n creditnote.pdf.save(creditnote.filename, File(pdffile))\n creditnote.save()\n\n # check if we need to send out any creditnotes (only where pdf has been generated and only for creditnotes linked to a user)\n for creditnote in CreditNote.objects.filter(sent_to_customer=False).exclude(pdf='').exclude(user=None):\n # send the email\n if add_creditnote_email(creditnote=creditnote):\n logger.info('OK: Creditnote email to %s added' % creditnote.user.email)\n creditnote.sent_to_customer = True\n creditnote.save()\n else:\n logger.error('Unable to add creditnote email for creditnote %s to %s' % (creditnote.pk, creditnote.user.email))\n\n", "path": "src/shop/invoiceworker.py"}], "after_files": [{"content": "from django.conf import settings\nfrom django.core.files import File\nfrom utils.pdf import generate_pdf_letter\nfrom shop.email import add_invoice_email, add_creditnote_email\nfrom shop.models import Order, CustomOrder, Invoice, CreditNote\nimport logging\nlogging.basicConfig(level=logging.INFO)\nlogger = logging.getLogger('bornhack.%s' % __name__)\n\n\ndef do_work():\n \"\"\"\n The invoice worker creates Invoice objects for shop orders and\n for custom orders. It also generates PDF files for Invoice objects\n that have no PDF. 
It also emails invoices for shop orders.\n \"\"\"\n\n # check if we need to generate any invoices for shop orders\n for order in Order.objects.filter(paid=True, invoice__isnull=True):\n # generate invoice for this Order\n Invoice.objects.create(order=order)\n logger.info('Generated Invoice object for %s' % order)\n\n # check if we need to generate any invoices for custom orders\n for customorder in CustomOrder.objects.filter(invoice__isnull=True):\n # generate invoice for this CustomOrder\n Invoice.objects.create(customorder=customorder)\n logger.info('Generated Invoice object for %s' % customorder)\n\n # check if we need to generate any pdf invoices\n for invoice in Invoice.objects.filter(pdf=''):\n # generate the pdf\n try:\n if invoice.customorder:\n template = 'pdf/custominvoice.html'\n else:\n template = 'pdf/invoice.html'\n pdffile = generate_pdf_letter(\n filename=invoice.filename,\n template=template,\n formatdict={\n 'invoice': invoice,\n 'bank': settings.BANKACCOUNT_BANK,\n 'bank_iban': settings.BANKACCOUNT_IBAN,\n 'bank_bic': settings.BANKACCOUNT_SWIFTBIC,\n 'bank_dk_reg': settings.BANKACCOUNT_REG,\n 'bank_dk_accno': settings.BANKACCOUNT_ACCOUNT,\n },\n )\n logger.info('Generated pdf for invoice %s' % invoice)\n except Exception as E:\n logger.exception('Unable to generate PDF file for invoice #%s. Error: %s' % (invoice.pk, E))\n continue\n\n # update invoice object with the file\n invoice.pdf.save(invoice.filename, File(pdffile))\n invoice.save()\n\n # check if we need to send out any invoices (only for shop orders, and only where pdf has been generated)\n for invoice in Invoice.objects.filter(order__isnull=False, sent_to_customer=False).exclude(pdf=''):\n logger.info(\"found unmailed Invoice object: %s\" % invoice)\n # add email to the outgoing email queue\n if add_invoice_email(invoice=invoice):\n invoice.sent_to_customer = True\n invoice.save()\n logger.info('OK: Invoice email to {} added to queue.'.format(\n invoice.order.user.email)\n )\n else:\n logger.error('Unable to add email for invoice {} to {}'.format(\n invoice.pk,\n invoice.order.user.email\n )\n )\n\n # check if we need to generate any pdf creditnotes?\n for creditnote in CreditNote.objects.filter(pdf=''):\n # generate the pdf\n try:\n pdffile = generate_pdf_letter(\n filename=creditnote.filename,\n template='pdf/creditnote.html',\n formatdict={\n 'creditnote': creditnote,\n },\n )\n logger.info('Generated pdf for creditnote %s' % creditnote)\n except Exception as E:\n logger.exception('Unable to generate PDF file for creditnote #%s. Error: %s' % (creditnote.pk, E))\n continue\n\n # update creditnote object with the file\n creditnote.pdf.save(creditnote.filename, File(pdffile))\n creditnote.save()\n\n # check if we need to send out any creditnotes (only where pdf has been generated and only for creditnotes linked to a user)\n for creditnote in CreditNote.objects.filter(sent_to_customer=False).exclude(pdf='').exclude(user=None):\n # send the email\n if add_creditnote_email(creditnote=creditnote):\n logger.info('OK: Creditnote email to %s added' % creditnote.user.email)\n creditnote.sent_to_customer = True\n creditnote.save()\n else:\n logger.error('Unable to add creditnote email for creditnote %s to %s' % (creditnote.pk, creditnote.user.email))\n\n", "path": "src/shop/invoiceworker.py"}]} | 1,342 | 205 |
gh_patches_debug_10518 | rasdani/github-patches | git_diff | Mailu__Mailu-1052 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Regular alpine base-image update
To receive current updates for upstream projects, security fixes and new features such as mentioned in #1049 , let’s upgrade the alpine baseimage to latest 3.10
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `core/postfix/start.py`
Content:
```
1 #!/usr/bin/python3
2
3 import os
4 import glob
5 import shutil
6 import multiprocessing
7 import logging as log
8 import sys
9 from mailustart import resolve, convert
10
11 from podop import run_server
12
13 log.basicConfig(stream=sys.stderr, level=os.environ.get("LOG_LEVEL", "WARNING"))
14
15 def start_podop():
16 os.setuid(100)
17 url = "http://" + os.environ["ADMIN_ADDRESS"] + "/internal/postfix/"
18 # TODO: Remove verbosity setting from Podop?
19 run_server(0, "postfix", "/tmp/podop.socket", [
20 ("transport", "url", url + "transport/§"),
21 ("alias", "url", url + "alias/§"),
22 ("domain", "url", url + "domain/§"),
23 ("mailbox", "url", url + "mailbox/§"),
24 ("senderaccess", "url", url + "sender/access/§"),
25 ("senderlogin", "url", url + "sender/login/§")
26 ])
27
28 # Actual startup script
29 os.environ["FRONT_ADDRESS"] = resolve(os.environ.get("FRONT_ADDRESS", "front"))
30 os.environ["ADMIN_ADDRESS"] = resolve(os.environ.get("ADMIN_ADDRESS", "admin"))
31 os.environ["HOST_ANTISPAM"] = resolve(os.environ.get("HOST_ANTISPAM", "antispam:11332"))
32 os.environ["HOST_LMTP"] = resolve(os.environ.get("HOST_LMTP", "imap:2525"))
33
34 for postfix_file in glob.glob("/conf/*.cf"):
35 convert(postfix_file, os.path.join("/etc/postfix", os.path.basename(postfix_file)))
36
37 if os.path.exists("/overrides/postfix.cf"):
38 for line in open("/overrides/postfix.cf").read().strip().split("\n"):
39 os.system('postconf -e "{}"'.format(line))
40
41 if os.path.exists("/overrides/postfix.master"):
42 for line in open("/overrides/postfix.master").read().strip().split("\n"):
43 os.system('postconf -Me "{}"'.format(line))
44
45 for map_file in glob.glob("/overrides/*.map"):
46 destination = os.path.join("/etc/postfix", os.path.basename(map_file))
47 shutil.copyfile(map_file, destination)
48 os.system("postmap {}".format(destination))
49 os.remove(destination)
50
51 if "RELAYUSER" in os.environ:
52 path = "/etc/postfix/sasl_passwd"
53 convert("/conf/sasl_passwd", path)
54 os.system("postmap {}".format(path))
55
56 convert("/conf/rsyslog.conf", "/etc/rsyslog.conf")
57
58 # Run Podop and Postfix
59 multiprocessing.Process(target=start_podop).start()
60 if os.path.exists("/var/run/rsyslogd.pid"):
61 os.remove("/var/run/rsyslogd.pid")
62 os.system("/usr/lib/postfix/post-install meta_directory=/etc/postfix create-missing")
63 os.system("/usr/lib/postfix/master &")
64 os.execv("/usr/sbin/rsyslogd", ["rsyslogd", "-n"])
65
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/core/postfix/start.py b/core/postfix/start.py
--- a/core/postfix/start.py
+++ b/core/postfix/start.py
@@ -53,12 +53,7 @@
convert("/conf/sasl_passwd", path)
os.system("postmap {}".format(path))
-convert("/conf/rsyslog.conf", "/etc/rsyslog.conf")
-
# Run Podop and Postfix
multiprocessing.Process(target=start_podop).start()
-if os.path.exists("/var/run/rsyslogd.pid"):
- os.remove("/var/run/rsyslogd.pid")
-os.system("/usr/lib/postfix/post-install meta_directory=/etc/postfix create-missing")
-os.system("/usr/lib/postfix/master &")
-os.execv("/usr/sbin/rsyslogd", ["rsyslogd", "-n"])
+os.system("/usr/libexec/postfix/post-install meta_directory=/etc/postfix create-missing")
+os.system("postfix start-fg")
| {"golden_diff": "diff --git a/core/postfix/start.py b/core/postfix/start.py\n--- a/core/postfix/start.py\n+++ b/core/postfix/start.py\n@@ -53,12 +53,7 @@\n convert(\"/conf/sasl_passwd\", path)\n os.system(\"postmap {}\".format(path))\n \n-convert(\"/conf/rsyslog.conf\", \"/etc/rsyslog.conf\")\n-\n # Run Podop and Postfix\n multiprocessing.Process(target=start_podop).start()\n-if os.path.exists(\"/var/run/rsyslogd.pid\"):\n- os.remove(\"/var/run/rsyslogd.pid\")\n-os.system(\"/usr/lib/postfix/post-install meta_directory=/etc/postfix create-missing\")\n-os.system(\"/usr/lib/postfix/master &\")\n-os.execv(\"/usr/sbin/rsyslogd\", [\"rsyslogd\", \"-n\"])\n+os.system(\"/usr/libexec/postfix/post-install meta_directory=/etc/postfix create-missing\")\n+os.system(\"postfix start-fg\")\n", "issue": "Regular alpine base-image update\nTo receive current updates for upstream projects, security fixes and new features such as mentioned in #1049 , let\u2019s upgrade the alpine baseimage to latest 3.10\n", "before_files": [{"content": "#!/usr/bin/python3\n\nimport os\nimport glob\nimport shutil\nimport multiprocessing\nimport logging as log\nimport sys\nfrom mailustart import resolve, convert\n\nfrom podop import run_server\n\nlog.basicConfig(stream=sys.stderr, level=os.environ.get(\"LOG_LEVEL\", \"WARNING\"))\n\ndef start_podop():\n os.setuid(100)\n url = \"http://\" + os.environ[\"ADMIN_ADDRESS\"] + \"/internal/postfix/\"\n # TODO: Remove verbosity setting from Podop?\n run_server(0, \"postfix\", \"/tmp/podop.socket\", [\n\t\t(\"transport\", \"url\", url + \"transport/\u00a7\"),\n\t\t(\"alias\", \"url\", url + \"alias/\u00a7\"),\n\t\t(\"domain\", \"url\", url + \"domain/\u00a7\"),\n (\"mailbox\", \"url\", url + \"mailbox/\u00a7\"),\n (\"senderaccess\", \"url\", url + \"sender/access/\u00a7\"),\n (\"senderlogin\", \"url\", url + \"sender/login/\u00a7\")\n ])\n\n# Actual startup script\nos.environ[\"FRONT_ADDRESS\"] = resolve(os.environ.get(\"FRONT_ADDRESS\", \"front\"))\nos.environ[\"ADMIN_ADDRESS\"] = resolve(os.environ.get(\"ADMIN_ADDRESS\", \"admin\"))\nos.environ[\"HOST_ANTISPAM\"] = resolve(os.environ.get(\"HOST_ANTISPAM\", \"antispam:11332\"))\nos.environ[\"HOST_LMTP\"] = resolve(os.environ.get(\"HOST_LMTP\", \"imap:2525\"))\n\nfor postfix_file in glob.glob(\"/conf/*.cf\"):\n convert(postfix_file, os.path.join(\"/etc/postfix\", os.path.basename(postfix_file)))\n\nif os.path.exists(\"/overrides/postfix.cf\"):\n for line in open(\"/overrides/postfix.cf\").read().strip().split(\"\\n\"):\n os.system('postconf -e \"{}\"'.format(line))\n\nif os.path.exists(\"/overrides/postfix.master\"):\n for line in open(\"/overrides/postfix.master\").read().strip().split(\"\\n\"):\n os.system('postconf -Me \"{}\"'.format(line))\n\nfor map_file in glob.glob(\"/overrides/*.map\"):\n destination = os.path.join(\"/etc/postfix\", os.path.basename(map_file))\n shutil.copyfile(map_file, destination)\n os.system(\"postmap {}\".format(destination))\n os.remove(destination)\n\nif \"RELAYUSER\" in os.environ:\n path = \"/etc/postfix/sasl_passwd\"\n convert(\"/conf/sasl_passwd\", path)\n os.system(\"postmap {}\".format(path))\n\nconvert(\"/conf/rsyslog.conf\", \"/etc/rsyslog.conf\")\n\n# Run Podop and Postfix\nmultiprocessing.Process(target=start_podop).start()\nif os.path.exists(\"/var/run/rsyslogd.pid\"):\n os.remove(\"/var/run/rsyslogd.pid\")\nos.system(\"/usr/lib/postfix/post-install meta_directory=/etc/postfix create-missing\")\nos.system(\"/usr/lib/postfix/master &\")\nos.execv(\"/usr/sbin/rsyslogd\", [\"rsyslogd\", 
\"-n\"])\n", "path": "core/postfix/start.py"}], "after_files": [{"content": "#!/usr/bin/python3\n\nimport os\nimport glob\nimport shutil\nimport multiprocessing\nimport logging as log\nimport sys\nfrom mailustart import resolve, convert\n\nfrom podop import run_server\n\nlog.basicConfig(stream=sys.stderr, level=os.environ.get(\"LOG_LEVEL\", \"WARNING\"))\n\ndef start_podop():\n os.setuid(100)\n url = \"http://\" + os.environ[\"ADMIN_ADDRESS\"] + \"/internal/postfix/\"\n # TODO: Remove verbosity setting from Podop?\n run_server(0, \"postfix\", \"/tmp/podop.socket\", [\n\t\t(\"transport\", \"url\", url + \"transport/\u00a7\"),\n\t\t(\"alias\", \"url\", url + \"alias/\u00a7\"),\n\t\t(\"domain\", \"url\", url + \"domain/\u00a7\"),\n (\"mailbox\", \"url\", url + \"mailbox/\u00a7\"),\n (\"senderaccess\", \"url\", url + \"sender/access/\u00a7\"),\n (\"senderlogin\", \"url\", url + \"sender/login/\u00a7\")\n ])\n\n# Actual startup script\nos.environ[\"FRONT_ADDRESS\"] = resolve(os.environ.get(\"FRONT_ADDRESS\", \"front\"))\nos.environ[\"ADMIN_ADDRESS\"] = resolve(os.environ.get(\"ADMIN_ADDRESS\", \"admin\"))\nos.environ[\"HOST_ANTISPAM\"] = resolve(os.environ.get(\"HOST_ANTISPAM\", \"antispam:11332\"))\nos.environ[\"HOST_LMTP\"] = resolve(os.environ.get(\"HOST_LMTP\", \"imap:2525\"))\n\nfor postfix_file in glob.glob(\"/conf/*.cf\"):\n convert(postfix_file, os.path.join(\"/etc/postfix\", os.path.basename(postfix_file)))\n\nif os.path.exists(\"/overrides/postfix.cf\"):\n for line in open(\"/overrides/postfix.cf\").read().strip().split(\"\\n\"):\n os.system('postconf -e \"{}\"'.format(line))\n\nif os.path.exists(\"/overrides/postfix.master\"):\n for line in open(\"/overrides/postfix.master\").read().strip().split(\"\\n\"):\n os.system('postconf -Me \"{}\"'.format(line))\n\nfor map_file in glob.glob(\"/overrides/*.map\"):\n destination = os.path.join(\"/etc/postfix\", os.path.basename(map_file))\n shutil.copyfile(map_file, destination)\n os.system(\"postmap {}\".format(destination))\n os.remove(destination)\n\nif \"RELAYUSER\" in os.environ:\n path = \"/etc/postfix/sasl_passwd\"\n convert(\"/conf/sasl_passwd\", path)\n os.system(\"postmap {}\".format(path))\n\n# Run Podop and Postfix\nmultiprocessing.Process(target=start_podop).start()\nos.system(\"/usr/libexec/postfix/post-install meta_directory=/etc/postfix create-missing\")\nos.system(\"postfix start-fg\")\n", "path": "core/postfix/start.py"}]} | 1,070 | 206 |
gh_patches_debug_967 | rasdani/github-patches | git_diff | ivy-llc__ivy-16680 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
rad2deg
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ivy/functional/frontends/paddle/tensor/math.py`
Content:
```
1 # global
2 import ivy
3 from ivy.func_wrapper import with_unsupported_dtypes, with_supported_dtypes
4 from ivy.functional.frontends.paddle.func_wrapper import (
5 to_ivy_arrays_and_back,
6 )
7
8
9 @with_unsupported_dtypes({"2.4.2 and below": ("float16", "bfloat16")}, "paddle")
10 @to_ivy_arrays_and_back
11 def sin(x, name=None):
12 return ivy.sin(x)
13
14
15 @with_unsupported_dtypes({"2.4.2 and below": ("float16", "bfloat16")}, "paddle")
16 @to_ivy_arrays_and_back
17 def cos(x, name=None):
18 return ivy.cos(x)
19
20
21 @with_unsupported_dtypes({"2.4.2 and below": ("float16", "bfloat16")}, "paddle")
22 @to_ivy_arrays_and_back
23 def acos(x, name=None):
24 return ivy.acos(x)
25
26
27 @with_unsupported_dtypes({"2.4.2 and below": ("float16", "bfloat16")}, "paddle")
28 @to_ivy_arrays_and_back
29 def cosh(x, name=None):
30 return ivy.cosh(x)
31
32
33 @with_unsupported_dtypes({"2.4.2 and below": ("float16", "bfloat16")}, "paddle")
34 @to_ivy_arrays_and_back
35 def tanh(x, name=None):
36 return ivy.tanh(x)
37
38
39 @with_unsupported_dtypes({"2.4.2 and below": ("float16", "bfloat16")}, "paddle")
40 @to_ivy_arrays_and_back
41 def acosh(x, name=None):
42 return ivy.acosh(x)
43
44
45 @with_supported_dtypes({"2.4.2 and below": ("float32", "float64")}, "paddle")
46 @to_ivy_arrays_and_back
47 def asin(x, name=None):
48 return ivy.asin(x)
49
50
51 @with_unsupported_dtypes({"2.4.2 and below": ("float16", "bfloat16")}, "paddle")
52 @to_ivy_arrays_and_back
53 def log(x, name=None):
54 return ivy.log(x)
55
56
57 @with_unsupported_dtypes({"2.4.2 and below": ("float16", "bfloat16")}, "paddle")
58 @to_ivy_arrays_and_back
59 def divide(x, y, name=None):
60 return ivy.divide(x, y)
61
62
63 @with_unsupported_dtypes({"2.4.2 and below": ("float16", "bfloat16")}, "paddle")
64 @to_ivy_arrays_and_back
65 def abs(x, name=None):
66 return ivy.abs(x)
67
68
69 @with_unsupported_dtypes({"2.4.2 and below": ("float16", "bfloat16")}, "paddle")
70 @to_ivy_arrays_and_back
71 def multiply(x, y, name=None):
72 return ivy.multiply(x, y)
73
74
75 @with_unsupported_dtypes(
76 {"2.4.2 and below": ("bool", "unsigned", "int8", "float16", "bfloat16")}, "paddle"
77 )
78 @to_ivy_arrays_and_back
79 def add(x, y, name=None):
80 return ivy.add(x, y)
81
82
83 @with_unsupported_dtypes({"2.4.2 and below": ("float16", "bfloat16")}, "paddle")
84 @to_ivy_arrays_and_back
85 def subtract(x, y, name=None):
86 return ivy.subtract(x, y)
87
88
89 @with_supported_dtypes({"2.4.2 and below": ("float32", "float64")}, "paddle")
90 @to_ivy_arrays_and_back
91 def sqrt(x, name=None):
92 return ivy.sqrt(x)
93
94
95 @with_unsupported_dtypes({"2.4.2 and below": ("float16", "bfloat16")}, "paddle")
96 @to_ivy_arrays_and_back
97 def atanh(x, name=None):
98 return ivy.atanh(x)
99
100
101 @with_unsupported_dtypes({"2.4.2 and below": ("float16", "bfloat16")}, "paddle")
102 @to_ivy_arrays_and_back
103 def atan(x, name=None):
104 return ivy.atan(x)
105
106
107 @with_unsupported_dtypes({"2.4.2 and below": ("float16", "bfloat16")}, "paddle")
108 @to_ivy_arrays_and_back
109 def round(x, name=None):
110 return ivy.round(x)
111
112
113 @with_unsupported_dtypes({"2.4.2 and below": ("float16", "bfloat16")}, "paddle")
114 @to_ivy_arrays_and_back
115 def ceil(x, name=None):
116 return ivy.ceil(x)
117
118
119 @with_supported_dtypes({"2.4.2 and below": ("float32", "float64")}, "paddle")
120 @to_ivy_arrays_and_back
121 def sinh(x, name=None):
122 return ivy.sinh(x)
123
124
125 @with_unsupported_dtypes({"2.4.2 and below": ("float16", "bfloat16")}, "paddle")
126 @to_ivy_arrays_and_back
127 def pow(x, y, name=None):
128 return ivy.pow(x, y)
129
130
131 @with_unsupported_dtypes({"2.4.2 and below": ("float16", "bfloat16")}, "paddle")
132 @to_ivy_arrays_and_back
133 def floor(x, name=None):
134 return ivy.floor(x)
135
136
137 @with_unsupported_dtypes({"2.4.2 and below": ("float16", "bfloat16")}, "paddle")
138 @to_ivy_arrays_and_back
139 def remainder(x, y, name=None):
140 return ivy.remainder(x, y)
141
142
143 @with_unsupported_dtypes({"2.4.2 and below": ("float16", "bfloat16")}, "paddle")
144 @to_ivy_arrays_and_back
145 def log2(x, name=None):
146 return ivy.log2(x)
147
148
149 @with_unsupported_dtypes({"2.4.2 and below": ("float16", "bfloat16")}, "paddle")
150 @to_ivy_arrays_and_back
151 def log1p(x, name=None):
152 return ivy.log1p(x)
153
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/ivy/functional/frontends/paddle/tensor/math.py b/ivy/functional/frontends/paddle/tensor/math.py
--- a/ivy/functional/frontends/paddle/tensor/math.py
+++ b/ivy/functional/frontends/paddle/tensor/math.py
@@ -150,3 +150,9 @@
@to_ivy_arrays_and_back
def log1p(x, name=None):
return ivy.log1p(x)
+
+
+@with_unsupported_dtypes({"2.4.2 and below": ("float16", "bfloat16")}, "paddle")
+@to_ivy_arrays_and_back
+def rad2deg(x, name=None):
+ return ivy.rad2deg(x)
| {"golden_diff": "diff --git a/ivy/functional/frontends/paddle/tensor/math.py b/ivy/functional/frontends/paddle/tensor/math.py\n--- a/ivy/functional/frontends/paddle/tensor/math.py\n+++ b/ivy/functional/frontends/paddle/tensor/math.py\n@@ -150,3 +150,9 @@\n @to_ivy_arrays_and_back\n def log1p(x, name=None):\n return ivy.log1p(x)\n+\n+\n+@with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n+@to_ivy_arrays_and_back\n+def rad2deg(x, name=None):\n+ return ivy.rad2deg(x)\n", "issue": "rad2deg\n\n", "before_files": [{"content": "# global\nimport ivy\nfrom ivy.func_wrapper import with_unsupported_dtypes, with_supported_dtypes\nfrom ivy.functional.frontends.paddle.func_wrapper import (\n to_ivy_arrays_and_back,\n)\n\n\n@with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef sin(x, name=None):\n return ivy.sin(x)\n\n\n@with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef cos(x, name=None):\n return ivy.cos(x)\n\n\n@with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef acos(x, name=None):\n return ivy.acos(x)\n\n\n@with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef cosh(x, name=None):\n return ivy.cosh(x)\n\n\n@with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef tanh(x, name=None):\n return ivy.tanh(x)\n\n\n@with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef acosh(x, name=None):\n return ivy.acosh(x)\n\n\n@with_supported_dtypes({\"2.4.2 and below\": (\"float32\", \"float64\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef asin(x, name=None):\n return ivy.asin(x)\n\n\n@with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef log(x, name=None):\n return ivy.log(x)\n\n\n@with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef divide(x, y, name=None):\n return ivy.divide(x, y)\n\n\n@with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef abs(x, name=None):\n return ivy.abs(x)\n\n\n@with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef multiply(x, y, name=None):\n return ivy.multiply(x, y)\n\n\n@with_unsupported_dtypes(\n {\"2.4.2 and below\": (\"bool\", \"unsigned\", \"int8\", \"float16\", \"bfloat16\")}, \"paddle\"\n)\n@to_ivy_arrays_and_back\ndef add(x, y, name=None):\n return ivy.add(x, y)\n\n\n@with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef subtract(x, y, name=None):\n return ivy.subtract(x, y)\n\n\n@with_supported_dtypes({\"2.4.2 and below\": (\"float32\", \"float64\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef sqrt(x, name=None):\n return ivy.sqrt(x)\n\n\n@with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef atanh(x, name=None):\n return ivy.atanh(x)\n\n\n@with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef atan(x, name=None):\n return ivy.atan(x)\n\n\n@with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, 
\"paddle\")\n@to_ivy_arrays_and_back\ndef round(x, name=None):\n return ivy.round(x)\n\n\n@with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef ceil(x, name=None):\n return ivy.ceil(x)\n\n\n@with_supported_dtypes({\"2.4.2 and below\": (\"float32\", \"float64\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef sinh(x, name=None):\n return ivy.sinh(x)\n\n\n@with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef pow(x, y, name=None):\n return ivy.pow(x, y)\n\n\n@with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef floor(x, name=None):\n return ivy.floor(x)\n\n\n@with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef remainder(x, y, name=None):\n return ivy.remainder(x, y)\n\n\n@with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef log2(x, name=None):\n return ivy.log2(x)\n\n\n@with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef log1p(x, name=None):\n return ivy.log1p(x)\n", "path": "ivy/functional/frontends/paddle/tensor/math.py"}], "after_files": [{"content": "# global\nimport ivy\nfrom ivy.func_wrapper import with_unsupported_dtypes, with_supported_dtypes\nfrom ivy.functional.frontends.paddle.func_wrapper import (\n to_ivy_arrays_and_back,\n)\n\n\n@with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef sin(x, name=None):\n return ivy.sin(x)\n\n\n@with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef cos(x, name=None):\n return ivy.cos(x)\n\n\n@with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef acos(x, name=None):\n return ivy.acos(x)\n\n\n@with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef cosh(x, name=None):\n return ivy.cosh(x)\n\n\n@with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef tanh(x, name=None):\n return ivy.tanh(x)\n\n\n@with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef acosh(x, name=None):\n return ivy.acosh(x)\n\n\n@with_supported_dtypes({\"2.4.2 and below\": (\"float32\", \"float64\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef asin(x, name=None):\n return ivy.asin(x)\n\n\n@with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef log(x, name=None):\n return ivy.log(x)\n\n\n@with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef divide(x, y, name=None):\n return ivy.divide(x, y)\n\n\n@with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef abs(x, name=None):\n return ivy.abs(x)\n\n\n@with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef multiply(x, y, name=None):\n return ivy.multiply(x, y)\n\n\n@with_unsupported_dtypes(\n {\"2.4.2 and below\": (\"bool\", \"unsigned\", \"int8\", \"float16\", \"bfloat16\")}, \"paddle\"\n)\n@to_ivy_arrays_and_back\ndef add(x, y, 
name=None):\n return ivy.add(x, y)\n\n\n@with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef subtract(x, y, name=None):\n return ivy.subtract(x, y)\n\n\n@with_supported_dtypes({\"2.4.2 and below\": (\"float32\", \"float64\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef sqrt(x, name=None):\n return ivy.sqrt(x)\n\n\n@with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef atanh(x, name=None):\n return ivy.atanh(x)\n\n\n@with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef atan(x, name=None):\n return ivy.atan(x)\n\n\n@with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef round(x, name=None):\n return ivy.round(x)\n\n\n@with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef ceil(x, name=None):\n return ivy.ceil(x)\n\n\n@with_supported_dtypes({\"2.4.2 and below\": (\"float32\", \"float64\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef sinh(x, name=None):\n return ivy.sinh(x)\n\n\n@with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef pow(x, y, name=None):\n return ivy.pow(x, y)\n\n\n@with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef floor(x, name=None):\n return ivy.floor(x)\n\n\n@with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef remainder(x, y, name=None):\n return ivy.remainder(x, y)\n\n\n@with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef log2(x, name=None):\n return ivy.log2(x)\n\n\n@with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef log1p(x, name=None):\n return ivy.log1p(x)\n\n\n@with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef rad2deg(x, name=None):\n return ivy.rad2deg(x)\n", "path": "ivy/functional/frontends/paddle/tensor/math.py"}]} | 2,036 | 163 |
gh_patches_debug_8478 | rasdani/github-patches | git_diff | ocf__ocfweb-535 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
versioned caching is broken
On [this line](https://github.com/ocf/ocfweb/blob/e0ca5b9053abf4908fd6dd3ff344724e61c38b53/ocfweb/environment.py#L15) we use a Marathon environment variable to determine the cache version. We need to change this to use a Kubernetes mechanism instead, a la
https://kubernetes.io/docs/tasks/inject-data-application/environment-variable-expose-pod-information/.
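A minimal sketch of what the Kubernetes-based approach could look like (assuming the deployment injects the version string through an environment variable; the name `OCFWEB_PROD_VERSION` matches the patch further below and is otherwise an assumption):

```python
import os
from functools import lru_cache


@lru_cache()
def ocfweb_version():
    """Return the ocfweb version, or 'dev' outside of prod."""
    # Hypothetical: the Kubernetes deployment is expected to set OCFWEB_PROD_VERSION.
    return os.environ.get('OCFWEB_PROD_VERSION', 'dev')
```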
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ocfweb/environment.py`
Content:
```
1 """Introspection into the current environment."""
2 import os
3 from functools import lru_cache
4
5
6 @lru_cache()
7 def ocfweb_version():
8 """Return string representing ocfweb version.
9
10 In dev, returns 'dev'. In prod, returns a version
11 similar to '2015.12.06.02.25-gitb98c8cb6'.
12 """
13 # On Marathon, read it out of environ
14 try:
15 docker_image, tag = os.environ['MARATHON_APP_DOCKER_IMAGE'].split(':', 1)
16 return tag
17 except KeyError:
18 pass
19
20 # Otherwise, we must be in dev.
21 return 'dev'
22
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/ocfweb/environment.py b/ocfweb/environment.py
--- a/ocfweb/environment.py
+++ b/ocfweb/environment.py
@@ -8,12 +8,11 @@
"""Return string representing ocfweb version.
In dev, returns 'dev'. In prod, returns a version
- similar to '2015.12.06.02.25-gitb98c8cb6'.
+ similar to '2019-08-20-T21-10-57-gite0ca5b9'.
"""
- # On Marathon, read it out of environ
+ # On Kubernetes, read it out of environ
try:
- docker_image, tag = os.environ['MARATHON_APP_DOCKER_IMAGE'].split(':', 1)
- return tag
+ return os.environ['OCFWEB_PROD_VERSION']
except KeyError:
pass
| {"golden_diff": "diff --git a/ocfweb/environment.py b/ocfweb/environment.py\n--- a/ocfweb/environment.py\n+++ b/ocfweb/environment.py\n@@ -8,12 +8,11 @@\n \"\"\"Return string representing ocfweb version.\n \n In dev, returns 'dev'. In prod, returns a version\n- similar to '2015.12.06.02.25-gitb98c8cb6'.\n+ similar to '2019-08-20-T21-10-57-gite0ca5b9'.\n \"\"\"\n- # On Marathon, read it out of environ\n+ # On Kubernetes, read it out of environ\n try:\n- docker_image, tag = os.environ['MARATHON_APP_DOCKER_IMAGE'].split(':', 1)\n- return tag\n+ return os.environ['OCFWEB_PROD_VERSION']\n except KeyError:\n pass\n", "issue": "versioned caching is broken\nOn [this line](https://github.com/ocf/ocfweb/blob/e0ca5b9053abf4908fd6dd3ff344724e61c38b53/ocfweb/environment.py#L15) we use a marathon environment variable to determine the cache version. We need to change this to use a Kubernetes things, a la \r\nhttps://kubernetes.io/docs/tasks/inject-data-application/environment-variable-expose-pod-information/.\n", "before_files": [{"content": "\"\"\"Introspection into the current environment.\"\"\"\nimport os\nfrom functools import lru_cache\n\n\n@lru_cache()\ndef ocfweb_version():\n \"\"\"Return string representing ocfweb version.\n\n In dev, returns 'dev'. In prod, returns a version\n similar to '2015.12.06.02.25-gitb98c8cb6'.\n \"\"\"\n # On Marathon, read it out of environ\n try:\n docker_image, tag = os.environ['MARATHON_APP_DOCKER_IMAGE'].split(':', 1)\n return tag\n except KeyError:\n pass\n\n # Otherwise, we must be in dev.\n return 'dev'\n", "path": "ocfweb/environment.py"}], "after_files": [{"content": "\"\"\"Introspection into the current environment.\"\"\"\nimport os\nfrom functools import lru_cache\n\n\n@lru_cache()\ndef ocfweb_version():\n \"\"\"Return string representing ocfweb version.\n\n In dev, returns 'dev'. In prod, returns a version\n similar to '2019-08-20-T21-10-57-gite0ca5b9'.\n \"\"\"\n # On Kubernetes, read it out of environ\n try:\n return os.environ['OCFWEB_PROD_VERSION']\n except KeyError:\n pass\n\n # Otherwise, we must be in dev.\n return 'dev'\n", "path": "ocfweb/environment.py"}]} | 561 | 213 |
gh_patches_debug_33341 | rasdani/github-patches | git_diff | feast-dev__feast-1814 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Datastore online request makes a call once for each entity
**Is your feature request related to a problem? Please describe.**
There are performance issues with Datastore when fetching features for multiple entities. An individual fetch request is made for each entity
**Describe the solution you'd like**
I would like to have more efficient access to the Datastore for multiple entities. The Python API for Datastore offers a method `get_multi()` which allows fetching multiple keys in one request.
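As a rough sketch, the batched lookup could build all row keys up front and fetch them in a single call (the key path mirrors the one used in `datastore.py` below; the function name and parameters here are illustrative assumptions):

```python
from google.cloud import datastore


def batched_online_read(
    client: datastore.Client, feast_project: str, table_name: str, document_ids: list
):
    # Build every row key up front, then fetch them in a single round trip.
    keys = [
        client.key("Project", feast_project, "Table", table_name, "Row", document_id)
        for document_id in document_ids
    ]
    # Rows that do not exist in Datastore are simply absent from the result.
    return client.get_multi(keys)
```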
**Describe alternatives you've considered**
---
**Additional context**
I have done some basic tests using `get_multi` vs `get`, and it seems that an improvement can be made by using the function `get_multi`.
Here is part of code I used for testing:
```
import numpy as np
import timeit
from google.cloud import datastore
def multi_online_read(num_keys=5):
client = datastore.Client(
project=project_name, namespace=None,
)
feast_project = "feature_repo"
key = client.key("Project", feast_project, "Table", table, "Row", row)
keys = []
response = []
for i in range(num_keys):
keys.append(key)
response = client.get_multi(keys)
return response
def online_read(num_keys=5):
client = datastore.Client(
project=project_name, namespace=None,
)
feast_project = "feature_repo"
key = client.key("Project", feast_project, "Table", table, "Row", row)
response = []
for i in range(num_keys):
response.append(client.get(key))
return response
if __name__ == "__main__":
results = timeit.repeat(online_read, repeat=10, number=1)
print(results)
print(np.mean(results))
results = timeit.repeat(multi_online_read, repeat=10, number=1)
print(results)
print(np.mean(results))
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `sdk/python/feast/infra/online_stores/datastore.py`
Content:
```
1 # Copyright 2021 The Feast Authors
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # https://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 import itertools
15 from datetime import datetime
16 from multiprocessing.pool import ThreadPool
17 from typing import Any, Callable, Dict, Iterator, List, Optional, Sequence, Tuple, Union
18
19 from pydantic import PositiveInt, StrictStr
20 from pydantic.typing import Literal
21
22 from feast import Entity, FeatureTable, utils
23 from feast.feature_view import FeatureView
24 from feast.infra.online_stores.helpers import compute_entity_id
25 from feast.infra.online_stores.online_store import OnlineStore
26 from feast.protos.feast.types.EntityKey_pb2 import EntityKey as EntityKeyProto
27 from feast.protos.feast.types.Value_pb2 import Value as ValueProto
28 from feast.repo_config import FeastConfigBaseModel, RepoConfig
29
30 try:
31 from google.auth.exceptions import DefaultCredentialsError
32 from google.cloud import datastore
33 except ImportError as e:
34 from feast.errors import FeastExtrasDependencyImportError, FeastProviderLoginError
35
36 raise FeastExtrasDependencyImportError("gcp", str(e))
37
38
39 ProtoBatch = Sequence[
40 Tuple[EntityKeyProto, Dict[str, ValueProto], datetime, Optional[datetime]]
41 ]
42
43
44 class DatastoreOnlineStoreConfig(FeastConfigBaseModel):
45 """ Online store config for GCP Datastore """
46
47 type: Literal["datastore"] = "datastore"
48 """ Online store type selector"""
49
50 project_id: Optional[StrictStr] = None
51 """ (optional) GCP Project Id """
52
53 namespace: Optional[StrictStr] = None
54 """ (optional) Datastore namespace """
55
56 write_concurrency: Optional[PositiveInt] = 40
57 """ (optional) Amount of threads to use when writing batches of feature rows into Datastore"""
58
59 write_batch_size: Optional[PositiveInt] = 50
60 """ (optional) Amount of feature rows per batch being written into Datastore"""
61
62
63 class DatastoreOnlineStore(OnlineStore):
64 """
65 OnlineStore is an object used for all interaction between Feast and the service used for offline storage of
66 features.
67 """
68
69 _client: Optional[datastore.Client] = None
70
71 def update(
72 self,
73 config: RepoConfig,
74 tables_to_delete: Sequence[Union[FeatureTable, FeatureView]],
75 tables_to_keep: Sequence[Union[FeatureTable, FeatureView]],
76 entities_to_delete: Sequence[Entity],
77 entities_to_keep: Sequence[Entity],
78 partial: bool,
79 ):
80 """
81 """
82 online_config = config.online_store
83 assert isinstance(online_config, DatastoreOnlineStoreConfig)
84 client = self._get_client(online_config)
85 feast_project = config.project
86
87 for table in tables_to_keep:
88 key = client.key("Project", feast_project, "Table", table.name)
89 entity = datastore.Entity(
90 key=key, exclude_from_indexes=("created_ts", "event_ts", "values")
91 )
92 entity.update({"created_ts": datetime.utcnow()})
93 client.put(entity)
94
95 for table in tables_to_delete:
96 _delete_all_values(
97 client, client.key("Project", feast_project, "Table", table.name)
98 )
99
100 # Delete the table metadata datastore entity
101 key = client.key("Project", feast_project, "Table", table.name)
102 client.delete(key)
103
104 def teardown(
105 self,
106 config: RepoConfig,
107 tables: Sequence[Union[FeatureTable, FeatureView]],
108 entities: Sequence[Entity],
109 ):
110 """
111 There's currently no teardown done for Datastore.
112 """
113 online_config = config.online_store
114 assert isinstance(online_config, DatastoreOnlineStoreConfig)
115 client = self._get_client(online_config)
116 feast_project = config.project
117
118 for table in tables:
119 _delete_all_values(
120 client, client.key("Project", feast_project, "Table", table.name)
121 )
122
123 # Delete the table metadata datastore entity
124 key = client.key("Project", feast_project, "Table", table.name)
125 client.delete(key)
126
127 def _get_client(self, online_config: DatastoreOnlineStoreConfig):
128
129 if not self._client:
130 try:
131 self._client = datastore.Client(
132 project=online_config.project_id, namespace=online_config.namespace,
133 )
134 except DefaultCredentialsError as e:
135 raise FeastProviderLoginError(
136 str(e)
137 + '\nIt may be necessary to run "gcloud auth application-default login" if you would like to use your '
138 "local Google Cloud account "
139 )
140 return self._client
141
142 def online_write_batch(
143 self,
144 config: RepoConfig,
145 table: Union[FeatureTable, FeatureView],
146 data: List[
147 Tuple[EntityKeyProto, Dict[str, ValueProto], datetime, Optional[datetime]]
148 ],
149 progress: Optional[Callable[[int], Any]],
150 ) -> None:
151
152 online_config = config.online_store
153 assert isinstance(online_config, DatastoreOnlineStoreConfig)
154 client = self._get_client(online_config)
155
156 write_concurrency = online_config.write_concurrency
157 write_batch_size = online_config.write_batch_size
158 feast_project = config.project
159
160 pool = ThreadPool(processes=write_concurrency)
161 pool.map(
162 lambda b: self._write_minibatch(client, feast_project, table, b, progress),
163 self._to_minibatches(data, batch_size=write_batch_size),
164 )
165
166 @staticmethod
167 def _to_minibatches(data: ProtoBatch, batch_size) -> Iterator[ProtoBatch]:
168 """
169 Split data into minibatches, making sure we stay under GCP datastore transaction size
170 limits.
171 """
172 iterable = iter(data)
173
174 while True:
175 batch = list(itertools.islice(iterable, batch_size))
176 if len(batch) > 0:
177 yield batch
178 else:
179 break
180
181 @staticmethod
182 def _write_minibatch(
183 client,
184 project: str,
185 table: Union[FeatureTable, FeatureView],
186 data: Sequence[
187 Tuple[EntityKeyProto, Dict[str, ValueProto], datetime, Optional[datetime]]
188 ],
189 progress: Optional[Callable[[int], Any]],
190 ):
191 entities = []
192 for entity_key, features, timestamp, created_ts in data:
193 document_id = compute_entity_id(entity_key)
194
195 key = client.key(
196 "Project", project, "Table", table.name, "Row", document_id,
197 )
198
199 entity = datastore.Entity(
200 key=key, exclude_from_indexes=("created_ts", "event_ts", "values")
201 )
202
203 entity.update(
204 dict(
205 key=entity_key.SerializeToString(),
206 values={k: v.SerializeToString() for k, v in features.items()},
207 event_ts=utils.make_tzaware(timestamp),
208 created_ts=(
209 utils.make_tzaware(created_ts)
210 if created_ts is not None
211 else None
212 ),
213 )
214 )
215 entities.append(entity)
216 with client.transaction():
217 client.put_multi(entities)
218
219 if progress:
220 progress(len(entities))
221
222 def online_read(
223 self,
224 config: RepoConfig,
225 table: Union[FeatureTable, FeatureView],
226 entity_keys: List[EntityKeyProto],
227 requested_features: Optional[List[str]] = None,
228 ) -> List[Tuple[Optional[datetime], Optional[Dict[str, ValueProto]]]]:
229
230 online_config = config.online_store
231 assert isinstance(online_config, DatastoreOnlineStoreConfig)
232 client = self._get_client(online_config)
233
234 feast_project = config.project
235
236 result: List[Tuple[Optional[datetime], Optional[Dict[str, ValueProto]]]] = []
237 for entity_key in entity_keys:
238 document_id = compute_entity_id(entity_key)
239 key = client.key(
240 "Project", feast_project, "Table", table.name, "Row", document_id
241 )
242 value = client.get(key)
243 if value is not None:
244 res = {}
245 for feature_name, value_bin in value["values"].items():
246 val = ValueProto()
247 val.ParseFromString(value_bin)
248 res[feature_name] = val
249 result.append((value["event_ts"], res))
250 else:
251 result.append((None, None))
252 return result
253
254
255 def _delete_all_values(client, key) -> None:
256 """
257 Delete all data under the key path in datastore.
258 """
259 while True:
260 query = client.query(kind="Row", ancestor=key)
261 entities = list(query.fetch(limit=1000))
262 if not entities:
263 return
264
265 for entity in entities:
266 client.delete(entity.key)
267
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/sdk/python/feast/infra/online_stores/datastore.py b/sdk/python/feast/infra/online_stores/datastore.py
--- a/sdk/python/feast/infra/online_stores/datastore.py
+++ b/sdk/python/feast/infra/online_stores/datastore.py
@@ -30,6 +30,7 @@
try:
from google.auth.exceptions import DefaultCredentialsError
from google.cloud import datastore
+ from google.cloud.datastore.client import Key
except ImportError as e:
from feast.errors import FeastExtrasDependencyImportError, FeastProviderLoginError
@@ -233,22 +234,32 @@
feast_project = config.project
+ keys: List[Key] = []
result: List[Tuple[Optional[datetime], Optional[Dict[str, ValueProto]]]] = []
for entity_key in entity_keys:
document_id = compute_entity_id(entity_key)
key = client.key(
"Project", feast_project, "Table", table.name, "Row", document_id
)
- value = client.get(key)
- if value is not None:
+ keys.append(key)
+
+ values = client.get_multi(keys)
+
+ if values is not None:
+ keys_missing_from_response = set(keys) - set([v.key for v in values])
+ values = sorted(values, key=lambda v: keys.index(v.key))
+ for value in values:
res = {}
for feature_name, value_bin in value["values"].items():
val = ValueProto()
val.ParseFromString(value_bin)
res[feature_name] = val
result.append((value["event_ts"], res))
- else:
- result.append((None, None))
+ for missing_key_idx in sorted(
+ [keys.index(k) for k in keys_missing_from_response]
+ ):
+ result.insert(missing_key_idx, (None, None))
+
return result
| {"golden_diff": "diff --git a/sdk/python/feast/infra/online_stores/datastore.py b/sdk/python/feast/infra/online_stores/datastore.py\n--- a/sdk/python/feast/infra/online_stores/datastore.py\n+++ b/sdk/python/feast/infra/online_stores/datastore.py\n@@ -30,6 +30,7 @@\n try:\n from google.auth.exceptions import DefaultCredentialsError\n from google.cloud import datastore\n+ from google.cloud.datastore.client import Key\n except ImportError as e:\n from feast.errors import FeastExtrasDependencyImportError, FeastProviderLoginError\n \n@@ -233,22 +234,32 @@\n \n feast_project = config.project\n \n+ keys: List[Key] = []\n result: List[Tuple[Optional[datetime], Optional[Dict[str, ValueProto]]]] = []\n for entity_key in entity_keys:\n document_id = compute_entity_id(entity_key)\n key = client.key(\n \"Project\", feast_project, \"Table\", table.name, \"Row\", document_id\n )\n- value = client.get(key)\n- if value is not None:\n+ keys.append(key)\n+\n+ values = client.get_multi(keys)\n+\n+ if values is not None:\n+ keys_missing_from_response = set(keys) - set([v.key for v in values])\n+ values = sorted(values, key=lambda v: keys.index(v.key))\n+ for value in values:\n res = {}\n for feature_name, value_bin in value[\"values\"].items():\n val = ValueProto()\n val.ParseFromString(value_bin)\n res[feature_name] = val\n result.append((value[\"event_ts\"], res))\n- else:\n- result.append((None, None))\n+ for missing_key_idx in sorted(\n+ [keys.index(k) for k in keys_missing_from_response]\n+ ):\n+ result.insert(missing_key_idx, (None, None))\n+\n return result\n", "issue": "Datastore online request makes a call once for each entity\n**Is your feature request related to a problem? Please describe.**\r\nThere are performance issues with Datastore when fetching features for multiple entities. An individual fetch request is made for each entity \r\n\r\n**Describe the solution you'd like**\r\nI would like to have a more efficient access to the Datastore for multiple entities. 
The python api for datastore offers a method `get_multi()` which allows to fetch multiple keys in one request.\r\n\r\n**Describe alternatives you've considered**\r\n---\r\n\r\n**Additional context**\r\n\r\nI have done some basic tests using the `get_multi` vs get and it seems that a improvement can be made by using the function `get_multi`\r\n\r\nHere is part of code I used for testing:\r\n\r\n```\r\nimport numpy as np\r\nimport timeit\r\nfrom google.cloud import datastore\r\n\r\ndef multi_online_read(num_keys=5):\r\n\r\n client = datastore.Client(\r\n project=project_name, namespace=None,\r\n )\r\n \r\n feast_project = \"feature_repo\"\r\n key = client.key(\"Project\", feast_project, \"Table\", table, \"Row\", row)\r\n keys = []\r\n response = []\r\n for i in range(num_keys):\r\n keys.append(key)\r\n\r\n response = client.get_multi(keys)\r\n\r\n return response\r\n\r\ndef online_read(num_keys=5):\r\n client = datastore.Client(\r\n project=project_name, namespace=None,\r\n )\r\n \r\n feast_project = \"feature_repo\"\r\n key = client.key(\"Project\", feast_project, \"Table\", table, \"Row\", row)\r\n \r\n response = []\r\n for i in range(num_keys):\r\n response.append(client.get(key))\r\n\r\n\r\n return response\r\n\r\nif __name__ == \"__main__\":\r\n\r\n results = timeit.repeat(online_read, repeat=10, number=1)\r\n print(results)\r\n print(np.mean(results))\r\n\r\n results = timeit.repeat(multi_online_read, repeat=10, number=1)\r\n print(results)\r\n print(np.mean(results))\r\n```\r\n\r\n\r\n\r\n\r\n\n", "before_files": [{"content": "# Copyright 2021 The Feast Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport itertools\nfrom datetime import datetime\nfrom multiprocessing.pool import ThreadPool\nfrom typing import Any, Callable, Dict, Iterator, List, Optional, Sequence, Tuple, Union\n\nfrom pydantic import PositiveInt, StrictStr\nfrom pydantic.typing import Literal\n\nfrom feast import Entity, FeatureTable, utils\nfrom feast.feature_view import FeatureView\nfrom feast.infra.online_stores.helpers import compute_entity_id\nfrom feast.infra.online_stores.online_store import OnlineStore\nfrom feast.protos.feast.types.EntityKey_pb2 import EntityKey as EntityKeyProto\nfrom feast.protos.feast.types.Value_pb2 import Value as ValueProto\nfrom feast.repo_config import FeastConfigBaseModel, RepoConfig\n\ntry:\n from google.auth.exceptions import DefaultCredentialsError\n from google.cloud import datastore\nexcept ImportError as e:\n from feast.errors import FeastExtrasDependencyImportError, FeastProviderLoginError\n\n raise FeastExtrasDependencyImportError(\"gcp\", str(e))\n\n\nProtoBatch = Sequence[\n Tuple[EntityKeyProto, Dict[str, ValueProto], datetime, Optional[datetime]]\n]\n\n\nclass DatastoreOnlineStoreConfig(FeastConfigBaseModel):\n \"\"\" Online store config for GCP Datastore \"\"\"\n\n type: Literal[\"datastore\"] = \"datastore\"\n \"\"\" Online store type selector\"\"\"\n\n project_id: Optional[StrictStr] = None\n \"\"\" (optional) GCP Project Id \"\"\"\n\n namespace: 
Optional[StrictStr] = None\n \"\"\" (optional) Datastore namespace \"\"\"\n\n write_concurrency: Optional[PositiveInt] = 40\n \"\"\" (optional) Amount of threads to use when writing batches of feature rows into Datastore\"\"\"\n\n write_batch_size: Optional[PositiveInt] = 50\n \"\"\" (optional) Amount of feature rows per batch being written into Datastore\"\"\"\n\n\nclass DatastoreOnlineStore(OnlineStore):\n \"\"\"\n OnlineStore is an object used for all interaction between Feast and the service used for offline storage of\n features.\n \"\"\"\n\n _client: Optional[datastore.Client] = None\n\n def update(\n self,\n config: RepoConfig,\n tables_to_delete: Sequence[Union[FeatureTable, FeatureView]],\n tables_to_keep: Sequence[Union[FeatureTable, FeatureView]],\n entities_to_delete: Sequence[Entity],\n entities_to_keep: Sequence[Entity],\n partial: bool,\n ):\n \"\"\"\n \"\"\"\n online_config = config.online_store\n assert isinstance(online_config, DatastoreOnlineStoreConfig)\n client = self._get_client(online_config)\n feast_project = config.project\n\n for table in tables_to_keep:\n key = client.key(\"Project\", feast_project, \"Table\", table.name)\n entity = datastore.Entity(\n key=key, exclude_from_indexes=(\"created_ts\", \"event_ts\", \"values\")\n )\n entity.update({\"created_ts\": datetime.utcnow()})\n client.put(entity)\n\n for table in tables_to_delete:\n _delete_all_values(\n client, client.key(\"Project\", feast_project, \"Table\", table.name)\n )\n\n # Delete the table metadata datastore entity\n key = client.key(\"Project\", feast_project, \"Table\", table.name)\n client.delete(key)\n\n def teardown(\n self,\n config: RepoConfig,\n tables: Sequence[Union[FeatureTable, FeatureView]],\n entities: Sequence[Entity],\n ):\n \"\"\"\n There's currently no teardown done for Datastore.\n \"\"\"\n online_config = config.online_store\n assert isinstance(online_config, DatastoreOnlineStoreConfig)\n client = self._get_client(online_config)\n feast_project = config.project\n\n for table in tables:\n _delete_all_values(\n client, client.key(\"Project\", feast_project, \"Table\", table.name)\n )\n\n # Delete the table metadata datastore entity\n key = client.key(\"Project\", feast_project, \"Table\", table.name)\n client.delete(key)\n\n def _get_client(self, online_config: DatastoreOnlineStoreConfig):\n\n if not self._client:\n try:\n self._client = datastore.Client(\n project=online_config.project_id, namespace=online_config.namespace,\n )\n except DefaultCredentialsError as e:\n raise FeastProviderLoginError(\n str(e)\n + '\\nIt may be necessary to run \"gcloud auth application-default login\" if you would like to use your '\n \"local Google Cloud account \"\n )\n return self._client\n\n def online_write_batch(\n self,\n config: RepoConfig,\n table: Union[FeatureTable, FeatureView],\n data: List[\n Tuple[EntityKeyProto, Dict[str, ValueProto], datetime, Optional[datetime]]\n ],\n progress: Optional[Callable[[int], Any]],\n ) -> None:\n\n online_config = config.online_store\n assert isinstance(online_config, DatastoreOnlineStoreConfig)\n client = self._get_client(online_config)\n\n write_concurrency = online_config.write_concurrency\n write_batch_size = online_config.write_batch_size\n feast_project = config.project\n\n pool = ThreadPool(processes=write_concurrency)\n pool.map(\n lambda b: self._write_minibatch(client, feast_project, table, b, progress),\n self._to_minibatches(data, batch_size=write_batch_size),\n )\n\n @staticmethod\n def _to_minibatches(data: ProtoBatch, batch_size) -> 
Iterator[ProtoBatch]:\n \"\"\"\n Split data into minibatches, making sure we stay under GCP datastore transaction size\n limits.\n \"\"\"\n iterable = iter(data)\n\n while True:\n batch = list(itertools.islice(iterable, batch_size))\n if len(batch) > 0:\n yield batch\n else:\n break\n\n @staticmethod\n def _write_minibatch(\n client,\n project: str,\n table: Union[FeatureTable, FeatureView],\n data: Sequence[\n Tuple[EntityKeyProto, Dict[str, ValueProto], datetime, Optional[datetime]]\n ],\n progress: Optional[Callable[[int], Any]],\n ):\n entities = []\n for entity_key, features, timestamp, created_ts in data:\n document_id = compute_entity_id(entity_key)\n\n key = client.key(\n \"Project\", project, \"Table\", table.name, \"Row\", document_id,\n )\n\n entity = datastore.Entity(\n key=key, exclude_from_indexes=(\"created_ts\", \"event_ts\", \"values\")\n )\n\n entity.update(\n dict(\n key=entity_key.SerializeToString(),\n values={k: v.SerializeToString() for k, v in features.items()},\n event_ts=utils.make_tzaware(timestamp),\n created_ts=(\n utils.make_tzaware(created_ts)\n if created_ts is not None\n else None\n ),\n )\n )\n entities.append(entity)\n with client.transaction():\n client.put_multi(entities)\n\n if progress:\n progress(len(entities))\n\n def online_read(\n self,\n config: RepoConfig,\n table: Union[FeatureTable, FeatureView],\n entity_keys: List[EntityKeyProto],\n requested_features: Optional[List[str]] = None,\n ) -> List[Tuple[Optional[datetime], Optional[Dict[str, ValueProto]]]]:\n\n online_config = config.online_store\n assert isinstance(online_config, DatastoreOnlineStoreConfig)\n client = self._get_client(online_config)\n\n feast_project = config.project\n\n result: List[Tuple[Optional[datetime], Optional[Dict[str, ValueProto]]]] = []\n for entity_key in entity_keys:\n document_id = compute_entity_id(entity_key)\n key = client.key(\n \"Project\", feast_project, \"Table\", table.name, \"Row\", document_id\n )\n value = client.get(key)\n if value is not None:\n res = {}\n for feature_name, value_bin in value[\"values\"].items():\n val = ValueProto()\n val.ParseFromString(value_bin)\n res[feature_name] = val\n result.append((value[\"event_ts\"], res))\n else:\n result.append((None, None))\n return result\n\n\ndef _delete_all_values(client, key) -> None:\n \"\"\"\n Delete all data under the key path in datastore.\n \"\"\"\n while True:\n query = client.query(kind=\"Row\", ancestor=key)\n entities = list(query.fetch(limit=1000))\n if not entities:\n return\n\n for entity in entities:\n client.delete(entity.key)\n", "path": "sdk/python/feast/infra/online_stores/datastore.py"}], "after_files": [{"content": "# Copyright 2021 The Feast Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport itertools\nfrom datetime import datetime\nfrom multiprocessing.pool import ThreadPool\nfrom typing import Any, Callable, Dict, Iterator, List, Optional, Sequence, Tuple, Union\n\nfrom pydantic import PositiveInt, StrictStr\nfrom pydantic.typing import Literal\n\nfrom feast import 
Entity, FeatureTable, utils\nfrom feast.feature_view import FeatureView\nfrom feast.infra.online_stores.helpers import compute_entity_id\nfrom feast.infra.online_stores.online_store import OnlineStore\nfrom feast.protos.feast.types.EntityKey_pb2 import EntityKey as EntityKeyProto\nfrom feast.protos.feast.types.Value_pb2 import Value as ValueProto\nfrom feast.repo_config import FeastConfigBaseModel, RepoConfig\n\ntry:\n from google.auth.exceptions import DefaultCredentialsError\n from google.cloud import datastore\n from google.cloud.datastore.client import Key\nexcept ImportError as e:\n from feast.errors import FeastExtrasDependencyImportError, FeastProviderLoginError\n\n raise FeastExtrasDependencyImportError(\"gcp\", str(e))\n\n\nProtoBatch = Sequence[\n Tuple[EntityKeyProto, Dict[str, ValueProto], datetime, Optional[datetime]]\n]\n\n\nclass DatastoreOnlineStoreConfig(FeastConfigBaseModel):\n \"\"\" Online store config for GCP Datastore \"\"\"\n\n type: Literal[\"datastore\"] = \"datastore\"\n \"\"\" Online store type selector\"\"\"\n\n project_id: Optional[StrictStr] = None\n \"\"\" (optional) GCP Project Id \"\"\"\n\n namespace: Optional[StrictStr] = None\n \"\"\" (optional) Datastore namespace \"\"\"\n\n write_concurrency: Optional[PositiveInt] = 40\n \"\"\" (optional) Amount of threads to use when writing batches of feature rows into Datastore\"\"\"\n\n write_batch_size: Optional[PositiveInt] = 50\n \"\"\" (optional) Amount of feature rows per batch being written into Datastore\"\"\"\n\n\nclass DatastoreOnlineStore(OnlineStore):\n \"\"\"\n OnlineStore is an object used for all interaction between Feast and the service used for offline storage of\n features.\n \"\"\"\n\n _client: Optional[datastore.Client] = None\n\n def update(\n self,\n config: RepoConfig,\n tables_to_delete: Sequence[Union[FeatureTable, FeatureView]],\n tables_to_keep: Sequence[Union[FeatureTable, FeatureView]],\n entities_to_delete: Sequence[Entity],\n entities_to_keep: Sequence[Entity],\n partial: bool,\n ):\n \"\"\"\n \"\"\"\n online_config = config.online_store\n assert isinstance(online_config, DatastoreOnlineStoreConfig)\n client = self._get_client(online_config)\n feast_project = config.project\n\n for table in tables_to_keep:\n key = client.key(\"Project\", feast_project, \"Table\", table.name)\n entity = datastore.Entity(\n key=key, exclude_from_indexes=(\"created_ts\", \"event_ts\", \"values\")\n )\n entity.update({\"created_ts\": datetime.utcnow()})\n client.put(entity)\n\n for table in tables_to_delete:\n _delete_all_values(\n client, client.key(\"Project\", feast_project, \"Table\", table.name)\n )\n\n # Delete the table metadata datastore entity\n key = client.key(\"Project\", feast_project, \"Table\", table.name)\n client.delete(key)\n\n def teardown(\n self,\n config: RepoConfig,\n tables: Sequence[Union[FeatureTable, FeatureView]],\n entities: Sequence[Entity],\n ):\n \"\"\"\n There's currently no teardown done for Datastore.\n \"\"\"\n online_config = config.online_store\n assert isinstance(online_config, DatastoreOnlineStoreConfig)\n client = self._get_client(online_config)\n feast_project = config.project\n\n for table in tables:\n _delete_all_values(\n client, client.key(\"Project\", feast_project, \"Table\", table.name)\n )\n\n # Delete the table metadata datastore entity\n key = client.key(\"Project\", feast_project, \"Table\", table.name)\n client.delete(key)\n\n def _get_client(self, online_config: DatastoreOnlineStoreConfig):\n\n if not self._client:\n try:\n self._client = 
datastore.Client(\n project=online_config.project_id, namespace=online_config.namespace,\n )\n except DefaultCredentialsError as e:\n raise FeastProviderLoginError(\n str(e)\n + '\\nIt may be necessary to run \"gcloud auth application-default login\" if you would like to use your '\n \"local Google Cloud account \"\n )\n return self._client\n\n def online_write_batch(\n self,\n config: RepoConfig,\n table: Union[FeatureTable, FeatureView],\n data: List[\n Tuple[EntityKeyProto, Dict[str, ValueProto], datetime, Optional[datetime]]\n ],\n progress: Optional[Callable[[int], Any]],\n ) -> None:\n\n online_config = config.online_store\n assert isinstance(online_config, DatastoreOnlineStoreConfig)\n client = self._get_client(online_config)\n\n write_concurrency = online_config.write_concurrency\n write_batch_size = online_config.write_batch_size\n feast_project = config.project\n\n pool = ThreadPool(processes=write_concurrency)\n pool.map(\n lambda b: self._write_minibatch(client, feast_project, table, b, progress),\n self._to_minibatches(data, batch_size=write_batch_size),\n )\n\n @staticmethod\n def _to_minibatches(data: ProtoBatch, batch_size) -> Iterator[ProtoBatch]:\n \"\"\"\n Split data into minibatches, making sure we stay under GCP datastore transaction size\n limits.\n \"\"\"\n iterable = iter(data)\n\n while True:\n batch = list(itertools.islice(iterable, batch_size))\n if len(batch) > 0:\n yield batch\n else:\n break\n\n @staticmethod\n def _write_minibatch(\n client,\n project: str,\n table: Union[FeatureTable, FeatureView],\n data: Sequence[\n Tuple[EntityKeyProto, Dict[str, ValueProto], datetime, Optional[datetime]]\n ],\n progress: Optional[Callable[[int], Any]],\n ):\n entities = []\n for entity_key, features, timestamp, created_ts in data:\n document_id = compute_entity_id(entity_key)\n\n key = client.key(\n \"Project\", project, \"Table\", table.name, \"Row\", document_id,\n )\n\n entity = datastore.Entity(\n key=key, exclude_from_indexes=(\"created_ts\", \"event_ts\", \"values\")\n )\n\n entity.update(\n dict(\n key=entity_key.SerializeToString(),\n values={k: v.SerializeToString() for k, v in features.items()},\n event_ts=utils.make_tzaware(timestamp),\n created_ts=(\n utils.make_tzaware(created_ts)\n if created_ts is not None\n else None\n ),\n )\n )\n entities.append(entity)\n with client.transaction():\n client.put_multi(entities)\n\n if progress:\n progress(len(entities))\n\n def online_read(\n self,\n config: RepoConfig,\n table: Union[FeatureTable, FeatureView],\n entity_keys: List[EntityKeyProto],\n requested_features: Optional[List[str]] = None,\n ) -> List[Tuple[Optional[datetime], Optional[Dict[str, ValueProto]]]]:\n\n online_config = config.online_store\n assert isinstance(online_config, DatastoreOnlineStoreConfig)\n client = self._get_client(online_config)\n\n feast_project = config.project\n\n keys: List[Key] = []\n result: List[Tuple[Optional[datetime], Optional[Dict[str, ValueProto]]]] = []\n for entity_key in entity_keys:\n document_id = compute_entity_id(entity_key)\n key = client.key(\n \"Project\", feast_project, \"Table\", table.name, \"Row\", document_id\n )\n keys.append(key)\n\n values = client.get_multi(keys)\n\n if values is not None:\n keys_missing_from_response = set(keys) - set([v.key for v in values])\n values = sorted(values, key=lambda v: keys.index(v.key))\n for value in values:\n res = {}\n for feature_name, value_bin in value[\"values\"].items():\n val = ValueProto()\n val.ParseFromString(value_bin)\n res[feature_name] = val\n 
result.append((value[\"event_ts\"], res))\n for missing_key_idx in sorted(\n [keys.index(k) for k in keys_missing_from_response]\n ):\n result.insert(missing_key_idx, (None, None))\n\n return result\n\n\ndef _delete_all_values(client, key) -> None:\n \"\"\"\n Delete all data under the key path in datastore.\n \"\"\"\n while True:\n query = client.query(kind=\"Row\", ancestor=key)\n entities = list(query.fetch(limit=1000))\n if not entities:\n return\n\n for entity in entities:\n client.delete(entity.key)\n", "path": "sdk/python/feast/infra/online_stores/datastore.py"}]} | 3,332 | 426 |
gh_patches_debug_6883 | rasdani/github-patches | git_diff | shuup__shuup-1573 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Admin: Fix category name in category edit view
Do not show the category name with its full hierarchy in the breadcrumbs and the page title

--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `shuup/admin/modules/categories/views/edit.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 # This file is part of Shuup.
3 #
4 # Copyright (c) 2012-2018, Shuup Inc. All rights reserved.
5 #
6 # This source code is licensed under the OSL-3.0 license found in the
7 # LICENSE file in the root directory of this source tree.
8 from django.core.urlresolvers import reverse_lazy
9
10 from shuup.admin.form_part import FormPartsViewMixin, SaveFormPartsMixin
11 from shuup.admin.modules.categories.form_parts import (
12 CategoryBaseFormPart, CategoryProductFormPart
13 )
14 from shuup.admin.shop_provider import get_shop
15 from shuup.admin.toolbar import get_default_edit_toolbar
16 from shuup.admin.utils.tour import is_tour_complete
17 from shuup.admin.utils.views import CreateOrUpdateView
18 from shuup.core.models import Category
19
20
21 class CategoryEditView(SaveFormPartsMixin, FormPartsViewMixin, CreateOrUpdateView):
22 model = Category
23 template_name = "shuup/admin/categories/edit.jinja"
24 context_object_name = "category"
25 base_form_part_classes = [CategoryBaseFormPart, CategoryProductFormPart]
26 form_part_class_provide_key = "admin_category_form_part"
27
28 def get_toolbar(self):
29 save_form_id = self.get_save_form_id()
30 object = self.get_object()
31 delete_url = reverse_lazy("shuup_admin:category.delete", kwargs={"pk": object.pk}) if object.pk else None
32 return get_default_edit_toolbar(self, save_form_id, delete_url=delete_url)
33
34 def get_context_data(self, **kwargs):
35 context = super(CategoryEditView, self).get_context_data(**kwargs)
36 context["tour_key"] = "category"
37 context["tour_complete"] = is_tour_complete(get_shop(self.request), "category")
38 return context
39
40 def form_valid(self, form):
41 return self.save_form_parts(form)
42
43 def get_queryset(self):
44 return Category.objects.all_except_deleted(shop=get_shop(self.request))
45
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/shuup/admin/modules/categories/views/edit.py b/shuup/admin/modules/categories/views/edit.py
--- a/shuup/admin/modules/categories/views/edit.py
+++ b/shuup/admin/modules/categories/views/edit.py
@@ -35,6 +35,9 @@
context = super(CategoryEditView, self).get_context_data(**kwargs)
context["tour_key"] = "category"
context["tour_complete"] = is_tour_complete(get_shop(self.request), "category")
+ if self.object.pk:
+ context["title"] = self.object.name
+
return context
def form_valid(self, form):
| {"golden_diff": "diff --git a/shuup/admin/modules/categories/views/edit.py b/shuup/admin/modules/categories/views/edit.py\n--- a/shuup/admin/modules/categories/views/edit.py\n+++ b/shuup/admin/modules/categories/views/edit.py\n@@ -35,6 +35,9 @@\n context = super(CategoryEditView, self).get_context_data(**kwargs)\n context[\"tour_key\"] = \"category\"\n context[\"tour_complete\"] = is_tour_complete(get_shop(self.request), \"category\")\n+ if self.object.pk:\n+ context[\"title\"] = self.object.name\n+\n return context\n \n def form_valid(self, form):\n", "issue": "Admin: Fix category name in category edit view\nDo not show the category with hierarchy on breadcrumbs and title\r\n\r\n\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# This file is part of Shuup.\n#\n# Copyright (c) 2012-2018, Shuup Inc. All rights reserved.\n#\n# This source code is licensed under the OSL-3.0 license found in the\n# LICENSE file in the root directory of this source tree.\nfrom django.core.urlresolvers import reverse_lazy\n\nfrom shuup.admin.form_part import FormPartsViewMixin, SaveFormPartsMixin\nfrom shuup.admin.modules.categories.form_parts import (\n CategoryBaseFormPart, CategoryProductFormPart\n)\nfrom shuup.admin.shop_provider import get_shop\nfrom shuup.admin.toolbar import get_default_edit_toolbar\nfrom shuup.admin.utils.tour import is_tour_complete\nfrom shuup.admin.utils.views import CreateOrUpdateView\nfrom shuup.core.models import Category\n\n\nclass CategoryEditView(SaveFormPartsMixin, FormPartsViewMixin, CreateOrUpdateView):\n model = Category\n template_name = \"shuup/admin/categories/edit.jinja\"\n context_object_name = \"category\"\n base_form_part_classes = [CategoryBaseFormPart, CategoryProductFormPart]\n form_part_class_provide_key = \"admin_category_form_part\"\n\n def get_toolbar(self):\n save_form_id = self.get_save_form_id()\n object = self.get_object()\n delete_url = reverse_lazy(\"shuup_admin:category.delete\", kwargs={\"pk\": object.pk}) if object.pk else None\n return get_default_edit_toolbar(self, save_form_id, delete_url=delete_url)\n\n def get_context_data(self, **kwargs):\n context = super(CategoryEditView, self).get_context_data(**kwargs)\n context[\"tour_key\"] = \"category\"\n context[\"tour_complete\"] = is_tour_complete(get_shop(self.request), \"category\")\n return context\n\n def form_valid(self, form):\n return self.save_form_parts(form)\n\n def get_queryset(self):\n return Category.objects.all_except_deleted(shop=get_shop(self.request))\n", "path": "shuup/admin/modules/categories/views/edit.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n# This file is part of Shuup.\n#\n# Copyright (c) 2012-2018, Shuup Inc. 
All rights reserved.\n#\n# This source code is licensed under the OSL-3.0 license found in the\n# LICENSE file in the root directory of this source tree.\nfrom django.core.urlresolvers import reverse_lazy\n\nfrom shuup.admin.form_part import FormPartsViewMixin, SaveFormPartsMixin\nfrom shuup.admin.modules.categories.form_parts import (\n CategoryBaseFormPart, CategoryProductFormPart\n)\nfrom shuup.admin.shop_provider import get_shop\nfrom shuup.admin.toolbar import get_default_edit_toolbar\nfrom shuup.admin.utils.tour import is_tour_complete\nfrom shuup.admin.utils.views import CreateOrUpdateView\nfrom shuup.core.models import Category\n\n\nclass CategoryEditView(SaveFormPartsMixin, FormPartsViewMixin, CreateOrUpdateView):\n model = Category\n template_name = \"shuup/admin/categories/edit.jinja\"\n context_object_name = \"category\"\n base_form_part_classes = [CategoryBaseFormPart, CategoryProductFormPart]\n form_part_class_provide_key = \"admin_category_form_part\"\n\n def get_toolbar(self):\n save_form_id = self.get_save_form_id()\n object = self.get_object()\n delete_url = reverse_lazy(\"shuup_admin:category.delete\", kwargs={\"pk\": object.pk}) if object.pk else None\n return get_default_edit_toolbar(self, save_form_id, delete_url=delete_url)\n\n def get_context_data(self, **kwargs):\n context = super(CategoryEditView, self).get_context_data(**kwargs)\n context[\"tour_key\"] = \"category\"\n context[\"tour_complete\"] = is_tour_complete(get_shop(self.request), \"category\")\n if self.object.pk:\n context[\"title\"] = self.object.name\n\n return context\n\n def form_valid(self, form):\n return self.save_form_parts(form)\n\n def get_queryset(self):\n return Category.objects.all_except_deleted(shop=get_shop(self.request))\n", "path": "shuup/admin/modules/categories/views/edit.py"}]} | 863 | 136 |
gh_patches_debug_34446 | rasdani/github-patches | git_diff | litestar-org__litestar-1255 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Bug: Issue with reload
**Describe the bug**
When I start my app using `starlite run -r`, I get the following error:
INFO: Will watch for changes in these directories: ['/Users/alc/starlite-apps/starlite_test']
ERROR: [Errno 48] Address already in use
If I stop that and run it without -r it works, so I guess my port is not blocked
**To Reproduce**
Save this as `app.py`
```python
from starlite import get, Starlite
@get('/')
def route() -> str:
return 'Hello'
app = Starlite([route])
```
**Additional context**
@provinzkraut in discord help suggested a workaround using ` uvicorn <app file>:<application object> --reload` and also linked https://github.com/encode/uvicorn/issues/1045
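For reference, the workaround boils down to bypassing the CLI and launching uvicorn directly against the example app above (module `app`, attribute `app`); the eventual fix below does essentially the same thing via `subprocess`:

```python
# Hypothetical one-liner equivalent of the suggested workaround.
import subprocess

subprocess.run(["uvicorn", "app:app", "--reload", "--port", "8000"], check=True)
```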
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `starlite/cli/commands/core.py`
Content:
```
1 from __future__ import annotations
2
3 import inspect
4 import multiprocessing
5
6 import click
7 from click import command, option
8 from rich.tree import Tree
9
10 from starlite import Starlite
11 from starlite.cli.utils import StarliteCLIException, StarliteEnv, console, show_app_info
12 from starlite.routes import HTTPRoute, WebSocketRoute
13 from starlite.utils.helpers import unwrap_partial
14
15
16 @command(name="info")
17 def info_command(app: Starlite) -> None:
18 """Show information about the detected Starlite app."""
19
20 show_app_info(app)
21
22
23 @command(name="run")
24 @option("-r", "--reload", help="Reload server on changes", default=False, is_flag=True)
25 @option("-p", "--port", help="Serve under this port", type=int, default=8000, show_default=True)
26 @option(
27 "-wc",
28 "--web-concurrency",
29 help="The number of HTTP workers to launch",
30 type=click.IntRange(min=1, max=multiprocessing.cpu_count() + 1),
31 show_default=True,
32 default=1,
33 )
34 @option("--host", help="Server under this host", default="127.0.0.1", show_default=True)
35 @option("--debug", help="Run app in debug mode", is_flag=True)
36 def run_command(
37 reload: bool,
38 port: int,
39 web_concurrency: int,
40 host: str,
41 debug: bool,
42 env: StarliteEnv,
43 app: Starlite,
44 ) -> None:
45 """Run a Starlite app.
46
47 The app can be either passed as a module path in the form of <module name>.<submodule>:<app instance or factory>,
48 set as an environment variable STARLITE_APP with the same format or automatically discovered from one of these
49 canonical paths: app.py, asgi.py, application.py or app/__init__.py. When auto-discovering application factories,
50 functions with the name ``create_app`` are considered, or functions that are annotated as returning a ``Starlite``
51 instance.
52 """
53
54 try:
55 import uvicorn
56 except ImportError:
57 raise StarliteCLIException("Uvicorn needs to be installed to run an app") # pylint: disable=W0707
58
59 if debug or env.debug:
60 app.debug = True
61
62 show_app_info(app)
63
64 console.rule("[yellow]Starting server process", align="left")
65
66 uvicorn.run(
67 env.app_path,
68 reload=env.reload or reload,
69 host=env.host or host,
70 port=env.port or port,
71 workers=env.web_concurrency or web_concurrency,
72 factory=env.is_app_factory,
73 )
74
75
76 @command(name="routes")
77 def routes_command(app: Starlite) -> None: # pragma: no cover
78 """Display information about the application's routes."""
79
80 tree = Tree("", hide_root=True)
81
82 for route in sorted(app.routes, key=lambda r: r.path):
83 if isinstance(route, HTTPRoute):
84 branch = tree.add(f"[green]{route.path}[/green] (HTTP)")
85 for handler in route.route_handlers:
86 handler_info = [
87 f"[blue]{handler.name or handler.handler_name}[/blue]",
88 ]
89
90 if inspect.iscoroutinefunction(unwrap_partial(handler.fn.value)):
91 handler_info.append("[magenta]async[/magenta]")
92 else:
93 handler_info.append("[yellow]sync[/yellow]")
94
95 handler_info.append(f'[cyan]{", ".join(sorted(handler.http_methods))}[/cyan]')
96
97 if len(handler.paths) > 1:
98 for path in handler.paths:
99 branch.add(" ".join([f"[green]{path}[green]", *handler_info]))
100 else:
101 branch.add(" ".join(handler_info))
102
103 else:
104 if isinstance(route, WebSocketRoute):
105 route_type = "WS"
106 else:
107 route_type = "ASGI"
108 branch = tree.add(f"[green]{route.path}[/green] ({route_type})")
109 branch.add(f"[blue]{route.route_handler.name or route.route_handler.handler_name}[/blue]")
110
111 console.print(tree)
112
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/starlite/cli/commands/core.py b/starlite/cli/commands/core.py
--- a/starlite/cli/commands/core.py
+++ b/starlite/cli/commands/core.py
@@ -2,17 +2,31 @@
import inspect
import multiprocessing
+import subprocess
+from typing import Any
import click
from click import command, option
from rich.tree import Tree
from starlite import Starlite
-from starlite.cli.utils import StarliteCLIException, StarliteEnv, console, show_app_info
+from starlite.cli.utils import StarliteEnv, console, show_app_info
from starlite.routes import HTTPRoute, WebSocketRoute
from starlite.utils.helpers import unwrap_partial
+def _convert_uvicorn_args(args: dict[str, Any]) -> list[str]:
+ process_args = []
+ for arg, value in args.items():
+ if isinstance(value, bool):
+ if value:
+ process_args.append(f"--{arg}")
+ else:
+ process_args.append(f"--{arg}={value}")
+
+ return process_args
+
+
@command(name="info")
def info_command(app: Starlite) -> None:
"""Show information about the detected Starlite app."""
@@ -51,11 +65,6 @@
instance.
"""
- try:
- import uvicorn
- except ImportError:
- raise StarliteCLIException("Uvicorn needs to be installed to run an app") # pylint: disable=W0707
-
if debug or env.debug:
app.debug = True
@@ -63,14 +72,18 @@
console.rule("[yellow]Starting server process", align="left")
- uvicorn.run(
- env.app_path,
- reload=env.reload or reload,
- host=env.host or host,
- port=env.port or port,
- workers=env.web_concurrency or web_concurrency,
- factory=env.is_app_factory,
- )
+ # invoke uvicorn in a subprocess to be able to use the --reload flag. see
+ # https://github.com/starlite-api/starlite/issues/1191 and https://github.com/encode/uvicorn/issues/1045
+
+ process_args = {
+ "reload": env.reload or reload,
+ "host": env.host or host,
+ "port": env.port or port,
+ "workers": env.web_concurrency or web_concurrency,
+ "factory": env.is_app_factory,
+ }
+
+ subprocess.run(["uvicorn", env.app_path, *_convert_uvicorn_args(process_args)], check=True)
@command(name="routes")
| {"golden_diff": "diff --git a/starlite/cli/commands/core.py b/starlite/cli/commands/core.py\n--- a/starlite/cli/commands/core.py\n+++ b/starlite/cli/commands/core.py\n@@ -2,17 +2,31 @@\n \n import inspect\n import multiprocessing\n+import subprocess\n+from typing import Any\n \n import click\n from click import command, option\n from rich.tree import Tree\n \n from starlite import Starlite\n-from starlite.cli.utils import StarliteCLIException, StarliteEnv, console, show_app_info\n+from starlite.cli.utils import StarliteEnv, console, show_app_info\n from starlite.routes import HTTPRoute, WebSocketRoute\n from starlite.utils.helpers import unwrap_partial\n \n \n+def _convert_uvicorn_args(args: dict[str, Any]) -> list[str]:\n+ process_args = []\n+ for arg, value in args.items():\n+ if isinstance(value, bool):\n+ if value:\n+ process_args.append(f\"--{arg}\")\n+ else:\n+ process_args.append(f\"--{arg}={value}\")\n+\n+ return process_args\n+\n+\n @command(name=\"info\")\n def info_command(app: Starlite) -> None:\n \"\"\"Show information about the detected Starlite app.\"\"\"\n@@ -51,11 +65,6 @@\n instance.\n \"\"\"\n \n- try:\n- import uvicorn\n- except ImportError:\n- raise StarliteCLIException(\"Uvicorn needs to be installed to run an app\") # pylint: disable=W0707\n-\n if debug or env.debug:\n app.debug = True\n \n@@ -63,14 +72,18 @@\n \n console.rule(\"[yellow]Starting server process\", align=\"left\")\n \n- uvicorn.run(\n- env.app_path,\n- reload=env.reload or reload,\n- host=env.host or host,\n- port=env.port or port,\n- workers=env.web_concurrency or web_concurrency,\n- factory=env.is_app_factory,\n- )\n+ # invoke uvicorn in a subprocess to be able to use the --reload flag. see\n+ # https://github.com/starlite-api/starlite/issues/1191 and https://github.com/encode/uvicorn/issues/1045\n+\n+ process_args = {\n+ \"reload\": env.reload or reload,\n+ \"host\": env.host or host,\n+ \"port\": env.port or port,\n+ \"workers\": env.web_concurrency or web_concurrency,\n+ \"factory\": env.is_app_factory,\n+ }\n+\n+ subprocess.run([\"uvicorn\", env.app_path, *_convert_uvicorn_args(process_args)], check=True)\n \n \n @command(name=\"routes\")\n", "issue": "Bug: Issue with reload\n**Describe the bug**\r\nWhen I start my app using starlite run -r I get the following error\r\n\r\nINFO: Will watch for changes in these directories: ['/Users/alc/starlite-apps/starlite_test']\r\nERROR: [Errno 48] Address already in use\r\n\r\nIf I stop that and run it without -r it works, so I guess my port is not blocked\r\n\r\n**To Reproduce**\r\nSave this as `app.py`\r\n\r\n```python\r\nfrom starlite import get, Starlite\r\n\r\n@get('/')\r\ndef route() -> str:\r\n return 'Hello'\r\n\r\napp = Starlite([route])\r\n```\r\n\r\n**Additional context**\r\n@provinzkraut in discord help suggested a workaround using ` uvicorn <app file>:<application object> --reload` and also linked https://github.com/encode/uvicorn/issues/1045\r\n\r\n\n", "before_files": [{"content": "from __future__ import annotations\n\nimport inspect\nimport multiprocessing\n\nimport click\nfrom click import command, option\nfrom rich.tree import Tree\n\nfrom starlite import Starlite\nfrom starlite.cli.utils import StarliteCLIException, StarliteEnv, console, show_app_info\nfrom starlite.routes import HTTPRoute, WebSocketRoute\nfrom starlite.utils.helpers import unwrap_partial\n\n\n@command(name=\"info\")\ndef info_command(app: Starlite) -> None:\n \"\"\"Show information about the detected Starlite app.\"\"\"\n\n 
show_app_info(app)\n\n\n@command(name=\"run\")\n@option(\"-r\", \"--reload\", help=\"Reload server on changes\", default=False, is_flag=True)\n@option(\"-p\", \"--port\", help=\"Serve under this port\", type=int, default=8000, show_default=True)\n@option(\n \"-wc\",\n \"--web-concurrency\",\n help=\"The number of HTTP workers to launch\",\n type=click.IntRange(min=1, max=multiprocessing.cpu_count() + 1),\n show_default=True,\n default=1,\n)\n@option(\"--host\", help=\"Server under this host\", default=\"127.0.0.1\", show_default=True)\n@option(\"--debug\", help=\"Run app in debug mode\", is_flag=True)\ndef run_command(\n reload: bool,\n port: int,\n web_concurrency: int,\n host: str,\n debug: bool,\n env: StarliteEnv,\n app: Starlite,\n) -> None:\n \"\"\"Run a Starlite app.\n\n The app can be either passed as a module path in the form of <module name>.<submodule>:<app instance or factory>,\n set as an environment variable STARLITE_APP with the same format or automatically discovered from one of these\n canonical paths: app.py, asgi.py, application.py or app/__init__.py. When auto-discovering application factories,\n functions with the name ``create_app`` are considered, or functions that are annotated as returning a ``Starlite``\n instance.\n \"\"\"\n\n try:\n import uvicorn\n except ImportError:\n raise StarliteCLIException(\"Uvicorn needs to be installed to run an app\") # pylint: disable=W0707\n\n if debug or env.debug:\n app.debug = True\n\n show_app_info(app)\n\n console.rule(\"[yellow]Starting server process\", align=\"left\")\n\n uvicorn.run(\n env.app_path,\n reload=env.reload or reload,\n host=env.host or host,\n port=env.port or port,\n workers=env.web_concurrency or web_concurrency,\n factory=env.is_app_factory,\n )\n\n\n@command(name=\"routes\")\ndef routes_command(app: Starlite) -> None: # pragma: no cover\n \"\"\"Display information about the application's routes.\"\"\"\n\n tree = Tree(\"\", hide_root=True)\n\n for route in sorted(app.routes, key=lambda r: r.path):\n if isinstance(route, HTTPRoute):\n branch = tree.add(f\"[green]{route.path}[/green] (HTTP)\")\n for handler in route.route_handlers:\n handler_info = [\n f\"[blue]{handler.name or handler.handler_name}[/blue]\",\n ]\n\n if inspect.iscoroutinefunction(unwrap_partial(handler.fn.value)):\n handler_info.append(\"[magenta]async[/magenta]\")\n else:\n handler_info.append(\"[yellow]sync[/yellow]\")\n\n handler_info.append(f'[cyan]{\", \".join(sorted(handler.http_methods))}[/cyan]')\n\n if len(handler.paths) > 1:\n for path in handler.paths:\n branch.add(\" \".join([f\"[green]{path}[green]\", *handler_info]))\n else:\n branch.add(\" \".join(handler_info))\n\n else:\n if isinstance(route, WebSocketRoute):\n route_type = \"WS\"\n else:\n route_type = \"ASGI\"\n branch = tree.add(f\"[green]{route.path}[/green] ({route_type})\")\n branch.add(f\"[blue]{route.route_handler.name or route.route_handler.handler_name}[/blue]\")\n\n console.print(tree)\n", "path": "starlite/cli/commands/core.py"}], "after_files": [{"content": "from __future__ import annotations\n\nimport inspect\nimport multiprocessing\nimport subprocess\nfrom typing import Any\n\nimport click\nfrom click import command, option\nfrom rich.tree import Tree\n\nfrom starlite import Starlite\nfrom starlite.cli.utils import StarliteEnv, console, show_app_info\nfrom starlite.routes import HTTPRoute, WebSocketRoute\nfrom starlite.utils.helpers import unwrap_partial\n\n\ndef _convert_uvicorn_args(args: dict[str, Any]) -> list[str]:\n process_args = []\n for arg, value in 
args.items():\n if isinstance(value, bool):\n if value:\n process_args.append(f\"--{arg}\")\n else:\n process_args.append(f\"--{arg}={value}\")\n\n return process_args\n\n\n@command(name=\"info\")\ndef info_command(app: Starlite) -> None:\n \"\"\"Show information about the detected Starlite app.\"\"\"\n\n show_app_info(app)\n\n\n@command(name=\"run\")\n@option(\"-r\", \"--reload\", help=\"Reload server on changes\", default=False, is_flag=True)\n@option(\"-p\", \"--port\", help=\"Serve under this port\", type=int, default=8000, show_default=True)\n@option(\n \"-wc\",\n \"--web-concurrency\",\n help=\"The number of HTTP workers to launch\",\n type=click.IntRange(min=1, max=multiprocessing.cpu_count() + 1),\n show_default=True,\n default=1,\n)\n@option(\"--host\", help=\"Server under this host\", default=\"127.0.0.1\", show_default=True)\n@option(\"--debug\", help=\"Run app in debug mode\", is_flag=True)\ndef run_command(\n reload: bool,\n port: int,\n web_concurrency: int,\n host: str,\n debug: bool,\n env: StarliteEnv,\n app: Starlite,\n) -> None:\n \"\"\"Run a Starlite app.\n\n The app can be either passed as a module path in the form of <module name>.<submodule>:<app instance or factory>,\n set as an environment variable STARLITE_APP with the same format or automatically discovered from one of these\n canonical paths: app.py, asgi.py, application.py or app/__init__.py. When auto-discovering application factories,\n functions with the name ``create_app`` are considered, or functions that are annotated as returning a ``Starlite``\n instance.\n \"\"\"\n\n if debug or env.debug:\n app.debug = True\n\n show_app_info(app)\n\n console.rule(\"[yellow]Starting server process\", align=\"left\")\n\n # invoke uvicorn in a subprocess to be able to use the --reload flag. see\n # https://github.com/starlite-api/starlite/issues/1191 and https://github.com/encode/uvicorn/issues/1045\n\n process_args = {\n \"reload\": env.reload or reload,\n \"host\": env.host or host,\n \"port\": env.port or port,\n \"workers\": env.web_concurrency or web_concurrency,\n \"factory\": env.is_app_factory,\n }\n\n subprocess.run([\"uvicorn\", env.app_path, *_convert_uvicorn_args(process_args)], check=True)\n\n\n@command(name=\"routes\")\ndef routes_command(app: Starlite) -> None: # pragma: no cover\n \"\"\"Display information about the application's routes.\"\"\"\n\n tree = Tree(\"\", hide_root=True)\n\n for route in sorted(app.routes, key=lambda r: r.path):\n if isinstance(route, HTTPRoute):\n branch = tree.add(f\"[green]{route.path}[/green] (HTTP)\")\n for handler in route.route_handlers:\n handler_info = [\n f\"[blue]{handler.name or handler.handler_name}[/blue]\",\n ]\n\n if inspect.iscoroutinefunction(unwrap_partial(handler.fn.value)):\n handler_info.append(\"[magenta]async[/magenta]\")\n else:\n handler_info.append(\"[yellow]sync[/yellow]\")\n\n handler_info.append(f'[cyan]{\", \".join(sorted(handler.http_methods))}[/cyan]')\n\n if len(handler.paths) > 1:\n for path in handler.paths:\n branch.add(\" \".join([f\"[green]{path}[green]\", *handler_info]))\n else:\n branch.add(\" \".join(handler_info))\n\n else:\n if isinstance(route, WebSocketRoute):\n route_type = \"WS\"\n else:\n route_type = \"ASGI\"\n branch = tree.add(f\"[green]{route.path}[/green] ({route_type})\")\n branch.add(f\"[blue]{route.route_handler.name or route.route_handler.handler_name}[/blue]\")\n\n console.print(tree)\n", "path": "starlite/cli/commands/core.py"}]} | 1,562 | 588 |
gh_patches_debug_12183 | rasdani/github-patches | git_diff | keras-team__keras-2883 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
YAML serialization throws exception with Regularizers
If a model contains a regularizer, YAML serialization throws an exception. Using latest Keras 1.0.3.
Compare:
``` python
>>> model = Sequential([LSTM(20, input_shape=(2,3))])
>>> model.to_yaml()
'class_name: Sequential\nconfig:\n- class_name: LSTM\n config:\n U_regularizer: null\n W_regularizer: null\n activation: tanh\n b_regularizer: null\n batch_input_shape: !!python/tuple [null, 2, 3]\n consume_less: cpu\n dropout_U: 0.0\n dropout_W: 0.0\n forget_bias_init: one\n go_backwards: false\n init: glorot_uniform\n inner_activation: hard_sigmoid\n inner_init: orthogonal\n input_dim: 3\n input_dtype: float32\n input_length: null\n name: lstm_3\n output_dim: 20\n return_sequences: false\n stateful: false\n trainable: true\n unroll: false\n'
>>> model = Sequential([LSTM(20, input_shape=(2,3), W_regularizer=l2())])
>>> model.to_yaml()
/usr/lib64/python3.4/site-packages/yaml/representer.py:135: FutureWarning: comparison to `None` will result in an elementwise object comparison in the future.
if data in [None, ()]:
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/usr/lib64/python3.4/site-packages/keras/engine/topology.py", line 2391, in to_yaml
return yaml.dump(model_config, **kwargs)
File "/usr/lib64/python3.4/site-packages/yaml/__init__.py", line 200, in dump
return dump_all([data], stream, Dumper=Dumper, **kwds)
File "/usr/lib64/python3.4/site-packages/yaml/__init__.py", line 188, in dump_all
dumper.represent(data)
File "/usr/lib64/python3.4/site-packages/yaml/representer.py", line 26, in represent
node = self.represent_data(data)
File "/usr/lib64/python3.4/site-packages/yaml/representer.py", line 47, in represent_data
node = self.yaml_representers[data_types[0]](self, data)
File "/usr/lib64/python3.4/site-packages/yaml/representer.py", line 203, in represent_dict
return self.represent_mapping('tag:yaml.org,2002:map', data)
File "/usr/lib64/python3.4/site-packages/yaml/representer.py", line 116, in represent_mapping
node_value = self.represent_data(item_value)
File "/usr/lib64/python3.4/site-packages/yaml/representer.py", line 47, in represent_data
node = self.yaml_representers[data_types[0]](self, data)
File "/usr/lib64/python3.4/site-packages/yaml/representer.py", line 195, in represent_list
return self.represent_sequence('tag:yaml.org,2002:seq', data)
File "/usr/lib64/python3.4/site-packages/yaml/representer.py", line 91, in represent_sequence
node_item = self.represent_data(item)
File "/usr/lib64/python3.4/site-packages/yaml/representer.py", line 47, in represent_data
node = self.yaml_representers[data_types[0]](self, data)
File "/usr/lib64/python3.4/site-packages/yaml/representer.py", line 203, in represent_dict
return self.represent_mapping('tag:yaml.org,2002:map', data)
File "/usr/lib64/python3.4/site-packages/yaml/representer.py", line 116, in represent_mapping
node_value = self.represent_data(item_value)
File "/usr/lib64/python3.4/site-packages/yaml/representer.py", line 47, in represent_data
node = self.yaml_representers[data_types[0]](self, data)
File "/usr/lib64/python3.4/site-packages/yaml/representer.py", line 203, in represent_dict
return self.represent_mapping('tag:yaml.org,2002:map', data)
File "/usr/lib64/python3.4/site-packages/yaml/representer.py", line 116, in represent_mapping
node_value = self.represent_data(item_value)
File "/usr/lib64/python3.4/site-packages/yaml/representer.py", line 47, in represent_data
node = self.yaml_representers[data_types[0]](self, data)
File "/usr/lib64/python3.4/site-packages/yaml/representer.py", line 203, in represent_dict
return self.represent_mapping('tag:yaml.org,2002:map', data)
File "/usr/lib64/python3.4/site-packages/yaml/representer.py", line 116, in represent_mapping
node_value = self.represent_data(item_value)
File "/usr/lib64/python3.4/site-packages/yaml/representer.py", line 51, in represent_data
node = self.yaml_multi_representers[data_type](self, data)
File "/usr/lib64/python3.4/site-packages/yaml/representer.py", line 352, in represent_object
return self.represent_mapping(tag+function_name, value)
File "/usr/lib64/python3.4/site-packages/yaml/representer.py", line 116, in represent_mapping
node_value = self.represent_data(item_value)
File "/usr/lib64/python3.4/site-packages/yaml/representer.py", line 47, in represent_data
node = self.yaml_representers[data_types[0]](self, data)
File "/usr/lib64/python3.4/site-packages/yaml/representer.py", line 282, in represent_tuple
return self.represent_sequence('tag:yaml.org,2002:python/tuple', data)
File "/usr/lib64/python3.4/site-packages/yaml/representer.py", line 91, in represent_sequence
node_item = self.represent_data(item)
File "/usr/lib64/python3.4/site-packages/yaml/representer.py", line 33, in represent_data
if self.ignore_aliases(data):
File "/usr/lib64/python3.4/site-packages/yaml/representer.py", line 135, in ignore_aliases
if data in [None, ()]:
TypeError: data type not understood
```
Tested with LSTM & Embedding layer, W_regularizer and U_regularizer, l1 and l2. Always throws. `.to_json()` works fine. Is there some serialization code missing for l1/l2?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `keras/regularizers.py`
Content:
```
1 from __future__ import absolute_import
2 from . import backend as K
3
4
5 class Regularizer(object):
6 def set_param(self, p):
7 self.p = p
8
9 def set_layer(self, layer):
10 self.layer = layer
11
12 def __call__(self, loss):
13 return loss
14
15 def get_config(self):
16 return {'name': self.__class__.__name__}
17
18
19 class WeightRegularizer(Regularizer):
20 def __init__(self, l1=0., l2=0.):
21 self.l1 = K.cast_to_floatx(l1)
22 self.l2 = K.cast_to_floatx(l2)
23 self.uses_learning_phase = True
24
25 def set_param(self, p):
26 self.p = p
27
28 def __call__(self, loss):
29 if not hasattr(self, 'p'):
30 raise Exception('Need to call `set_param` on '
31 'WeightRegularizer instance '
32 'before calling the instance. '
33 'Check that you are not passing '
34 'a WeightRegularizer instead of an '
35 'ActivityRegularizer '
36 '(i.e. activity_regularizer="l2" instead '
37 'of activity_regularizer="activity_l2".')
38 regularized_loss = loss + K.sum(K.abs(self.p)) * self.l1
39 regularized_loss += K.sum(K.square(self.p)) * self.l2
40 return K.in_train_phase(regularized_loss, loss)
41
42 def get_config(self):
43 return {'name': self.__class__.__name__,
44 'l1': self.l1,
45 'l2': self.l2}
46
47
48 class ActivityRegularizer(Regularizer):
49 def __init__(self, l1=0., l2=0.):
50 self.l1 = K.cast_to_floatx(l1)
51 self.l2 = K.cast_to_floatx(l2)
52 self.uses_learning_phase = True
53
54 def set_layer(self, layer):
55 self.layer = layer
56
57 def __call__(self, loss):
58 if not hasattr(self, 'layer'):
59 raise Exception('Need to call `set_layer` on '
60 'ActivityRegularizer instance '
61 'before calling the instance.')
62 regularized_loss = loss
63 for i in range(len(self.layer.inbound_nodes)):
64 output = self.layer.get_output_at(i)
65 regularized_loss += self.l1 * K.sum(K.mean(K.abs(output), axis=0))
66 regularized_loss += self.l2 * K.sum(K.mean(K.square(output), axis=0))
67 return K.in_train_phase(regularized_loss, loss)
68
69 def get_config(self):
70 return {'name': self.__class__.__name__,
71 'l1': self.l1,
72 'l2': self.l2}
73
74
75 def l1(l=0.01):
76 return WeightRegularizer(l1=l)
77
78
79 def l2(l=0.01):
80 return WeightRegularizer(l2=l)
81
82
83 def l1l2(l1=0.01, l2=0.01):
84 return WeightRegularizer(l1=l1, l2=l2)
85
86
87 def activity_l1(l=0.01):
88 return ActivityRegularizer(l1=l)
89
90
91 def activity_l2(l=0.01):
92 return ActivityRegularizer(l2=l)
93
94
95 def activity_l1l2(l1=0.01, l2=0.01):
96 return ActivityRegularizer(l1=l1, l2=l2)
97
98
99 from .utils.generic_utils import get_from_module
100 def get(identifier, kwargs=None):
101 return get_from_module(identifier, globals(), 'regularizer',
102 instantiate=True, kwargs=kwargs)
103
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/keras/regularizers.py b/keras/regularizers.py
--- a/keras/regularizers.py
+++ b/keras/regularizers.py
@@ -41,8 +41,8 @@
def get_config(self):
return {'name': self.__class__.__name__,
- 'l1': self.l1,
- 'l2': self.l2}
+ 'l1': float(self.l1),
+ 'l2': float(self.l2)}
class ActivityRegularizer(Regularizer):
@@ -68,8 +68,8 @@
def get_config(self):
return {'name': self.__class__.__name__,
- 'l1': self.l1,
- 'l2': self.l2}
+ 'l1': float(self.l1),
+ 'l2': float(self.l2)}
def l1(l=0.01):
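
The cast to `float` is the whole fix: in Keras 1.x, `K.cast_to_floatx` effectively returns a NumPy value (a 0-d array or NumPy scalar, depending on backend/version) rather than a built-in Python float, and PyYAML has no representer for NumPy types — depending on the PyYAML/NumPy combination, `yaml.dump` either blows up (as in the traceback above) or falls back to an unportable `!!python/object/apply:` blob, while `.to_json()` presumably survives because Keras's JSON encoder applies its own type coercion. A minimal, standalone sketch of the difference (the values and the `WeightRegularizer` name here are purely illustrative):

```python
import numpy as np
import yaml

# Illustrative stand-in for what K.cast_to_floatx produced in Keras 1.x:
# a NumPy value rather than a built-in Python float.
l2_value = np.asarray(0.01, dtype="float32")

try:
    # Depending on the PyYAML/NumPy versions this raises (as in the issue's
    # traceback) or emits an unreadable !!python/object/apply blob.
    print(yaml.dump({"name": "WeightRegularizer", "l1": 0.0, "l2": l2_value}))
except Exception as exc:
    print("not cleanly YAML-serializable:", type(exc).__name__, exc)

# What the patched get_config returns: built-in floats dump to clean YAML.
print(yaml.dump({"name": "WeightRegularizer", "l1": 0.0, "l2": float(l2_value)}))
```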
| {"golden_diff": "diff --git a/keras/regularizers.py b/keras/regularizers.py\n--- a/keras/regularizers.py\n+++ b/keras/regularizers.py\n@@ -41,8 +41,8 @@\n \n def get_config(self):\n return {'name': self.__class__.__name__,\n- 'l1': self.l1,\n- 'l2': self.l2}\n+ 'l1': float(self.l1),\n+ 'l2': float(self.l2)}\n \n \n class ActivityRegularizer(Regularizer):\n@@ -68,8 +68,8 @@\n \n def get_config(self):\n return {'name': self.__class__.__name__,\n- 'l1': self.l1,\n- 'l2': self.l2}\n+ 'l1': float(self.l1),\n+ 'l2': float(self.l2)}\n \n \n def l1(l=0.01):\n", "issue": "YAML serialization throws exception with Regularizers\nIf a model contains a regularizer, YAML serialization throws an exception. Using latest Keras 1.0.3.\n\nCompare:\n\n``` python\n>>> model = Sequential([LSTM(20, input_shape=(2,3))])\n>>> model.to_yaml()\n'class_name: Sequential\\nconfig:\\n- class_name: LSTM\\n config:\\n U_regularizer: null\\n W_regularizer: null\\n activation: tanh\\n b_regularizer: null\\n batch_input_shape: !!python/tuple [null, 2, 3]\\n consume_less: cpu\\n dropout_U: 0.0\\n dropout_W: 0.0\\n forget_bias_init: one\\n go_backwards: false\\n init: glorot_uniform\\n inner_activation: hard_sigmoid\\n inner_init: orthogonal\\n input_dim: 3\\n input_dtype: float32\\n input_length: null\\n name: lstm_3\\n output_dim: 20\\n return_sequences: false\\n stateful: false\\n trainable: true\\n unroll: false\\n'\n>>> model = Sequential([LSTM(20, input_shape=(2,3), W_regularizer=l2())])\n>>> model.to_yaml()\n/usr/lib64/python3.4/site-packages/yaml/representer.py:135: FutureWarning: comparison to `None` will result in an elementwise object comparison in the future.\n if data in [None, ()]:\nTraceback (most recent call last):\n File \"<stdin>\", line 1, in <module>\n File \"/usr/lib64/python3.4/site-packages/keras/engine/topology.py\", line 2391, in to_yaml\n return yaml.dump(model_config, **kwargs)\n File \"/usr/lib64/python3.4/site-packages/yaml/__init__.py\", line 200, in dump\n return dump_all([data], stream, Dumper=Dumper, **kwds)\n File \"/usr/lib64/python3.4/site-packages/yaml/__init__.py\", line 188, in dump_all\n dumper.represent(data)\n File \"/usr/lib64/python3.4/site-packages/yaml/representer.py\", line 26, in represent \n node = self.represent_data(data) \n File \"/usr/lib64/python3.4/site-packages/yaml/representer.py\", line 47, in represent_data\n node = self.yaml_representers[data_types[0]](self, data)\n File \"/usr/lib64/python3.4/site-packages/yaml/representer.py\", line 203, in represent_dict\n return self.represent_mapping('tag:yaml.org,2002:map', data)\n File \"/usr/lib64/python3.4/site-packages/yaml/representer.py\", line 116, in represent_mapping\n node_value = self.represent_data(item_value)\n File \"/usr/lib64/python3.4/site-packages/yaml/representer.py\", line 47, in represent_data\n node = self.yaml_representers[data_types[0]](self, data)\n File \"/usr/lib64/python3.4/site-packages/yaml/representer.py\", line 195, in represent_list\n return self.represent_sequence('tag:yaml.org,2002:seq', data)\n File \"/usr/lib64/python3.4/site-packages/yaml/representer.py\", line 91, in represent_sequence\n node_item = self.represent_data(item)\n File \"/usr/lib64/python3.4/site-packages/yaml/representer.py\", line 47, in represent_data\n node = self.yaml_representers[data_types[0]](self, data)\n File \"/usr/lib64/python3.4/site-packages/yaml/representer.py\", line 203, in represent_dict\n return self.represent_mapping('tag:yaml.org,2002:map', data)\n File 
\"/usr/lib64/python3.4/site-packages/yaml/representer.py\", line 116, in represent_mapping\n node_value = self.represent_data(item_value)\n File \"/usr/lib64/python3.4/site-packages/yaml/representer.py\", line 47, in represent_data\n node = self.yaml_representers[data_types[0]](self, data)\n File \"/usr/lib64/python3.4/site-packages/yaml/representer.py\", line 203, in represent_dict\n return self.represent_mapping('tag:yaml.org,2002:map', data)\n File \"/usr/lib64/python3.4/site-packages/yaml/representer.py\", line 116, in represent_mapping\n node_value = self.represent_data(item_value)\n File \"/usr/lib64/python3.4/site-packages/yaml/representer.py\", line 47, in represent_data\n node = self.yaml_representers[data_types[0]](self, data)\n File \"/usr/lib64/python3.4/site-packages/yaml/representer.py\", line 203, in represent_dict\n return self.represent_mapping('tag:yaml.org,2002:map', data)\n File \"/usr/lib64/python3.4/site-packages/yaml/representer.py\", line 116, in represent_mapping\n node_value = self.represent_data(item_value)\n File \"/usr/lib64/python3.4/site-packages/yaml/representer.py\", line 51, in represent_data\n node = self.yaml_multi_representers[data_type](self, data)\n File \"/usr/lib64/python3.4/site-packages/yaml/representer.py\", line 352, in represent_object\n return self.represent_mapping(tag+function_name, value)\n File \"/usr/lib64/python3.4/site-packages/yaml/representer.py\", line 116, in represent_mapping\n node_value = self.represent_data(item_value)\n File \"/usr/lib64/python3.4/site-packages/yaml/representer.py\", line 47, in represent_data\n node = self.yaml_representers[data_types[0]](self, data)\n File \"/usr/lib64/python3.4/site-packages/yaml/representer.py\", line 282, in represent_tuple\n return self.represent_sequence('tag:yaml.org,2002:python/tuple', data)\n File \"/usr/lib64/python3.4/site-packages/yaml/representer.py\", line 91, in represent_sequence\n node_item = self.represent_data(item)\n File \"/usr/lib64/python3.4/site-packages/yaml/representer.py\", line 33, in represent_data\n if self.ignore_aliases(data):\n File \"/usr/lib64/python3.4/site-packages/yaml/representer.py\", line 135, in ignore_aliases\n if data in [None, ()]:\nTypeError: data type not understood\n```\n\nTested with LSTM & Embedding layer, W_regularizer and U_regularizer, l1 and l2. Always throws. `.to_json()` works fine. Is there some serialization code missing for l1/l2?\n\n", "before_files": [{"content": "from __future__ import absolute_import\nfrom . import backend as K\n\n\nclass Regularizer(object):\n def set_param(self, p):\n self.p = p\n\n def set_layer(self, layer):\n self.layer = layer\n\n def __call__(self, loss):\n return loss\n\n def get_config(self):\n return {'name': self.__class__.__name__}\n\n\nclass WeightRegularizer(Regularizer):\n def __init__(self, l1=0., l2=0.):\n self.l1 = K.cast_to_floatx(l1)\n self.l2 = K.cast_to_floatx(l2)\n self.uses_learning_phase = True\n\n def set_param(self, p):\n self.p = p\n\n def __call__(self, loss):\n if not hasattr(self, 'p'):\n raise Exception('Need to call `set_param` on '\n 'WeightRegularizer instance '\n 'before calling the instance. '\n 'Check that you are not passing '\n 'a WeightRegularizer instead of an '\n 'ActivityRegularizer '\n '(i.e. 
activity_regularizer=\"l2\" instead '\n 'of activity_regularizer=\"activity_l2\".')\n regularized_loss = loss + K.sum(K.abs(self.p)) * self.l1\n regularized_loss += K.sum(K.square(self.p)) * self.l2\n return K.in_train_phase(regularized_loss, loss)\n\n def get_config(self):\n return {'name': self.__class__.__name__,\n 'l1': self.l1,\n 'l2': self.l2}\n\n\nclass ActivityRegularizer(Regularizer):\n def __init__(self, l1=0., l2=0.):\n self.l1 = K.cast_to_floatx(l1)\n self.l2 = K.cast_to_floatx(l2)\n self.uses_learning_phase = True\n\n def set_layer(self, layer):\n self.layer = layer\n\n def __call__(self, loss):\n if not hasattr(self, 'layer'):\n raise Exception('Need to call `set_layer` on '\n 'ActivityRegularizer instance '\n 'before calling the instance.')\n regularized_loss = loss\n for i in range(len(self.layer.inbound_nodes)):\n output = self.layer.get_output_at(i)\n regularized_loss += self.l1 * K.sum(K.mean(K.abs(output), axis=0))\n regularized_loss += self.l2 * K.sum(K.mean(K.square(output), axis=0))\n return K.in_train_phase(regularized_loss, loss)\n\n def get_config(self):\n return {'name': self.__class__.__name__,\n 'l1': self.l1,\n 'l2': self.l2}\n\n\ndef l1(l=0.01):\n return WeightRegularizer(l1=l)\n\n\ndef l2(l=0.01):\n return WeightRegularizer(l2=l)\n\n\ndef l1l2(l1=0.01, l2=0.01):\n return WeightRegularizer(l1=l1, l2=l2)\n\n\ndef activity_l1(l=0.01):\n return ActivityRegularizer(l1=l)\n\n\ndef activity_l2(l=0.01):\n return ActivityRegularizer(l2=l)\n\n\ndef activity_l1l2(l1=0.01, l2=0.01):\n return ActivityRegularizer(l1=l1, l2=l2)\n\n\nfrom .utils.generic_utils import get_from_module\ndef get(identifier, kwargs=None):\n return get_from_module(identifier, globals(), 'regularizer',\n instantiate=True, kwargs=kwargs)\n", "path": "keras/regularizers.py"}], "after_files": [{"content": "from __future__ import absolute_import\nfrom . import backend as K\n\n\nclass Regularizer(object):\n def set_param(self, p):\n self.p = p\n\n def set_layer(self, layer):\n self.layer = layer\n\n def __call__(self, loss):\n return loss\n\n def get_config(self):\n return {'name': self.__class__.__name__}\n\n\nclass WeightRegularizer(Regularizer):\n def __init__(self, l1=0., l2=0.):\n self.l1 = K.cast_to_floatx(l1)\n self.l2 = K.cast_to_floatx(l2)\n self.uses_learning_phase = True\n\n def set_param(self, p):\n self.p = p\n\n def __call__(self, loss):\n if not hasattr(self, 'p'):\n raise Exception('Need to call `set_param` on '\n 'WeightRegularizer instance '\n 'before calling the instance. '\n 'Check that you are not passing '\n 'a WeightRegularizer instead of an '\n 'ActivityRegularizer '\n '(i.e. 
activity_regularizer=\"l2\" instead '\n 'of activity_regularizer=\"activity_l2\".')\n regularized_loss = loss + K.sum(K.abs(self.p)) * self.l1\n regularized_loss += K.sum(K.square(self.p)) * self.l2\n return K.in_train_phase(regularized_loss, loss)\n\n def get_config(self):\n return {'name': self.__class__.__name__,\n 'l1': float(self.l1),\n 'l2': float(self.l2)}\n\n\nclass ActivityRegularizer(Regularizer):\n def __init__(self, l1=0., l2=0.):\n self.l1 = K.cast_to_floatx(l1)\n self.l2 = K.cast_to_floatx(l2)\n self.uses_learning_phase = True\n\n def set_layer(self, layer):\n self.layer = layer\n\n def __call__(self, loss):\n if not hasattr(self, 'layer'):\n raise Exception('Need to call `set_layer` on '\n 'ActivityRegularizer instance '\n 'before calling the instance.')\n regularized_loss = loss\n for i in range(len(self.layer.inbound_nodes)):\n output = self.layer.get_output_at(i)\n regularized_loss += self.l1 * K.sum(K.mean(K.abs(output), axis=0))\n regularized_loss += self.l2 * K.sum(K.mean(K.square(output), axis=0))\n return K.in_train_phase(regularized_loss, loss)\n\n def get_config(self):\n return {'name': self.__class__.__name__,\n 'l1': float(self.l1),\n 'l2': float(self.l2)}\n\n\ndef l1(l=0.01):\n return WeightRegularizer(l1=l)\n\n\ndef l2(l=0.01):\n return WeightRegularizer(l2=l)\n\n\ndef l1l2(l1=0.01, l2=0.01):\n return WeightRegularizer(l1=l1, l2=l2)\n\n\ndef activity_l1(l=0.01):\n return ActivityRegularizer(l1=l)\n\n\ndef activity_l2(l=0.01):\n return ActivityRegularizer(l2=l)\n\n\ndef activity_l1l2(l1=0.01, l2=0.01):\n return ActivityRegularizer(l1=l1, l2=l2)\n\n\nfrom .utils.generic_utils import get_from_module\ndef get(identifier, kwargs=None):\n return get_from_module(identifier, globals(), 'regularizer',\n instantiate=True, kwargs=kwargs)\n", "path": "keras/regularizers.py"}]} | 2,849 | 204 |
gh_patches_debug_30821 | rasdani/github-patches | git_diff | cal-itp__benefits-897 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add group information to enrollment success event
#873 brought us an explicit event for enrollment success (and failure, and retry).
This ticket seeks to add an event property `payment_group` with the GUID of the Payment Processor group the user was enrolled into.
This will help us track down registration issues, verify users are getting into the correct groups, etc.
## Acceptance Criteria
<!-- Remember to consider edge cases -->
- [x] Every time the `enrollment returned` with `status = success` fires, an event property `payment_group` is sent with the event
## Additional context
The Group identifier is stored in the [`EligibilityType` model](https://github.com/cal-itp/benefits/blob/dev/benefits/core/models.py#L45)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `benefits/enrollment/analytics.py`
Content:
```
1 """
2 The enrollment application: analytics implementation.
3 """
4 from benefits.core import analytics as core
5
6
7 class ReturnedEnrollmentEvent(core.Event):
8 """Analytics event representing the end of payment processor enrollment request."""
9
10 def __init__(self, request, status, error=None):
11 super().__init__(request, "returned enrollment")
12 if str(status).lower() in ("error", "retry", "success"):
13 self.update_event_properties(status=status, error=error)
14
15
16 def returned_error(request, error):
17 """Send the "returned enrollment" analytics event with an error status and message."""
18 core.send_event(ReturnedEnrollmentEvent(request, status="error", error=error))
19
20
21 def returned_retry(request):
22 """Send the "returned enrollment" analytics event with a retry status."""
23 core.send_event(ReturnedEnrollmentEvent(request, status="retry"))
24
25
26 def returned_success(request):
27 """Send the "returned enrollment" analytics event with a success status."""
28 core.send_event(ReturnedEnrollmentEvent(request, status="success"))
29
```
Path: `benefits/enrollment/views.py`
Content:
```
1 """
2 The enrollment application: view definitions for the benefits enrollment flow.
3 """
4 import logging
5
6 from django.http import JsonResponse
7 from django.template.response import TemplateResponse
8 from django.urls import reverse
9 from django.utils.decorators import decorator_from_middleware
10 from django.utils.translation import pgettext, gettext as _
11
12 from benefits.core import models, session, viewmodels
13 from benefits.core.middleware import EligibleSessionRequired, VerifierSessionRequired, pageview_decorator
14 from benefits.core.views import ROUTE_HELP
15 from . import analytics, api, forms
16
17
18 ROUTE_INDEX = "enrollment:index"
19 ROUTE_RETRY = "enrollment:retry"
20 ROUTE_SUCCESS = "enrollment:success"
21 ROUTE_TOKEN = "enrollment:token"
22
23 TEMPLATE_INDEX = "enrollment/index.html"
24 TEMPLATE_RETRY = "enrollment/retry.html"
25 TEMPLATE_SUCCESS = "enrollment/success.html"
26
27
28 logger = logging.getLogger(__name__)
29
30
31 @decorator_from_middleware(EligibleSessionRequired)
32 def token(request):
33 """View handler for the enrollment auth token."""
34 if not session.enrollment_token_valid(request):
35 agency = session.agency(request)
36 response = api.Client(agency).access_token()
37 session.update(request, enrollment_token=response.access_token, enrollment_token_exp=response.expiry)
38
39 data = {"token": session.enrollment_token(request)}
40
41 return JsonResponse(data)
42
43
44 @decorator_from_middleware(EligibleSessionRequired)
45 def index(request):
46 """View handler for the enrollment landing page."""
47 session.update(request, origin=reverse(ROUTE_INDEX))
48
49 agency = session.agency(request)
50
51 # POST back after payment processor form, process card token
52 if request.method == "POST":
53 form = forms.CardTokenizeSuccessForm(request.POST)
54 if not form.is_valid():
55 raise Exception("Invalid card token form")
56
57 eligibility = session.eligibility(request)
58 logger.debug(f"Session contains an {models.EligibilityType.__name__}")
59
60 logger.debug("Read tokenized card")
61 card_token = form.cleaned_data.get("card_token")
62
63 response = api.Client(agency).enroll(card_token, eligibility.group_id)
64 if response.success:
65 analytics.returned_success(request)
66 return success(request)
67 else:
68 analytics.returned_error(request, response.message)
69 raise Exception(response.message)
70
71 # GET enrollment index, with button to initiate payment processor connection
72 else:
73 tokenize_button = "tokenize_card"
74 tokenize_retry_form = forms.CardTokenizeFailForm(ROUTE_RETRY)
75 tokenize_success_form = forms.CardTokenizeSuccessForm(auto_id=True, label_suffix="")
76
77 page = viewmodels.Page(
78 title=_("enrollment.pages.index.title"),
79 content_title=_("enrollment.pages.index.content_title"),
80 icon=viewmodels.Icon("idcardcheck", pgettext("image alt text", "core.icons.idcardcheck")),
81 paragraphs=[_("enrollment.pages.index.p[0]"), _("enrollment.pages.index.p[1]"), _("enrollment.pages.index.p[2]")],
82 classes="text-lg-center no-image-mobile",
83 forms=[tokenize_retry_form, tokenize_success_form],
84 buttons=[
85 viewmodels.Button.primary(
86 text=_("enrollment.buttons.payment_partner"), id=tokenize_button, url=f"#{tokenize_button}"
87 ),
88 ],
89 )
90 context = {}
91 context.update(page.context_dict())
92
93 # add agency details
94 agency_vm = viewmodels.TransitAgency(agency)
95 context.update(agency_vm.context_dict())
96
97 # and payment processor details
98 processor_vm = viewmodels.PaymentProcessor(
99 model=agency.payment_processor,
100 access_token_url=reverse(ROUTE_TOKEN),
101 element_id=f"#{tokenize_button}",
102 color="#046b99",
103 name=f"{agency.long_name} {_('partnered with')} {agency.payment_processor.name}",
104 )
105 context.update(processor_vm.context_dict())
106 logger.warning(f"card_tokenize_url: {context['payment_processor'].card_tokenize_url}")
107
108 # the tokenize form URLs are injected to page-generated Javascript
109 context["forms"] = {
110 "tokenize_retry": reverse(tokenize_retry_form.action_url),
111 "tokenize_success": reverse(tokenize_success_form.action_url),
112 }
113
114 return TemplateResponse(request, TEMPLATE_INDEX, context)
115
116
117 @decorator_from_middleware(EligibleSessionRequired)
118 def retry(request):
119 """View handler for a recoverable failure condition."""
120 if request.method == "POST":
121 analytics.returned_retry(request)
122 form = forms.CardTokenizeFailForm(request.POST)
123 if form.is_valid():
124 agency = session.agency(request)
125 page = viewmodels.Page(
126 classes="no-image-mobile",
127 title=_("enrollment.pages.retry.title"),
128 icon=viewmodels.Icon("bankcardquestion", pgettext("image alt text", "core.icons.bankcardquestion")),
129 content_title=_("enrollment.pages.retry.title"),
130 paragraphs=[_("enrollment.pages.retry.p[0]")],
131 buttons=viewmodels.Button.agency_contact_links(agency),
132 )
133 page.buttons.append(viewmodels.Button.primary(text=_("core.buttons.retry"), url=session.origin(request)))
134 return TemplateResponse(request, TEMPLATE_RETRY, page.context_dict())
135 else:
136 analytics.returned_error(request, "Invalid retry submission.")
137 raise Exception("Invalid retry submission.")
138 else:
139 analytics.returned_error(request, "This view method only supports POST.")
140 raise Exception("This view method only supports POST.")
141
142
143 @pageview_decorator
144 @decorator_from_middleware(VerifierSessionRequired)
145 def success(request):
146 """View handler for the final success page."""
147 request.path = "/enrollment/success"
148 session.update(request, origin=reverse(ROUTE_SUCCESS))
149 verifier = session.verifier(request)
150 icon = viewmodels.Icon("bankcardcheck", pgettext("image alt text", "core.icons.bankcardcheck"))
151 page = viewmodels.Page(
152 classes="no-image-mobile",
153 title=_("enrollment.pages.success.title"),
154 content_title=_("enrollment.pages.success.content_title"),
155 )
156
157 if verifier.is_auth_required:
158 if session.logged_in(request):
159 page.buttons = [viewmodels.Button.logout()]
160 page.classes = ["no-image-mobile", "logged-in"]
161 page.icon = icon
162 else:
163 page.classes = ["no-image-mobile", "logged-out"]
164 page.content_title = _("enrollment.pages.success.logout.title")
165 page.noimage = True
166 else:
167 page.icon = icon
168
169 help_link = reverse(ROUTE_HELP)
170 context_dict = {**page.context_dict(), **{"help_link": help_link}}
171
172 return TemplateResponse(request, TEMPLATE_SUCCESS, context_dict)
173
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/benefits/enrollment/analytics.py b/benefits/enrollment/analytics.py
--- a/benefits/enrollment/analytics.py
+++ b/benefits/enrollment/analytics.py
@@ -7,10 +7,12 @@
class ReturnedEnrollmentEvent(core.Event):
"""Analytics event representing the end of payment processor enrollment request."""
- def __init__(self, request, status, error=None):
+ def __init__(self, request, status, error=None, payment_group=None):
super().__init__(request, "returned enrollment")
if str(status).lower() in ("error", "retry", "success"):
self.update_event_properties(status=status, error=error)
+ if payment_group is not None:
+ self.update_event_properties(payment_group=payment_group)
def returned_error(request, error):
@@ -23,6 +25,6 @@
core.send_event(ReturnedEnrollmentEvent(request, status="retry"))
-def returned_success(request):
+def returned_success(request, payment_group):
"""Send the "returned enrollment" analytics event with a success status."""
- core.send_event(ReturnedEnrollmentEvent(request, status="success"))
+ core.send_event(ReturnedEnrollmentEvent(request, status="success", payment_group=payment_group))
diff --git a/benefits/enrollment/views.py b/benefits/enrollment/views.py
--- a/benefits/enrollment/views.py
+++ b/benefits/enrollment/views.py
@@ -62,7 +62,7 @@
response = api.Client(agency).enroll(card_token, eligibility.group_id)
if response.success:
- analytics.returned_success(request)
+ analytics.returned_success(request, eligibility.group_id)
return success(request)
else:
analytics.returned_error(request, response.message)
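
To make the conditional explicit: the new `payment_group` keyword is only attached to the event when a value is supplied, so the error/retry paths are unchanged and the success path now carries the `EligibilityType.group_id` GUID. A standalone mock of that behaviour (this is not the project's real `core.Event` class — the property storage and the GUID value are placeholders):

```python
# Minimal mock mirroring the patched ReturnedEnrollmentEvent semantics:
# payment_group is only added when a value is supplied.
class MockReturnedEnrollmentEvent:
    def __init__(self, status, error=None, payment_group=None):
        self.event_properties = {"status": status, "error": error}
        if payment_group is not None:
            self.event_properties["payment_group"] = payment_group

success = MockReturnedEnrollmentEvent("success", payment_group="00000000-aaaa-bbbb-cccc-111111111111")
retry = MockReturnedEnrollmentEvent("retry")

assert success.event_properties["payment_group"] == "00000000-aaaa-bbbb-cccc-111111111111"
assert "payment_group" not in retry.event_properties
```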
| {"golden_diff": "diff --git a/benefits/enrollment/analytics.py b/benefits/enrollment/analytics.py\n--- a/benefits/enrollment/analytics.py\n+++ b/benefits/enrollment/analytics.py\n@@ -7,10 +7,12 @@\n class ReturnedEnrollmentEvent(core.Event):\n \"\"\"Analytics event representing the end of payment processor enrollment request.\"\"\"\n \n- def __init__(self, request, status, error=None):\n+ def __init__(self, request, status, error=None, payment_group=None):\n super().__init__(request, \"returned enrollment\")\n if str(status).lower() in (\"error\", \"retry\", \"success\"):\n self.update_event_properties(status=status, error=error)\n+ if payment_group is not None:\n+ self.update_event_properties(payment_group=payment_group)\n \n \n def returned_error(request, error):\n@@ -23,6 +25,6 @@\n core.send_event(ReturnedEnrollmentEvent(request, status=\"retry\"))\n \n \n-def returned_success(request):\n+def returned_success(request, payment_group):\n \"\"\"Send the \"returned enrollment\" analytics event with a success status.\"\"\"\n- core.send_event(ReturnedEnrollmentEvent(request, status=\"success\"))\n+ core.send_event(ReturnedEnrollmentEvent(request, status=\"success\", payment_group=payment_group))\ndiff --git a/benefits/enrollment/views.py b/benefits/enrollment/views.py\n--- a/benefits/enrollment/views.py\n+++ b/benefits/enrollment/views.py\n@@ -62,7 +62,7 @@\n \n response = api.Client(agency).enroll(card_token, eligibility.group_id)\n if response.success:\n- analytics.returned_success(request)\n+ analytics.returned_success(request, eligibility.group_id)\n return success(request)\n else:\n analytics.returned_error(request, response.message)\n", "issue": "Add group information to enrollment success event\n#873 brought us an explicit event for enrollment success (and failure, and retry).\r\n\r\nThis ticket seeks to add an event property `payment_group` with the GUID of the Payment Processor group the user was enrolled into.\r\n\r\nThis will help us track down registration issues, verify users are getting into the correct groups, etc.\r\n\r\n## Acceptance Criteria\r\n\r\n<!-- Remember to consider edge cases -->\r\n\r\n- [x] Every time the `enrollment returned` with `status = success` fires, an event property `payment_group` is sent with the event\r\n\r\n## Additional context\r\n\r\nThe Group identifier is stored in the [`EligibilityType` model](https://github.com/cal-itp/benefits/blob/dev/benefits/core/models.py#L45)\r\n\n", "before_files": [{"content": "\"\"\"\nThe enrollment application: analytics implementation.\n\"\"\"\nfrom benefits.core import analytics as core\n\n\nclass ReturnedEnrollmentEvent(core.Event):\n \"\"\"Analytics event representing the end of payment processor enrollment request.\"\"\"\n\n def __init__(self, request, status, error=None):\n super().__init__(request, \"returned enrollment\")\n if str(status).lower() in (\"error\", \"retry\", \"success\"):\n self.update_event_properties(status=status, error=error)\n\n\ndef returned_error(request, error):\n \"\"\"Send the \"returned enrollment\" analytics event with an error status and message.\"\"\"\n core.send_event(ReturnedEnrollmentEvent(request, status=\"error\", error=error))\n\n\ndef returned_retry(request):\n \"\"\"Send the \"returned enrollment\" analytics event with a retry status.\"\"\"\n core.send_event(ReturnedEnrollmentEvent(request, status=\"retry\"))\n\n\ndef returned_success(request):\n \"\"\"Send the \"returned enrollment\" analytics event with a success status.\"\"\"\n 
core.send_event(ReturnedEnrollmentEvent(request, status=\"success\"))\n", "path": "benefits/enrollment/analytics.py"}, {"content": "\"\"\"\nThe enrollment application: view definitions for the benefits enrollment flow.\n\"\"\"\nimport logging\n\nfrom django.http import JsonResponse\nfrom django.template.response import TemplateResponse\nfrom django.urls import reverse\nfrom django.utils.decorators import decorator_from_middleware\nfrom django.utils.translation import pgettext, gettext as _\n\nfrom benefits.core import models, session, viewmodels\nfrom benefits.core.middleware import EligibleSessionRequired, VerifierSessionRequired, pageview_decorator\nfrom benefits.core.views import ROUTE_HELP\nfrom . import analytics, api, forms\n\n\nROUTE_INDEX = \"enrollment:index\"\nROUTE_RETRY = \"enrollment:retry\"\nROUTE_SUCCESS = \"enrollment:success\"\nROUTE_TOKEN = \"enrollment:token\"\n\nTEMPLATE_INDEX = \"enrollment/index.html\"\nTEMPLATE_RETRY = \"enrollment/retry.html\"\nTEMPLATE_SUCCESS = \"enrollment/success.html\"\n\n\nlogger = logging.getLogger(__name__)\n\n\n@decorator_from_middleware(EligibleSessionRequired)\ndef token(request):\n \"\"\"View handler for the enrollment auth token.\"\"\"\n if not session.enrollment_token_valid(request):\n agency = session.agency(request)\n response = api.Client(agency).access_token()\n session.update(request, enrollment_token=response.access_token, enrollment_token_exp=response.expiry)\n\n data = {\"token\": session.enrollment_token(request)}\n\n return JsonResponse(data)\n\n\n@decorator_from_middleware(EligibleSessionRequired)\ndef index(request):\n \"\"\"View handler for the enrollment landing page.\"\"\"\n session.update(request, origin=reverse(ROUTE_INDEX))\n\n agency = session.agency(request)\n\n # POST back after payment processor form, process card token\n if request.method == \"POST\":\n form = forms.CardTokenizeSuccessForm(request.POST)\n if not form.is_valid():\n raise Exception(\"Invalid card token form\")\n\n eligibility = session.eligibility(request)\n logger.debug(f\"Session contains an {models.EligibilityType.__name__}\")\n\n logger.debug(\"Read tokenized card\")\n card_token = form.cleaned_data.get(\"card_token\")\n\n response = api.Client(agency).enroll(card_token, eligibility.group_id)\n if response.success:\n analytics.returned_success(request)\n return success(request)\n else:\n analytics.returned_error(request, response.message)\n raise Exception(response.message)\n\n # GET enrollment index, with button to initiate payment processor connection\n else:\n tokenize_button = \"tokenize_card\"\n tokenize_retry_form = forms.CardTokenizeFailForm(ROUTE_RETRY)\n tokenize_success_form = forms.CardTokenizeSuccessForm(auto_id=True, label_suffix=\"\")\n\n page = viewmodels.Page(\n title=_(\"enrollment.pages.index.title\"),\n content_title=_(\"enrollment.pages.index.content_title\"),\n icon=viewmodels.Icon(\"idcardcheck\", pgettext(\"image alt text\", \"core.icons.idcardcheck\")),\n paragraphs=[_(\"enrollment.pages.index.p[0]\"), _(\"enrollment.pages.index.p[1]\"), _(\"enrollment.pages.index.p[2]\")],\n classes=\"text-lg-center no-image-mobile\",\n forms=[tokenize_retry_form, tokenize_success_form],\n buttons=[\n viewmodels.Button.primary(\n text=_(\"enrollment.buttons.payment_partner\"), id=tokenize_button, url=f\"#{tokenize_button}\"\n ),\n ],\n )\n context = {}\n context.update(page.context_dict())\n\n # add agency details\n agency_vm = viewmodels.TransitAgency(agency)\n context.update(agency_vm.context_dict())\n\n # and payment processor 
details\n processor_vm = viewmodels.PaymentProcessor(\n model=agency.payment_processor,\n access_token_url=reverse(ROUTE_TOKEN),\n element_id=f\"#{tokenize_button}\",\n color=\"#046b99\",\n name=f\"{agency.long_name} {_('partnered with')} {agency.payment_processor.name}\",\n )\n context.update(processor_vm.context_dict())\n logger.warning(f\"card_tokenize_url: {context['payment_processor'].card_tokenize_url}\")\n\n # the tokenize form URLs are injected to page-generated Javascript\n context[\"forms\"] = {\n \"tokenize_retry\": reverse(tokenize_retry_form.action_url),\n \"tokenize_success\": reverse(tokenize_success_form.action_url),\n }\n\n return TemplateResponse(request, TEMPLATE_INDEX, context)\n\n\n@decorator_from_middleware(EligibleSessionRequired)\ndef retry(request):\n \"\"\"View handler for a recoverable failure condition.\"\"\"\n if request.method == \"POST\":\n analytics.returned_retry(request)\n form = forms.CardTokenizeFailForm(request.POST)\n if form.is_valid():\n agency = session.agency(request)\n page = viewmodels.Page(\n classes=\"no-image-mobile\",\n title=_(\"enrollment.pages.retry.title\"),\n icon=viewmodels.Icon(\"bankcardquestion\", pgettext(\"image alt text\", \"core.icons.bankcardquestion\")),\n content_title=_(\"enrollment.pages.retry.title\"),\n paragraphs=[_(\"enrollment.pages.retry.p[0]\")],\n buttons=viewmodels.Button.agency_contact_links(agency),\n )\n page.buttons.append(viewmodels.Button.primary(text=_(\"core.buttons.retry\"), url=session.origin(request)))\n return TemplateResponse(request, TEMPLATE_RETRY, page.context_dict())\n else:\n analytics.returned_error(request, \"Invalid retry submission.\")\n raise Exception(\"Invalid retry submission.\")\n else:\n analytics.returned_error(request, \"This view method only supports POST.\")\n raise Exception(\"This view method only supports POST.\")\n\n\n@pageview_decorator\n@decorator_from_middleware(VerifierSessionRequired)\ndef success(request):\n \"\"\"View handler for the final success page.\"\"\"\n request.path = \"/enrollment/success\"\n session.update(request, origin=reverse(ROUTE_SUCCESS))\n verifier = session.verifier(request)\n icon = viewmodels.Icon(\"bankcardcheck\", pgettext(\"image alt text\", \"core.icons.bankcardcheck\"))\n page = viewmodels.Page(\n classes=\"no-image-mobile\",\n title=_(\"enrollment.pages.success.title\"),\n content_title=_(\"enrollment.pages.success.content_title\"),\n )\n\n if verifier.is_auth_required:\n if session.logged_in(request):\n page.buttons = [viewmodels.Button.logout()]\n page.classes = [\"no-image-mobile\", \"logged-in\"]\n page.icon = icon\n else:\n page.classes = [\"no-image-mobile\", \"logged-out\"]\n page.content_title = _(\"enrollment.pages.success.logout.title\")\n page.noimage = True\n else:\n page.icon = icon\n\n help_link = reverse(ROUTE_HELP)\n context_dict = {**page.context_dict(), **{\"help_link\": help_link}}\n\n return TemplateResponse(request, TEMPLATE_SUCCESS, context_dict)\n", "path": "benefits/enrollment/views.py"}], "after_files": [{"content": "\"\"\"\nThe enrollment application: analytics implementation.\n\"\"\"\nfrom benefits.core import analytics as core\n\n\nclass ReturnedEnrollmentEvent(core.Event):\n \"\"\"Analytics event representing the end of payment processor enrollment request.\"\"\"\n\n def __init__(self, request, status, error=None, payment_group=None):\n super().__init__(request, \"returned enrollment\")\n if str(status).lower() in (\"error\", \"retry\", \"success\"):\n self.update_event_properties(status=status, error=error)\n if 
payment_group is not None:\n self.update_event_properties(payment_group=payment_group)\n\n\ndef returned_error(request, error):\n \"\"\"Send the \"returned enrollment\" analytics event with an error status and message.\"\"\"\n core.send_event(ReturnedEnrollmentEvent(request, status=\"error\", error=error))\n\n\ndef returned_retry(request):\n \"\"\"Send the \"returned enrollment\" analytics event with a retry status.\"\"\"\n core.send_event(ReturnedEnrollmentEvent(request, status=\"retry\"))\n\n\ndef returned_success(request, payment_group):\n \"\"\"Send the \"returned enrollment\" analytics event with a success status.\"\"\"\n core.send_event(ReturnedEnrollmentEvent(request, status=\"success\", payment_group=payment_group))\n", "path": "benefits/enrollment/analytics.py"}, {"content": "\"\"\"\nThe enrollment application: view definitions for the benefits enrollment flow.\n\"\"\"\nimport logging\n\nfrom django.http import JsonResponse\nfrom django.template.response import TemplateResponse\nfrom django.urls import reverse\nfrom django.utils.decorators import decorator_from_middleware\nfrom django.utils.translation import pgettext, gettext as _\n\nfrom benefits.core import models, session, viewmodels\nfrom benefits.core.middleware import EligibleSessionRequired, VerifierSessionRequired, pageview_decorator\nfrom benefits.core.views import ROUTE_HELP\nfrom . import analytics, api, forms\n\n\nROUTE_INDEX = \"enrollment:index\"\nROUTE_RETRY = \"enrollment:retry\"\nROUTE_SUCCESS = \"enrollment:success\"\nROUTE_TOKEN = \"enrollment:token\"\n\nTEMPLATE_INDEX = \"enrollment/index.html\"\nTEMPLATE_RETRY = \"enrollment/retry.html\"\nTEMPLATE_SUCCESS = \"enrollment/success.html\"\n\n\nlogger = logging.getLogger(__name__)\n\n\n@decorator_from_middleware(EligibleSessionRequired)\ndef token(request):\n \"\"\"View handler for the enrollment auth token.\"\"\"\n if not session.enrollment_token_valid(request):\n agency = session.agency(request)\n response = api.Client(agency).access_token()\n session.update(request, enrollment_token=response.access_token, enrollment_token_exp=response.expiry)\n\n data = {\"token\": session.enrollment_token(request)}\n\n return JsonResponse(data)\n\n\n@decorator_from_middleware(EligibleSessionRequired)\ndef index(request):\n \"\"\"View handler for the enrollment landing page.\"\"\"\n session.update(request, origin=reverse(ROUTE_INDEX))\n\n agency = session.agency(request)\n\n # POST back after payment processor form, process card token\n if request.method == \"POST\":\n form = forms.CardTokenizeSuccessForm(request.POST)\n if not form.is_valid():\n raise Exception(\"Invalid card token form\")\n\n eligibility = session.eligibility(request)\n logger.debug(f\"Session contains an {models.EligibilityType.__name__}\")\n\n logger.debug(\"Read tokenized card\")\n card_token = form.cleaned_data.get(\"card_token\")\n\n response = api.Client(agency).enroll(card_token, eligibility.group_id)\n if response.success:\n analytics.returned_success(request, eligibility.group_id)\n return success(request)\n else:\n analytics.returned_error(request, response.message)\n raise Exception(response.message)\n\n # GET enrollment index, with button to initiate payment processor connection\n else:\n tokenize_button = \"tokenize_card\"\n tokenize_retry_form = forms.CardTokenizeFailForm(ROUTE_RETRY)\n tokenize_success_form = forms.CardTokenizeSuccessForm(auto_id=True, label_suffix=\"\")\n\n page = viewmodels.Page(\n title=_(\"enrollment.pages.index.title\"),\n 
content_title=_(\"enrollment.pages.index.content_title\"),\n icon=viewmodels.Icon(\"idcardcheck\", pgettext(\"image alt text\", \"core.icons.idcardcheck\")),\n paragraphs=[_(\"enrollment.pages.index.p[0]\"), _(\"enrollment.pages.index.p[1]\"), _(\"enrollment.pages.index.p[2]\")],\n classes=\"text-lg-center no-image-mobile\",\n forms=[tokenize_retry_form, tokenize_success_form],\n buttons=[\n viewmodels.Button.primary(\n text=_(\"enrollment.buttons.payment_partner\"), id=tokenize_button, url=f\"#{tokenize_button}\"\n ),\n ],\n )\n context = {}\n context.update(page.context_dict())\n\n # add agency details\n agency_vm = viewmodels.TransitAgency(agency)\n context.update(agency_vm.context_dict())\n\n # and payment processor details\n processor_vm = viewmodels.PaymentProcessor(\n model=agency.payment_processor,\n access_token_url=reverse(ROUTE_TOKEN),\n element_id=f\"#{tokenize_button}\",\n color=\"#046b99\",\n name=f\"{agency.long_name} {_('partnered with')} {agency.payment_processor.name}\",\n )\n context.update(processor_vm.context_dict())\n logger.warning(f\"card_tokenize_url: {context['payment_processor'].card_tokenize_url}\")\n\n # the tokenize form URLs are injected to page-generated Javascript\n context[\"forms\"] = {\n \"tokenize_retry\": reverse(tokenize_retry_form.action_url),\n \"tokenize_success\": reverse(tokenize_success_form.action_url),\n }\n\n return TemplateResponse(request, TEMPLATE_INDEX, context)\n\n\n@decorator_from_middleware(EligibleSessionRequired)\ndef retry(request):\n \"\"\"View handler for a recoverable failure condition.\"\"\"\n if request.method == \"POST\":\n analytics.returned_retry(request)\n form = forms.CardTokenizeFailForm(request.POST)\n if form.is_valid():\n agency = session.agency(request)\n page = viewmodels.Page(\n classes=\"no-image-mobile\",\n title=_(\"enrollment.pages.retry.title\"),\n icon=viewmodels.Icon(\"bankcardquestion\", pgettext(\"image alt text\", \"core.icons.bankcardquestion\")),\n content_title=_(\"enrollment.pages.retry.title\"),\n paragraphs=[_(\"enrollment.pages.retry.p[0]\")],\n buttons=viewmodels.Button.agency_contact_links(agency),\n )\n page.buttons.append(viewmodels.Button.primary(text=_(\"core.buttons.retry\"), url=session.origin(request)))\n return TemplateResponse(request, TEMPLATE_RETRY, page.context_dict())\n else:\n analytics.returned_error(request, \"Invalid retry submission.\")\n raise Exception(\"Invalid retry submission.\")\n else:\n analytics.returned_error(request, \"This view method only supports POST.\")\n raise Exception(\"This view method only supports POST.\")\n\n\n@pageview_decorator\n@decorator_from_middleware(VerifierSessionRequired)\ndef success(request):\n \"\"\"View handler for the final success page.\"\"\"\n request.path = \"/enrollment/success\"\n session.update(request, origin=reverse(ROUTE_SUCCESS))\n verifier = session.verifier(request)\n icon = viewmodels.Icon(\"bankcardcheck\", pgettext(\"image alt text\", \"core.icons.bankcardcheck\"))\n page = viewmodels.Page(\n classes=\"no-image-mobile\",\n title=_(\"enrollment.pages.success.title\"),\n content_title=_(\"enrollment.pages.success.content_title\"),\n )\n\n if verifier.is_auth_required:\n if session.logged_in(request):\n page.buttons = [viewmodels.Button.logout()]\n page.classes = [\"no-image-mobile\", \"logged-in\"]\n page.icon = icon\n else:\n page.classes = [\"no-image-mobile\", \"logged-out\"]\n page.content_title = _(\"enrollment.pages.success.logout.title\")\n page.noimage = True\n else:\n page.icon = icon\n\n help_link = 
reverse(ROUTE_HELP)\n context_dict = {**page.context_dict(), **{\"help_link\": help_link}}\n\n return TemplateResponse(request, TEMPLATE_SUCCESS, context_dict)\n", "path": "benefits/enrollment/views.py"}]} | 2,505 | 389 |
gh_patches_debug_35694 | rasdani/github-patches | git_diff | tobymao__sqlglot-1328 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
ParseError with Oracle SQL containing hint USE_NL with more than 2 tables (version 11.4.1)
`parse_one(sql='SELECT /*+ USE_NL(A B C) */ A.COL_TEST FROM TABLE_A A, TABLE_B B, TABLE_C C', read='oracle')`
is returning:
Traceback (most recent call last):
File "/Applications/PyCharm CE.app/Contents/plugins/python-ce/helpers/pydev/pydevconsole.py", line 364, in runcode
coro = func()
^^^^^^
File "<input>", line 1, in <module>
File "/usr/local/lib/python3.11/site-packages/sqlglot/__init__.py", line 147, in parse_one
result = dialect.parse(sql, **opts)
^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/usr/local/lib/python3.11/site-packages/sqlglot/dialects/dialect.py", line 163, in parse
return self.parser(**opts).parse(self.tokenizer.tokenize(sql), sql)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/usr/local/lib/python3.11/site-packages/sqlglot/parser.py", line 767, in parse
return self._parse(
^^^^^^^^^^^^
File "/usr/local/lib/python3.11/site-packages/sqlglot/parser.py", line 830, in _parse
expressions.append(parse_method(self))
^^^^^^^^^^^^^^^^^^
File "/usr/local/lib/python3.11/site-packages/sqlglot/parser.py", line 993, in _parse_statement
expression = self._parse_set_operations(expression) if expression else self._parse_select()
^^^^^^^^^^^^^^^^^^^^
File "/usr/local/lib/python3.11/site-packages/sqlglot/parser.py", line 1689, in _parse_select
hint = self._parse_hint()
^^^^^^^^^^^^^^^^^^
File "/usr/local/lib/python3.11/site-packages/sqlglot/parser.py", line 1831, in _parse_hint
hints = self._parse_csv(self._parse_function)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/usr/local/lib/python3.11/site-packages/sqlglot/parser.py", line 3585, in _parse_csv
parse_result = parse_method()
^^^^^^^^^^^^^^
File "/usr/local/lib/python3.11/site-packages/sqlglot/parser.py", line 2842, in _parse_function
self._match_r_paren(this)
File "/usr/local/lib/python3.11/site-packages/sqlglot/parser.py", line 3935, in _match_r_paren
self.raise_error("Expecting )")
File "/usr/local/lib/python3.11/site-packages/sqlglot/parser.py", line 876, in raise_error
raise error
sqlglot.errors.ParseError: Expecting ). Line 1, Col: 24.
SELECT /*+ USE_NL(A B C) */ A.COL_TEST FROM TABLE_A A, TABLE_B B, TABLE_C C
Or
`parse_one(sql='SELECT /*+ USE_NL(A B) */ A.COL_TEST FROM TABLE_A A, TABLE_B B, TABLE_C C', read='oracle')`
works and returns:
(SELECT hint:
(HINT expressions:
(ANONYMOUS this: USE_NL, expressions:
(ALIAS this:
(COLUMN this:
(IDENTIFIER this: A, quoted: False)), alias:
(IDENTIFIER this: B, quoted: False)))), expressions:
(COLUMN this:
(IDENTIFIER this: COL_TEST, quoted: False), table:
(IDENTIFIER this: A, quoted: False)), from:
(FROM expressions:
(TABLE this:
(IDENTIFIER this: TABLE_A, quoted: False), alias:
(TABLEALIAS this:
(IDENTIFIER this: A, quoted: False))),
(TABLE this:
(IDENTIFIER this: TABLE_B, quoted: False), alias:
(TABLEALIAS this:
(IDENTIFIER this: B, quoted: False))),
(TABLE this:
(IDENTIFIER this: TABLE_C, quoted: False), alias:
(TABLEALIAS this:
(IDENTIFIER this: C, quoted: False)))))
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `sqlglot/dialects/oracle.py`
Content:
```
1 from __future__ import annotations
2
3 import typing as t
4
5 from sqlglot import exp, generator, parser, tokens, transforms
6 from sqlglot.dialects.dialect import Dialect, no_ilike_sql, rename_func, trim_sql
7 from sqlglot.helper import csv, seq_get
8 from sqlglot.tokens import TokenType
9
10 PASSING_TABLE_ALIAS_TOKENS = parser.Parser.TABLE_ALIAS_TOKENS - {
11 TokenType.COLUMN,
12 TokenType.RETURNING,
13 }
14
15
16 def _limit_sql(self, expression):
17 return self.fetch_sql(exp.Fetch(direction="FIRST", count=expression.expression))
18
19
20 def _parse_xml_table(self) -> exp.XMLTable:
21 this = self._parse_string()
22
23 passing = None
24 columns = None
25
26 if self._match_text_seq("PASSING"):
27 # The BY VALUE keywords are optional and are provided for semantic clarity
28 self._match_text_seq("BY", "VALUE")
29 passing = self._parse_csv(
30 lambda: self._parse_table(alias_tokens=PASSING_TABLE_ALIAS_TOKENS)
31 )
32
33 by_ref = self._match_text_seq("RETURNING", "SEQUENCE", "BY", "REF")
34
35 if self._match_text_seq("COLUMNS"):
36 columns = self._parse_csv(lambda: self._parse_column_def(self._parse_field(any_token=True)))
37
38 return self.expression(
39 exp.XMLTable,
40 this=this,
41 passing=passing,
42 columns=columns,
43 by_ref=by_ref,
44 )
45
46
47 class Oracle(Dialect):
48 # https://docs.oracle.com/database/121/SQLRF/sql_elements004.htm#SQLRF00212
49 # https://docs.python.org/3/library/datetime.html#strftime-and-strptime-format-codes
50 time_mapping = {
51 "AM": "%p", # Meridian indicator with or without periods
52 "A.M.": "%p", # Meridian indicator with or without periods
53 "PM": "%p", # Meridian indicator with or without periods
54 "P.M.": "%p", # Meridian indicator with or without periods
55 "D": "%u", # Day of week (1-7)
56 "DAY": "%A", # name of day
57 "DD": "%d", # day of month (1-31)
58 "DDD": "%j", # day of year (1-366)
59 "DY": "%a", # abbreviated name of day
60 "HH": "%I", # Hour of day (1-12)
61 "HH12": "%I", # alias for HH
62 "HH24": "%H", # Hour of day (0-23)
63 "IW": "%V", # Calendar week of year (1-52 or 1-53), as defined by the ISO 8601 standard
64 "MI": "%M", # Minute (0-59)
65 "MM": "%m", # Month (01-12; January = 01)
66 "MON": "%b", # Abbreviated name of month
67 "MONTH": "%B", # Name of month
68 "SS": "%S", # Second (0-59)
69 "WW": "%W", # Week of year (1-53)
70 "YY": "%y", # 15
71 "YYYY": "%Y", # 2015
72 }
73
74 class Parser(parser.Parser):
75 FUNCTIONS = {
76 **parser.Parser.FUNCTIONS, # type: ignore
77 "DECODE": exp.Matches.from_arg_list,
78 "SQUARE": lambda args: exp.Pow(this=seq_get(args, 0), expression=exp.Literal.number(2)),
79 }
80
81 FUNCTION_PARSERS: t.Dict[str, t.Callable] = {
82 **parser.Parser.FUNCTION_PARSERS,
83 "XMLTABLE": _parse_xml_table,
84 }
85
86 def _parse_column(self) -> t.Optional[exp.Expression]:
87 column = super()._parse_column()
88 if column:
89 column.set("join_mark", self._match(TokenType.JOIN_MARKER))
90 return column
91
92 class Generator(generator.Generator):
93 LOCKING_READS_SUPPORTED = True
94
95 TYPE_MAPPING = {
96 **generator.Generator.TYPE_MAPPING, # type: ignore
97 exp.DataType.Type.TINYINT: "NUMBER",
98 exp.DataType.Type.SMALLINT: "NUMBER",
99 exp.DataType.Type.INT: "NUMBER",
100 exp.DataType.Type.BIGINT: "NUMBER",
101 exp.DataType.Type.DECIMAL: "NUMBER",
102 exp.DataType.Type.DOUBLE: "DOUBLE PRECISION",
103 exp.DataType.Type.VARCHAR: "VARCHAR2",
104 exp.DataType.Type.NVARCHAR: "NVARCHAR2",
105 exp.DataType.Type.TEXT: "CLOB",
106 exp.DataType.Type.BINARY: "BLOB",
107 exp.DataType.Type.VARBINARY: "BLOB",
108 }
109
110 TRANSFORMS = {
111 **generator.Generator.TRANSFORMS, # type: ignore
112 **transforms.UNALIAS_GROUP, # type: ignore
113 exp.ILike: no_ilike_sql,
114 exp.Limit: _limit_sql,
115 exp.Trim: trim_sql,
116 exp.Matches: rename_func("DECODE"),
117 exp.StrToTime: lambda self, e: f"TO_TIMESTAMP({self.sql(e, 'this')}, {self.format_time(e)})",
118 exp.Subquery: lambda self, e: self.subquery_sql(e, sep=" "),
119 exp.Table: lambda self, e: self.table_sql(e, sep=" "),
120 exp.TimeToStr: lambda self, e: f"TO_CHAR({self.sql(e, 'this')}, {self.format_time(e)})",
121 exp.UnixToTime: lambda self, e: f"TO_DATE('1970-01-01','YYYY-MM-DD') + ({self.sql(e, 'this')} / 86400)",
122 exp.Substring: rename_func("SUBSTR"),
123 exp.ToChar: lambda self, e: self.function_fallback_sql(e),
124 }
125
126 def query_modifiers(self, expression: exp.Expression, *sqls: str) -> str:
127 return csv(
128 *sqls,
129 *[self.sql(sql) for sql in expression.args.get("joins") or []],
130 self.sql(expression, "match"),
131 *[self.sql(sql) for sql in expression.args.get("laterals") or []],
132 self.sql(expression, "where"),
133 self.sql(expression, "group"),
134 self.sql(expression, "having"),
135 self.sql(expression, "qualify"),
136 self.seg("WINDOW ") + self.expressions(expression, "windows", flat=True)
137 if expression.args.get("windows")
138 else "",
139 self.sql(expression, "distribute"),
140 self.sql(expression, "sort"),
141 self.sql(expression, "cluster"),
142 self.sql(expression, "order"),
143 self.sql(expression, "offset"), # offset before limit in oracle
144 self.sql(expression, "limit"),
145 self.sql(expression, "lock"),
146 sep="",
147 )
148
149 def offset_sql(self, expression: exp.Offset) -> str:
150 return f"{super().offset_sql(expression)} ROWS"
151
152 def column_sql(self, expression: exp.Column) -> str:
153 column = super().column_sql(expression)
154 return f"{column} (+)" if expression.args.get("join_mark") else column
155
156 def xmltable_sql(self, expression: exp.XMLTable) -> str:
157 this = self.sql(expression, "this")
158 passing = self.expressions(expression, "passing")
159 passing = f"{self.sep()}PASSING{self.seg(passing)}" if passing else ""
160 columns = self.expressions(expression, "columns")
161 columns = f"{self.sep()}COLUMNS{self.seg(columns)}" if columns else ""
162 by_ref = (
163 f"{self.sep()}RETURNING SEQUENCE BY REF" if expression.args.get("by_ref") else ""
164 )
165 return f"XMLTABLE({self.sep('')}{self.indent(this + passing + by_ref + columns)}{self.seg(')', sep='')}"
166
167 class Tokenizer(tokens.Tokenizer):
168 KEYWORDS = {
169 **tokens.Tokenizer.KEYWORDS,
170 "(+)": TokenType.JOIN_MARKER,
171 "COLUMNS": TokenType.COLUMN,
172 "MATCH_RECOGNIZE": TokenType.MATCH_RECOGNIZE,
173 "MINUS": TokenType.EXCEPT,
174 "NVARCHAR2": TokenType.NVARCHAR,
175 "RETURNING": TokenType.RETURNING,
176 "START": TokenType.BEGIN,
177 "TOP": TokenType.TOP,
178 "VARCHAR2": TokenType.VARCHAR,
179 }
180
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/sqlglot/dialects/oracle.py b/sqlglot/dialects/oracle.py
--- a/sqlglot/dialects/oracle.py
+++ b/sqlglot/dialects/oracle.py
@@ -89,6 +89,20 @@
column.set("join_mark", self._match(TokenType.JOIN_MARKER))
return column
+ def _parse_hint(self) -> t.Optional[exp.Expression]:
+ if self._match(TokenType.HINT):
+ start = self._curr
+ while self._curr and not self._match_pair(TokenType.STAR, TokenType.SLASH):
+ self._advance()
+
+ if not self._curr:
+ self.raise_error("Expected */ after HINT")
+
+ end = self._tokens[self._index - 3]
+ return exp.Hint(expressions=[self._find_sql(start, end)])
+
+ return None
+
class Generator(generator.Generator):
LOCKING_READS_SUPPORTED = True
@@ -110,17 +124,18 @@
TRANSFORMS = {
**generator.Generator.TRANSFORMS, # type: ignore
**transforms.UNALIAS_GROUP, # type: ignore
+ exp.Hint: lambda self, e: f" /*+ {self.expressions(e).strip()} */",
exp.ILike: no_ilike_sql,
exp.Limit: _limit_sql,
- exp.Trim: trim_sql,
exp.Matches: rename_func("DECODE"),
exp.StrToTime: lambda self, e: f"TO_TIMESTAMP({self.sql(e, 'this')}, {self.format_time(e)})",
exp.Subquery: lambda self, e: self.subquery_sql(e, sep=" "),
+ exp.Substring: rename_func("SUBSTR"),
exp.Table: lambda self, e: self.table_sql(e, sep=" "),
exp.TimeToStr: lambda self, e: f"TO_CHAR({self.sql(e, 'this')}, {self.format_time(e)})",
- exp.UnixToTime: lambda self, e: f"TO_DATE('1970-01-01','YYYY-MM-DD') + ({self.sql(e, 'this')} / 86400)",
- exp.Substring: rename_func("SUBSTR"),
exp.ToChar: lambda self, e: self.function_fallback_sql(e),
+ exp.Trim: trim_sql,
+ exp.UnixToTime: lambda self, e: f"TO_DATE('1970-01-01','YYYY-MM-DD') + ({self.sql(e, 'this')} / 86400)",
}
def query_modifiers(self, expression: exp.Expression, *sqls: str) -> str:
| {"golden_diff": "diff --git a/sqlglot/dialects/oracle.py b/sqlglot/dialects/oracle.py\n--- a/sqlglot/dialects/oracle.py\n+++ b/sqlglot/dialects/oracle.py\n@@ -89,6 +89,20 @@\n column.set(\"join_mark\", self._match(TokenType.JOIN_MARKER))\n return column\n \n+ def _parse_hint(self) -> t.Optional[exp.Expression]:\n+ if self._match(TokenType.HINT):\n+ start = self._curr\n+ while self._curr and not self._match_pair(TokenType.STAR, TokenType.SLASH):\n+ self._advance()\n+\n+ if not self._curr:\n+ self.raise_error(\"Expected */ after HINT\")\n+\n+ end = self._tokens[self._index - 3]\n+ return exp.Hint(expressions=[self._find_sql(start, end)])\n+\n+ return None\n+\n class Generator(generator.Generator):\n LOCKING_READS_SUPPORTED = True\n \n@@ -110,17 +124,18 @@\n TRANSFORMS = {\n **generator.Generator.TRANSFORMS, # type: ignore\n **transforms.UNALIAS_GROUP, # type: ignore\n+ exp.Hint: lambda self, e: f\" /*+ {self.expressions(e).strip()} */\",\n exp.ILike: no_ilike_sql,\n exp.Limit: _limit_sql,\n- exp.Trim: trim_sql,\n exp.Matches: rename_func(\"DECODE\"),\n exp.StrToTime: lambda self, e: f\"TO_TIMESTAMP({self.sql(e, 'this')}, {self.format_time(e)})\",\n exp.Subquery: lambda self, e: self.subquery_sql(e, sep=\" \"),\n+ exp.Substring: rename_func(\"SUBSTR\"),\n exp.Table: lambda self, e: self.table_sql(e, sep=\" \"),\n exp.TimeToStr: lambda self, e: f\"TO_CHAR({self.sql(e, 'this')}, {self.format_time(e)})\",\n- exp.UnixToTime: lambda self, e: f\"TO_DATE('1970-01-01','YYYY-MM-DD') + ({self.sql(e, 'this')} / 86400)\",\n- exp.Substring: rename_func(\"SUBSTR\"),\n exp.ToChar: lambda self, e: self.function_fallback_sql(e),\n+ exp.Trim: trim_sql,\n+ exp.UnixToTime: lambda self, e: f\"TO_DATE('1970-01-01','YYYY-MM-DD') + ({self.sql(e, 'this')} / 86400)\",\n }\n \n def query_modifiers(self, expression: exp.Expression, *sqls: str) -> str:\n", "issue": "ParseError with Oracle SQL containing hint USE_NL with more than 2 tables (version 11.4.1)\n\r\n`parse_one(sql='SELECT /*+ USE_NL(A B C) */ A.COL_TEST FROM TABLE_A A, TABLE_B B, TABLE_C C', read='oracle')\r\n`\r\nis returning: \r\nTraceback (most recent call last):\r\n File \"/Applications/PyCharm CE.app/Contents/plugins/python-ce/helpers/pydev/pydevconsole.py\", line 364, in runcode\r\n coro = func()\r\n ^^^^^^\r\n File \"<input>\", line 1, in <module>\r\n File \"/usr/local/lib/python3.11/site-packages/sqlglot/__init__.py\", line 147, in parse_one\r\n result = dialect.parse(sql, **opts)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/usr/local/lib/python3.11/site-packages/sqlglot/dialects/dialect.py\", line 163, in parse\r\n return self.parser(**opts).parse(self.tokenizer.tokenize(sql), sql)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/usr/local/lib/python3.11/site-packages/sqlglot/parser.py\", line 767, in parse\r\n return self._parse(\r\n ^^^^^^^^^^^^\r\n File \"/usr/local/lib/python3.11/site-packages/sqlglot/parser.py\", line 830, in _parse\r\n expressions.append(parse_method(self))\r\n ^^^^^^^^^^^^^^^^^^\r\n File \"/usr/local/lib/python3.11/site-packages/sqlglot/parser.py\", line 993, in _parse_statement\r\n expression = self._parse_set_operations(expression) if expression else self._parse_select()\r\n ^^^^^^^^^^^^^^^^^^^^\r\n File \"/usr/local/lib/python3.11/site-packages/sqlglot/parser.py\", line 1689, in _parse_select\r\n hint = self._parse_hint()\r\n ^^^^^^^^^^^^^^^^^^\r\n File \"/usr/local/lib/python3.11/site-packages/sqlglot/parser.py\", line 1831, in _parse_hint\r\n hints = self._parse_csv(self._parse_function)\r\n 
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/usr/local/lib/python3.11/site-packages/sqlglot/parser.py\", line 3585, in _parse_csv\r\n parse_result = parse_method()\r\n ^^^^^^^^^^^^^^\r\n File \"/usr/local/lib/python3.11/site-packages/sqlglot/parser.py\", line 2842, in _parse_function\r\n self._match_r_paren(this)\r\n File \"/usr/local/lib/python3.11/site-packages/sqlglot/parser.py\", line 3935, in _match_r_paren\r\n self.raise_error(\"Expecting )\")\r\n File \"/usr/local/lib/python3.11/site-packages/sqlglot/parser.py\", line 876, in raise_error\r\n raise error\r\nsqlglot.errors.ParseError: Expecting ). Line 1, Col: 24.\r\n SELECT /*+ USE_NL(A B C) */ A.COL_TEST FROM TABLE_A A, TABLE_B B, TABLE_C C\r\n\r\nOr\r\n`parse_one(sql='SELECT /*+ USE_NL(A B) */ A.COL_TEST FROM TABLE_A A, TABLE_B B, TABLE_C C', read='oracle')`\r\n\r\nworks and returns:\r\n\r\n(SELECT hint: \r\n (HINT expressions: \r\n (ANONYMOUS this: USE_NL, expressions: \r\n (ALIAS this: \r\n (COLUMN this: \r\n (IDENTIFIER this: A, quoted: False)), alias: \r\n (IDENTIFIER this: B, quoted: False)))), expressions: \r\n (COLUMN this: \r\n (IDENTIFIER this: COL_TEST, quoted: False), table: \r\n (IDENTIFIER this: A, quoted: False)), from: \r\n (FROM expressions: \r\n (TABLE this: \r\n (IDENTIFIER this: TABLE_A, quoted: False), alias: \r\n (TABLEALIAS this: \r\n (IDENTIFIER this: A, quoted: False))), \r\n (TABLE this: \r\n (IDENTIFIER this: TABLE_B, quoted: False), alias: \r\n (TABLEALIAS this: \r\n (IDENTIFIER this: B, quoted: False))), \r\n (TABLE this: \r\n (IDENTIFIER this: TABLE_C, quoted: False), alias: \r\n (TABLEALIAS this: \r\n (IDENTIFIER this: C, quoted: False)))))\n", "before_files": [{"content": "from __future__ import annotations\n\nimport typing as t\n\nfrom sqlglot import exp, generator, parser, tokens, transforms\nfrom sqlglot.dialects.dialect import Dialect, no_ilike_sql, rename_func, trim_sql\nfrom sqlglot.helper import csv, seq_get\nfrom sqlglot.tokens import TokenType\n\nPASSING_TABLE_ALIAS_TOKENS = parser.Parser.TABLE_ALIAS_TOKENS - {\n TokenType.COLUMN,\n TokenType.RETURNING,\n}\n\n\ndef _limit_sql(self, expression):\n return self.fetch_sql(exp.Fetch(direction=\"FIRST\", count=expression.expression))\n\n\ndef _parse_xml_table(self) -> exp.XMLTable:\n this = self._parse_string()\n\n passing = None\n columns = None\n\n if self._match_text_seq(\"PASSING\"):\n # The BY VALUE keywords are optional and are provided for semantic clarity\n self._match_text_seq(\"BY\", \"VALUE\")\n passing = self._parse_csv(\n lambda: self._parse_table(alias_tokens=PASSING_TABLE_ALIAS_TOKENS)\n )\n\n by_ref = self._match_text_seq(\"RETURNING\", \"SEQUENCE\", \"BY\", \"REF\")\n\n if self._match_text_seq(\"COLUMNS\"):\n columns = self._parse_csv(lambda: self._parse_column_def(self._parse_field(any_token=True)))\n\n return self.expression(\n exp.XMLTable,\n this=this,\n passing=passing,\n columns=columns,\n by_ref=by_ref,\n )\n\n\nclass Oracle(Dialect):\n # https://docs.oracle.com/database/121/SQLRF/sql_elements004.htm#SQLRF00212\n # https://docs.python.org/3/library/datetime.html#strftime-and-strptime-format-codes\n time_mapping = {\n \"AM\": \"%p\", # Meridian indicator with or without periods\n \"A.M.\": \"%p\", # Meridian indicator with or without periods\n \"PM\": \"%p\", # Meridian indicator with or without periods\n \"P.M.\": \"%p\", # Meridian indicator with or without periods\n \"D\": \"%u\", # Day of week (1-7)\n \"DAY\": \"%A\", # name of day\n \"DD\": \"%d\", # day of month (1-31)\n \"DDD\": \"%j\", # day of year (1-366)\n 
\"DY\": \"%a\", # abbreviated name of day\n \"HH\": \"%I\", # Hour of day (1-12)\n \"HH12\": \"%I\", # alias for HH\n \"HH24\": \"%H\", # Hour of day (0-23)\n \"IW\": \"%V\", # Calendar week of year (1-52 or 1-53), as defined by the ISO 8601 standard\n \"MI\": \"%M\", # Minute (0-59)\n \"MM\": \"%m\", # Month (01-12; January = 01)\n \"MON\": \"%b\", # Abbreviated name of month\n \"MONTH\": \"%B\", # Name of month\n \"SS\": \"%S\", # Second (0-59)\n \"WW\": \"%W\", # Week of year (1-53)\n \"YY\": \"%y\", # 15\n \"YYYY\": \"%Y\", # 2015\n }\n\n class Parser(parser.Parser):\n FUNCTIONS = {\n **parser.Parser.FUNCTIONS, # type: ignore\n \"DECODE\": exp.Matches.from_arg_list,\n \"SQUARE\": lambda args: exp.Pow(this=seq_get(args, 0), expression=exp.Literal.number(2)),\n }\n\n FUNCTION_PARSERS: t.Dict[str, t.Callable] = {\n **parser.Parser.FUNCTION_PARSERS,\n \"XMLTABLE\": _parse_xml_table,\n }\n\n def _parse_column(self) -> t.Optional[exp.Expression]:\n column = super()._parse_column()\n if column:\n column.set(\"join_mark\", self._match(TokenType.JOIN_MARKER))\n return column\n\n class Generator(generator.Generator):\n LOCKING_READS_SUPPORTED = True\n\n TYPE_MAPPING = {\n **generator.Generator.TYPE_MAPPING, # type: ignore\n exp.DataType.Type.TINYINT: \"NUMBER\",\n exp.DataType.Type.SMALLINT: \"NUMBER\",\n exp.DataType.Type.INT: \"NUMBER\",\n exp.DataType.Type.BIGINT: \"NUMBER\",\n exp.DataType.Type.DECIMAL: \"NUMBER\",\n exp.DataType.Type.DOUBLE: \"DOUBLE PRECISION\",\n exp.DataType.Type.VARCHAR: \"VARCHAR2\",\n exp.DataType.Type.NVARCHAR: \"NVARCHAR2\",\n exp.DataType.Type.TEXT: \"CLOB\",\n exp.DataType.Type.BINARY: \"BLOB\",\n exp.DataType.Type.VARBINARY: \"BLOB\",\n }\n\n TRANSFORMS = {\n **generator.Generator.TRANSFORMS, # type: ignore\n **transforms.UNALIAS_GROUP, # type: ignore\n exp.ILike: no_ilike_sql,\n exp.Limit: _limit_sql,\n exp.Trim: trim_sql,\n exp.Matches: rename_func(\"DECODE\"),\n exp.StrToTime: lambda self, e: f\"TO_TIMESTAMP({self.sql(e, 'this')}, {self.format_time(e)})\",\n exp.Subquery: lambda self, e: self.subquery_sql(e, sep=\" \"),\n exp.Table: lambda self, e: self.table_sql(e, sep=\" \"),\n exp.TimeToStr: lambda self, e: f\"TO_CHAR({self.sql(e, 'this')}, {self.format_time(e)})\",\n exp.UnixToTime: lambda self, e: f\"TO_DATE('1970-01-01','YYYY-MM-DD') + ({self.sql(e, 'this')} / 86400)\",\n exp.Substring: rename_func(\"SUBSTR\"),\n exp.ToChar: lambda self, e: self.function_fallback_sql(e),\n }\n\n def query_modifiers(self, expression: exp.Expression, *sqls: str) -> str:\n return csv(\n *sqls,\n *[self.sql(sql) for sql in expression.args.get(\"joins\") or []],\n self.sql(expression, \"match\"),\n *[self.sql(sql) for sql in expression.args.get(\"laterals\") or []],\n self.sql(expression, \"where\"),\n self.sql(expression, \"group\"),\n self.sql(expression, \"having\"),\n self.sql(expression, \"qualify\"),\n self.seg(\"WINDOW \") + self.expressions(expression, \"windows\", flat=True)\n if expression.args.get(\"windows\")\n else \"\",\n self.sql(expression, \"distribute\"),\n self.sql(expression, \"sort\"),\n self.sql(expression, \"cluster\"),\n self.sql(expression, \"order\"),\n self.sql(expression, \"offset\"), # offset before limit in oracle\n self.sql(expression, \"limit\"),\n self.sql(expression, \"lock\"),\n sep=\"\",\n )\n\n def offset_sql(self, expression: exp.Offset) -> str:\n return f\"{super().offset_sql(expression)} ROWS\"\n\n def column_sql(self, expression: exp.Column) -> str:\n column = super().column_sql(expression)\n return f\"{column} (+)\" if 
expression.args.get(\"join_mark\") else column\n\n def xmltable_sql(self, expression: exp.XMLTable) -> str:\n this = self.sql(expression, \"this\")\n passing = self.expressions(expression, \"passing\")\n passing = f\"{self.sep()}PASSING{self.seg(passing)}\" if passing else \"\"\n columns = self.expressions(expression, \"columns\")\n columns = f\"{self.sep()}COLUMNS{self.seg(columns)}\" if columns else \"\"\n by_ref = (\n f\"{self.sep()}RETURNING SEQUENCE BY REF\" if expression.args.get(\"by_ref\") else \"\"\n )\n return f\"XMLTABLE({self.sep('')}{self.indent(this + passing + by_ref + columns)}{self.seg(')', sep='')}\"\n\n class Tokenizer(tokens.Tokenizer):\n KEYWORDS = {\n **tokens.Tokenizer.KEYWORDS,\n \"(+)\": TokenType.JOIN_MARKER,\n \"COLUMNS\": TokenType.COLUMN,\n \"MATCH_RECOGNIZE\": TokenType.MATCH_RECOGNIZE,\n \"MINUS\": TokenType.EXCEPT,\n \"NVARCHAR2\": TokenType.NVARCHAR,\n \"RETURNING\": TokenType.RETURNING,\n \"START\": TokenType.BEGIN,\n \"TOP\": TokenType.TOP,\n \"VARCHAR2\": TokenType.VARCHAR,\n }\n", "path": "sqlglot/dialects/oracle.py"}], "after_files": [{"content": "from __future__ import annotations\n\nimport typing as t\n\nfrom sqlglot import exp, generator, parser, tokens, transforms\nfrom sqlglot.dialects.dialect import Dialect, no_ilike_sql, rename_func, trim_sql\nfrom sqlglot.helper import csv, seq_get\nfrom sqlglot.tokens import TokenType\n\nPASSING_TABLE_ALIAS_TOKENS = parser.Parser.TABLE_ALIAS_TOKENS - {\n TokenType.COLUMN,\n TokenType.RETURNING,\n}\n\n\ndef _limit_sql(self, expression):\n return self.fetch_sql(exp.Fetch(direction=\"FIRST\", count=expression.expression))\n\n\ndef _parse_xml_table(self) -> exp.XMLTable:\n this = self._parse_string()\n\n passing = None\n columns = None\n\n if self._match_text_seq(\"PASSING\"):\n # The BY VALUE keywords are optional and are provided for semantic clarity\n self._match_text_seq(\"BY\", \"VALUE\")\n passing = self._parse_csv(\n lambda: self._parse_table(alias_tokens=PASSING_TABLE_ALIAS_TOKENS)\n )\n\n by_ref = self._match_text_seq(\"RETURNING\", \"SEQUENCE\", \"BY\", \"REF\")\n\n if self._match_text_seq(\"COLUMNS\"):\n columns = self._parse_csv(lambda: self._parse_column_def(self._parse_field(any_token=True)))\n\n return self.expression(\n exp.XMLTable,\n this=this,\n passing=passing,\n columns=columns,\n by_ref=by_ref,\n )\n\n\nclass Oracle(Dialect):\n # https://docs.oracle.com/database/121/SQLRF/sql_elements004.htm#SQLRF00212\n # https://docs.python.org/3/library/datetime.html#strftime-and-strptime-format-codes\n time_mapping = {\n \"AM\": \"%p\", # Meridian indicator with or without periods\n \"A.M.\": \"%p\", # Meridian indicator with or without periods\n \"PM\": \"%p\", # Meridian indicator with or without periods\n \"P.M.\": \"%p\", # Meridian indicator with or without periods\n \"D\": \"%u\", # Day of week (1-7)\n \"DAY\": \"%A\", # name of day\n \"DD\": \"%d\", # day of month (1-31)\n \"DDD\": \"%j\", # day of year (1-366)\n \"DY\": \"%a\", # abbreviated name of day\n \"HH\": \"%I\", # Hour of day (1-12)\n \"HH12\": \"%I\", # alias for HH\n \"HH24\": \"%H\", # Hour of day (0-23)\n \"IW\": \"%V\", # Calendar week of year (1-52 or 1-53), as defined by the ISO 8601 standard\n \"MI\": \"%M\", # Minute (0-59)\n \"MM\": \"%m\", # Month (01-12; January = 01)\n \"MON\": \"%b\", # Abbreviated name of month\n \"MONTH\": \"%B\", # Name of month\n \"SS\": \"%S\", # Second (0-59)\n \"WW\": \"%W\", # Week of year (1-53)\n \"YY\": \"%y\", # 15\n \"YYYY\": \"%Y\", # 2015\n }\n\n class Parser(parser.Parser):\n FUNCTIONS = {\n 
**parser.Parser.FUNCTIONS, # type: ignore\n \"DECODE\": exp.Matches.from_arg_list,\n \"SQUARE\": lambda args: exp.Pow(this=seq_get(args, 0), expression=exp.Literal.number(2)),\n }\n\n FUNCTION_PARSERS: t.Dict[str, t.Callable] = {\n **parser.Parser.FUNCTION_PARSERS,\n \"XMLTABLE\": _parse_xml_table,\n }\n\n def _parse_column(self) -> t.Optional[exp.Expression]:\n column = super()._parse_column()\n if column:\n column.set(\"join_mark\", self._match(TokenType.JOIN_MARKER))\n return column\n\n def _parse_hint(self) -> t.Optional[exp.Expression]:\n if self._match(TokenType.HINT):\n start = self._curr\n while self._curr and not self._match_pair(TokenType.STAR, TokenType.SLASH):\n self._advance()\n\n if not self._curr:\n self.raise_error(\"Expected */ after HINT\")\n\n end = self._tokens[self._index - 3]\n return exp.Hint(expressions=[self._find_sql(start, end)])\n\n return None\n\n class Generator(generator.Generator):\n LOCKING_READS_SUPPORTED = True\n\n TYPE_MAPPING = {\n **generator.Generator.TYPE_MAPPING, # type: ignore\n exp.DataType.Type.TINYINT: \"NUMBER\",\n exp.DataType.Type.SMALLINT: \"NUMBER\",\n exp.DataType.Type.INT: \"NUMBER\",\n exp.DataType.Type.BIGINT: \"NUMBER\",\n exp.DataType.Type.DECIMAL: \"NUMBER\",\n exp.DataType.Type.DOUBLE: \"DOUBLE PRECISION\",\n exp.DataType.Type.VARCHAR: \"VARCHAR2\",\n exp.DataType.Type.NVARCHAR: \"NVARCHAR2\",\n exp.DataType.Type.TEXT: \"CLOB\",\n exp.DataType.Type.BINARY: \"BLOB\",\n exp.DataType.Type.VARBINARY: \"BLOB\",\n }\n\n TRANSFORMS = {\n **generator.Generator.TRANSFORMS, # type: ignore\n **transforms.UNALIAS_GROUP, # type: ignore\n exp.Hint: lambda self, e: f\" /*+ {self.expressions(e).strip()} */\",\n exp.ILike: no_ilike_sql,\n exp.Limit: _limit_sql,\n exp.Matches: rename_func(\"DECODE\"),\n exp.StrToTime: lambda self, e: f\"TO_TIMESTAMP({self.sql(e, 'this')}, {self.format_time(e)})\",\n exp.Subquery: lambda self, e: self.subquery_sql(e, sep=\" \"),\n exp.Substring: rename_func(\"SUBSTR\"),\n exp.Table: lambda self, e: self.table_sql(e, sep=\" \"),\n exp.TimeToStr: lambda self, e: f\"TO_CHAR({self.sql(e, 'this')}, {self.format_time(e)})\",\n exp.ToChar: lambda self, e: self.function_fallback_sql(e),\n exp.Trim: trim_sql,\n exp.UnixToTime: lambda self, e: f\"TO_DATE('1970-01-01','YYYY-MM-DD') + ({self.sql(e, 'this')} / 86400)\",\n }\n\n def query_modifiers(self, expression: exp.Expression, *sqls: str) -> str:\n return csv(\n *sqls,\n *[self.sql(sql) for sql in expression.args.get(\"joins\") or []],\n self.sql(expression, \"match\"),\n *[self.sql(sql) for sql in expression.args.get(\"laterals\") or []],\n self.sql(expression, \"where\"),\n self.sql(expression, \"group\"),\n self.sql(expression, \"having\"),\n self.sql(expression, \"qualify\"),\n self.seg(\"WINDOW \") + self.expressions(expression, \"windows\", flat=True)\n if expression.args.get(\"windows\")\n else \"\",\n self.sql(expression, \"distribute\"),\n self.sql(expression, \"sort\"),\n self.sql(expression, \"cluster\"),\n self.sql(expression, \"order\"),\n self.sql(expression, \"offset\"), # offset before limit in oracle\n self.sql(expression, \"limit\"),\n self.sql(expression, \"lock\"),\n sep=\"\",\n )\n\n def offset_sql(self, expression: exp.Offset) -> str:\n return f\"{super().offset_sql(expression)} ROWS\"\n\n def column_sql(self, expression: exp.Column) -> str:\n column = super().column_sql(expression)\n return f\"{column} (+)\" if expression.args.get(\"join_mark\") else column\n\n def xmltable_sql(self, expression: exp.XMLTable) -> str:\n this = self.sql(expression, 
\"this\")\n passing = self.expressions(expression, \"passing\")\n passing = f\"{self.sep()}PASSING{self.seg(passing)}\" if passing else \"\"\n columns = self.expressions(expression, \"columns\")\n columns = f\"{self.sep()}COLUMNS{self.seg(columns)}\" if columns else \"\"\n by_ref = (\n f\"{self.sep()}RETURNING SEQUENCE BY REF\" if expression.args.get(\"by_ref\") else \"\"\n )\n return f\"XMLTABLE({self.sep('')}{self.indent(this + passing + by_ref + columns)}{self.seg(')', sep='')}\"\n\n class Tokenizer(tokens.Tokenizer):\n KEYWORDS = {\n **tokens.Tokenizer.KEYWORDS,\n \"(+)\": TokenType.JOIN_MARKER,\n \"COLUMNS\": TokenType.COLUMN,\n \"MATCH_RECOGNIZE\": TokenType.MATCH_RECOGNIZE,\n \"MINUS\": TokenType.EXCEPT,\n \"NVARCHAR2\": TokenType.NVARCHAR,\n \"RETURNING\": TokenType.RETURNING,\n \"START\": TokenType.BEGIN,\n \"TOP\": TokenType.TOP,\n \"VARCHAR2\": TokenType.VARCHAR,\n }\n", "path": "sqlglot/dialects/oracle.py"}]} | 3,552 | 601 |
gh_patches_debug_28936 | rasdani/github-patches | git_diff | dbt-labs__dbt-core-8308 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[CT-2139] Don't exclude `Formatting` events from JSON-formatted logs
We have some events that are only for formatting / aesthetic display in the terminal. (Previously, this was called `EmptyLine`; in v1.5 we're renaming it to `Formatting`.) Historically, we've intentionally _excluded_ these events from JSON-formatted structured logs. This makes sense in one way, but it's actively unhelpful for someone who wants _both_ the full structure of JSON logs _and_ the ability to produce their own text-formatted logs at parity with `dbt-core`'s.
_copying from slack_
> @jtcohen6
> While it's great to be opinionated about what & how we display `text`, it does feel weird to me that we're depriving the json log consumer of some detail/flexibility.
> @peterallenwebb:
> On balance, I think we should include the EmptyLine/Formatting events. I infer that the original logic behind excluding them was that they would be confusing/unwanted by consumers, but the existence of this use case suggests otherwise.
### Proposed change
Remove the line from `_stdout_filter` that filters out `Formatting` if we're emitting JSON-formatted logs:
https://github.com/dbt-labs/dbt-core/blob/c952d44ec5c2506995fbad75320acbae49125d3d/core/dbt/events/functions.py#L89
[`TextOnly`](https://github.com/dbt-labs/dbt-core/search?q=TextOnly) achieves the same purpose, for legacy logging _only_. We could remove it now, or remove it when we deprecate legacy logging in ~v1.6~ v1.7.
--- END ISSUE ---
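To make the proposed change concrete, the sketch below shows roughly how the two filter helpers in `core/dbt/events/functions.py` would read once the `Formatting` special case is dropped. It is a sketch of the proposal, not necessarily the final implementation; it assumes the module context shown further down, where `nofile_codes` is defined at module level and the imports for `EventMsg` and `LineFormat` already exist.
```python
# Sketch of the proposal: keep the cache-event gating, stop dropping
# Formatting events when the line format is JSON.
from dbt.events.base_types import EventMsg
from dbt.events.eventmgr import LineFormat


def _stdout_filter(log_cache_events: bool, line_format: LineFormat, msg: EventMsg) -> bool:
    return msg.info.name not in ["CacheAction", "CacheDumpGraph"] or log_cache_events


def _logfile_filter(log_cache_events: bool, line_format: LineFormat, msg: EventMsg) -> bool:
    # The file log still skips the "no log directory" codes (nofile_codes,
    # defined in the same module) and honours the cache-events flag; it no
    # longer special-cases Formatting for JSON output.
    return msg.info.code not in nofile_codes and not (
        msg.info.name in ["CacheAction", "CacheDumpGraph"] and not log_cache_events
    )
```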
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `core/dbt/events/functions.py`
Content:
```
1 from dbt.constants import METADATA_ENV_PREFIX
2 from dbt.events.base_types import BaseEvent, EventLevel, EventMsg
3 from dbt.events.eventmgr import EventManager, LoggerConfig, LineFormat, NoFilter, IEventManager
4 from dbt.events.helpers import env_secrets, scrub_secrets
5 from dbt.events.types import Formatting, Note
6 from dbt.flags import get_flags, ENABLE_LEGACY_LOGGER
7 from dbt.logger import GLOBAL_LOGGER, make_log_dir_if_missing
8 from functools import partial
9 import json
10 import os
11 import sys
12 from typing import Callable, Dict, List, Optional, TextIO
13 import uuid
14 from google.protobuf.json_format import MessageToDict
15
16 import dbt.utils
17
18 LOG_VERSION = 3
19 metadata_vars: Optional[Dict[str, str]] = None
20
21 # These are the logging events issued by the "clean" command,
22 # where we can't count on having a log directory. We've removed
23 # the "class" flags on the events in types.py. If necessary we
24 # could still use class or method flags, but we'd have to get
25 # the type class from the msg and then get the information from the class.
26 nofile_codes = ["Z012", "Z013", "Z014", "Z015"]
27
28
29 def setup_event_logger(flags, callbacks: List[Callable[[EventMsg], None]] = []) -> None:
30 cleanup_event_logger()
31 make_log_dir_if_missing(flags.LOG_PATH)
32 EVENT_MANAGER.callbacks = callbacks.copy()
33
34 if ENABLE_LEGACY_LOGGER:
35 EVENT_MANAGER.add_logger(
36 _get_logbook_log_config(
37 flags.DEBUG, flags.USE_COLORS, flags.LOG_CACHE_EVENTS, flags.QUIET
38 )
39 )
40 else:
41 if flags.LOG_LEVEL != "none":
42 line_format = _line_format_from_str(flags.LOG_FORMAT, LineFormat.PlainText)
43 log_level = (
44 EventLevel.ERROR
45 if flags.QUIET
46 else EventLevel.DEBUG
47 if flags.DEBUG
48 else EventLevel(flags.LOG_LEVEL)
49 )
50 console_config = _get_stdout_config(
51 line_format,
52 flags.USE_COLORS,
53 log_level,
54 flags.LOG_CACHE_EVENTS,
55 )
56 EVENT_MANAGER.add_logger(console_config)
57
58 if _CAPTURE_STREAM:
59 # Create second stdout logger to support test which want to know what's
60 # being sent to stdout.
61 console_config.output_stream = _CAPTURE_STREAM
62 EVENT_MANAGER.add_logger(console_config)
63
64 if flags.LOG_LEVEL_FILE != "none":
65 # create and add the file logger to the event manager
66 log_file = os.path.join(flags.LOG_PATH, "dbt.log")
67 log_file_format = _line_format_from_str(flags.LOG_FORMAT_FILE, LineFormat.DebugText)
68 log_level_file = EventLevel.DEBUG if flags.DEBUG else EventLevel(flags.LOG_LEVEL_FILE)
69 EVENT_MANAGER.add_logger(
70 _get_logfile_config(
71 log_file,
72 flags.USE_COLORS_FILE,
73 log_file_format,
74 log_level_file,
75 flags.LOG_FILE_MAX_BYTES,
76 )
77 )
78
79
80 def _line_format_from_str(format_str: str, default: LineFormat) -> LineFormat:
81 if format_str == "text":
82 return LineFormat.PlainText
83 elif format_str == "debug":
84 return LineFormat.DebugText
85 elif format_str == "json":
86 return LineFormat.Json
87
88 return default
89
90
91 def _get_stdout_config(
92 line_format: LineFormat,
93 use_colors: bool,
94 level: EventLevel,
95 log_cache_events: bool,
96 ) -> LoggerConfig:
97
98 return LoggerConfig(
99 name="stdout_log",
100 level=level,
101 use_colors=use_colors,
102 line_format=line_format,
103 scrubber=env_scrubber,
104 filter=partial(
105 _stdout_filter,
106 log_cache_events,
107 line_format,
108 ),
109 output_stream=sys.stdout,
110 )
111
112
113 def _stdout_filter(
114 log_cache_events: bool,
115 line_format: LineFormat,
116 msg: EventMsg,
117 ) -> bool:
118 return (msg.info.name not in ["CacheAction", "CacheDumpGraph"] or log_cache_events) and not (
119 line_format == LineFormat.Json and type(msg.data) == Formatting
120 )
121
122
123 def _get_logfile_config(
124 log_path: str,
125 use_colors: bool,
126 line_format: LineFormat,
127 level: EventLevel,
128 log_file_max_bytes: int,
129 ) -> LoggerConfig:
130 return LoggerConfig(
131 name="file_log",
132 line_format=line_format,
133 use_colors=use_colors,
134 level=level, # File log is *always* debug level
135 scrubber=env_scrubber,
136 filter=partial(_logfile_filter, bool(get_flags().LOG_CACHE_EVENTS), line_format),
137 output_file_name=log_path,
138 output_file_max_bytes=log_file_max_bytes,
139 )
140
141
142 def _logfile_filter(log_cache_events: bool, line_format: LineFormat, msg: EventMsg) -> bool:
143 return (
144 msg.info.code not in nofile_codes
145 and not (msg.info.name in ["CacheAction", "CacheDumpGraph"] and not log_cache_events)
146 and not (line_format == LineFormat.Json and type(msg.data) == Formatting)
147 )
148
149
150 def _get_logbook_log_config(
151 debug: bool, use_colors: bool, log_cache_events: bool, quiet: bool
152 ) -> LoggerConfig:
153 config = _get_stdout_config(
154 LineFormat.PlainText,
155 use_colors,
156 EventLevel.ERROR if quiet else EventLevel.DEBUG if debug else EventLevel.INFO,
157 log_cache_events,
158 )
159 config.name = "logbook_log"
160 config.filter = (
161 NoFilter
162 if log_cache_events
163 else lambda e: e.info.name not in ["CacheAction", "CacheDumpGraph"]
164 )
165 config.logger = GLOBAL_LOGGER
166 config.output_stream = None
167 return config
168
169
170 def env_scrubber(msg: str) -> str:
171 return scrub_secrets(msg, env_secrets())
172
173
174 def cleanup_event_logger():
175 # Reset to a no-op manager to release streams associated with logs. This is
176 # especially important for tests, since pytest replaces the stdout stream
177 # during test runs, and closes the stream after the test is over.
178 EVENT_MANAGER.loggers.clear()
179 EVENT_MANAGER.callbacks.clear()
180
181
182 # Since dbt-rpc does not do its own log setup, and since some events can
183 # currently fire before logs can be configured by setup_event_logger(), we
184 # create a default configuration with default settings and no file output.
185 EVENT_MANAGER: IEventManager = EventManager()
186 EVENT_MANAGER.add_logger(
187 _get_logbook_log_config(False, True, False, False) # type: ignore
188 if ENABLE_LEGACY_LOGGER
189 else _get_stdout_config(LineFormat.PlainText, True, EventLevel.INFO, False)
190 )
191
192 # This global, and the following two functions for capturing stdout logs are
193 # an unpleasant hack we intend to remove as part of API-ification. The GitHub
194 # issue #6350 was opened for that work.
195 _CAPTURE_STREAM: Optional[TextIO] = None
196
197
198 # used for integration tests
199 def capture_stdout_logs(stream: TextIO):
200 global _CAPTURE_STREAM
201 _CAPTURE_STREAM = stream
202
203
204 def stop_capture_stdout_logs():
205 global _CAPTURE_STREAM
206 _CAPTURE_STREAM = None
207
208
209 # returns a dictionary representation of the event fields.
210 # the message may contain secrets which must be scrubbed at the usage site.
211 def msg_to_json(msg: EventMsg) -> str:
212 msg_dict = msg_to_dict(msg)
213 raw_log_line = json.dumps(msg_dict, sort_keys=True, cls=dbt.utils.ForgivingJSONEncoder)
214 return raw_log_line
215
216
217 def msg_to_dict(msg: EventMsg) -> dict:
218 msg_dict = dict()
219 try:
220 msg_dict = MessageToDict(
221 msg, preserving_proto_field_name=True, including_default_value_fields=True # type: ignore
222 )
223 except Exception as exc:
224 event_type = type(msg).__name__
225 fire_event(
226 Note(msg=f"type {event_type} is not serializable. {str(exc)}"), level=EventLevel.WARN
227 )
228 # We don't want an empty NodeInfo in output
229 if (
230 "data" in msg_dict
231 and "node_info" in msg_dict["data"]
232 and msg_dict["data"]["node_info"]["node_name"] == ""
233 ):
234 del msg_dict["data"]["node_info"]
235 return msg_dict
236
237
238 def warn_or_error(event, node=None):
239 flags = get_flags()
240 if flags.WARN_ERROR or flags.WARN_ERROR_OPTIONS.includes(type(event).__name__):
241
242 # TODO: resolve this circular import when at top
243 from dbt.exceptions import EventCompilationError
244
245 raise EventCompilationError(event.message(), node)
246 else:
247 fire_event(event)
248
249
250 # an alternative to fire_event which only creates and logs the event value
251 # if the condition is met. Does nothing otherwise.
252 def fire_event_if(
253 conditional: bool, lazy_e: Callable[[], BaseEvent], level: Optional[EventLevel] = None
254 ) -> None:
255 if conditional:
256 fire_event(lazy_e(), level=level)
257
258
259 # a special case of fire_event_if, to only fire events in our unit/functional tests
260 def fire_event_if_test(
261 lazy_e: Callable[[], BaseEvent], level: Optional[EventLevel] = None
262 ) -> None:
263 fire_event_if(conditional=("pytest" in sys.modules), lazy_e=lazy_e, level=level)
264
265
266 # top-level method for accessing the new eventing system
267 # this is where all the side effects happen branched by event type
268 # (i.e. - mutating the event history, printing to stdout, logging
269 # to files, etc.)
270 def fire_event(e: BaseEvent, level: Optional[EventLevel] = None) -> None:
271 EVENT_MANAGER.fire_event(e, level=level)
272
273
274 def get_metadata_vars() -> Dict[str, str]:
275 global metadata_vars
276 if metadata_vars is None:
277 metadata_vars = {
278 k[len(METADATA_ENV_PREFIX) :]: v
279 for k, v in os.environ.items()
280 if k.startswith(METADATA_ENV_PREFIX)
281 }
282 return metadata_vars
283
284
285 def reset_metadata_vars() -> None:
286 global metadata_vars
287 metadata_vars = None
288
289
290 def get_invocation_id() -> str:
291 return EVENT_MANAGER.invocation_id
292
293
294 def set_invocation_id() -> None:
295 # This is primarily for setting the invocation_id for separate
296 # commands in the dbt servers. It shouldn't be necessary for the CLI.
297 EVENT_MANAGER.invocation_id = str(uuid.uuid4())
298
299
300 def ctx_set_event_manager(event_manager: IEventManager):
301 global EVENT_MANAGER
302 EVENT_MANAGER = event_manager
303
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/core/dbt/events/functions.py b/core/dbt/events/functions.py
--- a/core/dbt/events/functions.py
+++ b/core/dbt/events/functions.py
@@ -2,7 +2,7 @@
from dbt.events.base_types import BaseEvent, EventLevel, EventMsg
from dbt.events.eventmgr import EventManager, LoggerConfig, LineFormat, NoFilter, IEventManager
from dbt.events.helpers import env_secrets, scrub_secrets
-from dbt.events.types import Formatting, Note
+from dbt.events.types import Note
from dbt.flags import get_flags, ENABLE_LEGACY_LOGGER
from dbt.logger import GLOBAL_LOGGER, make_log_dir_if_missing
from functools import partial
@@ -115,9 +115,7 @@
line_format: LineFormat,
msg: EventMsg,
) -> bool:
- return (msg.info.name not in ["CacheAction", "CacheDumpGraph"] or log_cache_events) and not (
- line_format == LineFormat.Json and type(msg.data) == Formatting
- )
+ return msg.info.name not in ["CacheAction", "CacheDumpGraph"] or log_cache_events
def _get_logfile_config(
@@ -140,10 +138,8 @@
def _logfile_filter(log_cache_events: bool, line_format: LineFormat, msg: EventMsg) -> bool:
- return (
- msg.info.code not in nofile_codes
- and not (msg.info.name in ["CacheAction", "CacheDumpGraph"] and not log_cache_events)
- and not (line_format == LineFormat.Json and type(msg.data) == Formatting)
+ return msg.info.code not in nofile_codes and not (
+ msg.info.name in ["CacheAction", "CacheDumpGraph"] and not log_cache_events
)
| {"golden_diff": "diff --git a/core/dbt/events/functions.py b/core/dbt/events/functions.py\n--- a/core/dbt/events/functions.py\n+++ b/core/dbt/events/functions.py\n@@ -2,7 +2,7 @@\n from dbt.events.base_types import BaseEvent, EventLevel, EventMsg\n from dbt.events.eventmgr import EventManager, LoggerConfig, LineFormat, NoFilter, IEventManager\n from dbt.events.helpers import env_secrets, scrub_secrets\n-from dbt.events.types import Formatting, Note\n+from dbt.events.types import Note\n from dbt.flags import get_flags, ENABLE_LEGACY_LOGGER\n from dbt.logger import GLOBAL_LOGGER, make_log_dir_if_missing\n from functools import partial\n@@ -115,9 +115,7 @@\n line_format: LineFormat,\n msg: EventMsg,\n ) -> bool:\n- return (msg.info.name not in [\"CacheAction\", \"CacheDumpGraph\"] or log_cache_events) and not (\n- line_format == LineFormat.Json and type(msg.data) == Formatting\n- )\n+ return msg.info.name not in [\"CacheAction\", \"CacheDumpGraph\"] or log_cache_events\n \n \n def _get_logfile_config(\n@@ -140,10 +138,8 @@\n \n \n def _logfile_filter(log_cache_events: bool, line_format: LineFormat, msg: EventMsg) -> bool:\n- return (\n- msg.info.code not in nofile_codes\n- and not (msg.info.name in [\"CacheAction\", \"CacheDumpGraph\"] and not log_cache_events)\n- and not (line_format == LineFormat.Json and type(msg.data) == Formatting)\n+ return msg.info.code not in nofile_codes and not (\n+ msg.info.name in [\"CacheAction\", \"CacheDumpGraph\"] and not log_cache_events\n )\n", "issue": "[CT-2139] Don't exclude `Formatting` events from JSON-formatted logs\nWe have some events that are only for formatting / aesthetic display in the terminal. (Previously, this was called `EmptyLine`; in v1.5 we're renaming it to `Formatting`.) Historically, we've intentionally _excluded_ these events from JSON-formatted structured logs. This makes sense in one way, but it's actively unhelpful for someone who wants _both_ the full structure of JSON logs _and_ the ability to produce their own text-formatted logs at parity with `dbt-core`'s.\r\n\r\n_copying from slack_\r\n> @jtcohen6\r\n> While it's great to be opinionated about what & how we display `text`, it does feel weird to me that we're depriving the json log consumer of some detail/flexibility.\r\n> @peterallenwebb:\r\n> On balance, I think we should include the EmptyLine/Formatting events. I infer that the original logic behind excluding them was that they would be confusing/unwanted by consumers, but the existence of this use case suggests otherwise.\r\n\r\n### Proposed change\r\n\r\nRemove the line from `_stdout_filter` that filters out `Formatting` if we're emitting JSON-formatted logs:\r\nhttps://github.com/dbt-labs/dbt-core/blob/c952d44ec5c2506995fbad75320acbae49125d3d/core/dbt/events/functions.py#L89\r\n\r\n[`TextOnly`](https://github.com/dbt-labs/dbt-core/search?q=TextOnly) achieves the same purpose, for legacy logging _only_. 
We could remove it now, or remove it when we deprecate legacy logging in ~v1.6~ v1.7.\n", "before_files": [{"content": "from dbt.constants import METADATA_ENV_PREFIX\nfrom dbt.events.base_types import BaseEvent, EventLevel, EventMsg\nfrom dbt.events.eventmgr import EventManager, LoggerConfig, LineFormat, NoFilter, IEventManager\nfrom dbt.events.helpers import env_secrets, scrub_secrets\nfrom dbt.events.types import Formatting, Note\nfrom dbt.flags import get_flags, ENABLE_LEGACY_LOGGER\nfrom dbt.logger import GLOBAL_LOGGER, make_log_dir_if_missing\nfrom functools import partial\nimport json\nimport os\nimport sys\nfrom typing import Callable, Dict, List, Optional, TextIO\nimport uuid\nfrom google.protobuf.json_format import MessageToDict\n\nimport dbt.utils\n\nLOG_VERSION = 3\nmetadata_vars: Optional[Dict[str, str]] = None\n\n# These are the logging events issued by the \"clean\" command,\n# where we can't count on having a log directory. We've removed\n# the \"class\" flags on the events in types.py. If necessary we\n# could still use class or method flags, but we'd have to get\n# the type class from the msg and then get the information from the class.\nnofile_codes = [\"Z012\", \"Z013\", \"Z014\", \"Z015\"]\n\n\ndef setup_event_logger(flags, callbacks: List[Callable[[EventMsg], None]] = []) -> None:\n cleanup_event_logger()\n make_log_dir_if_missing(flags.LOG_PATH)\n EVENT_MANAGER.callbacks = callbacks.copy()\n\n if ENABLE_LEGACY_LOGGER:\n EVENT_MANAGER.add_logger(\n _get_logbook_log_config(\n flags.DEBUG, flags.USE_COLORS, flags.LOG_CACHE_EVENTS, flags.QUIET\n )\n )\n else:\n if flags.LOG_LEVEL != \"none\":\n line_format = _line_format_from_str(flags.LOG_FORMAT, LineFormat.PlainText)\n log_level = (\n EventLevel.ERROR\n if flags.QUIET\n else EventLevel.DEBUG\n if flags.DEBUG\n else EventLevel(flags.LOG_LEVEL)\n )\n console_config = _get_stdout_config(\n line_format,\n flags.USE_COLORS,\n log_level,\n flags.LOG_CACHE_EVENTS,\n )\n EVENT_MANAGER.add_logger(console_config)\n\n if _CAPTURE_STREAM:\n # Create second stdout logger to support test which want to know what's\n # being sent to stdout.\n console_config.output_stream = _CAPTURE_STREAM\n EVENT_MANAGER.add_logger(console_config)\n\n if flags.LOG_LEVEL_FILE != \"none\":\n # create and add the file logger to the event manager\n log_file = os.path.join(flags.LOG_PATH, \"dbt.log\")\n log_file_format = _line_format_from_str(flags.LOG_FORMAT_FILE, LineFormat.DebugText)\n log_level_file = EventLevel.DEBUG if flags.DEBUG else EventLevel(flags.LOG_LEVEL_FILE)\n EVENT_MANAGER.add_logger(\n _get_logfile_config(\n log_file,\n flags.USE_COLORS_FILE,\n log_file_format,\n log_level_file,\n flags.LOG_FILE_MAX_BYTES,\n )\n )\n\n\ndef _line_format_from_str(format_str: str, default: LineFormat) -> LineFormat:\n if format_str == \"text\":\n return LineFormat.PlainText\n elif format_str == \"debug\":\n return LineFormat.DebugText\n elif format_str == \"json\":\n return LineFormat.Json\n\n return default\n\n\ndef _get_stdout_config(\n line_format: LineFormat,\n use_colors: bool,\n level: EventLevel,\n log_cache_events: bool,\n) -> LoggerConfig:\n\n return LoggerConfig(\n name=\"stdout_log\",\n level=level,\n use_colors=use_colors,\n line_format=line_format,\n scrubber=env_scrubber,\n filter=partial(\n _stdout_filter,\n log_cache_events,\n line_format,\n ),\n output_stream=sys.stdout,\n )\n\n\ndef _stdout_filter(\n log_cache_events: bool,\n line_format: LineFormat,\n msg: EventMsg,\n) -> bool:\n return (msg.info.name not in [\"CacheAction\", 
\"CacheDumpGraph\"] or log_cache_events) and not (\n line_format == LineFormat.Json and type(msg.data) == Formatting\n )\n\n\ndef _get_logfile_config(\n log_path: str,\n use_colors: bool,\n line_format: LineFormat,\n level: EventLevel,\n log_file_max_bytes: int,\n) -> LoggerConfig:\n return LoggerConfig(\n name=\"file_log\",\n line_format=line_format,\n use_colors=use_colors,\n level=level, # File log is *always* debug level\n scrubber=env_scrubber,\n filter=partial(_logfile_filter, bool(get_flags().LOG_CACHE_EVENTS), line_format),\n output_file_name=log_path,\n output_file_max_bytes=log_file_max_bytes,\n )\n\n\ndef _logfile_filter(log_cache_events: bool, line_format: LineFormat, msg: EventMsg) -> bool:\n return (\n msg.info.code not in nofile_codes\n and not (msg.info.name in [\"CacheAction\", \"CacheDumpGraph\"] and not log_cache_events)\n and not (line_format == LineFormat.Json and type(msg.data) == Formatting)\n )\n\n\ndef _get_logbook_log_config(\n debug: bool, use_colors: bool, log_cache_events: bool, quiet: bool\n) -> LoggerConfig:\n config = _get_stdout_config(\n LineFormat.PlainText,\n use_colors,\n EventLevel.ERROR if quiet else EventLevel.DEBUG if debug else EventLevel.INFO,\n log_cache_events,\n )\n config.name = \"logbook_log\"\n config.filter = (\n NoFilter\n if log_cache_events\n else lambda e: e.info.name not in [\"CacheAction\", \"CacheDumpGraph\"]\n )\n config.logger = GLOBAL_LOGGER\n config.output_stream = None\n return config\n\n\ndef env_scrubber(msg: str) -> str:\n return scrub_secrets(msg, env_secrets())\n\n\ndef cleanup_event_logger():\n # Reset to a no-op manager to release streams associated with logs. This is\n # especially important for tests, since pytest replaces the stdout stream\n # during test runs, and closes the stream after the test is over.\n EVENT_MANAGER.loggers.clear()\n EVENT_MANAGER.callbacks.clear()\n\n\n# Since dbt-rpc does not do its own log setup, and since some events can\n# currently fire before logs can be configured by setup_event_logger(), we\n# create a default configuration with default settings and no file output.\nEVENT_MANAGER: IEventManager = EventManager()\nEVENT_MANAGER.add_logger(\n _get_logbook_log_config(False, True, False, False) # type: ignore\n if ENABLE_LEGACY_LOGGER\n else _get_stdout_config(LineFormat.PlainText, True, EventLevel.INFO, False)\n)\n\n# This global, and the following two functions for capturing stdout logs are\n# an unpleasant hack we intend to remove as part of API-ification. The GitHub\n# issue #6350 was opened for that work.\n_CAPTURE_STREAM: Optional[TextIO] = None\n\n\n# used for integration tests\ndef capture_stdout_logs(stream: TextIO):\n global _CAPTURE_STREAM\n _CAPTURE_STREAM = stream\n\n\ndef stop_capture_stdout_logs():\n global _CAPTURE_STREAM\n _CAPTURE_STREAM = None\n\n\n# returns a dictionary representation of the event fields.\n# the message may contain secrets which must be scrubbed at the usage site.\ndef msg_to_json(msg: EventMsg) -> str:\n msg_dict = msg_to_dict(msg)\n raw_log_line = json.dumps(msg_dict, sort_keys=True, cls=dbt.utils.ForgivingJSONEncoder)\n return raw_log_line\n\n\ndef msg_to_dict(msg: EventMsg) -> dict:\n msg_dict = dict()\n try:\n msg_dict = MessageToDict(\n msg, preserving_proto_field_name=True, including_default_value_fields=True # type: ignore\n )\n except Exception as exc:\n event_type = type(msg).__name__\n fire_event(\n Note(msg=f\"type {event_type} is not serializable. 
{str(exc)}\"), level=EventLevel.WARN\n )\n # We don't want an empty NodeInfo in output\n if (\n \"data\" in msg_dict\n and \"node_info\" in msg_dict[\"data\"]\n and msg_dict[\"data\"][\"node_info\"][\"node_name\"] == \"\"\n ):\n del msg_dict[\"data\"][\"node_info\"]\n return msg_dict\n\n\ndef warn_or_error(event, node=None):\n flags = get_flags()\n if flags.WARN_ERROR or flags.WARN_ERROR_OPTIONS.includes(type(event).__name__):\n\n # TODO: resolve this circular import when at top\n from dbt.exceptions import EventCompilationError\n\n raise EventCompilationError(event.message(), node)\n else:\n fire_event(event)\n\n\n# an alternative to fire_event which only creates and logs the event value\n# if the condition is met. Does nothing otherwise.\ndef fire_event_if(\n conditional: bool, lazy_e: Callable[[], BaseEvent], level: Optional[EventLevel] = None\n) -> None:\n if conditional:\n fire_event(lazy_e(), level=level)\n\n\n# a special case of fire_event_if, to only fire events in our unit/functional tests\ndef fire_event_if_test(\n lazy_e: Callable[[], BaseEvent], level: Optional[EventLevel] = None\n) -> None:\n fire_event_if(conditional=(\"pytest\" in sys.modules), lazy_e=lazy_e, level=level)\n\n\n# top-level method for accessing the new eventing system\n# this is where all the side effects happen branched by event type\n# (i.e. - mutating the event history, printing to stdout, logging\n# to files, etc.)\ndef fire_event(e: BaseEvent, level: Optional[EventLevel] = None) -> None:\n EVENT_MANAGER.fire_event(e, level=level)\n\n\ndef get_metadata_vars() -> Dict[str, str]:\n global metadata_vars\n if metadata_vars is None:\n metadata_vars = {\n k[len(METADATA_ENV_PREFIX) :]: v\n for k, v in os.environ.items()\n if k.startswith(METADATA_ENV_PREFIX)\n }\n return metadata_vars\n\n\ndef reset_metadata_vars() -> None:\n global metadata_vars\n metadata_vars = None\n\n\ndef get_invocation_id() -> str:\n return EVENT_MANAGER.invocation_id\n\n\ndef set_invocation_id() -> None:\n # This is primarily for setting the invocation_id for separate\n # commands in the dbt servers. It shouldn't be necessary for the CLI.\n EVENT_MANAGER.invocation_id = str(uuid.uuid4())\n\n\ndef ctx_set_event_manager(event_manager: IEventManager):\n global EVENT_MANAGER\n EVENT_MANAGER = event_manager\n", "path": "core/dbt/events/functions.py"}], "after_files": [{"content": "from dbt.constants import METADATA_ENV_PREFIX\nfrom dbt.events.base_types import BaseEvent, EventLevel, EventMsg\nfrom dbt.events.eventmgr import EventManager, LoggerConfig, LineFormat, NoFilter, IEventManager\nfrom dbt.events.helpers import env_secrets, scrub_secrets\nfrom dbt.events.types import Note\nfrom dbt.flags import get_flags, ENABLE_LEGACY_LOGGER\nfrom dbt.logger import GLOBAL_LOGGER, make_log_dir_if_missing\nfrom functools import partial\nimport json\nimport os\nimport sys\nfrom typing import Callable, Dict, List, Optional, TextIO\nimport uuid\nfrom google.protobuf.json_format import MessageToDict\n\nimport dbt.utils\n\nLOG_VERSION = 3\nmetadata_vars: Optional[Dict[str, str]] = None\n\n# These are the logging events issued by the \"clean\" command,\n# where we can't count on having a log directory. We've removed\n# the \"class\" flags on the events in types.py. 
If necessary we\n# could still use class or method flags, but we'd have to get\n# the type class from the msg and then get the information from the class.\nnofile_codes = [\"Z012\", \"Z013\", \"Z014\", \"Z015\"]\n\n\ndef setup_event_logger(flags, callbacks: List[Callable[[EventMsg], None]] = []) -> None:\n cleanup_event_logger()\n make_log_dir_if_missing(flags.LOG_PATH)\n EVENT_MANAGER.callbacks = callbacks.copy()\n\n if ENABLE_LEGACY_LOGGER:\n EVENT_MANAGER.add_logger(\n _get_logbook_log_config(\n flags.DEBUG, flags.USE_COLORS, flags.LOG_CACHE_EVENTS, flags.QUIET\n )\n )\n else:\n if flags.LOG_LEVEL != \"none\":\n line_format = _line_format_from_str(flags.LOG_FORMAT, LineFormat.PlainText)\n log_level = (\n EventLevel.ERROR\n if flags.QUIET\n else EventLevel.DEBUG\n if flags.DEBUG\n else EventLevel(flags.LOG_LEVEL)\n )\n console_config = _get_stdout_config(\n line_format,\n flags.USE_COLORS,\n log_level,\n flags.LOG_CACHE_EVENTS,\n )\n EVENT_MANAGER.add_logger(console_config)\n\n if _CAPTURE_STREAM:\n # Create second stdout logger to support test which want to know what's\n # being sent to stdout.\n console_config.output_stream = _CAPTURE_STREAM\n EVENT_MANAGER.add_logger(console_config)\n\n if flags.LOG_LEVEL_FILE != \"none\":\n # create and add the file logger to the event manager\n log_file = os.path.join(flags.LOG_PATH, \"dbt.log\")\n log_file_format = _line_format_from_str(flags.LOG_FORMAT_FILE, LineFormat.DebugText)\n log_level_file = EventLevel.DEBUG if flags.DEBUG else EventLevel(flags.LOG_LEVEL_FILE)\n EVENT_MANAGER.add_logger(\n _get_logfile_config(\n log_file,\n flags.USE_COLORS_FILE,\n log_file_format,\n log_level_file,\n flags.LOG_FILE_MAX_BYTES,\n )\n )\n\n\ndef _line_format_from_str(format_str: str, default: LineFormat) -> LineFormat:\n if format_str == \"text\":\n return LineFormat.PlainText\n elif format_str == \"debug\":\n return LineFormat.DebugText\n elif format_str == \"json\":\n return LineFormat.Json\n\n return default\n\n\ndef _get_stdout_config(\n line_format: LineFormat,\n use_colors: bool,\n level: EventLevel,\n log_cache_events: bool,\n) -> LoggerConfig:\n\n return LoggerConfig(\n name=\"stdout_log\",\n level=level,\n use_colors=use_colors,\n line_format=line_format,\n scrubber=env_scrubber,\n filter=partial(\n _stdout_filter,\n log_cache_events,\n line_format,\n ),\n output_stream=sys.stdout,\n )\n\n\ndef _stdout_filter(\n log_cache_events: bool,\n line_format: LineFormat,\n msg: EventMsg,\n) -> bool:\n return msg.info.name not in [\"CacheAction\", \"CacheDumpGraph\"] or log_cache_events\n\n\ndef _get_logfile_config(\n log_path: str,\n use_colors: bool,\n line_format: LineFormat,\n level: EventLevel,\n log_file_max_bytes: int,\n) -> LoggerConfig:\n return LoggerConfig(\n name=\"file_log\",\n line_format=line_format,\n use_colors=use_colors,\n level=level, # File log is *always* debug level\n scrubber=env_scrubber,\n filter=partial(_logfile_filter, bool(get_flags().LOG_CACHE_EVENTS), line_format),\n output_file_name=log_path,\n output_file_max_bytes=log_file_max_bytes,\n )\n\n\ndef _logfile_filter(log_cache_events: bool, line_format: LineFormat, msg: EventMsg) -> bool:\n return msg.info.code not in nofile_codes and not (\n msg.info.name in [\"CacheAction\", \"CacheDumpGraph\"] and not log_cache_events\n )\n\n\ndef _get_logbook_log_config(\n debug: bool, use_colors: bool, log_cache_events: bool, quiet: bool\n) -> LoggerConfig:\n config = _get_stdout_config(\n LineFormat.PlainText,\n use_colors,\n EventLevel.ERROR if quiet else EventLevel.DEBUG if debug else 
EventLevel.INFO,\n log_cache_events,\n )\n config.name = \"logbook_log\"\n config.filter = (\n NoFilter\n if log_cache_events\n else lambda e: e.info.name not in [\"CacheAction\", \"CacheDumpGraph\"]\n )\n config.logger = GLOBAL_LOGGER\n config.output_stream = None\n return config\n\n\ndef env_scrubber(msg: str) -> str:\n return scrub_secrets(msg, env_secrets())\n\n\ndef cleanup_event_logger():\n # Reset to a no-op manager to release streams associated with logs. This is\n # especially important for tests, since pytest replaces the stdout stream\n # during test runs, and closes the stream after the test is over.\n EVENT_MANAGER.loggers.clear()\n EVENT_MANAGER.callbacks.clear()\n\n\n# Since dbt-rpc does not do its own log setup, and since some events can\n# currently fire before logs can be configured by setup_event_logger(), we\n# create a default configuration with default settings and no file output.\nEVENT_MANAGER: IEventManager = EventManager()\nEVENT_MANAGER.add_logger(\n _get_logbook_log_config(False, True, False, False) # type: ignore\n if ENABLE_LEGACY_LOGGER\n else _get_stdout_config(LineFormat.PlainText, True, EventLevel.INFO, False)\n)\n\n# This global, and the following two functions for capturing stdout logs are\n# an unpleasant hack we intend to remove as part of API-ification. The GitHub\n# issue #6350 was opened for that work.\n_CAPTURE_STREAM: Optional[TextIO] = None\n\n\n# used for integration tests\ndef capture_stdout_logs(stream: TextIO):\n global _CAPTURE_STREAM\n _CAPTURE_STREAM = stream\n\n\ndef stop_capture_stdout_logs():\n global _CAPTURE_STREAM\n _CAPTURE_STREAM = None\n\n\n# returns a dictionary representation of the event fields.\n# the message may contain secrets which must be scrubbed at the usage site.\ndef msg_to_json(msg: EventMsg) -> str:\n msg_dict = msg_to_dict(msg)\n raw_log_line = json.dumps(msg_dict, sort_keys=True, cls=dbt.utils.ForgivingJSONEncoder)\n return raw_log_line\n\n\ndef msg_to_dict(msg: EventMsg) -> dict:\n msg_dict = dict()\n try:\n msg_dict = MessageToDict(\n msg, preserving_proto_field_name=True, including_default_value_fields=True # type: ignore\n )\n except Exception as exc:\n event_type = type(msg).__name__\n fire_event(\n Note(msg=f\"type {event_type} is not serializable. {str(exc)}\"), level=EventLevel.WARN\n )\n # We don't want an empty NodeInfo in output\n if (\n \"data\" in msg_dict\n and \"node_info\" in msg_dict[\"data\"]\n and msg_dict[\"data\"][\"node_info\"][\"node_name\"] == \"\"\n ):\n del msg_dict[\"data\"][\"node_info\"]\n return msg_dict\n\n\ndef warn_or_error(event, node=None):\n flags = get_flags()\n if flags.WARN_ERROR or flags.WARN_ERROR_OPTIONS.includes(type(event).__name__):\n\n # TODO: resolve this circular import when at top\n from dbt.exceptions import EventCompilationError\n\n raise EventCompilationError(event.message(), node)\n else:\n fire_event(event)\n\n\n# an alternative to fire_event which only creates and logs the event value\n# if the condition is met. 
Does nothing otherwise.\ndef fire_event_if(\n conditional: bool, lazy_e: Callable[[], BaseEvent], level: Optional[EventLevel] = None\n) -> None:\n if conditional:\n fire_event(lazy_e(), level=level)\n\n\n# a special case of fire_event_if, to only fire events in our unit/functional tests\ndef fire_event_if_test(\n lazy_e: Callable[[], BaseEvent], level: Optional[EventLevel] = None\n) -> None:\n fire_event_if(conditional=(\"pytest\" in sys.modules), lazy_e=lazy_e, level=level)\n\n\n# top-level method for accessing the new eventing system\n# this is where all the side effects happen branched by event type\n# (i.e. - mutating the event history, printing to stdout, logging\n# to files, etc.)\ndef fire_event(e: BaseEvent, level: Optional[EventLevel] = None) -> None:\n EVENT_MANAGER.fire_event(e, level=level)\n\n\ndef get_metadata_vars() -> Dict[str, str]:\n global metadata_vars\n if metadata_vars is None:\n metadata_vars = {\n k[len(METADATA_ENV_PREFIX) :]: v\n for k, v in os.environ.items()\n if k.startswith(METADATA_ENV_PREFIX)\n }\n return metadata_vars\n\n\ndef reset_metadata_vars() -> None:\n global metadata_vars\n metadata_vars = None\n\n\ndef get_invocation_id() -> str:\n return EVENT_MANAGER.invocation_id\n\n\ndef set_invocation_id() -> None:\n # This is primarily for setting the invocation_id for separate\n # commands in the dbt servers. It shouldn't be necessary for the CLI.\n EVENT_MANAGER.invocation_id = str(uuid.uuid4())\n\n\ndef ctx_set_event_manager(event_manager: IEventManager):\n global EVENT_MANAGER\n EVENT_MANAGER = event_manager\n", "path": "core/dbt/events/functions.py"}]} | 3,767 | 390 |
gh_patches_debug_18631 | rasdani/github-patches | git_diff | fidals__shopelectro-318 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
ProductPage and ya.market prices are not equal
This product in `yandex.yml` (from prod) has a price of `200 RUB` on its product page:
```
<offer id="4928" available="true">
 <url>https://www.shopelectro.ru/catalog/products/4928/?utm_source=YM&utm_medium=cpc&utm_content=akkumuliatory-270&utm_term=4928</url>
<price>199.98</price>
<purchase_price>106.8111</purchase_price>
<currencyId>RUR</currencyId>
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `shopelectro/management/commands/_update_catalog/update_products.py`
Content:
```
1 import logging
2 from collections import defaultdict
3 from copy import deepcopy
4 from functools import reduce
5 from itertools import chain
6 from typing import Dict, Iterator, List
7 from xml.etree.ElementTree import Element
8
9 from django.conf import settings
10 from django.contrib.auth.models import User
11 from django.core.mail import send_mail
12 from django.db import transaction
13 from django.db.models import QuerySet
14 from django.template.loader import render_to_string
15
16 from shopelectro.management.commands._update_catalog.utils import (
17 XmlFile, is_correct_uuid, NOT_SAVE_TEMPLATE, UUID, Data,
18 )
19 from shopelectro.models import Product, ProductPage, Tag
20
21
22 logger = logging.getLogger(__name__)
23
24
25 def fetch_products(root: Element, config: XmlFile) -> Iterator:
26 product_els = root.findall(config.xpaths['products'])
27 for product_el in product_els:
28 name = product_el.find(config.xpaths['name']).text
29 uuid = product_el.find(config.xpaths['uuid']).text
30 vendor_code = product_el.find(
31 config.xpaths['vendor_code']
32 ).text.lstrip('0')
33 content = product_el.find(config.xpaths['page_content']).text or ''
34
35 tag_value_els = (
36 tag_el.find(config.xpaths['tag_value_uuid'])
37 for tag_el in product_el.findall(config.xpaths['tags'])
38 if tag_el is not None
39 )
40
41 tag_uuids = list(filter(is_correct_uuid, (
42 tag_value.text
43 for tag_value in tag_value_els
44 # should use 'is not None', because __bool__ does not defined
45 if tag_value is not None
46 )))
47
48 tags = Tag.objects.filter(uuid__in=tag_uuids)
49
50 yield uuid, {
51 'name': name,
52 'vendor_code': vendor_code,
53 'page': {
54 'content': content
55 },
56 'tags': tags
57 }
58
59
60 def fetch_prices(root: Element, config) -> Iterator:
61 def get_prices(prices_el):
62 def get_(price_el: Element) -> float:
63 return float(price_el.find(config.xpaths['price']).text)
64 *prices, retail_price = sorted(
65 get_(price)
66 for price in prices_el.findall(config.xpaths['prices'])
67 )
68 return prices + [retail_price - settings.PRICE_REDUCER]
69 product_price_els = root.findall(config.xpaths['product_prices'])
70 for prices_el in product_price_els:
71 product_uuid = prices_el.find(config.xpaths['product_uuid']).text
72 prices = dict(zip(
73 config.extra_options['price_types'],
74 get_prices(prices_el)
75 ))
76 yield product_uuid, prices
77
78
79 def fetch_in_stock(root: Element, config: XmlFile) -> Iterator:
80 product_els = root.findall(config.xpaths['products'])
81 for product_el in product_els:
82 uuid = product_el.find(config.xpaths['product_uuid']).text
83 in_stock = product_el.find(config.xpaths['in_stock']).text
84
85 if not (in_stock.isdigit() and int(in_stock) >= 0):
86 in_stock = 0
87
88 yield uuid, {
89 'in_stock': in_stock,
90 }
91
92
93 product_file = XmlFile(
94 fetch_callback=fetch_products,
95 xml_path_pattern='**/webdata/**/goods/**/import*.xml',
96 xpath_queries={
97 'products': './/{}Товары/',
98 'name': '.{}Наименование',
99 'uuid': '.{}Ид',
100 'page_content': '.{}Описание',
101 'tags': '.{}ЗначенияСвойств/',
102 'tag_value_uuid': '.{}Значение',
103 'vendor_code': '.{0}ЗначенияРеквизитов/{0}ЗначениеРеквизита'
104 '[{0}Наименование="Код"]/{0}Значение',
105 },
106 )
107
108 price_file = XmlFile(
109 fetch_callback=fetch_prices,
110 xml_path_pattern='**/webdata/**/goods/**/prices*.xml',
111 xpath_queries={
112 'product_prices': './/{}Предложения/',
113 'product_uuid': '.{}Ид',
114 'prices': '.{}Цены/',
115 'price': '.{}ЦенаЗаЕдиницу',
116 },
117 extra_options={
118 'price_types': [
119 'purchase_price', 'wholesale_large', 'wholesale_medium',
120 'wholesale_small', 'price',
121 ],
122 },
123 )
124
125
126 in_stock_file = XmlFile(
127 fetch_callback=fetch_in_stock,
128 xml_path_pattern='**/webdata/**/goods/**/rests*.xml',
129 xpath_queries={
130 'products': './/{}Предложения/',
131 'product_uuid': '.{}Ид',
132 'in_stock': './/{}Количество',
133 },
134 )
135
136
137 def merge_data(*data) -> Dict[UUID, Data]:
138 """
139 Merge data from xml files with different structure.
140
141 Example: files with product names and prices.
142 """
143 product_data = defaultdict(dict)
144 for key, data in chain.from_iterable(filter(None, data)):
145 product_data[key].update(data)
146
147 return product_data
148
149
150 def clean_data(data: Dict[UUID, Data]):
151 def has_all_prices(_, product_data):
152 price_types = price_file.extra_options['price_types']
153 has = all(
154 product_data.get(price_type)
155 for price_type in price_types
156 )
157 if not has:
158 logger.info(NOT_SAVE_TEMPLATE.format(
159 entity='Product',
160 name=product_data['name'],
161 field='price'
162 ))
163 return has
164
165 def has_vendor_code(_, product_data):
166 has = bool(product_data['vendor_code'])
167
168 if not has:
169 logger.info(NOT_SAVE_TEMPLATE.format(
170 entity='Product',
171 name=product_data['name'],
172 field='vendor_code'
173 ))
174
175 return has
176
177 def has_uuid(uuid, product_data):
178 has = is_correct_uuid(uuid)
179 if not has:
180 logger.info(NOT_SAVE_TEMPLATE.format(
181 entity='Product',
182 name=product_data['name'],
183 field='uuid'
184 ))
185 return has
186
187 def filter_(product_data):
188 return all(
189 f(*product_data)
190 for f in [has_all_prices, has_uuid, has_vendor_code]
191 )
192
193 cleaned_data = dict(
194 product_data
195 for product_data in data.items()
196 if filter_(product_data)
197 )
198
199 return cleaned_data
200
201
202 def report(recipients=None, message=None):
203 message = message or render_to_string('report.html')
204
205 user_query = (
206 User.objects
207 .filter(is_staff=True, is_superuser=False, is_active=True, email__isnull=False)
208 )
209
210 recipient_list = recipients or [user.email for user in user_query]
211
212 if recipient_list:
213 send_mail(
214 subject='Обновления каталога товаров',
215 message=message,
216 from_email=settings.EMAIL_SENDER,
217 recipient_list=recipient_list,
218 html_message=message,
219 )
220
221 logger.info('Sent message to {}'.format(
222 reduce(lambda x, y: '{}, {}'.format(x, y), recipient_list)
223 ))
224
225
226 @transaction.atomic
227 def delete(data: Dict[UUID, Data]):
228 uuids = list(data)
229 page_count, _ = ProductPage.objects.exclude(
230 shopelectro_product__uuid__in=uuids).delete()
231 product_count, _ = Product.objects.exclude(
232 uuid__in=uuids).delete()
233 logger.info('{} products and {} pages were deleted.'.format(
234 product_count, page_count))
235
236
237 @transaction.atomic
238 def update(data: Dict[UUID, Data]) -> QuerySet:
239 def save(product, field, value):
240 if field == 'name' and getattr(product, field, None):
241 return
242 elif field == 'page':
243 for page_field, page_value in value.items():
244 if not getattr(product.page, page_field, ''):
245 setattr(product.page, page_field, page_value)
246 else:
247 setattr(product, field, value)
248
249 def merge(left: List, right: List) -> List:
250 """Merge two arrays with order preserving."""
251 return left + [e for e in right if e not in left]
252
253 products = Product.objects.filter(uuid__in=data)
254
255 for product in products:
256 product_data = data[str(product.uuid)]
257 for field, value in product_data.items():
258 if field != 'tags':
259 save(product, field, value)
260 else:
261 # Dirty patch for preserving tags, appended from admin.
262 # Still waiting 1C throwing out.
263 product.tags = merge(list(product.tags.all()), value)
264
265 product.save()
266 logger.info('{} products were updated.'.format(products.count()))
267 return products
268
269
270 @transaction.atomic
271 def create(data: Dict[UUID, Data], updated_products: QuerySet) -> QuerySet:
272 data = deepcopy(data)
273 uuids_for_create = (
274 set(data) - set(str(product.uuid) for product in updated_products)
275 )
276
277 for uuid in uuids_for_create:
278 product_data = data.get(uuid)
279 tags = product_data.pop('tags', {})
280 page_data = product_data.pop('page', {})
281
282 new_product = Product.objects.create(**product_data, uuid=uuid)
283 new_product.tags.set(tags)
284 for field, value in page_data.items():
285 setattr(new_product.page, field, value)
286 new_product.page.save()
287
288 created_products = Product.objects.filter(uuid__in=uuids_for_create)
289
290 logger.info('{} products were created.'.format(created_products.count()))
291 return created_products
292
293
294 class UpdateProductError(Exception):
295 pass
296
297
298 def main(*args, **kwargs):
299 cleaned_product_data = clean_data(merge_data(
300 product_file.get_data(),
301 price_file.get_data(),
302 in_stock_file.get_data(),
303 ))
304
305 if not cleaned_product_data:
306
307 parsed_files = {
308 'product_files': list(product_file.parsed_files),
309 'price_files': list(price_file.parsed_files),
310 'in_stock_files': list(in_stock_file.parsed_files),
311 }
312
313 if not any(parsed_files.values()):
314 message = 'Files does not exist: {}'.format(parsed_files)
315 else:
316 # TODO: happy debugging (:
317 message = 'The file structure has changed or it does not contain the required data.'
318
319 raise UpdateProductError(message)
320
321 delete(cleaned_product_data)
322 updated_products = update(cleaned_product_data)
323 created_products = create(cleaned_product_data, updated_products)
324
325 if created_products.exists():
326 report(kwargs['recipients'])
327
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/shopelectro/management/commands/_update_catalog/update_products.py b/shopelectro/management/commands/_update_catalog/update_products.py
--- a/shopelectro/management/commands/_update_catalog/update_products.py
+++ b/shopelectro/management/commands/_update_catalog/update_products.py
@@ -61,11 +61,13 @@
def get_prices(prices_el):
def get_(price_el: Element) -> float:
return float(price_el.find(config.xpaths['price']).text)
+ def prepare_retail_(price: float):
+ return round(price - settings.PRICE_REDUCER)
*prices, retail_price = sorted(
get_(price)
for price in prices_el.findall(config.xpaths['prices'])
)
- return prices + [retail_price - settings.PRICE_REDUCER]
+ return prices + [prepare_retail_(retail_price)]
product_price_els = root.findall(config.xpaths['product_prices'])
for prices_el in product_price_els:
product_uuid = prices_el.find(config.xpaths['product_uuid']).text
| {"golden_diff": "diff --git a/shopelectro/management/commands/_update_catalog/update_products.py b/shopelectro/management/commands/_update_catalog/update_products.py\n--- a/shopelectro/management/commands/_update_catalog/update_products.py\n+++ b/shopelectro/management/commands/_update_catalog/update_products.py\n@@ -61,11 +61,13 @@\n def get_prices(prices_el):\n def get_(price_el: Element) -> float:\n return float(price_el.find(config.xpaths['price']).text)\n+ def prepare_retail_(price: float):\n+ return round(price - settings.PRICE_REDUCER)\n *prices, retail_price = sorted(\n get_(price)\n for price in prices_el.findall(config.xpaths['prices'])\n )\n- return prices + [retail_price - settings.PRICE_REDUCER]\n+ return prices + [prepare_retail_(retail_price)]\n product_price_els = root.findall(config.xpaths['product_prices'])\n for prices_el in product_price_els:\n product_uuid = prices_el.find(config.xpaths['product_uuid']).text\n", "issue": "ProductPage and ya.market prices not equals\nThis product in `yandex.yml` (from prod) has price `200 RUB` on it's product page\r\n\r\n```\r\n <offer id=\"4928\" available=\"true\">\r\n <url>https://www.shopelectro.ru/catalog/products/4928/?utm_source=YM&utm_medium=cpc&utm_content=akkumuliatory-270&utm_term=4928</url>\r\n <price>199.98</price>\r\n <purchase_price>106.8111</purchase_price>\r\n <currencyId>RUR</currencyId>\r\n```\n", "before_files": [{"content": "import logging\nfrom collections import defaultdict\nfrom copy import deepcopy\nfrom functools import reduce\nfrom itertools import chain\nfrom typing import Dict, Iterator, List\nfrom xml.etree.ElementTree import Element\n\nfrom django.conf import settings\nfrom django.contrib.auth.models import User\nfrom django.core.mail import send_mail\nfrom django.db import transaction\nfrom django.db.models import QuerySet\nfrom django.template.loader import render_to_string\n\nfrom shopelectro.management.commands._update_catalog.utils import (\n XmlFile, is_correct_uuid, NOT_SAVE_TEMPLATE, UUID, Data,\n)\nfrom shopelectro.models import Product, ProductPage, Tag\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef fetch_products(root: Element, config: XmlFile) -> Iterator:\n product_els = root.findall(config.xpaths['products'])\n for product_el in product_els:\n name = product_el.find(config.xpaths['name']).text\n uuid = product_el.find(config.xpaths['uuid']).text\n vendor_code = product_el.find(\n config.xpaths['vendor_code']\n ).text.lstrip('0')\n content = product_el.find(config.xpaths['page_content']).text or ''\n\n tag_value_els = (\n tag_el.find(config.xpaths['tag_value_uuid'])\n for tag_el in product_el.findall(config.xpaths['tags'])\n if tag_el is not None\n )\n\n tag_uuids = list(filter(is_correct_uuid, (\n tag_value.text\n for tag_value in tag_value_els\n # should use 'is not None', because __bool__ does not defined\n if tag_value is not None\n )))\n\n tags = Tag.objects.filter(uuid__in=tag_uuids)\n\n yield uuid, {\n 'name': name,\n 'vendor_code': vendor_code,\n 'page': {\n 'content': content\n },\n 'tags': tags\n }\n\n\ndef fetch_prices(root: Element, config) -> Iterator:\n def get_prices(prices_el):\n def get_(price_el: Element) -> float:\n return float(price_el.find(config.xpaths['price']).text)\n *prices, retail_price = sorted(\n get_(price)\n for price in prices_el.findall(config.xpaths['prices'])\n )\n return prices + [retail_price - settings.PRICE_REDUCER]\n product_price_els = root.findall(config.xpaths['product_prices'])\n for prices_el in product_price_els:\n product_uuid = 
prices_el.find(config.xpaths['product_uuid']).text\n prices = dict(zip(\n config.extra_options['price_types'],\n get_prices(prices_el)\n ))\n yield product_uuid, prices\n\n\ndef fetch_in_stock(root: Element, config: XmlFile) -> Iterator:\n product_els = root.findall(config.xpaths['products'])\n for product_el in product_els:\n uuid = product_el.find(config.xpaths['product_uuid']).text\n in_stock = product_el.find(config.xpaths['in_stock']).text\n\n if not (in_stock.isdigit() and int(in_stock) >= 0):\n in_stock = 0\n\n yield uuid, {\n 'in_stock': in_stock,\n }\n\n\nproduct_file = XmlFile(\n fetch_callback=fetch_products,\n xml_path_pattern='**/webdata/**/goods/**/import*.xml',\n xpath_queries={\n 'products': './/{}\u0422\u043e\u0432\u0430\u0440\u044b/',\n 'name': '.{}\u041d\u0430\u0438\u043c\u0435\u043d\u043e\u0432\u0430\u043d\u0438\u0435',\n 'uuid': '.{}\u0418\u0434',\n 'page_content': '.{}\u041e\u043f\u0438\u0441\u0430\u043d\u0438\u0435',\n 'tags': '.{}\u0417\u043d\u0430\u0447\u0435\u043d\u0438\u044f\u0421\u0432\u043e\u0439\u0441\u0442\u0432/',\n 'tag_value_uuid': '.{}\u0417\u043d\u0430\u0447\u0435\u043d\u0438\u0435',\n 'vendor_code': '.{0}\u0417\u043d\u0430\u0447\u0435\u043d\u0438\u044f\u0420\u0435\u043a\u0432\u0438\u0437\u0438\u0442\u043e\u0432/{0}\u0417\u043d\u0430\u0447\u0435\u043d\u0438\u0435\u0420\u0435\u043a\u0432\u0438\u0437\u0438\u0442\u0430'\n '[{0}\u041d\u0430\u0438\u043c\u0435\u043d\u043e\u0432\u0430\u043d\u0438\u0435=\"\u041a\u043e\u0434\"]/{0}\u0417\u043d\u0430\u0447\u0435\u043d\u0438\u0435',\n },\n)\n\nprice_file = XmlFile(\n fetch_callback=fetch_prices,\n xml_path_pattern='**/webdata/**/goods/**/prices*.xml',\n xpath_queries={\n 'product_prices': './/{}\u041f\u0440\u0435\u0434\u043b\u043e\u0436\u0435\u043d\u0438\u044f/',\n 'product_uuid': '.{}\u0418\u0434',\n 'prices': '.{}\u0426\u0435\u043d\u044b/',\n 'price': '.{}\u0426\u0435\u043d\u0430\u0417\u0430\u0415\u0434\u0438\u043d\u0438\u0446\u0443',\n },\n extra_options={\n 'price_types': [\n 'purchase_price', 'wholesale_large', 'wholesale_medium',\n 'wholesale_small', 'price',\n ],\n },\n)\n\n\nin_stock_file = XmlFile(\n fetch_callback=fetch_in_stock,\n xml_path_pattern='**/webdata/**/goods/**/rests*.xml',\n xpath_queries={\n 'products': './/{}\u041f\u0440\u0435\u0434\u043b\u043e\u0436\u0435\u043d\u0438\u044f/',\n 'product_uuid': '.{}\u0418\u0434',\n 'in_stock': './/{}\u041a\u043e\u043b\u0438\u0447\u0435\u0441\u0442\u0432\u043e',\n },\n)\n\n\ndef merge_data(*data) -> Dict[UUID, Data]:\n \"\"\"\n Merge data from xml files with different structure.\n\n Example: files with product names and prices.\n \"\"\"\n product_data = defaultdict(dict)\n for key, data in chain.from_iterable(filter(None, data)):\n product_data[key].update(data)\n\n return product_data\n\n\ndef clean_data(data: Dict[UUID, Data]):\n def has_all_prices(_, product_data):\n price_types = price_file.extra_options['price_types']\n has = all(\n product_data.get(price_type)\n for price_type in price_types\n )\n if not has:\n logger.info(NOT_SAVE_TEMPLATE.format(\n entity='Product',\n name=product_data['name'],\n field='price'\n ))\n return has\n\n def has_vendor_code(_, product_data):\n has = bool(product_data['vendor_code'])\n\n if not has:\n logger.info(NOT_SAVE_TEMPLATE.format(\n entity='Product',\n name=product_data['name'],\n field='vendor_code'\n ))\n\n return has\n\n def has_uuid(uuid, product_data):\n has = is_correct_uuid(uuid)\n if not has:\n logger.info(NOT_SAVE_TEMPLATE.format(\n entity='Product',\n name=product_data['name'],\n field='uuid'\n ))\n return 
has\n\n def filter_(product_data):\n return all(\n f(*product_data)\n for f in [has_all_prices, has_uuid, has_vendor_code]\n )\n\n cleaned_data = dict(\n product_data\n for product_data in data.items()\n if filter_(product_data)\n )\n\n return cleaned_data\n\n\ndef report(recipients=None, message=None):\n message = message or render_to_string('report.html')\n\n user_query = (\n User.objects\n .filter(is_staff=True, is_superuser=False, is_active=True, email__isnull=False)\n )\n\n recipient_list = recipients or [user.email for user in user_query]\n\n if recipient_list:\n send_mail(\n subject='\u041e\u0431\u043d\u043e\u0432\u043b\u0435\u043d\u0438\u044f \u043a\u0430\u0442\u0430\u043b\u043e\u0433\u0430 \u0442\u043e\u0432\u0430\u0440\u043e\u0432',\n message=message,\n from_email=settings.EMAIL_SENDER,\n recipient_list=recipient_list,\n html_message=message,\n )\n\n logger.info('Sent message to {}'.format(\n reduce(lambda x, y: '{}, {}'.format(x, y), recipient_list)\n ))\n\n\[email protected]\ndef delete(data: Dict[UUID, Data]):\n uuids = list(data)\n page_count, _ = ProductPage.objects.exclude(\n shopelectro_product__uuid__in=uuids).delete()\n product_count, _ = Product.objects.exclude(\n uuid__in=uuids).delete()\n logger.info('{} products and {} pages were deleted.'.format(\n product_count, page_count))\n\n\[email protected]\ndef update(data: Dict[UUID, Data]) -> QuerySet:\n def save(product, field, value):\n if field == 'name' and getattr(product, field, None):\n return\n elif field == 'page':\n for page_field, page_value in value.items():\n if not getattr(product.page, page_field, ''):\n setattr(product.page, page_field, page_value)\n else:\n setattr(product, field, value)\n\n def merge(left: List, right: List) -> List:\n \"\"\"Merge two arrays with order preserving.\"\"\"\n return left + [e for e in right if e not in left]\n\n products = Product.objects.filter(uuid__in=data)\n\n for product in products:\n product_data = data[str(product.uuid)]\n for field, value in product_data.items():\n if field != 'tags':\n save(product, field, value)\n else:\n # Dirty patch for preserving tags, appended from admin.\n # Still waiting 1C throwing out.\n product.tags = merge(list(product.tags.all()), value)\n\n product.save()\n logger.info('{} products were updated.'.format(products.count()))\n return products\n\n\[email protected]\ndef create(data: Dict[UUID, Data], updated_products: QuerySet) -> QuerySet:\n data = deepcopy(data)\n uuids_for_create = (\n set(data) - set(str(product.uuid) for product in updated_products)\n )\n\n for uuid in uuids_for_create:\n product_data = data.get(uuid)\n tags = product_data.pop('tags', {})\n page_data = product_data.pop('page', {})\n\n new_product = Product.objects.create(**product_data, uuid=uuid)\n new_product.tags.set(tags)\n for field, value in page_data.items():\n setattr(new_product.page, field, value)\n new_product.page.save()\n\n created_products = Product.objects.filter(uuid__in=uuids_for_create)\n\n logger.info('{} products were created.'.format(created_products.count()))\n return created_products\n\n\nclass UpdateProductError(Exception):\n pass\n\n\ndef main(*args, **kwargs):\n cleaned_product_data = clean_data(merge_data(\n product_file.get_data(),\n price_file.get_data(),\n in_stock_file.get_data(),\n ))\n\n if not cleaned_product_data:\n\n parsed_files = {\n 'product_files': list(product_file.parsed_files),\n 'price_files': list(price_file.parsed_files),\n 'in_stock_files': list(in_stock_file.parsed_files),\n }\n\n if not any(parsed_files.values()):\n 
message = 'Files does not exist: {}'.format(parsed_files)\n else:\n # TODO: happy debugging (:\n message = 'The file structure has changed or it does not contain the required data.'\n\n raise UpdateProductError(message)\n\n delete(cleaned_product_data)\n updated_products = update(cleaned_product_data)\n created_products = create(cleaned_product_data, updated_products)\n\n if created_products.exists():\n report(kwargs['recipients'])\n", "path": "shopelectro/management/commands/_update_catalog/update_products.py"}], "after_files": [{"content": "import logging\nfrom collections import defaultdict\nfrom copy import deepcopy\nfrom functools import reduce\nfrom itertools import chain\nfrom typing import Dict, Iterator, List\nfrom xml.etree.ElementTree import Element\n\nfrom django.conf import settings\nfrom django.contrib.auth.models import User\nfrom django.core.mail import send_mail\nfrom django.db import transaction\nfrom django.db.models import QuerySet\nfrom django.template.loader import render_to_string\n\nfrom shopelectro.management.commands._update_catalog.utils import (\n XmlFile, is_correct_uuid, NOT_SAVE_TEMPLATE, UUID, Data,\n)\nfrom shopelectro.models import Product, ProductPage, Tag\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef fetch_products(root: Element, config: XmlFile) -> Iterator:\n product_els = root.findall(config.xpaths['products'])\n for product_el in product_els:\n name = product_el.find(config.xpaths['name']).text\n uuid = product_el.find(config.xpaths['uuid']).text\n vendor_code = product_el.find(\n config.xpaths['vendor_code']\n ).text.lstrip('0')\n content = product_el.find(config.xpaths['page_content']).text or ''\n\n tag_value_els = (\n tag_el.find(config.xpaths['tag_value_uuid'])\n for tag_el in product_el.findall(config.xpaths['tags'])\n if tag_el is not None\n )\n\n tag_uuids = list(filter(is_correct_uuid, (\n tag_value.text\n for tag_value in tag_value_els\n # should use 'is not None', because __bool__ does not defined\n if tag_value is not None\n )))\n\n tags = Tag.objects.filter(uuid__in=tag_uuids)\n\n yield uuid, {\n 'name': name,\n 'vendor_code': vendor_code,\n 'page': {\n 'content': content\n },\n 'tags': tags\n }\n\n\ndef fetch_prices(root: Element, config) -> Iterator:\n def get_prices(prices_el):\n def get_(price_el: Element) -> float:\n return float(price_el.find(config.xpaths['price']).text)\n def prepare_retail_(price: float):\n return round(price - settings.PRICE_REDUCER)\n *prices, retail_price = sorted(\n get_(price)\n for price in prices_el.findall(config.xpaths['prices'])\n )\n return prices + [prepare_retail_(retail_price)]\n product_price_els = root.findall(config.xpaths['product_prices'])\n for prices_el in product_price_els:\n product_uuid = prices_el.find(config.xpaths['product_uuid']).text\n prices = dict(zip(\n config.extra_options['price_types'],\n get_prices(prices_el)\n ))\n yield product_uuid, prices\n\n\ndef fetch_in_stock(root: Element, config: XmlFile) -> Iterator:\n product_els = root.findall(config.xpaths['products'])\n for product_el in product_els:\n uuid = product_el.find(config.xpaths['product_uuid']).text\n in_stock = product_el.find(config.xpaths['in_stock']).text\n\n if not (in_stock.isdigit() and int(in_stock) >= 0):\n in_stock = 0\n\n yield uuid, {\n 'in_stock': in_stock,\n }\n\n\nproduct_file = XmlFile(\n fetch_callback=fetch_products,\n xml_path_pattern='**/webdata/**/goods/**/import*.xml',\n xpath_queries={\n 'products': './/{}\u0422\u043e\u0432\u0430\u0440\u044b/',\n 'name': 
'.{}\u041d\u0430\u0438\u043c\u0435\u043d\u043e\u0432\u0430\u043d\u0438\u0435',\n 'uuid': '.{}\u0418\u0434',\n 'page_content': '.{}\u041e\u043f\u0438\u0441\u0430\u043d\u0438\u0435',\n 'tags': '.{}\u0417\u043d\u0430\u0447\u0435\u043d\u0438\u044f\u0421\u0432\u043e\u0439\u0441\u0442\u0432/',\n 'tag_value_uuid': '.{}\u0417\u043d\u0430\u0447\u0435\u043d\u0438\u0435',\n 'vendor_code': '.{0}\u0417\u043d\u0430\u0447\u0435\u043d\u0438\u044f\u0420\u0435\u043a\u0432\u0438\u0437\u0438\u0442\u043e\u0432/{0}\u0417\u043d\u0430\u0447\u0435\u043d\u0438\u0435\u0420\u0435\u043a\u0432\u0438\u0437\u0438\u0442\u0430'\n '[{0}\u041d\u0430\u0438\u043c\u0435\u043d\u043e\u0432\u0430\u043d\u0438\u0435=\"\u041a\u043e\u0434\"]/{0}\u0417\u043d\u0430\u0447\u0435\u043d\u0438\u0435',\n },\n)\n\nprice_file = XmlFile(\n fetch_callback=fetch_prices,\n xml_path_pattern='**/webdata/**/goods/**/prices*.xml',\n xpath_queries={\n 'product_prices': './/{}\u041f\u0440\u0435\u0434\u043b\u043e\u0436\u0435\u043d\u0438\u044f/',\n 'product_uuid': '.{}\u0418\u0434',\n 'prices': '.{}\u0426\u0435\u043d\u044b/',\n 'price': '.{}\u0426\u0435\u043d\u0430\u0417\u0430\u0415\u0434\u0438\u043d\u0438\u0446\u0443',\n },\n extra_options={\n 'price_types': [\n 'purchase_price', 'wholesale_large', 'wholesale_medium',\n 'wholesale_small', 'price',\n ],\n },\n)\n\n\nin_stock_file = XmlFile(\n fetch_callback=fetch_in_stock,\n xml_path_pattern='**/webdata/**/goods/**/rests*.xml',\n xpath_queries={\n 'products': './/{}\u041f\u0440\u0435\u0434\u043b\u043e\u0436\u0435\u043d\u0438\u044f/',\n 'product_uuid': '.{}\u0418\u0434',\n 'in_stock': './/{}\u041a\u043e\u043b\u0438\u0447\u0435\u0441\u0442\u0432\u043e',\n },\n)\n\n\ndef merge_data(*data) -> Dict[UUID, Data]:\n \"\"\"\n Merge data from xml files with different structure.\n\n Example: files with product names and prices.\n \"\"\"\n product_data = defaultdict(dict)\n for key, data in chain.from_iterable(filter(None, data)):\n product_data[key].update(data)\n\n return product_data\n\n\ndef clean_data(data: Dict[UUID, Data]):\n def has_all_prices(_, product_data):\n price_types = price_file.extra_options['price_types']\n has = all(\n product_data.get(price_type)\n for price_type in price_types\n )\n if not has:\n logger.info(NOT_SAVE_TEMPLATE.format(\n entity='Product',\n name=product_data['name'],\n field='price'\n ))\n return has\n\n def has_vendor_code(_, product_data):\n has = bool(product_data['vendor_code'])\n\n if not has:\n logger.info(NOT_SAVE_TEMPLATE.format(\n entity='Product',\n name=product_data['name'],\n field='vendor_code'\n ))\n\n return has\n\n def has_uuid(uuid, product_data):\n has = is_correct_uuid(uuid)\n if not has:\n logger.info(NOT_SAVE_TEMPLATE.format(\n entity='Product',\n name=product_data['name'],\n field='uuid'\n ))\n return has\n\n def filter_(product_data):\n return all(\n f(*product_data)\n for f in [has_all_prices, has_uuid, has_vendor_code]\n )\n\n cleaned_data = dict(\n product_data\n for product_data in data.items()\n if filter_(product_data)\n )\n\n return cleaned_data\n\n\ndef report(recipients=None, message=None):\n message = message or render_to_string('report.html')\n\n user_query = (\n User.objects\n .filter(is_staff=True, is_superuser=False, is_active=True, email__isnull=False)\n )\n\n recipient_list = recipients or [user.email for user in user_query]\n\n if recipient_list:\n send_mail(\n subject='\u041e\u0431\u043d\u043e\u0432\u043b\u0435\u043d\u0438\u044f \u043a\u0430\u0442\u0430\u043b\u043e\u0433\u0430 \u0442\u043e\u0432\u0430\u0440\u043e\u0432',\n message=message,\n 
from_email=settings.EMAIL_SENDER,\n recipient_list=recipient_list,\n html_message=message,\n )\n\n logger.info('Sent message to {}'.format(\n reduce(lambda x, y: '{}, {}'.format(x, y), recipient_list)\n ))\n\n\[email protected]\ndef delete(data: Dict[UUID, Data]):\n uuids = list(data)\n page_count, _ = ProductPage.objects.exclude(\n shopelectro_product__uuid__in=uuids).delete()\n product_count, _ = Product.objects.exclude(\n uuid__in=uuids).delete()\n logger.info('{} products and {} pages were deleted.'.format(\n product_count, page_count))\n\n\[email protected]\ndef update(data: Dict[UUID, Data]) -> QuerySet:\n def save(product, field, value):\n if field == 'name' and getattr(product, field, None):\n return\n elif field == 'page':\n for page_field, page_value in value.items():\n if not getattr(product.page, page_field, ''):\n setattr(product.page, page_field, page_value)\n else:\n setattr(product, field, value)\n\n def merge(left: List, right: List) -> List:\n \"\"\"Merge two arrays with order preserving.\"\"\"\n return left + [e for e in right if e not in left]\n\n products = Product.objects.filter(uuid__in=data)\n\n for product in products:\n product_data = data[str(product.uuid)]\n for field, value in product_data.items():\n if field != 'tags':\n save(product, field, value)\n else:\n # Dirty patch for preserving tags, appended from admin.\n # Still waiting 1C throwing out.\n product.tags = merge(list(product.tags.all()), value)\n\n product.save()\n logger.info('{} products were updated.'.format(products.count()))\n return products\n\n\[email protected]\ndef create(data: Dict[UUID, Data], updated_products: QuerySet) -> QuerySet:\n data = deepcopy(data)\n uuids_for_create = (\n set(data) - set(str(product.uuid) for product in updated_products)\n )\n\n for uuid in uuids_for_create:\n product_data = data.get(uuid)\n tags = product_data.pop('tags', {})\n page_data = product_data.pop('page', {})\n\n new_product = Product.objects.create(**product_data, uuid=uuid)\n new_product.tags.set(tags)\n for field, value in page_data.items():\n setattr(new_product.page, field, value)\n new_product.page.save()\n\n created_products = Product.objects.filter(uuid__in=uuids_for_create)\n\n logger.info('{} products were created.'.format(created_products.count()))\n return created_products\n\n\nclass UpdateProductError(Exception):\n pass\n\n\ndef main(*args, **kwargs):\n cleaned_product_data = clean_data(merge_data(\n product_file.get_data(),\n price_file.get_data(),\n in_stock_file.get_data(),\n ))\n\n if not cleaned_product_data:\n\n parsed_files = {\n 'product_files': list(product_file.parsed_files),\n 'price_files': list(price_file.parsed_files),\n 'in_stock_files': list(in_stock_file.parsed_files),\n }\n\n if not any(parsed_files.values()):\n message = 'Files does not exist: {}'.format(parsed_files)\n else:\n # TODO: happy debugging (:\n message = 'The file structure has changed or it does not contain the required data.'\n\n raise UpdateProductError(message)\n\n delete(cleaned_product_data)\n updated_products = update(cleaned_product_data)\n created_products = create(cleaned_product_data, updated_products)\n\n if created_products.exists():\n report(kwargs['recipients'])\n", "path": "shopelectro/management/commands/_update_catalog/update_products.py"}]} | 3,563 | 241 |
gh_patches_debug_38540 | rasdani/github-patches | git_diff | adap__flower-458 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Improve docstring for `start_keras_client`
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/py/flwr/common/__init__.py`
Content:
```
1 # Copyright 2020 Adap GmbH. All Rights Reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 # ==============================================================================
15 """Flower utilities shared between server and client."""
16
17
18 from .parameter import bytes_to_ndarray as bytes_to_ndarray
19 from .parameter import ndarray_to_bytes as ndarray_to_bytes
20 from .parameter import parameters_to_weights as parameters_to_weights
21 from .parameter import weights_to_parameters as weights_to_parameters
22 from .typing import Disconnect as Disconnect
23 from .typing import EvaluateIns as EvaluateIns
24 from .typing import EvaluateRes as EvaluateRes
25 from .typing import FitIns as FitIns
26 from .typing import FitRes as FitRes
27 from .typing import Parameters as Parameters
28 from .typing import ParametersRes as ParametersRes
29 from .typing import Reconnect as Reconnect
30 from .typing import Weights as Weights
31
32 GRPC_MAX_MESSAGE_LENGTH: int = 536870912 # == 512 * 1024 * 1024
33
34 __all__ = [
35 "bytes_to_ndarray",
36 "Disconnect",
37 "EvaluateIns",
38 "EvaluateRes",
39 "FitIns",
40 "FitRes",
41 "GRPC_MAX_MESSAGE_LENGTH",
42 "ndarray_to_bytes",
43 "Parameters",
44 "parameters_to_weights",
45 "ParametersRes",
46 "Reconnect",
47 "Weights",
48 "weights_to_parameters",
49 ]
50
```
Path: `src/py/flwr/client/app.py`
Content:
```
1 # Copyright 2020 Adap GmbH. All Rights Reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 # ==============================================================================
15 """Flower client app."""
16
17
18 import time
19 from logging import INFO
20
21 from flwr.common import GRPC_MAX_MESSAGE_LENGTH
22 from flwr.common.logger import log
23
24 from .client import Client
25 from .grpc_client.connection import insecure_grpc_connection
26 from .grpc_client.message_handler import handle
27 from .keras_client import KerasClient, KerasClientWrapper
28
29
30 def start_client(
31 server_address: str,
32 client: Client,
33 grpc_max_message_length: int = GRPC_MAX_MESSAGE_LENGTH,
34 ) -> None:
35 """Start a Flower Client which connects to a gRPC server."""
36 while True:
37 sleep_duration: int = 0
38 with insecure_grpc_connection(
39 server_address, max_message_length=grpc_max_message_length
40 ) as conn:
41 receive, send = conn
42 log(INFO, "Opened (insecure) gRPC connection")
43
44 while True:
45 server_message = receive()
46 client_message, sleep_duration, keep_going = handle(
47 client, server_message
48 )
49 send(client_message)
50 if not keep_going:
51 break
52 if sleep_duration == 0:
53 log(INFO, "Disconnect and shut down")
54 break
55 # Sleep and reconnect afterwards
56 log(
57 INFO,
58 "Disconnect, then re-establish connection after %s second(s)",
59 sleep_duration,
60 )
61 time.sleep(sleep_duration)
62
63
64 def start_keras_client(server_address: str, client: KerasClient) -> None:
65 """Start a Flower KerasClient which connects to a gRPC server."""
66
67 # Wrap the Keras client
68 flower_client = KerasClientWrapper(client)
69
70 # Start
71 start_client(server_address, flower_client)
72
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/py/flwr/client/app.py b/src/py/flwr/client/app.py
--- a/src/py/flwr/client/app.py
+++ b/src/py/flwr/client/app.py
@@ -32,7 +32,25 @@
client: Client,
grpc_max_message_length: int = GRPC_MAX_MESSAGE_LENGTH,
) -> None:
- """Start a Flower Client which connects to a gRPC server."""
+ """Start a Flower Client which connects to a gRPC server.
+
+ Arguments:
+ server_address: str. The IPv6 address of the server. If the Flower
+ server runs on the same machine on port 8080, then `server_address`
+ would be `"[::]:8080"`.
+ client: flwr.client.Client. An implementation of the abstract base
+ class `flwr.client.Client`.
+ grpc_max_message_length: int (default: 536_870_912, this equals 512MB).
+ The maximum length of gRPC messages that can be exchanged with the
+ Flower server. The default should be sufficient for most models.
+ Users who train very large models might need to increase this
+ value. Note that the Flower server needs to started with the same
+ value (see `flwr.server.start_server`), otherwise it will not know
+ about the increased limit and block larger messages.
+
+ Returns:
+ None.
+ """
while True:
sleep_duration: int = 0
with insecure_grpc_connection(
@@ -61,11 +79,37 @@
time.sleep(sleep_duration)
-def start_keras_client(server_address: str, client: KerasClient) -> None:
- """Start a Flower KerasClient which connects to a gRPC server."""
+def start_keras_client(
+ server_address: str,
+ client: KerasClient,
+ grpc_max_message_length: int = GRPC_MAX_MESSAGE_LENGTH,
+) -> None:
+ """Start a Flower KerasClient which connects to a gRPC server.
+
+ Arguments:
+ server_address: str. The IPv6 address of the server. If the Flower
+ server runs on the same machine on port 8080, then `server_address`
+ would be `"[::]:8080"`.
+ client: flwr.client.KerasClient. An implementation of the abstract base
+ class `flwr.client.KerasClient`.
+ grpc_max_message_length: int (default: 536_870_912, this equals 512MB).
+ The maximum length of gRPC messages that can be exchanged with the
+ Flower server. The default should be sufficient for most models.
+ Users who train very large models might need to increase this
+ value. Note that the Flower server needs to started with the same
+ value (see `flwr.server.start_server`), otherwise it will not know
+ about the increased limit and block larger messages.
+
+ Returns:
+ None.
+ """
# Wrap the Keras client
flower_client = KerasClientWrapper(client)
# Start
- start_client(server_address, flower_client)
+ start_client(
+ server_address=server_address,
+ client=flower_client,
+ grpc_max_message_length=grpc_max_message_length,
+ )
diff --git a/src/py/flwr/common/__init__.py b/src/py/flwr/common/__init__.py
--- a/src/py/flwr/common/__init__.py
+++ b/src/py/flwr/common/__init__.py
@@ -29,7 +29,7 @@
from .typing import Reconnect as Reconnect
from .typing import Weights as Weights
-GRPC_MAX_MESSAGE_LENGTH: int = 536870912 # == 512 * 1024 * 1024
+GRPC_MAX_MESSAGE_LENGTH: int = 536_870_912 # == 512 * 1024 * 1024
__all__ = [
"bytes_to_ndarray",
| {"golden_diff": "diff --git a/src/py/flwr/client/app.py b/src/py/flwr/client/app.py\n--- a/src/py/flwr/client/app.py\n+++ b/src/py/flwr/client/app.py\n@@ -32,7 +32,25 @@\n client: Client,\n grpc_max_message_length: int = GRPC_MAX_MESSAGE_LENGTH,\n ) -> None:\n- \"\"\"Start a Flower Client which connects to a gRPC server.\"\"\"\n+ \"\"\"Start a Flower Client which connects to a gRPC server.\n+\n+ Arguments:\n+ server_address: str. The IPv6 address of the server. If the Flower\n+ server runs on the same machine on port 8080, then `server_address`\n+ would be `\"[::]:8080\"`.\n+ client: flwr.client.Client. An implementation of the abstract base\n+ class `flwr.client.Client`.\n+ grpc_max_message_length: int (default: 536_870_912, this equals 512MB).\n+ The maximum length of gRPC messages that can be exchanged with the\n+ Flower server. The default should be sufficient for most models.\n+ Users who train very large models might need to increase this\n+ value. Note that the Flower server needs to started with the same\n+ value (see `flwr.server.start_server`), otherwise it will not know\n+ about the increased limit and block larger messages.\n+\n+ Returns:\n+ None.\n+ \"\"\"\n while True:\n sleep_duration: int = 0\n with insecure_grpc_connection(\n@@ -61,11 +79,37 @@\n time.sleep(sleep_duration)\n \n \n-def start_keras_client(server_address: str, client: KerasClient) -> None:\n- \"\"\"Start a Flower KerasClient which connects to a gRPC server.\"\"\"\n+def start_keras_client(\n+ server_address: str,\n+ client: KerasClient,\n+ grpc_max_message_length: int = GRPC_MAX_MESSAGE_LENGTH,\n+) -> None:\n+ \"\"\"Start a Flower KerasClient which connects to a gRPC server.\n+\n+ Arguments:\n+ server_address: str. The IPv6 address of the server. If the Flower\n+ server runs on the same machine on port 8080, then `server_address`\n+ would be `\"[::]:8080\"`.\n+ client: flwr.client.KerasClient. An implementation of the abstract base\n+ class `flwr.client.KerasClient`.\n+ grpc_max_message_length: int (default: 536_870_912, this equals 512MB).\n+ The maximum length of gRPC messages that can be exchanged with the\n+ Flower server. The default should be sufficient for most models.\n+ Users who train very large models might need to increase this\n+ value. Note that the Flower server needs to started with the same\n+ value (see `flwr.server.start_server`), otherwise it will not know\n+ about the increased limit and block larger messages.\n+\n+ Returns:\n+ None.\n+ \"\"\"\n \n # Wrap the Keras client\n flower_client = KerasClientWrapper(client)\n \n # Start\n- start_client(server_address, flower_client)\n+ start_client(\n+ server_address=server_address,\n+ client=flower_client,\n+ grpc_max_message_length=grpc_max_message_length,\n+ )\ndiff --git a/src/py/flwr/common/__init__.py b/src/py/flwr/common/__init__.py\n--- a/src/py/flwr/common/__init__.py\n+++ b/src/py/flwr/common/__init__.py\n@@ -29,7 +29,7 @@\n from .typing import Reconnect as Reconnect\n from .typing import Weights as Weights\n \n-GRPC_MAX_MESSAGE_LENGTH: int = 536870912 # == 512 * 1024 * 1024\n+GRPC_MAX_MESSAGE_LENGTH: int = 536_870_912 # == 512 * 1024 * 1024\n \n __all__ = [\n \"bytes_to_ndarray\",\n", "issue": "Improve docstring for `start_keras_client`\n\n", "before_files": [{"content": "# Copyright 2020 Adap GmbH. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Flower utilities shared between server and client.\"\"\"\n\n\nfrom .parameter import bytes_to_ndarray as bytes_to_ndarray\nfrom .parameter import ndarray_to_bytes as ndarray_to_bytes\nfrom .parameter import parameters_to_weights as parameters_to_weights\nfrom .parameter import weights_to_parameters as weights_to_parameters\nfrom .typing import Disconnect as Disconnect\nfrom .typing import EvaluateIns as EvaluateIns\nfrom .typing import EvaluateRes as EvaluateRes\nfrom .typing import FitIns as FitIns\nfrom .typing import FitRes as FitRes\nfrom .typing import Parameters as Parameters\nfrom .typing import ParametersRes as ParametersRes\nfrom .typing import Reconnect as Reconnect\nfrom .typing import Weights as Weights\n\nGRPC_MAX_MESSAGE_LENGTH: int = 536870912 # == 512 * 1024 * 1024\n\n__all__ = [\n \"bytes_to_ndarray\",\n \"Disconnect\",\n \"EvaluateIns\",\n \"EvaluateRes\",\n \"FitIns\",\n \"FitRes\",\n \"GRPC_MAX_MESSAGE_LENGTH\",\n \"ndarray_to_bytes\",\n \"Parameters\",\n \"parameters_to_weights\",\n \"ParametersRes\",\n \"Reconnect\",\n \"Weights\",\n \"weights_to_parameters\",\n]\n", "path": "src/py/flwr/common/__init__.py"}, {"content": "# Copyright 2020 Adap GmbH. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Flower client app.\"\"\"\n\n\nimport time\nfrom logging import INFO\n\nfrom flwr.common import GRPC_MAX_MESSAGE_LENGTH\nfrom flwr.common.logger import log\n\nfrom .client import Client\nfrom .grpc_client.connection import insecure_grpc_connection\nfrom .grpc_client.message_handler import handle\nfrom .keras_client import KerasClient, KerasClientWrapper\n\n\ndef start_client(\n server_address: str,\n client: Client,\n grpc_max_message_length: int = GRPC_MAX_MESSAGE_LENGTH,\n) -> None:\n \"\"\"Start a Flower Client which connects to a gRPC server.\"\"\"\n while True:\n sleep_duration: int = 0\n with insecure_grpc_connection(\n server_address, max_message_length=grpc_max_message_length\n ) as conn:\n receive, send = conn\n log(INFO, \"Opened (insecure) gRPC connection\")\n\n while True:\n server_message = receive()\n client_message, sleep_duration, keep_going = handle(\n client, server_message\n )\n send(client_message)\n if not keep_going:\n break\n if sleep_duration == 0:\n log(INFO, \"Disconnect and shut down\")\n break\n # Sleep and reconnect afterwards\n log(\n INFO,\n \"Disconnect, then re-establish connection after %s second(s)\",\n sleep_duration,\n )\n time.sleep(sleep_duration)\n\n\ndef start_keras_client(server_address: str, client: KerasClient) -> None:\n \"\"\"Start a Flower KerasClient which connects to a gRPC server.\"\"\"\n\n # Wrap the Keras client\n flower_client = KerasClientWrapper(client)\n\n # Start\n start_client(server_address, flower_client)\n", "path": "src/py/flwr/client/app.py"}], "after_files": [{"content": "# Copyright 2020 Adap GmbH. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Flower utilities shared between server and client.\"\"\"\n\n\nfrom .parameter import bytes_to_ndarray as bytes_to_ndarray\nfrom .parameter import ndarray_to_bytes as ndarray_to_bytes\nfrom .parameter import parameters_to_weights as parameters_to_weights\nfrom .parameter import weights_to_parameters as weights_to_parameters\nfrom .typing import Disconnect as Disconnect\nfrom .typing import EvaluateIns as EvaluateIns\nfrom .typing import EvaluateRes as EvaluateRes\nfrom .typing import FitIns as FitIns\nfrom .typing import FitRes as FitRes\nfrom .typing import Parameters as Parameters\nfrom .typing import ParametersRes as ParametersRes\nfrom .typing import Reconnect as Reconnect\nfrom .typing import Weights as Weights\n\nGRPC_MAX_MESSAGE_LENGTH: int = 536_870_912 # == 512 * 1024 * 1024\n\n__all__ = [\n \"bytes_to_ndarray\",\n \"Disconnect\",\n \"EvaluateIns\",\n \"EvaluateRes\",\n \"FitIns\",\n \"FitRes\",\n \"GRPC_MAX_MESSAGE_LENGTH\",\n \"ndarray_to_bytes\",\n \"Parameters\",\n \"parameters_to_weights\",\n \"ParametersRes\",\n \"Reconnect\",\n \"Weights\",\n \"weights_to_parameters\",\n]\n", "path": "src/py/flwr/common/__init__.py"}, {"content": "# Copyright 2020 Adap GmbH. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Flower client app.\"\"\"\n\n\nimport time\nfrom logging import INFO\n\nfrom flwr.common import GRPC_MAX_MESSAGE_LENGTH\nfrom flwr.common.logger import log\n\nfrom .client import Client\nfrom .grpc_client.connection import insecure_grpc_connection\nfrom .grpc_client.message_handler import handle\nfrom .keras_client import KerasClient, KerasClientWrapper\n\n\ndef start_client(\n server_address: str,\n client: Client,\n grpc_max_message_length: int = GRPC_MAX_MESSAGE_LENGTH,\n) -> None:\n \"\"\"Start a Flower Client which connects to a gRPC server.\n\n Arguments:\n server_address: str. The IPv6 address of the server. If the Flower\n server runs on the same machine on port 8080, then `server_address`\n would be `\"[::]:8080\"`.\n client: flwr.client.Client. An implementation of the abstract base\n class `flwr.client.Client`.\n grpc_max_message_length: int (default: 536_870_912, this equals 512MB).\n The maximum length of gRPC messages that can be exchanged with the\n Flower server. 
The default should be sufficient for most models.\n Users who train very large models might need to increase this\n value. Note that the Flower server needs to started with the same\n value (see `flwr.server.start_server`), otherwise it will not know\n about the increased limit and block larger messages.\n\n Returns:\n None.\n \"\"\"\n while True:\n sleep_duration: int = 0\n with insecure_grpc_connection(\n server_address, max_message_length=grpc_max_message_length\n ) as conn:\n receive, send = conn\n log(INFO, \"Opened (insecure) gRPC connection\")\n\n while True:\n server_message = receive()\n client_message, sleep_duration, keep_going = handle(\n client, server_message\n )\n send(client_message)\n if not keep_going:\n break\n if sleep_duration == 0:\n log(INFO, \"Disconnect and shut down\")\n break\n # Sleep and reconnect afterwards\n log(\n INFO,\n \"Disconnect, then re-establish connection after %s second(s)\",\n sleep_duration,\n )\n time.sleep(sleep_duration)\n\n\ndef start_keras_client(\n server_address: str,\n client: KerasClient,\n grpc_max_message_length: int = GRPC_MAX_MESSAGE_LENGTH,\n) -> None:\n \"\"\"Start a Flower KerasClient which connects to a gRPC server.\n\n Arguments:\n server_address: str. The IPv6 address of the server. If the Flower\n server runs on the same machine on port 8080, then `server_address`\n would be `\"[::]:8080\"`.\n client: flwr.client.KerasClient. An implementation of the abstract base\n class `flwr.client.KerasClient`.\n grpc_max_message_length: int (default: 536_870_912, this equals 512MB).\n The maximum length of gRPC messages that can be exchanged with the\n Flower server. The default should be sufficient for most models.\n Users who train very large models might need to increase this\n value. Note that the Flower server needs to started with the same\n value (see `flwr.server.start_server`), otherwise it will not know\n about the increased limit and block larger messages.\n\n Returns:\n None.\n \"\"\"\n\n # Wrap the Keras client\n flower_client = KerasClientWrapper(client)\n\n # Start\n start_client(\n server_address=server_address,\n client=flower_client,\n grpc_max_message_length=grpc_max_message_length,\n )\n", "path": "src/py/flwr/client/app.py"}]} | 1,429 | 923 |
gh_patches_debug_12231 | rasdani/github-patches | git_diff | arviz-devs__arviz-1001 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Clarify usage of `plot_joint`
## Short Description
The description of how to use `plot_joint` is not very clear about the use and meaning of parameters.
This is partly a usage question and partly a request for documentation improvement.
## Code Example or link
Looking at the docs for `arviz.plot_joint`, I see this:
```
var_names : Iter of 2 e.g. (var_1, var_2)
Variables to be plotted, two variables are required.
```
But then when I look at the example, I see this:
```
az.plot_joint(data,
>>> var_names=['theta'],
....
```
So … what is an “iter of 2”? `['theta']` is not obviously one, while the docstring's own example shows a tuple of length 2.
I think what happens in this example is that you are plotting the *single* variable `theta` across a *coordinate* of dimension 2, but this is really quite confusing, and could use a much better explanation.
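For illustration (editor's sketch, not part of the original report), both call patterns below are assumed to satisfy the "two marginals" requirement — either two separate variables, or one variable whose selected coordinate has exactly two values. The names mirror the docstring examples quoted further down and require `arviz` with its bundled example datasets:
```python
import arviz as az

data = az.load_arviz_data("non_centered_eight")

# Two distinct scalar variables -> an "iter of 2" in the documented sense
az.plot_joint(data, var_names=("mu", "tau"), kind="kde")

# One variable, but the 'school' coordinate is subset to exactly two values,
# so two marginal distributions are still produced
az.plot_joint(
    data,
    var_names=["theta"],
    coords={"school": ["Choate", "Phillips Andover"]},
    kind="scatter",
    figsize=(6, 6),
)
```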
arviz 0.4.1 (will update!)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `arviz/plots/jointplot.py`
Content:
```
1 """Joint scatter plot of two variables."""
2 from ..data import convert_to_dataset
3 from .plot_utils import _scale_fig_size, xarray_var_iter, get_coords, get_plotting_function
4 from ..utils import _var_names
5
6
7 def plot_joint(
8 data,
9 var_names=None,
10 coords=None,
11 figsize=None,
12 textsize=None,
13 kind="scatter",
14 gridsize="auto",
15 contour=True,
16 fill_last=True,
17 joint_kwargs=None,
18 marginal_kwargs=None,
19 ax=None,
20 backend=None,
21 backend_kwargs=None,
22 show=None,
23 ):
24 """
25 Plot a scatter or hexbin of two variables with their respective marginals distributions.
26
27 Parameters
28 ----------
29 data : obj
30 Any object that can be converted to an az.InferenceData object
31 Refer to documentation of az.convert_to_dataset for details
32 var_names : Iter of 2 e.g. (var_1, var_2)
33 Variables to be plotted, two variables are required.
34 coords : mapping, optional
35 Coordinates of var_names to be plotted. Passed to `Dataset.sel`
36 figsize : tuple
37 Figure size. If None it will be defined automatically.
38 textsize: float
39 Text size scaling factor for labels, titles and lines. If None it will be autoscaled based
40 on figsize.
41 kind : str
42 Type of plot to display (scatter, kde or hexbin)
43 gridsize : int or (int, int), optional.
44 The number of hexagons in the x-direction. Ignored when hexbin is False. See `plt.hexbin`
45 for details
46 contour : bool
47 If True plot the 2D KDE using contours, otherwise plot a smooth 2D KDE. Defaults to True.
48 fill_last : bool
49 If True fill the last contour of the 2D KDE plot. Defaults to True.
50 joint_kwargs : dicts, optional
51 Additional keywords modifying the join distribution (central subplot)
52 marginal_kwargs : dicts, optional
53 Additional keywords modifying the marginals distributions (top and right subplot)
54 ax : tuple of axes, optional
55 Tuple containing (ax_joint, ax_hist_x, ax_hist_y). If None, a new figure and axes
56 will be created. Matplotlib axes or bokeh figures.
57 backend: str, optional
58 Select plotting backend {"matplotlib","bokeh"}. Default "matplotlib".
59 backend_kwargs: bool, optional
60 These are kwargs specific to the backend being used. For additional documentation
61 check the plotting method of the backend.
62 show : bool, optional
63 Call backend show function.
64
65 Returns
66 -------
67 axes : matplotlib axes or bokeh figures
68 ax_joint : joint (central) distribution
69 ax_hist_x : x (top) distribution
70 ax_hist_y : y (right) distribution
71
72 Examples
73 --------
74 Scatter Joint plot
75
76 .. plot::
77 :context: close-figs
78
79 >>> import arviz as az
80 >>> data = az.load_arviz_data('non_centered_eight')
81 >>> az.plot_joint(data,
82 >>> var_names=['theta'],
83 >>> coords={'school': ['Choate', 'Phillips Andover']},
84 >>> kind='scatter',
85 >>> figsize=(6, 6))
86
87 Hexbin Joint plot
88
89 .. plot::
90 :context: close-figs
91
92 >>> az.plot_joint(data,
93 >>> var_names=['theta'],
94 >>> coords={'school': ['Choate', 'Phillips Andover']},
95 >>> kind='hexbin',
96 >>> figsize=(6, 6))
97
98 KDE Joint plot
99
100 .. plot::
101 :context: close-figs
102
103 >>> az.plot_joint(data,
104 >>> var_names=['theta'],
105 >>> coords={'school': ['Choate', 'Phillips Andover']},
106 >>> kind='kde',
107 >>> figsize=(6, 6))
108
109 Overlayed plots:
110
111 .. plot::
112 :context: close-figs
113
114 >>> data2 = az.load_arviz_data("centered_eight")
115 >>> kde_kwargs = {"contourf_kwargs": {"alpha": 0}, "contour_kwargs": {"colors": "k"}}
116 >>> ax = az.plot_joint(
117 ... data, var_names=("mu", "tau"), kind="kde", fill_last=False,
118 ... joint_kwargs=kde_kwargs, marginal_kwargs={"color": "k"}
119 ... )
120 >>> kde_kwargs["contour_kwargs"]["colors"] = "r"
121 >>> az.plot_joint(
122 ... data2, var_names=("mu", "tau"), kind="kde", fill_last=False,
123 ... joint_kwargs=kde_kwargs, marginal_kwargs={"color": "r"}, ax=ax
124 ... )
125
126 """
127 valid_kinds = ["scatter", "kde", "hexbin"]
128 if kind not in valid_kinds:
129 raise ValueError(
130 ("Plot type {} not recognized." "Plot type must be in {}").format(kind, valid_kinds)
131 )
132
133 data = convert_to_dataset(data, group="posterior")
134
135 if coords is None:
136 coords = {}
137
138 var_names = _var_names(var_names, data)
139
140 plotters = list(xarray_var_iter(get_coords(data, coords), var_names=var_names, combined=True))
141
142 if len(plotters) != 2:
143 raise Exception(
144 "Number of variables to be plotted must 2 (you supplied {})".format(len(plotters))
145 )
146
147 figsize, ax_labelsize, _, xt_labelsize, linewidth, _ = _scale_fig_size(figsize, textsize)
148
149 if joint_kwargs is None:
150 joint_kwargs = {}
151
152 if marginal_kwargs is None:
153 marginal_kwargs = {}
154 marginal_kwargs.setdefault("plot_kwargs", {})
155 marginal_kwargs["plot_kwargs"]["linewidth"] = linewidth
156
157 plot_joint_kwargs = dict(
158 ax=ax,
159 figsize=figsize,
160 plotters=plotters,
161 ax_labelsize=ax_labelsize,
162 xt_labelsize=xt_labelsize,
163 kind=kind,
164 contour=contour,
165 fill_last=fill_last,
166 joint_kwargs=joint_kwargs,
167 gridsize=gridsize,
168 marginal_kwargs=marginal_kwargs,
169 backend_kwargs=backend_kwargs,
170 show=show,
171 )
172
173 if backend == "bokeh":
174
175 plot_joint_kwargs.pop("ax_labelsize")
176 plot_joint_kwargs["marginal_kwargs"]["plot_kwargs"]["line_width"] = plot_joint_kwargs[
177 "marginal_kwargs"
178 ]["plot_kwargs"].pop("linewidth")
179
180 # TODO: Add backend kwargs
181 plot = get_plotting_function("plot_joint", "jointplot", backend)
182 axes = plot(**plot_joint_kwargs)
183 return axes
184
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/arviz/plots/jointplot.py b/arviz/plots/jointplot.py
--- a/arviz/plots/jointplot.py
+++ b/arviz/plots/jointplot.py
@@ -29,8 +29,9 @@
data : obj
Any object that can be converted to an az.InferenceData object
Refer to documentation of az.convert_to_dataset for details
- var_names : Iter of 2 e.g. (var_1, var_2)
- Variables to be plotted, two variables are required.
+ var_names : str or iterable of str
+ Variables to be plotted. iter of two variables or one variable (with subset having
+ exactly 2 dimensions) are required.
coords : mapping, optional
Coordinates of var_names to be plotted. Passed to `Dataset.sel`
figsize : tuple
| {"golden_diff": "diff --git a/arviz/plots/jointplot.py b/arviz/plots/jointplot.py\n--- a/arviz/plots/jointplot.py\n+++ b/arviz/plots/jointplot.py\n@@ -29,8 +29,9 @@\n data : obj\n Any object that can be converted to an az.InferenceData object\n Refer to documentation of az.convert_to_dataset for details\n- var_names : Iter of 2 e.g. (var_1, var_2)\n- Variables to be plotted, two variables are required.\n+ var_names : str or iterable of str\n+ Variables to be plotted. iter of two variables or one variable (with subset having\n+ exactly 2 dimensions) are required.\n coords : mapping, optional\n Coordinates of var_names to be plotted. Passed to `Dataset.sel`\n figsize : tuple\n", "issue": "Clarify usage of `plot_joint`\n## Short Description\r\n\r\nThe description of how to use `plot_joint` is not very clear about the use and meaning of parameters.\r\n\r\nThis is partly a usage question and partly a request for documentation improvement.\r\n\r\n## Code Example or link\r\n\r\nLooking at the docs for `arviz.plot_joint`, I see this: \r\n```\r\nvar_names : Iter of 2 e.g. (var_1, var_2)\r\n Variables to be plotted, two variables are required.\r\n```\r\nBut then when I look at the example, I see this:\r\n```\r\naz.plot_joint(data,\r\n>>> var_names=['theta'],\r\n....\r\n```\r\nSo \u2026 what is an \u201citer of 2\u201d? `['theta']` is not obviously one, and the example gives a tuple of length 2.\r\nI think what happens in this example is that you are plotting the *single* variable `theta` across a *coordinate* of dimension 2, but this is really quite confusing, and could use a much better explanation.\r\n\r\narviz 0.4.1 (will update!)\r\n\r\n\n", "before_files": [{"content": "\"\"\"Joint scatter plot of two variables.\"\"\"\nfrom ..data import convert_to_dataset\nfrom .plot_utils import _scale_fig_size, xarray_var_iter, get_coords, get_plotting_function\nfrom ..utils import _var_names\n\n\ndef plot_joint(\n data,\n var_names=None,\n coords=None,\n figsize=None,\n textsize=None,\n kind=\"scatter\",\n gridsize=\"auto\",\n contour=True,\n fill_last=True,\n joint_kwargs=None,\n marginal_kwargs=None,\n ax=None,\n backend=None,\n backend_kwargs=None,\n show=None,\n):\n \"\"\"\n Plot a scatter or hexbin of two variables with their respective marginals distributions.\n\n Parameters\n ----------\n data : obj\n Any object that can be converted to an az.InferenceData object\n Refer to documentation of az.convert_to_dataset for details\n var_names : Iter of 2 e.g. (var_1, var_2)\n Variables to be plotted, two variables are required.\n coords : mapping, optional\n Coordinates of var_names to be plotted. Passed to `Dataset.sel`\n figsize : tuple\n Figure size. If None it will be defined automatically.\n textsize: float\n Text size scaling factor for labels, titles and lines. If None it will be autoscaled based\n on figsize.\n kind : str\n Type of plot to display (scatter, kde or hexbin)\n gridsize : int or (int, int), optional.\n The number of hexagons in the x-direction. Ignored when hexbin is False. See `plt.hexbin`\n for details\n contour : bool\n If True plot the 2D KDE using contours, otherwise plot a smooth 2D KDE. Defaults to True.\n fill_last : bool\n If True fill the last contour of the 2D KDE plot. 
Defaults to True.\n joint_kwargs : dicts, optional\n Additional keywords modifying the join distribution (central subplot)\n marginal_kwargs : dicts, optional\n Additional keywords modifying the marginals distributions (top and right subplot)\n ax : tuple of axes, optional\n Tuple containing (ax_joint, ax_hist_x, ax_hist_y). If None, a new figure and axes\n will be created. Matplotlib axes or bokeh figures.\n backend: str, optional\n Select plotting backend {\"matplotlib\",\"bokeh\"}. Default \"matplotlib\".\n backend_kwargs: bool, optional\n These are kwargs specific to the backend being used. For additional documentation\n check the plotting method of the backend.\n show : bool, optional\n Call backend show function.\n\n Returns\n -------\n axes : matplotlib axes or bokeh figures\n ax_joint : joint (central) distribution\n ax_hist_x : x (top) distribution\n ax_hist_y : y (right) distribution\n\n Examples\n --------\n Scatter Joint plot\n\n .. plot::\n :context: close-figs\n\n >>> import arviz as az\n >>> data = az.load_arviz_data('non_centered_eight')\n >>> az.plot_joint(data,\n >>> var_names=['theta'],\n >>> coords={'school': ['Choate', 'Phillips Andover']},\n >>> kind='scatter',\n >>> figsize=(6, 6))\n\n Hexbin Joint plot\n\n .. plot::\n :context: close-figs\n\n >>> az.plot_joint(data,\n >>> var_names=['theta'],\n >>> coords={'school': ['Choate', 'Phillips Andover']},\n >>> kind='hexbin',\n >>> figsize=(6, 6))\n\n KDE Joint plot\n\n .. plot::\n :context: close-figs\n\n >>> az.plot_joint(data,\n >>> var_names=['theta'],\n >>> coords={'school': ['Choate', 'Phillips Andover']},\n >>> kind='kde',\n >>> figsize=(6, 6))\n\n Overlayed plots:\n\n .. plot::\n :context: close-figs\n\n >>> data2 = az.load_arviz_data(\"centered_eight\")\n >>> kde_kwargs = {\"contourf_kwargs\": {\"alpha\": 0}, \"contour_kwargs\": {\"colors\": \"k\"}}\n >>> ax = az.plot_joint(\n ... data, var_names=(\"mu\", \"tau\"), kind=\"kde\", fill_last=False,\n ... joint_kwargs=kde_kwargs, marginal_kwargs={\"color\": \"k\"}\n ... )\n >>> kde_kwargs[\"contour_kwargs\"][\"colors\"] = \"r\"\n >>> az.plot_joint(\n ... data2, var_names=(\"mu\", \"tau\"), kind=\"kde\", fill_last=False,\n ... joint_kwargs=kde_kwargs, marginal_kwargs={\"color\": \"r\"}, ax=ax\n ... 
)\n\n \"\"\"\n valid_kinds = [\"scatter\", \"kde\", \"hexbin\"]\n if kind not in valid_kinds:\n raise ValueError(\n (\"Plot type {} not recognized.\" \"Plot type must be in {}\").format(kind, valid_kinds)\n )\n\n data = convert_to_dataset(data, group=\"posterior\")\n\n if coords is None:\n coords = {}\n\n var_names = _var_names(var_names, data)\n\n plotters = list(xarray_var_iter(get_coords(data, coords), var_names=var_names, combined=True))\n\n if len(plotters) != 2:\n raise Exception(\n \"Number of variables to be plotted must 2 (you supplied {})\".format(len(plotters))\n )\n\n figsize, ax_labelsize, _, xt_labelsize, linewidth, _ = _scale_fig_size(figsize, textsize)\n\n if joint_kwargs is None:\n joint_kwargs = {}\n\n if marginal_kwargs is None:\n marginal_kwargs = {}\n marginal_kwargs.setdefault(\"plot_kwargs\", {})\n marginal_kwargs[\"plot_kwargs\"][\"linewidth\"] = linewidth\n\n plot_joint_kwargs = dict(\n ax=ax,\n figsize=figsize,\n plotters=plotters,\n ax_labelsize=ax_labelsize,\n xt_labelsize=xt_labelsize,\n kind=kind,\n contour=contour,\n fill_last=fill_last,\n joint_kwargs=joint_kwargs,\n gridsize=gridsize,\n marginal_kwargs=marginal_kwargs,\n backend_kwargs=backend_kwargs,\n show=show,\n )\n\n if backend == \"bokeh\":\n\n plot_joint_kwargs.pop(\"ax_labelsize\")\n plot_joint_kwargs[\"marginal_kwargs\"][\"plot_kwargs\"][\"line_width\"] = plot_joint_kwargs[\n \"marginal_kwargs\"\n ][\"plot_kwargs\"].pop(\"linewidth\")\n\n # TODO: Add backend kwargs\n plot = get_plotting_function(\"plot_joint\", \"jointplot\", backend)\n axes = plot(**plot_joint_kwargs)\n return axes\n", "path": "arviz/plots/jointplot.py"}], "after_files": [{"content": "\"\"\"Joint scatter plot of two variables.\"\"\"\nfrom ..data import convert_to_dataset\nfrom .plot_utils import _scale_fig_size, xarray_var_iter, get_coords, get_plotting_function\nfrom ..utils import _var_names\n\n\ndef plot_joint(\n data,\n var_names=None,\n coords=None,\n figsize=None,\n textsize=None,\n kind=\"scatter\",\n gridsize=\"auto\",\n contour=True,\n fill_last=True,\n joint_kwargs=None,\n marginal_kwargs=None,\n ax=None,\n backend=None,\n backend_kwargs=None,\n show=None,\n):\n \"\"\"\n Plot a scatter or hexbin of two variables with their respective marginals distributions.\n\n Parameters\n ----------\n data : obj\n Any object that can be converted to an az.InferenceData object\n Refer to documentation of az.convert_to_dataset for details\n var_names : str or iterable of str\n Variables to be plotted. iter of two variables or one variable (with subset having\n exactly 2 dimensions) are required.\n coords : mapping, optional\n Coordinates of var_names to be plotted. Passed to `Dataset.sel`\n figsize : tuple\n Figure size. If None it will be defined automatically.\n textsize: float\n Text size scaling factor for labels, titles and lines. If None it will be autoscaled based\n on figsize.\n kind : str\n Type of plot to display (scatter, kde or hexbin)\n gridsize : int or (int, int), optional.\n The number of hexagons in the x-direction. Ignored when hexbin is False. See `plt.hexbin`\n for details\n contour : bool\n If True plot the 2D KDE using contours, otherwise plot a smooth 2D KDE. Defaults to True.\n fill_last : bool\n If True fill the last contour of the 2D KDE plot. 
Defaults to True.\n joint_kwargs : dicts, optional\n Additional keywords modifying the join distribution (central subplot)\n marginal_kwargs : dicts, optional\n Additional keywords modifying the marginals distributions (top and right subplot)\n ax : tuple of axes, optional\n Tuple containing (ax_joint, ax_hist_x, ax_hist_y). If None, a new figure and axes\n will be created. Matplotlib axes or bokeh figures.\n backend: str, optional\n Select plotting backend {\"matplotlib\",\"bokeh\"}. Default \"matplotlib\".\n backend_kwargs: bool, optional\n These are kwargs specific to the backend being used. For additional documentation\n check the plotting method of the backend.\n show : bool, optional\n Call backend show function.\n\n Returns\n -------\n axes : matplotlib axes or bokeh figures\n ax_joint : joint (central) distribution\n ax_hist_x : x (top) distribution\n ax_hist_y : y (right) distribution\n\n Examples\n --------\n Scatter Joint plot\n\n .. plot::\n :context: close-figs\n\n >>> import arviz as az\n >>> data = az.load_arviz_data('non_centered_eight')\n >>> az.plot_joint(data,\n >>> var_names=['theta'],\n >>> coords={'school': ['Choate', 'Phillips Andover']},\n >>> kind='scatter',\n >>> figsize=(6, 6))\n\n Hexbin Joint plot\n\n .. plot::\n :context: close-figs\n\n >>> az.plot_joint(data,\n >>> var_names=['theta'],\n >>> coords={'school': ['Choate', 'Phillips Andover']},\n >>> kind='hexbin',\n >>> figsize=(6, 6))\n\n KDE Joint plot\n\n .. plot::\n :context: close-figs\n\n >>> az.plot_joint(data,\n >>> var_names=['theta'],\n >>> coords={'school': ['Choate', 'Phillips Andover']},\n >>> kind='kde',\n >>> figsize=(6, 6))\n\n Overlayed plots:\n\n .. plot::\n :context: close-figs\n\n >>> data2 = az.load_arviz_data(\"centered_eight\")\n >>> kde_kwargs = {\"contourf_kwargs\": {\"alpha\": 0}, \"contour_kwargs\": {\"colors\": \"k\"}}\n >>> ax = az.plot_joint(\n ... data, var_names=(\"mu\", \"tau\"), kind=\"kde\", fill_last=False,\n ... joint_kwargs=kde_kwargs, marginal_kwargs={\"color\": \"k\"}\n ... )\n >>> kde_kwargs[\"contour_kwargs\"][\"colors\"] = \"r\"\n >>> az.plot_joint(\n ... data2, var_names=(\"mu\", \"tau\"), kind=\"kde\", fill_last=False,\n ... joint_kwargs=kde_kwargs, marginal_kwargs={\"color\": \"r\"}, ax=ax\n ... 
)\n\n \"\"\"\n valid_kinds = [\"scatter\", \"kde\", \"hexbin\"]\n if kind not in valid_kinds:\n raise ValueError(\n (\"Plot type {} not recognized.\" \"Plot type must be in {}\").format(kind, valid_kinds)\n )\n\n data = convert_to_dataset(data, group=\"posterior\")\n\n if coords is None:\n coords = {}\n\n var_names = _var_names(var_names, data)\n\n plotters = list(xarray_var_iter(get_coords(data, coords), var_names=var_names, combined=True))\n\n if len(plotters) != 2:\n raise Exception(\n \"Number of variables to be plotted must 2 (you supplied {})\".format(len(plotters))\n )\n\n figsize, ax_labelsize, _, xt_labelsize, linewidth, _ = _scale_fig_size(figsize, textsize)\n\n if joint_kwargs is None:\n joint_kwargs = {}\n\n if marginal_kwargs is None:\n marginal_kwargs = {}\n marginal_kwargs.setdefault(\"plot_kwargs\", {})\n marginal_kwargs[\"plot_kwargs\"][\"linewidth\"] = linewidth\n\n plot_joint_kwargs = dict(\n ax=ax,\n figsize=figsize,\n plotters=plotters,\n ax_labelsize=ax_labelsize,\n xt_labelsize=xt_labelsize,\n kind=kind,\n contour=contour,\n fill_last=fill_last,\n joint_kwargs=joint_kwargs,\n gridsize=gridsize,\n marginal_kwargs=marginal_kwargs,\n backend_kwargs=backend_kwargs,\n show=show,\n )\n\n if backend == \"bokeh\":\n\n plot_joint_kwargs.pop(\"ax_labelsize\")\n plot_joint_kwargs[\"marginal_kwargs\"][\"plot_kwargs\"][\"line_width\"] = plot_joint_kwargs[\n \"marginal_kwargs\"\n ][\"plot_kwargs\"].pop(\"linewidth\")\n\n # TODO: Add backend kwargs\n plot = get_plotting_function(\"plot_joint\", \"jointplot\", backend)\n axes = plot(**plot_joint_kwargs)\n return axes\n", "path": "arviz/plots/jointplot.py"}]} | 2,388 | 188 |
gh_patches_debug_6417 | rasdani/github-patches | git_diff | kornia__kornia-694 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Broken link "InvDepthSmoothnessLoss"
## 📚 Documentation
In the documentation of this function https://kornia.readthedocs.io/en/latest/losses.html#kornia.losses.inverse_depth_smoothness_loss, the link is broken.
```See InvDepthSmoothnessLoss for details.```
The error is due to an incorrect name. The correct name should be `InverseDepthSmoothnessLoss`.
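For reference, the corrected cross-reference in the function's docstring (matching the patch shown below) would read roughly:
```python
def inverse_depth_smoothness_loss(idepth, image):
    r"""Computes image-aware inverse depth smoothness loss.

    See :class:`~kornia.losses.InverseDepthSmoothnessLoss` for details.
    """
    ...
```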
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `kornia/losses/depth_smooth.py`
Content:
```
1 import torch
2 import torch.nn as nn
3 import torch.nn.functional as F
4
5 # Based on
6 # https://github.com/tensorflow/models/blob/master/research/struct2depth/model.py#L625-L641
7
8
9 def _gradient_x(img: torch.Tensor) -> torch.Tensor:
10 assert len(img.shape) == 4, img.shape
11 return img[:, :, :, :-1] - img[:, :, :, 1:]
12
13
14 def _gradient_y(img: torch.Tensor) -> torch.Tensor:
15 assert len(img.shape) == 4, img.shape
16 return img[:, :, :-1, :] - img[:, :, 1:, :]
17
18
19 def inverse_depth_smoothness_loss(
20 idepth: torch.Tensor,
21 image: torch.Tensor) -> torch.Tensor:
22 r"""Computes image-aware inverse depth smoothness loss.
23
24 See :class:`~kornia.losses.InvDepthSmoothnessLoss` for details.
25 """
26 if not torch.is_tensor(idepth):
27 raise TypeError("Input idepth type is not a torch.Tensor. Got {}"
28 .format(type(idepth)))
29
30 if not torch.is_tensor(image):
31 raise TypeError("Input image type is not a torch.Tensor. Got {}"
32 .format(type(image)))
33
34 if not len(idepth.shape) == 4:
35 raise ValueError("Invalid idepth shape, we expect BxCxHxW. Got: {}"
36 .format(idepth.shape))
37
38 if not len(image.shape) == 4:
39 raise ValueError("Invalid image shape, we expect BxCxHxW. Got: {}"
40 .format(image.shape))
41
42 if not idepth.shape[-2:] == image.shape[-2:]:
43 raise ValueError("idepth and image shapes must be the same. Got: {} and {}"
44 .format(idepth.shape, image.shape))
45
46 if not idepth.device == image.device:
47 raise ValueError(
48 "idepth and image must be in the same device. Got: {} and {}" .format(
49 idepth.device, image.device))
50
51 if not idepth.dtype == image.dtype:
52 raise ValueError(
53 "idepth and image must be in the same dtype. Got: {} and {}" .format(
54 idepth.dtype, image.dtype))
55
56 # compute the gradients
57 idepth_dx: torch.Tensor = _gradient_x(idepth)
58 idepth_dy: torch.Tensor = _gradient_y(idepth)
59 image_dx: torch.Tensor = _gradient_x(image)
60 image_dy: torch.Tensor = _gradient_y(image)
61
62 # compute image weights
63 weights_x: torch.Tensor = torch.exp(
64 -torch.mean(torch.abs(image_dx), dim=1, keepdim=True))
65 weights_y: torch.Tensor = torch.exp(
66 -torch.mean(torch.abs(image_dy), dim=1, keepdim=True))
67
68 # apply image weights to depth
69 smoothness_x: torch.Tensor = torch.abs(idepth_dx * weights_x)
70 smoothness_y: torch.Tensor = torch.abs(idepth_dy * weights_y)
71 return torch.mean(smoothness_x) + torch.mean(smoothness_y)
72
73
74 class InverseDepthSmoothnessLoss(nn.Module):
75 r"""Criterion that computes image-aware inverse depth smoothness loss.
76
77 .. math::
78
79 \text{loss} = \left | \partial_x d_{ij} \right | e^{-\left \|
80 \partial_x I_{ij} \right \|} + \left |
81 \partial_y d_{ij} \right | e^{-\left \| \partial_y I_{ij} \right \|}
82
83
84 Shape:
85 - Inverse Depth: :math:`(N, 1, H, W)`
86 - Image: :math:`(N, 3, H, W)`
87 - Output: scalar
88
89 Examples::
90
91 >>> idepth = torch.rand(1, 1, 4, 5)
92 >>> image = torch.rand(1, 3, 4, 5)
93 >>> smooth = kornia.losses.DepthSmoothnessLoss()
94 >>> loss = smooth(idepth, image)
95 """
96
97 def __init__(self) -> None:
98 super(InverseDepthSmoothnessLoss, self).__init__()
99
100 def forward(self, idepth: torch.Tensor, image: torch.Tensor) -> torch.Tensor: # type:ignore
101 return inverse_depth_smoothness_loss(idepth, image)
102
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/kornia/losses/depth_smooth.py b/kornia/losses/depth_smooth.py
--- a/kornia/losses/depth_smooth.py
+++ b/kornia/losses/depth_smooth.py
@@ -21,7 +21,7 @@
image: torch.Tensor) -> torch.Tensor:
r"""Computes image-aware inverse depth smoothness loss.
- See :class:`~kornia.losses.InvDepthSmoothnessLoss` for details.
+ See :class:`~kornia.losses.InverseDepthSmoothnessLoss` for details.
"""
if not torch.is_tensor(idepth):
raise TypeError("Input idepth type is not a torch.Tensor. Got {}"
| {"golden_diff": "diff --git a/kornia/losses/depth_smooth.py b/kornia/losses/depth_smooth.py\n--- a/kornia/losses/depth_smooth.py\n+++ b/kornia/losses/depth_smooth.py\n@@ -21,7 +21,7 @@\n image: torch.Tensor) -> torch.Tensor:\n r\"\"\"Computes image-aware inverse depth smoothness loss.\n \n- See :class:`~kornia.losses.InvDepthSmoothnessLoss` for details.\n+ See :class:`~kornia.losses.InverseDepthSmoothnessLoss` for details.\n \"\"\"\n if not torch.is_tensor(idepth):\n raise TypeError(\"Input idepth type is not a torch.Tensor. Got {}\"\n", "issue": "Broken link \"InvDepthSmoothnessLoss\"\n## \ud83d\udcda Documentation\r\n\r\nIn the documentation of this function https://kornia.readthedocs.io/en/latest/losses.html#kornia.losses.inverse_depth_smoothness_loss, the link is broken.\r\n\r\n```See InvDepthSmoothnessLoss for details.```\r\n\r\nThe error is due to an incorrect name. The correct name should be `InverseDepthSmoothnessLoss`.\r\n\r\n\n", "before_files": [{"content": "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n# Based on\n# https://github.com/tensorflow/models/blob/master/research/struct2depth/model.py#L625-L641\n\n\ndef _gradient_x(img: torch.Tensor) -> torch.Tensor:\n assert len(img.shape) == 4, img.shape\n return img[:, :, :, :-1] - img[:, :, :, 1:]\n\n\ndef _gradient_y(img: torch.Tensor) -> torch.Tensor:\n assert len(img.shape) == 4, img.shape\n return img[:, :, :-1, :] - img[:, :, 1:, :]\n\n\ndef inverse_depth_smoothness_loss(\n idepth: torch.Tensor,\n image: torch.Tensor) -> torch.Tensor:\n r\"\"\"Computes image-aware inverse depth smoothness loss.\n\n See :class:`~kornia.losses.InvDepthSmoothnessLoss` for details.\n \"\"\"\n if not torch.is_tensor(idepth):\n raise TypeError(\"Input idepth type is not a torch.Tensor. Got {}\"\n .format(type(idepth)))\n\n if not torch.is_tensor(image):\n raise TypeError(\"Input image type is not a torch.Tensor. Got {}\"\n .format(type(image)))\n\n if not len(idepth.shape) == 4:\n raise ValueError(\"Invalid idepth shape, we expect BxCxHxW. Got: {}\"\n .format(idepth.shape))\n\n if not len(image.shape) == 4:\n raise ValueError(\"Invalid image shape, we expect BxCxHxW. Got: {}\"\n .format(image.shape))\n\n if not idepth.shape[-2:] == image.shape[-2:]:\n raise ValueError(\"idepth and image shapes must be the same. Got: {} and {}\"\n .format(idepth.shape, image.shape))\n\n if not idepth.device == image.device:\n raise ValueError(\n \"idepth and image must be in the same device. Got: {} and {}\" .format(\n idepth.device, image.device))\n\n if not idepth.dtype == image.dtype:\n raise ValueError(\n \"idepth and image must be in the same dtype. Got: {} and {}\" .format(\n idepth.dtype, image.dtype))\n\n # compute the gradients\n idepth_dx: torch.Tensor = _gradient_x(idepth)\n idepth_dy: torch.Tensor = _gradient_y(idepth)\n image_dx: torch.Tensor = _gradient_x(image)\n image_dy: torch.Tensor = _gradient_y(image)\n\n # compute image weights\n weights_x: torch.Tensor = torch.exp(\n -torch.mean(torch.abs(image_dx), dim=1, keepdim=True))\n weights_y: torch.Tensor = torch.exp(\n -torch.mean(torch.abs(image_dy), dim=1, keepdim=True))\n\n # apply image weights to depth\n smoothness_x: torch.Tensor = torch.abs(idepth_dx * weights_x)\n smoothness_y: torch.Tensor = torch.abs(idepth_dy * weights_y)\n return torch.mean(smoothness_x) + torch.mean(smoothness_y)\n\n\nclass InverseDepthSmoothnessLoss(nn.Module):\n r\"\"\"Criterion that computes image-aware inverse depth smoothness loss.\n\n .. 
math::\n\n \\text{loss} = \\left | \\partial_x d_{ij} \\right | e^{-\\left \\|\n \\partial_x I_{ij} \\right \\|} + \\left |\n \\partial_y d_{ij} \\right | e^{-\\left \\| \\partial_y I_{ij} \\right \\|}\n\n\n Shape:\n - Inverse Depth: :math:`(N, 1, H, W)`\n - Image: :math:`(N, 3, H, W)`\n - Output: scalar\n\n Examples::\n\n >>> idepth = torch.rand(1, 1, 4, 5)\n >>> image = torch.rand(1, 3, 4, 5)\n >>> smooth = kornia.losses.DepthSmoothnessLoss()\n >>> loss = smooth(idepth, image)\n \"\"\"\n\n def __init__(self) -> None:\n super(InverseDepthSmoothnessLoss, self).__init__()\n\n def forward(self, idepth: torch.Tensor, image: torch.Tensor) -> torch.Tensor: # type:ignore\n return inverse_depth_smoothness_loss(idepth, image)\n", "path": "kornia/losses/depth_smooth.py"}], "after_files": [{"content": "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n# Based on\n# https://github.com/tensorflow/models/blob/master/research/struct2depth/model.py#L625-L641\n\n\ndef _gradient_x(img: torch.Tensor) -> torch.Tensor:\n assert len(img.shape) == 4, img.shape\n return img[:, :, :, :-1] - img[:, :, :, 1:]\n\n\ndef _gradient_y(img: torch.Tensor) -> torch.Tensor:\n assert len(img.shape) == 4, img.shape\n return img[:, :, :-1, :] - img[:, :, 1:, :]\n\n\ndef inverse_depth_smoothness_loss(\n idepth: torch.Tensor,\n image: torch.Tensor) -> torch.Tensor:\n r\"\"\"Computes image-aware inverse depth smoothness loss.\n\n See :class:`~kornia.losses.InverseDepthSmoothnessLoss` for details.\n \"\"\"\n if not torch.is_tensor(idepth):\n raise TypeError(\"Input idepth type is not a torch.Tensor. Got {}\"\n .format(type(idepth)))\n\n if not torch.is_tensor(image):\n raise TypeError(\"Input image type is not a torch.Tensor. Got {}\"\n .format(type(image)))\n\n if not len(idepth.shape) == 4:\n raise ValueError(\"Invalid idepth shape, we expect BxCxHxW. Got: {}\"\n .format(idepth.shape))\n\n if not len(image.shape) == 4:\n raise ValueError(\"Invalid image shape, we expect BxCxHxW. Got: {}\"\n .format(image.shape))\n\n if not idepth.shape[-2:] == image.shape[-2:]:\n raise ValueError(\"idepth and image shapes must be the same. Got: {} and {}\"\n .format(idepth.shape, image.shape))\n\n if not idepth.device == image.device:\n raise ValueError(\n \"idepth and image must be in the same device. Got: {} and {}\" .format(\n idepth.device, image.device))\n\n if not idepth.dtype == image.dtype:\n raise ValueError(\n \"idepth and image must be in the same dtype. Got: {} and {}\" .format(\n idepth.dtype, image.dtype))\n\n # compute the gradients\n idepth_dx: torch.Tensor = _gradient_x(idepth)\n idepth_dy: torch.Tensor = _gradient_y(idepth)\n image_dx: torch.Tensor = _gradient_x(image)\n image_dy: torch.Tensor = _gradient_y(image)\n\n # compute image weights\n weights_x: torch.Tensor = torch.exp(\n -torch.mean(torch.abs(image_dx), dim=1, keepdim=True))\n weights_y: torch.Tensor = torch.exp(\n -torch.mean(torch.abs(image_dy), dim=1, keepdim=True))\n\n # apply image weights to depth\n smoothness_x: torch.Tensor = torch.abs(idepth_dx * weights_x)\n smoothness_y: torch.Tensor = torch.abs(idepth_dy * weights_y)\n return torch.mean(smoothness_x) + torch.mean(smoothness_y)\n\n\nclass InverseDepthSmoothnessLoss(nn.Module):\n r\"\"\"Criterion that computes image-aware inverse depth smoothness loss.\n\n .. 
math::\n\n \\text{loss} = \\left | \\partial_x d_{ij} \\right | e^{-\\left \\|\n \\partial_x I_{ij} \\right \\|} + \\left |\n \\partial_y d_{ij} \\right | e^{-\\left \\| \\partial_y I_{ij} \\right \\|}\n\n\n Shape:\n - Inverse Depth: :math:`(N, 1, H, W)`\n - Image: :math:`(N, 3, H, W)`\n - Output: scalar\n\n Examples::\n\n >>> idepth = torch.rand(1, 1, 4, 5)\n >>> image = torch.rand(1, 3, 4, 5)\n >>> smooth = kornia.losses.DepthSmoothnessLoss()\n >>> loss = smooth(idepth, image)\n \"\"\"\n\n def __init__(self) -> None:\n super(InverseDepthSmoothnessLoss, self).__init__()\n\n def forward(self, idepth: torch.Tensor, image: torch.Tensor) -> torch.Tensor: # type:ignore\n return inverse_depth_smoothness_loss(idepth, image)\n", "path": "kornia/losses/depth_smooth.py"}]} | 1,503 | 158 |
gh_patches_debug_13437 | rasdani/github-patches | git_diff | kserve__kserve-2878 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Stop Error When disabled_grpc=True
/kind bug
**What steps did you take and what happened:**
In our use case we don't use the grpc_server inside `ModelServer`, so we set `enable_grpc=False`. But when the server stops, it still calls `stop` on this unstarted gRPC server, which produces the error below:
```
> await self._server.stop(grace=10)
E AttributeError: 'NoneType' object has no attribute 'stop'
```
The bug is that this [if](https://github.com/kserve/kserve/blob/master/python/kserve/kserve/model_server.py#L174) always evaluates to `True`. Can we please add a one-line change to support `enable_grpc=False`? Maybe change it to `if self._grpc_server._server` or `if self.enable_grpc`? It should be a very quick fix and would help a lot! Thank you!
**What did you expect to happen:**
A one-line change to the [code](https://github.com/kserve/kserve/blob/master/python/kserve/kserve/model_server.py#L174), e.g. `if self._grpc_server._server` or `if self.enable_grpc`.
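For illustration (editor's sketch with hypothetical stub classes, not code from the kserve repo), either guard suggested above avoids calling `stop()` on a gRPC server that was never created:
```python
import asyncio
import logging
from typing import Optional


class _RestServerStub:          # hypothetical stand-in for UvicornServer
    async def stop(self):
        logging.info("rest server stopped")


class _GrpcServerStub:          # hypothetical stand-in for GRPCServer
    async def stop(self, sig=None):
        logging.info("grpc server stopped")


class ModelServerSketch:
    def __init__(self, enable_grpc: bool = False):
        self.enable_grpc = enable_grpc
        self._rest_server = _RestServerStub()
        # Only build the gRPC server when it is enabled, so the attribute
        # stays None (falsy) otherwise.
        self._grpc_server = _GrpcServerStub() if enable_grpc else None

    async def stop(self, sig: Optional[int] = None):
        if self._rest_server:
            await self._rest_server.stop()
        # With the construction guard above, a plain truthiness check suffices;
        # `if self.enable_grpc` (as suggested in the report) works as well.
        if self._grpc_server:
            await self._grpc_server.stop(sig)


asyncio.run(ModelServerSketch(enable_grpc=False).stop())  # no AttributeError
```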
**What's the InferenceService yaml:**
[To help us debug please run `kubectl get isvc $name -n $namespace -oyaml` and paste the output]
**Anything else you would like to add:**
[Miscellaneous information that will assist in solving the issue.]
**Environment:**
- Istio Version:
- Knative Version:
- KServe Version:
- Kubeflow version:
- Cloud Environment:[k8s_istio/istio_dex/gcp_basic_auth/gcp_iap/aws/aws_cognito/ibm]
- Minikube/Kind version:
- Kubernetes version: (use `kubectl version`):
- OS (e.g. from `/etc/os-release`):
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `python/kserve/kserve/model_server.py`
Content:
```
1 # Copyright 2022 The KServe Authors.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import argparse
16 import asyncio
17 import concurrent.futures
18 import logging
19 import signal
20 import socket
21 from distutils.util import strtobool
22 from typing import List, Dict, Union, Optional
23
24 from ray import serve as rayserve
25 from ray.serve.api import Deployment, RayServeHandle
26
27 from .protocol.grpc.server import GRPCServer
28 from .protocol.rest.server import UvicornServer
29 from .utils import utils
30 import multiprocessing
31 from multiprocessing import Process
32
33 from .model import Model
34 from .protocol.dataplane import DataPlane
35 from .protocol.model_repository_extension import ModelRepositoryExtension
36 from .model_repository import ModelRepository
37
38 DEFAULT_HTTP_PORT = 8080
39 DEFAULT_GRPC_PORT = 8081
40
41 parser = argparse.ArgumentParser(add_help=False)
42 parser.add_argument("--http_port", default=DEFAULT_HTTP_PORT, type=int,
43 help="The HTTP Port listened to by the model server.")
44 parser.add_argument("--grpc_port", default=DEFAULT_GRPC_PORT, type=int,
45 help="The GRPC Port listened to by the model server.")
46 parser.add_argument("--workers", default=1, type=int,
47 help="The number of workers for multi-processing.")
48 parser.add_argument("--max_threads", default=4, type=int,
49 help="The number of max processing threads in each worker.")
50 parser.add_argument('--max_asyncio_workers', default=None, type=int,
51 help='Max number of asyncio workers to spawn')
52 parser.add_argument("--enable_grpc", default=True, type=lambda x: bool(strtobool(x)),
53 help="Enable gRPC for the model server")
54 parser.add_argument("--enable_docs_url", default=False, type=lambda x: bool(strtobool(x)),
55 help="Enable docs url '/docs' to display Swagger UI.")
56 parser.add_argument("--enable_latency_logging", default=True, type=lambda x: bool(strtobool(x)),
57 help="Output a log per request with latency metrics.")
58 parser.add_argument("--log_config_file", default=None, type=str,
59 help="File path containing UvicornServer's log config. Needs to be a yaml or json file.")
60 parser.add_argument("--access_log_format", default=None, type=str,
61 help="Format to set for the access log (provided by asgi-logger).")
62
63 args, _ = parser.parse_known_args()
64
65 FORMAT = '%(asctime)s.%(msecs)03d %(process)s %(name)s %(levelname)s [%(funcName)s():%(lineno)s] %(message)s'
66 DATE_FORMAT = "%Y-%m-%d %H:%M:%S"
67 logging.basicConfig(level=logging.INFO, format=FORMAT, datefmt=DATE_FORMAT)
68
69
70 class ModelServer:
71 """KServe ModelServer
72
73 Args:
74 http_port (int): HTTP port. Default: ``8080``.
75 grpc_port (int): GRPC port. Default: ``8081``.
76 workers (int): Number of workers for uvicorn. Default: ``1``.
77 max_threads (int): Max number of processing threads. Default: ``4``
78 max_asyncio_workers (int): Max number of AsyncIO threads. Default: ``None``
79 registered_models (ModelRepository): Model repository with registered models.
80 enable_grpc (bool): Whether to turn on grpc server. Default: ``True``
81 enable_docs_url (bool): Whether to turn on ``/docs`` Swagger UI. Default: ``False``.
82 enable_latency_logging (bool): Whether to log latency metric. Default: ``True``.
83 log_config_file (dict): File path containing UvicornServer's log config. Default: ``None``.
84 access_log_format (string): Format to set for the access log (provided by asgi-logger). Default: ``None``
85 """
86
87 def __init__(self, http_port: int = args.http_port,
88 grpc_port: int = args.grpc_port,
89 workers: int = args.workers,
90 max_threads: int = args.max_threads,
91 max_asyncio_workers: int = args.max_asyncio_workers,
92 registered_models: ModelRepository = ModelRepository(),
93 enable_grpc: bool = args.enable_grpc,
94 enable_docs_url: bool = args.enable_docs_url,
95 enable_latency_logging: bool = args.enable_latency_logging,
96 log_config_file: str = args.log_config_file,
97 access_log_format: str = args.access_log_format):
98 self.registered_models = registered_models
99 self.http_port = http_port
100 self.grpc_port = grpc_port
101 self.workers = workers
102 self.max_threads = max_threads
103 self.max_asyncio_workers = max_asyncio_workers
104 self.enable_grpc = enable_grpc
105 self.enable_docs_url = enable_docs_url
106 self.enable_latency_logging = enable_latency_logging
107 self.dataplane = DataPlane(model_registry=registered_models)
108 self.model_repository_extension = ModelRepositoryExtension(
109 model_registry=self.registered_models)
110 self._grpc_server = GRPCServer(grpc_port, self.dataplane, self.model_repository_extension)
111 self.log_config_file = log_config_file
112 self.access_log_format = access_log_format
113
114 def start(self, models: Union[List[Model], Dict[str, Deployment]]) -> None:
115 if isinstance(models, list):
116 for model in models:
117 if isinstance(model, Model):
118 self.register_model(model)
119 # pass whether to log request latency into the model
120 model.enable_latency_logging = self.enable_latency_logging
121 else:
122 raise RuntimeError("Model type should be 'Model'")
123 elif isinstance(models, dict):
124 if all([isinstance(v, Deployment) for v in models.values()]):
125 # TODO: make this port number a variable
126 rayserve.start(detached=True, http_options={"host": "0.0.0.0", "port": 9071})
127 for key in models:
128 models[key].deploy()
129 handle = models[key].get_handle()
130 self.register_model_handle(key, handle)
131 else:
132 raise RuntimeError("Model type should be RayServe Deployment")
133 else:
134 raise RuntimeError("Unknown model collection types")
135
136 if self.max_asyncio_workers is None:
137 # formula as suggest in https://bugs.python.org/issue35279
138 self.max_asyncio_workers = min(32, utils.cpu_count() + 4)
139 logging.info(f"Setting max asyncio worker threads as {self.max_asyncio_workers}")
140 asyncio.get_event_loop().set_default_executor(
141 concurrent.futures.ThreadPoolExecutor(max_workers=self.max_asyncio_workers))
142
143 async def serve():
144 serversocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
145 serversocket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
146 serversocket.bind(('0.0.0.0', self.http_port))
147 serversocket.listen(5)
148
149 logging.info(f"Starting uvicorn with {self.workers} workers")
150 loop = asyncio.get_event_loop()
151 for sig in [signal.SIGINT, signal.SIGTERM, signal.SIGQUIT]:
152 loop.add_signal_handler(
153 sig, lambda s=sig: asyncio.create_task(self.stop(sig=s))
154 )
155 self._rest_server = UvicornServer(self.http_port, [serversocket],
156 self.dataplane, self.model_repository_extension,
157 self.enable_docs_url, log_config_file=self.log_config_file,
158 access_log_format=self.access_log_format)
159 if self.workers == 1:
160 await self._rest_server.run()
161 else:
162 # Since py38 MacOS/Windows defaults to use spawn for starting multiprocessing.
163 # https://docs.python.org/3/library/multiprocessing.html#contexts-and-start-methods
164 # Spawn does not work with FastAPI/uvicorn in multiprocessing mode, use fork for multiprocessing
165 # https://github.com/tiangolo/fastapi/issues/1586
166 multiprocessing.set_start_method('fork')
167 server = UvicornServer(self.http_port, [serversocket],
168 self.dataplane, self.model_repository_extension,
169 self.enable_docs_url, custom_log_config=self.log_config)
170 for _ in range(self.workers):
171 p = Process(target=server.run_sync)
172 p.start()
173
174 async def servers_task():
175 servers = [serve()]
176 if self.enable_grpc:
177 servers.append(self._grpc_server.start(self.max_threads))
178 await asyncio.gather(*servers)
179
180 asyncio.run(servers_task())
181
182 async def stop(self, sig: Optional[int] = None):
183 logging.info("Stopping the model server")
184 if self._rest_server:
185 logging.info("Stopping the rest server")
186 await self._rest_server.stop()
187 if self._grpc_server:
188 logging.info("Stopping the grpc server")
189 await self._grpc_server.stop(sig)
190
191 def register_model_handle(self, name: str, model_handle: RayServeHandle):
192 self.registered_models.update_handle(name, model_handle)
193 logging.info("Registering model handle: %s", name)
194
195 def register_model(self, model: Model):
196 if not model.name:
197 raise Exception(
198 "Failed to register model, model.name must be provided.")
199 self.registered_models.update(model)
200 logging.info("Registering model: %s", model.name)
201
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/python/kserve/kserve/model_server.py b/python/kserve/kserve/model_server.py
--- a/python/kserve/kserve/model_server.py
+++ b/python/kserve/kserve/model_server.py
@@ -107,7 +107,10 @@
self.dataplane = DataPlane(model_registry=registered_models)
self.model_repository_extension = ModelRepositoryExtension(
model_registry=self.registered_models)
- self._grpc_server = GRPCServer(grpc_port, self.dataplane, self.model_repository_extension)
+ self._grpc_server = None
+ if self.enable_grpc:
+ self._grpc_server = GRPCServer(grpc_port, self.dataplane,
+ self.model_repository_extension)
self.log_config_file = log_config_file
self.access_log_format = access_log_format
| {"golden_diff": "diff --git a/python/kserve/kserve/model_server.py b/python/kserve/kserve/model_server.py\n--- a/python/kserve/kserve/model_server.py\n+++ b/python/kserve/kserve/model_server.py\n@@ -107,7 +107,10 @@\n self.dataplane = DataPlane(model_registry=registered_models)\n self.model_repository_extension = ModelRepositoryExtension(\n model_registry=self.registered_models)\n- self._grpc_server = GRPCServer(grpc_port, self.dataplane, self.model_repository_extension)\n+ self._grpc_server = None\n+ if self.enable_grpc:\n+ self._grpc_server = GRPCServer(grpc_port, self.dataplane,\n+ self.model_repository_extension)\n self.log_config_file = log_config_file\n self.access_log_format = access_log_format\n", "issue": "Stop Error When disabled_grpc=True\n/kind bug\r\n\r\n**What steps did you take and what happened:**\r\nIn our use case we don't use the grpc_server inside `ModelServer` thus we set `enable_grpc=False`. But when server stops it will still call stop from this unstarted server thus got the error below:\r\n```\r\n> await self._server.stop(grace=10)\r\nE AttributeError: 'NoneType' object has no attribute 'stop'\r\n```\r\n\r\nThe bug is because that this [if](https://github.com/kserve/kserve/blob/master/python/kserve/kserve/model_server.py#L174) will always be evaluated as `True`. Can we please add a one line change to make it support `enable_grpc=False`? Maybe change it to `if self._grpc_server._server` or `if self.enable_grpc`? Should be a very quick fix and that helps a lot! Thank you!\r\n\r\n**What did you expect to happen:**\r\nA one line change from [code](https://github.com/kserve/kserve/blob/master/python/kserve/kserve/model_server.py#L174) to `if self._grpc_server._server` or `if self.enable_grpc`\r\n\r\n**What's the InferenceService yaml:**\r\n[To help us debug please run `kubectl get isvc $name -n $namespace -oyaml` and paste the output]\r\n\r\n**Anything else you would like to add:**\r\n[Miscellaneous information that will assist in solving the issue.]\r\n\r\n\r\n**Environment:**\r\n\r\n- Istio Version:\r\n- Knative Version:\r\n- KServe Version:\r\n- Kubeflow version:\r\n- Cloud Environment:[k8s_istio/istio_dex/gcp_basic_auth/gcp_iap/aws/aws_cognito/ibm]\r\n- Minikube/Kind version:\r\n- Kubernetes version: (use `kubectl version`):\r\n- OS (e.g. from `/etc/os-release`):\r\n\nStop Error When disabled_grpc=True\n/kind bug\r\n\r\n**What steps did you take and what happened:**\r\nIn our use case we don't use the grpc_server inside `ModelServer` thus we set `enable_grpc=False`. But when server stops it will still call stop from this unstarted server thus got the error below:\r\n```\r\n> await self._server.stop(grace=10)\r\nE AttributeError: 'NoneType' object has no attribute 'stop'\r\n```\r\n\r\nThe bug is because that this [if](https://github.com/kserve/kserve/blob/master/python/kserve/kserve/model_server.py#L174) will always be evaluated as `True`. Can we please add a one line change to make it support `enable_grpc=False`? Maybe change it to `if self._grpc_server._server` or `if self.enable_grpc`? Should be a very quick fix and that helps a lot! 
Thank you!\r\n\r\n**What did you expect to happen:**\r\nA one line change from [code](https://github.com/kserve/kserve/blob/master/python/kserve/kserve/model_server.py#L174) to `if self._grpc_server._server` or `if self.enable_grpc`\r\n\r\n**What's the InferenceService yaml:**\r\n[To help us debug please run `kubectl get isvc $name -n $namespace -oyaml` and paste the output]\r\n\r\n**Anything else you would like to add:**\r\n[Miscellaneous information that will assist in solving the issue.]\r\n\r\n\r\n**Environment:**\r\n\r\n- Istio Version:\r\n- Knative Version:\r\n- KServe Version:\r\n- Kubeflow version:\r\n- Cloud Environment:[k8s_istio/istio_dex/gcp_basic_auth/gcp_iap/aws/aws_cognito/ibm]\r\n- Minikube/Kind version:\r\n- Kubernetes version: (use `kubectl version`):\r\n- OS (e.g. from `/etc/os-release`):\r\n\n", "before_files": [{"content": "# Copyright 2022 The KServe Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport argparse\nimport asyncio\nimport concurrent.futures\nimport logging\nimport signal\nimport socket\nfrom distutils.util import strtobool\nfrom typing import List, Dict, Union, Optional\n\nfrom ray import serve as rayserve\nfrom ray.serve.api import Deployment, RayServeHandle\n\nfrom .protocol.grpc.server import GRPCServer\nfrom .protocol.rest.server import UvicornServer\nfrom .utils import utils\nimport multiprocessing\nfrom multiprocessing import Process\n\nfrom .model import Model\nfrom .protocol.dataplane import DataPlane\nfrom .protocol.model_repository_extension import ModelRepositoryExtension\nfrom .model_repository import ModelRepository\n\nDEFAULT_HTTP_PORT = 8080\nDEFAULT_GRPC_PORT = 8081\n\nparser = argparse.ArgumentParser(add_help=False)\nparser.add_argument(\"--http_port\", default=DEFAULT_HTTP_PORT, type=int,\n help=\"The HTTP Port listened to by the model server.\")\nparser.add_argument(\"--grpc_port\", default=DEFAULT_GRPC_PORT, type=int,\n help=\"The GRPC Port listened to by the model server.\")\nparser.add_argument(\"--workers\", default=1, type=int,\n help=\"The number of workers for multi-processing.\")\nparser.add_argument(\"--max_threads\", default=4, type=int,\n help=\"The number of max processing threads in each worker.\")\nparser.add_argument('--max_asyncio_workers', default=None, type=int,\n help='Max number of asyncio workers to spawn')\nparser.add_argument(\"--enable_grpc\", default=True, type=lambda x: bool(strtobool(x)),\n help=\"Enable gRPC for the model server\")\nparser.add_argument(\"--enable_docs_url\", default=False, type=lambda x: bool(strtobool(x)),\n help=\"Enable docs url '/docs' to display Swagger UI.\")\nparser.add_argument(\"--enable_latency_logging\", default=True, type=lambda x: bool(strtobool(x)),\n help=\"Output a log per request with latency metrics.\")\nparser.add_argument(\"--log_config_file\", default=None, type=str,\n help=\"File path containing UvicornServer's log config. 
Needs to be a yaml or json file.\")\nparser.add_argument(\"--access_log_format\", default=None, type=str,\n help=\"Format to set for the access log (provided by asgi-logger).\")\n\nargs, _ = parser.parse_known_args()\n\nFORMAT = '%(asctime)s.%(msecs)03d %(process)s %(name)s %(levelname)s [%(funcName)s():%(lineno)s] %(message)s'\nDATE_FORMAT = \"%Y-%m-%d %H:%M:%S\"\nlogging.basicConfig(level=logging.INFO, format=FORMAT, datefmt=DATE_FORMAT)\n\n\nclass ModelServer:\n \"\"\"KServe ModelServer\n\n Args:\n http_port (int): HTTP port. Default: ``8080``.\n grpc_port (int): GRPC port. Default: ``8081``.\n workers (int): Number of workers for uvicorn. Default: ``1``.\n max_threads (int): Max number of processing threads. Default: ``4``\n max_asyncio_workers (int): Max number of AsyncIO threads. Default: ``None``\n registered_models (ModelRepository): Model repository with registered models.\n enable_grpc (bool): Whether to turn on grpc server. Default: ``True``\n enable_docs_url (bool): Whether to turn on ``/docs`` Swagger UI. Default: ``False``.\n enable_latency_logging (bool): Whether to log latency metric. Default: ``True``.\n log_config_file (dict): File path containing UvicornServer's log config. Default: ``None``.\n access_log_format (string): Format to set for the access log (provided by asgi-logger). Default: ``None``\n \"\"\"\n\n def __init__(self, http_port: int = args.http_port,\n grpc_port: int = args.grpc_port,\n workers: int = args.workers,\n max_threads: int = args.max_threads,\n max_asyncio_workers: int = args.max_asyncio_workers,\n registered_models: ModelRepository = ModelRepository(),\n enable_grpc: bool = args.enable_grpc,\n enable_docs_url: bool = args.enable_docs_url,\n enable_latency_logging: bool = args.enable_latency_logging,\n log_config_file: str = args.log_config_file,\n access_log_format: str = args.access_log_format):\n self.registered_models = registered_models\n self.http_port = http_port\n self.grpc_port = grpc_port\n self.workers = workers\n self.max_threads = max_threads\n self.max_asyncio_workers = max_asyncio_workers\n self.enable_grpc = enable_grpc\n self.enable_docs_url = enable_docs_url\n self.enable_latency_logging = enable_latency_logging\n self.dataplane = DataPlane(model_registry=registered_models)\n self.model_repository_extension = ModelRepositoryExtension(\n model_registry=self.registered_models)\n self._grpc_server = GRPCServer(grpc_port, self.dataplane, self.model_repository_extension)\n self.log_config_file = log_config_file\n self.access_log_format = access_log_format\n\n def start(self, models: Union[List[Model], Dict[str, Deployment]]) -> None:\n if isinstance(models, list):\n for model in models:\n if isinstance(model, Model):\n self.register_model(model)\n # pass whether to log request latency into the model\n model.enable_latency_logging = self.enable_latency_logging\n else:\n raise RuntimeError(\"Model type should be 'Model'\")\n elif isinstance(models, dict):\n if all([isinstance(v, Deployment) for v in models.values()]):\n # TODO: make this port number a variable\n rayserve.start(detached=True, http_options={\"host\": \"0.0.0.0\", \"port\": 9071})\n for key in models:\n models[key].deploy()\n handle = models[key].get_handle()\n self.register_model_handle(key, handle)\n else:\n raise RuntimeError(\"Model type should be RayServe Deployment\")\n else:\n raise RuntimeError(\"Unknown model collection types\")\n\n if self.max_asyncio_workers is None:\n # formula as suggest in https://bugs.python.org/issue35279\n self.max_asyncio_workers = min(32, 
utils.cpu_count() + 4)\n logging.info(f\"Setting max asyncio worker threads as {self.max_asyncio_workers}\")\n asyncio.get_event_loop().set_default_executor(\n concurrent.futures.ThreadPoolExecutor(max_workers=self.max_asyncio_workers))\n\n async def serve():\n serversocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n serversocket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n serversocket.bind(('0.0.0.0', self.http_port))\n serversocket.listen(5)\n\n logging.info(f\"Starting uvicorn with {self.workers} workers\")\n loop = asyncio.get_event_loop()\n for sig in [signal.SIGINT, signal.SIGTERM, signal.SIGQUIT]:\n loop.add_signal_handler(\n sig, lambda s=sig: asyncio.create_task(self.stop(sig=s))\n )\n self._rest_server = UvicornServer(self.http_port, [serversocket],\n self.dataplane, self.model_repository_extension,\n self.enable_docs_url, log_config_file=self.log_config_file,\n access_log_format=self.access_log_format)\n if self.workers == 1:\n await self._rest_server.run()\n else:\n # Since py38 MacOS/Windows defaults to use spawn for starting multiprocessing.\n # https://docs.python.org/3/library/multiprocessing.html#contexts-and-start-methods\n # Spawn does not work with FastAPI/uvicorn in multiprocessing mode, use fork for multiprocessing\n # https://github.com/tiangolo/fastapi/issues/1586\n multiprocessing.set_start_method('fork')\n server = UvicornServer(self.http_port, [serversocket],\n self.dataplane, self.model_repository_extension,\n self.enable_docs_url, custom_log_config=self.log_config)\n for _ in range(self.workers):\n p = Process(target=server.run_sync)\n p.start()\n\n async def servers_task():\n servers = [serve()]\n if self.enable_grpc:\n servers.append(self._grpc_server.start(self.max_threads))\n await asyncio.gather(*servers)\n\n asyncio.run(servers_task())\n\n async def stop(self, sig: Optional[int] = None):\n logging.info(\"Stopping the model server\")\n if self._rest_server:\n logging.info(\"Stopping the rest server\")\n await self._rest_server.stop()\n if self._grpc_server:\n logging.info(\"Stopping the grpc server\")\n await self._grpc_server.stop(sig)\n\n def register_model_handle(self, name: str, model_handle: RayServeHandle):\n self.registered_models.update_handle(name, model_handle)\n logging.info(\"Registering model handle: %s\", name)\n\n def register_model(self, model: Model):\n if not model.name:\n raise Exception(\n \"Failed to register model, model.name must be provided.\")\n self.registered_models.update(model)\n logging.info(\"Registering model: %s\", model.name)\n", "path": "python/kserve/kserve/model_server.py"}], "after_files": [{"content": "# Copyright 2022 The KServe Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport argparse\nimport asyncio\nimport concurrent.futures\nimport logging\nimport signal\nimport socket\nfrom distutils.util import strtobool\nfrom typing import List, Dict, Union, Optional\n\nfrom ray import serve as rayserve\nfrom ray.serve.api import Deployment, RayServeHandle\n\nfrom .protocol.grpc.server 
import GRPCServer\nfrom .protocol.rest.server import UvicornServer\nfrom .utils import utils\nimport multiprocessing\nfrom multiprocessing import Process\n\nfrom .model import Model\nfrom .protocol.dataplane import DataPlane\nfrom .protocol.model_repository_extension import ModelRepositoryExtension\nfrom .model_repository import ModelRepository\n\nDEFAULT_HTTP_PORT = 8080\nDEFAULT_GRPC_PORT = 8081\n\nparser = argparse.ArgumentParser(add_help=False)\nparser.add_argument(\"--http_port\", default=DEFAULT_HTTP_PORT, type=int,\n help=\"The HTTP Port listened to by the model server.\")\nparser.add_argument(\"--grpc_port\", default=DEFAULT_GRPC_PORT, type=int,\n help=\"The GRPC Port listened to by the model server.\")\nparser.add_argument(\"--workers\", default=1, type=int,\n help=\"The number of workers for multi-processing.\")\nparser.add_argument(\"--max_threads\", default=4, type=int,\n help=\"The number of max processing threads in each worker.\")\nparser.add_argument('--max_asyncio_workers', default=None, type=int,\n help='Max number of asyncio workers to spawn')\nparser.add_argument(\"--enable_grpc\", default=True, type=lambda x: bool(strtobool(x)),\n help=\"Enable gRPC for the model server\")\nparser.add_argument(\"--enable_docs_url\", default=False, type=lambda x: bool(strtobool(x)),\n help=\"Enable docs url '/docs' to display Swagger UI.\")\nparser.add_argument(\"--enable_latency_logging\", default=True, type=lambda x: bool(strtobool(x)),\n help=\"Output a log per request with latency metrics.\")\nparser.add_argument(\"--log_config_file\", default=None, type=str,\n help=\"File path containing UvicornServer's log config. Needs to be a yaml or json file.\")\nparser.add_argument(\"--access_log_format\", default=None, type=str,\n help=\"Format to set for the access log (provided by asgi-logger).\")\n\nargs, _ = parser.parse_known_args()\n\nFORMAT = '%(asctime)s.%(msecs)03d %(process)s %(name)s %(levelname)s [%(funcName)s():%(lineno)s] %(message)s'\nDATE_FORMAT = \"%Y-%m-%d %H:%M:%S\"\nlogging.basicConfig(level=logging.INFO, format=FORMAT, datefmt=DATE_FORMAT)\n\n\nclass ModelServer:\n \"\"\"KServe ModelServer\n\n Args:\n http_port (int): HTTP port. Default: ``8080``.\n grpc_port (int): GRPC port. Default: ``8081``.\n workers (int): Number of workers for uvicorn. Default: ``1``.\n max_threads (int): Max number of processing threads. Default: ``4``\n max_asyncio_workers (int): Max number of AsyncIO threads. Default: ``None``\n registered_models (ModelRepository): Model repository with registered models.\n enable_grpc (bool): Whether to turn on grpc server. Default: ``True``\n enable_docs_url (bool): Whether to turn on ``/docs`` Swagger UI. Default: ``False``.\n enable_latency_logging (bool): Whether to log latency metric. Default: ``True``.\n log_config_file (dict): File path containing UvicornServer's log config. Default: ``None``.\n access_log_format (string): Format to set for the access log (provided by asgi-logger). 
Default: ``None``\n \"\"\"\n\n def __init__(self, http_port: int = args.http_port,\n grpc_port: int = args.grpc_port,\n workers: int = args.workers,\n max_threads: int = args.max_threads,\n max_asyncio_workers: int = args.max_asyncio_workers,\n registered_models: ModelRepository = ModelRepository(),\n enable_grpc: bool = args.enable_grpc,\n enable_docs_url: bool = args.enable_docs_url,\n enable_latency_logging: bool = args.enable_latency_logging,\n log_config_file: str = args.log_config_file,\n access_log_format: str = args.access_log_format):\n self.registered_models = registered_models\n self.http_port = http_port\n self.grpc_port = grpc_port\n self.workers = workers\n self.max_threads = max_threads\n self.max_asyncio_workers = max_asyncio_workers\n self.enable_grpc = enable_grpc\n self.enable_docs_url = enable_docs_url\n self.enable_latency_logging = enable_latency_logging\n self.dataplane = DataPlane(model_registry=registered_models)\n self.model_repository_extension = ModelRepositoryExtension(\n model_registry=self.registered_models)\n self._grpc_server = None\n if self.enable_grpc:\n self._grpc_server = GRPCServer(grpc_port, self.dataplane,\n self.model_repository_extension)\n self.log_config_file = log_config_file\n self.access_log_format = access_log_format\n\n def start(self, models: Union[List[Model], Dict[str, Deployment]]) -> None:\n if isinstance(models, list):\n for model in models:\n if isinstance(model, Model):\n self.register_model(model)\n # pass whether to log request latency into the model\n model.enable_latency_logging = self.enable_latency_logging\n else:\n raise RuntimeError(\"Model type should be 'Model'\")\n elif isinstance(models, dict):\n if all([isinstance(v, Deployment) for v in models.values()]):\n # TODO: make this port number a variable\n rayserve.start(detached=True, http_options={\"host\": \"0.0.0.0\", \"port\": 9071})\n for key in models:\n models[key].deploy()\n handle = models[key].get_handle()\n self.register_model_handle(key, handle)\n else:\n raise RuntimeError(\"Model type should be RayServe Deployment\")\n else:\n raise RuntimeError(\"Unknown model collection types\")\n\n if self.max_asyncio_workers is None:\n # formula as suggest in https://bugs.python.org/issue35279\n self.max_asyncio_workers = min(32, utils.cpu_count() + 4)\n logging.info(f\"Setting max asyncio worker threads as {self.max_asyncio_workers}\")\n asyncio.get_event_loop().set_default_executor(\n concurrent.futures.ThreadPoolExecutor(max_workers=self.max_asyncio_workers))\n\n async def serve():\n serversocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n serversocket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n serversocket.bind(('0.0.0.0', self.http_port))\n serversocket.listen(5)\n\n logging.info(f\"Starting uvicorn with {self.workers} workers\")\n loop = asyncio.get_event_loop()\n for sig in [signal.SIGINT, signal.SIGTERM, signal.SIGQUIT]:\n loop.add_signal_handler(\n sig, lambda s=sig: asyncio.create_task(self.stop(sig=s))\n )\n self._rest_server = UvicornServer(self.http_port, [serversocket],\n self.dataplane, self.model_repository_extension,\n self.enable_docs_url, log_config_file=self.log_config_file,\n access_log_format=self.access_log_format)\n if self.workers == 1:\n await self._rest_server.run()\n else:\n # Since py38 MacOS/Windows defaults to use spawn for starting multiprocessing.\n # https://docs.python.org/3/library/multiprocessing.html#contexts-and-start-methods\n # Spawn does not work with FastAPI/uvicorn in multiprocessing mode, use fork for 
multiprocessing\n # https://github.com/tiangolo/fastapi/issues/1586\n multiprocessing.set_start_method('fork')\n server = UvicornServer(self.http_port, [serversocket],\n self.dataplane, self.model_repository_extension,\n self.enable_docs_url, custom_log_config=self.log_config)\n for _ in range(self.workers):\n p = Process(target=server.run_sync)\n p.start()\n\n async def servers_task():\n servers = [serve()]\n if self.enable_grpc:\n servers.append(self._grpc_server.start(self.max_threads))\n await asyncio.gather(*servers)\n\n asyncio.run(servers_task())\n\n async def stop(self, sig: Optional[int] = None):\n logging.info(\"Stopping the model server\")\n if self._rest_server:\n logging.info(\"Stopping the rest server\")\n await self._rest_server.stop()\n if self._grpc_server:\n logging.info(\"Stopping the grpc server\")\n await self._grpc_server.stop(sig)\n\n def register_model_handle(self, name: str, model_handle: RayServeHandle):\n self.registered_models.update_handle(name, model_handle)\n logging.info(\"Registering model handle: %s\", name)\n\n def register_model(self, model: Model):\n if not model.name:\n raise Exception(\n \"Failed to register model, model.name must be provided.\")\n self.registered_models.update(model)\n logging.info(\"Registering model: %s\", model.name)\n", "path": "python/kserve/kserve/model_server.py"}]} | 3,652 | 178 |
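For reference, the `min(32, cpu_count() + 4)` worker sizing used in the KServe server record above follows the heuristic from https://bugs.python.org/issue35279. A minimal standalone sketch of that pattern (plain asyncio, not KServe code; the function name is an assumption):

```python
import asyncio
import concurrent.futures
import os


def configure_default_executor(max_workers=None):
    # Heuristic suggested in https://bugs.python.org/issue35279:
    # cap the thread pool at 32 or cpu_count() + 4, whichever is smaller.
    if max_workers is None:
        max_workers = min(32, (os.cpu_count() or 1) + 4)
    loop = asyncio.new_event_loop()
    asyncio.set_event_loop(loop)
    loop.set_default_executor(
        concurrent.futures.ThreadPoolExecutor(max_workers=max_workers)
    )
    return max_workers


if __name__ == "__main__":
    print("max asyncio worker threads:", configure_default_executor())
```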
gh_patches_debug_10091 | rasdani/github-patches | git_diff | pulp__pulpcore-2558 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Implementation of does_batch in DeclarativeContent is wrong
**Describe the bug**
`does_batch` is supposed to decide whether content can be waited on, or must be evaluated asap. It produces `False` unnecessarily often, leading to performance issues.
**Additional context**
This is suspected to at least one of the reasons for: https://github.com/pulp/pulp_deb/issues/466
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pulpcore/plugin/stages/models.py`
Content:
```
1 from gettext import gettext as _
2
3 import asyncio
4
5 from pulpcore.constants import ALL_KNOWN_CONTENT_CHECKSUMS
6 from pulpcore.plugin.models import Artifact
7
8
9 class DeclarativeArtifact:
10 """
11 Relates an :class:`~pulpcore.plugin.models.Artifact`, how to download it, and its
12 `relative_path` used later during publishing.
13
14 This is used by the Stages API stages to determine if an
15 :class:`~pulpcore.plugin.models.Artifact` is already present and ensure Pulp can download it in
16 the future. The `artifact` can be either saved or unsaved. If unsaved, the `artifact` attributes
17 may be incomplete because not all digest information can be computed until the
18 :class:`~pulpcore.plugin.models.Artifact` is downloaded.
19
20 Attributes:
21 artifact (:class:`~pulpcore.plugin.models.Artifact`): An
22 :class:`~pulpcore.plugin.models.Artifact` either saved or unsaved. If unsaved, it
23 may have partial digest information attached to it.
24 url (str): the url to fetch the :class:`~pulpcore.plugin.models.Artifact` from.
25 relative_path (str): the relative_path this :class:`~pulpcore.plugin.models.Artifact`
26 should be published at for any Publication.
27 remote (:class:`~pulpcore.plugin.models.Remote`): The remote used to fetch this
28 :class:`~pulpcore.plugin.models.Artifact`.
29 extra_data (dict): A dictionary available for additional data to be stored in.
30 deferred_download (bool): Whether this artifact should be downloaded and saved
31 in the artifact stages. Defaults to `False`. See :ref:`on-demand-support`.
32
33 Raises:
34 ValueError: If `artifact`, `url`, or `relative_path` are not specified. If `remote` is not
35 specified and `artifact` doesn't have a file.
36 """
37
38 __slots__ = ("artifact", "url", "relative_path", "remote", "extra_data", "deferred_download")
39
40 def __init__(
41 self,
42 artifact=None,
43 url=None,
44 relative_path=None,
45 remote=None,
46 extra_data=None,
47 deferred_download=False,
48 ):
49 if not url:
50 raise ValueError(_("DeclarativeArtifact must have a 'url'"))
51 if not relative_path:
52 raise ValueError(_("DeclarativeArtifact must have a 'relative_path'"))
53 if not artifact:
54 raise ValueError(_("DeclarativeArtifact must have a 'artifact'"))
55 if not remote and not artifact.file:
56 raise ValueError(
57 _(
58 "DeclarativeArtifact must have a 'remote' if the Artifact doesn't "
59 "have a file backing it."
60 )
61 )
62 self.artifact = artifact
63 self.url = url
64 self.relative_path = relative_path
65 self.remote = remote
66 self.extra_data = extra_data or {}
67 self.deferred_download = deferred_download
68
69 async def download(self):
70 """
71 Download content and update the associated Artifact.
72
73 Returns:
74 Returns the :class:`~pulpcore.plugin.download.DownloadResult` of the Artifact.
75 """
76 expected_digests = {}
77 validation_kwargs = {}
78 for digest_name in ALL_KNOWN_CONTENT_CHECKSUMS:
79 digest_value = getattr(self.artifact, digest_name)
80 if digest_value:
81 expected_digests[digest_name] = digest_value
82 if expected_digests:
83 validation_kwargs["expected_digests"] = expected_digests
84 if self.artifact.size:
85 expected_size = self.artifact.size
86 validation_kwargs["expected_size"] = expected_size
87 downloader = self.remote.get_downloader(url=self.url, **validation_kwargs)
88 # Custom downloaders may need extra information to complete the request.
89 download_result = await downloader.run(extra_data=self.extra_data)
90 self.artifact = Artifact(**download_result.artifact_attributes, file=download_result.path)
91 return download_result
92
93
94 class DeclarativeContent:
95 """
96 Relates a Content unit and zero or more :class:`~pulpcore.plugin.stages.DeclarativeArtifact`
97 objects.
98
99 This is used by the Stages API stages to determine if a Content unit is already present and
100 ensure all of its associated :class:`~pulpcore.plugin.stages.DeclarativeArtifact` objects are
101 related correctly. The `content` can be either saved or unsaved depending on where in the Stages
102 API pipeline this is used.
103
104 Attributes:
105 content (subclass of :class:`~pulpcore.plugin.models.Content`): A Content unit, possibly
106 unsaved
107 d_artifacts (list): A list of zero or more
108 :class:`~pulpcore.plugin.stages.DeclarativeArtifact` objects associated with `content`.
109 extra_data (dict): A dictionary available for additional data to be stored in.
110
111 Raises:
112 ValueError: If `content` is not specified.
113 """
114
115 __slots__ = (
116 "content",
117 "d_artifacts",
118 "extra_data",
119 "_future",
120 "_thaw_queue_event",
121 "_resolved",
122 )
123
124 def __init__(self, content=None, d_artifacts=None, extra_data=None):
125 if not content:
126 raise ValueError(_("DeclarativeContent must have a 'content'"))
127 self.content = content
128 self.d_artifacts = d_artifacts or []
129 self.extra_data = extra_data or {}
130 self._future = None
131 self._thaw_queue_event = None
132 self._resolved = False
133
134 @property
135 def does_batch(self):
136 """Whether this content is being awaited on and must therefore not wait forever in batches.
137 When overwritten in subclasses, a `True` value must never be turned into `False`.
138 """
139 return not self._resolved and self._future is None
140
141 async def resolution(self):
142 """Coroutine that waits for the content to be saved to database.
143 Returns the content unit."""
144 if self._resolved:
145 # Already resolved ~> shortcut
146 return self.content
147 if self._future is None:
148 # We do not yet have a future
149 self._future = asyncio.get_event_loop().create_future()
150 if self._thaw_queue_event:
151 # We have a future now but are still stuck in a queue
152 self._thaw_queue_event.set()
153 # Now we wait
154 return await self._future
155
156 def resolve(self):
157 """Resolve this content unit and notify any waiting tasks."""
158 self._resolved = True
159 if self._future:
160 self._future.set_result(self.content)
161 self._future = None
162
163 def __str__(self):
164 return str(self.content.__class__.__name__)
165
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pulpcore/plugin/stages/models.py b/pulpcore/plugin/stages/models.py
--- a/pulpcore/plugin/stages/models.py
+++ b/pulpcore/plugin/stages/models.py
@@ -136,7 +136,7 @@
"""Whether this content is being awaited on and must therefore not wait forever in batches.
When overwritten in subclasses, a `True` value must never be turned into `False`.
"""
- return not self._resolved and self._future is None
+ return self._resolved or self._future is None
async def resolution(self):
"""Coroutine that waits for the content to be saved to database.
| {"golden_diff": "diff --git a/pulpcore/plugin/stages/models.py b/pulpcore/plugin/stages/models.py\n--- a/pulpcore/plugin/stages/models.py\n+++ b/pulpcore/plugin/stages/models.py\n@@ -136,7 +136,7 @@\n \"\"\"Whether this content is being awaited on and must therefore not wait forever in batches.\n When overwritten in subclasses, a `True` value must never be turned into `False`.\n \"\"\"\n- return not self._resolved and self._future is None\n+ return self._resolved or self._future is None\n \n async def resolution(self):\n \"\"\"Coroutine that waits for the content to be saved to database.\n", "issue": "Implementation of does_batch in DeclarativeContent is wrong\n**Describe the bug**\r\n`does_batch` is supposed to decide, whether content can be waited on, or must be evaluated asap. It produces `False` unnecessarily often, leading to performance issues.\r\n\r\n**Additional context**\r\nThis is suspected to at least one of the reasons for: https://github.com/pulp/pulp_deb/issues/466\n", "before_files": [{"content": "from gettext import gettext as _\n\nimport asyncio\n\nfrom pulpcore.constants import ALL_KNOWN_CONTENT_CHECKSUMS\nfrom pulpcore.plugin.models import Artifact\n\n\nclass DeclarativeArtifact:\n \"\"\"\n Relates an :class:`~pulpcore.plugin.models.Artifact`, how to download it, and its\n `relative_path` used later during publishing.\n\n This is used by the Stages API stages to determine if an\n :class:`~pulpcore.plugin.models.Artifact` is already present and ensure Pulp can download it in\n the future. The `artifact` can be either saved or unsaved. If unsaved, the `artifact` attributes\n may be incomplete because not all digest information can be computed until the\n :class:`~pulpcore.plugin.models.Artifact` is downloaded.\n\n Attributes:\n artifact (:class:`~pulpcore.plugin.models.Artifact`): An\n :class:`~pulpcore.plugin.models.Artifact` either saved or unsaved. If unsaved, it\n may have partial digest information attached to it.\n url (str): the url to fetch the :class:`~pulpcore.plugin.models.Artifact` from.\n relative_path (str): the relative_path this :class:`~pulpcore.plugin.models.Artifact`\n should be published at for any Publication.\n remote (:class:`~pulpcore.plugin.models.Remote`): The remote used to fetch this\n :class:`~pulpcore.plugin.models.Artifact`.\n extra_data (dict): A dictionary available for additional data to be stored in.\n deferred_download (bool): Whether this artifact should be downloaded and saved\n in the artifact stages. Defaults to `False`. See :ref:`on-demand-support`.\n\n Raises:\n ValueError: If `artifact`, `url`, or `relative_path` are not specified. 
If `remote` is not\n specified and `artifact` doesn't have a file.\n \"\"\"\n\n __slots__ = (\"artifact\", \"url\", \"relative_path\", \"remote\", \"extra_data\", \"deferred_download\")\n\n def __init__(\n self,\n artifact=None,\n url=None,\n relative_path=None,\n remote=None,\n extra_data=None,\n deferred_download=False,\n ):\n if not url:\n raise ValueError(_(\"DeclarativeArtifact must have a 'url'\"))\n if not relative_path:\n raise ValueError(_(\"DeclarativeArtifact must have a 'relative_path'\"))\n if not artifact:\n raise ValueError(_(\"DeclarativeArtifact must have a 'artifact'\"))\n if not remote and not artifact.file:\n raise ValueError(\n _(\n \"DeclarativeArtifact must have a 'remote' if the Artifact doesn't \"\n \"have a file backing it.\"\n )\n )\n self.artifact = artifact\n self.url = url\n self.relative_path = relative_path\n self.remote = remote\n self.extra_data = extra_data or {}\n self.deferred_download = deferred_download\n\n async def download(self):\n \"\"\"\n Download content and update the associated Artifact.\n\n Returns:\n Returns the :class:`~pulpcore.plugin.download.DownloadResult` of the Artifact.\n \"\"\"\n expected_digests = {}\n validation_kwargs = {}\n for digest_name in ALL_KNOWN_CONTENT_CHECKSUMS:\n digest_value = getattr(self.artifact, digest_name)\n if digest_value:\n expected_digests[digest_name] = digest_value\n if expected_digests:\n validation_kwargs[\"expected_digests\"] = expected_digests\n if self.artifact.size:\n expected_size = self.artifact.size\n validation_kwargs[\"expected_size\"] = expected_size\n downloader = self.remote.get_downloader(url=self.url, **validation_kwargs)\n # Custom downloaders may need extra information to complete the request.\n download_result = await downloader.run(extra_data=self.extra_data)\n self.artifact = Artifact(**download_result.artifact_attributes, file=download_result.path)\n return download_result\n\n\nclass DeclarativeContent:\n \"\"\"\n Relates a Content unit and zero or more :class:`~pulpcore.plugin.stages.DeclarativeArtifact`\n objects.\n\n This is used by the Stages API stages to determine if a Content unit is already present and\n ensure all of its associated :class:`~pulpcore.plugin.stages.DeclarativeArtifact` objects are\n related correctly. 
The `content` can be either saved or unsaved depending on where in the Stages\n API pipeline this is used.\n\n Attributes:\n content (subclass of :class:`~pulpcore.plugin.models.Content`): A Content unit, possibly\n unsaved\n d_artifacts (list): A list of zero or more\n :class:`~pulpcore.plugin.stages.DeclarativeArtifact` objects associated with `content`.\n extra_data (dict): A dictionary available for additional data to be stored in.\n\n Raises:\n ValueError: If `content` is not specified.\n \"\"\"\n\n __slots__ = (\n \"content\",\n \"d_artifacts\",\n \"extra_data\",\n \"_future\",\n \"_thaw_queue_event\",\n \"_resolved\",\n )\n\n def __init__(self, content=None, d_artifacts=None, extra_data=None):\n if not content:\n raise ValueError(_(\"DeclarativeContent must have a 'content'\"))\n self.content = content\n self.d_artifacts = d_artifacts or []\n self.extra_data = extra_data or {}\n self._future = None\n self._thaw_queue_event = None\n self._resolved = False\n\n @property\n def does_batch(self):\n \"\"\"Whether this content is being awaited on and must therefore not wait forever in batches.\n When overwritten in subclasses, a `True` value must never be turned into `False`.\n \"\"\"\n return not self._resolved and self._future is None\n\n async def resolution(self):\n \"\"\"Coroutine that waits for the content to be saved to database.\n Returns the content unit.\"\"\"\n if self._resolved:\n # Already resolved ~> shortcut\n return self.content\n if self._future is None:\n # We do not yet have a future\n self._future = asyncio.get_event_loop().create_future()\n if self._thaw_queue_event:\n # We have a future now but are still stuck in a queue\n self._thaw_queue_event.set()\n # Now we wait\n return await self._future\n\n def resolve(self):\n \"\"\"Resolve this content unit and notify any waiting tasks.\"\"\"\n self._resolved = True\n if self._future:\n self._future.set_result(self.content)\n self._future = None\n\n def __str__(self):\n return str(self.content.__class__.__name__)\n", "path": "pulpcore/plugin/stages/models.py"}], "after_files": [{"content": "from gettext import gettext as _\n\nimport asyncio\n\nfrom pulpcore.constants import ALL_KNOWN_CONTENT_CHECKSUMS\nfrom pulpcore.plugin.models import Artifact\n\n\nclass DeclarativeArtifact:\n \"\"\"\n Relates an :class:`~pulpcore.plugin.models.Artifact`, how to download it, and its\n `relative_path` used later during publishing.\n\n This is used by the Stages API stages to determine if an\n :class:`~pulpcore.plugin.models.Artifact` is already present and ensure Pulp can download it in\n the future. The `artifact` can be either saved or unsaved. If unsaved, the `artifact` attributes\n may be incomplete because not all digest information can be computed until the\n :class:`~pulpcore.plugin.models.Artifact` is downloaded.\n\n Attributes:\n artifact (:class:`~pulpcore.plugin.models.Artifact`): An\n :class:`~pulpcore.plugin.models.Artifact` either saved or unsaved. 
If unsaved, it\n may have partial digest information attached to it.\n url (str): the url to fetch the :class:`~pulpcore.plugin.models.Artifact` from.\n relative_path (str): the relative_path this :class:`~pulpcore.plugin.models.Artifact`\n should be published at for any Publication.\n remote (:class:`~pulpcore.plugin.models.Remote`): The remote used to fetch this\n :class:`~pulpcore.plugin.models.Artifact`.\n extra_data (dict): A dictionary available for additional data to be stored in.\n deferred_download (bool): Whether this artifact should be downloaded and saved\n in the artifact stages. Defaults to `False`. See :ref:`on-demand-support`.\n\n Raises:\n ValueError: If `artifact`, `url`, or `relative_path` are not specified. If `remote` is not\n specified and `artifact` doesn't have a file.\n \"\"\"\n\n __slots__ = (\"artifact\", \"url\", \"relative_path\", \"remote\", \"extra_data\", \"deferred_download\")\n\n def __init__(\n self,\n artifact=None,\n url=None,\n relative_path=None,\n remote=None,\n extra_data=None,\n deferred_download=False,\n ):\n if not url:\n raise ValueError(_(\"DeclarativeArtifact must have a 'url'\"))\n if not relative_path:\n raise ValueError(_(\"DeclarativeArtifact must have a 'relative_path'\"))\n if not artifact:\n raise ValueError(_(\"DeclarativeArtifact must have a 'artifact'\"))\n if not remote and not artifact.file:\n raise ValueError(\n _(\n \"DeclarativeArtifact must have a 'remote' if the Artifact doesn't \"\n \"have a file backing it.\"\n )\n )\n self.artifact = artifact\n self.url = url\n self.relative_path = relative_path\n self.remote = remote\n self.extra_data = extra_data or {}\n self.deferred_download = deferred_download\n\n async def download(self):\n \"\"\"\n Download content and update the associated Artifact.\n\n Returns:\n Returns the :class:`~pulpcore.plugin.download.DownloadResult` of the Artifact.\n \"\"\"\n expected_digests = {}\n validation_kwargs = {}\n for digest_name in ALL_KNOWN_CONTENT_CHECKSUMS:\n digest_value = getattr(self.artifact, digest_name)\n if digest_value:\n expected_digests[digest_name] = digest_value\n if expected_digests:\n validation_kwargs[\"expected_digests\"] = expected_digests\n if self.artifact.size:\n expected_size = self.artifact.size\n validation_kwargs[\"expected_size\"] = expected_size\n downloader = self.remote.get_downloader(url=self.url, **validation_kwargs)\n # Custom downloaders may need extra information to complete the request.\n download_result = await downloader.run(extra_data=self.extra_data)\n self.artifact = Artifact(**download_result.artifact_attributes, file=download_result.path)\n return download_result\n\n\nclass DeclarativeContent:\n \"\"\"\n Relates a Content unit and zero or more :class:`~pulpcore.plugin.stages.DeclarativeArtifact`\n objects.\n\n This is used by the Stages API stages to determine if a Content unit is already present and\n ensure all of its associated :class:`~pulpcore.plugin.stages.DeclarativeArtifact` objects are\n related correctly. 
The `content` can be either saved or unsaved depending on where in the Stages\n API pipeline this is used.\n\n Attributes:\n content (subclass of :class:`~pulpcore.plugin.models.Content`): A Content unit, possibly\n unsaved\n d_artifacts (list): A list of zero or more\n :class:`~pulpcore.plugin.stages.DeclarativeArtifact` objects associated with `content`.\n extra_data (dict): A dictionary available for additional data to be stored in.\n\n Raises:\n ValueError: If `content` is not specified.\n \"\"\"\n\n __slots__ = (\n \"content\",\n \"d_artifacts\",\n \"extra_data\",\n \"_future\",\n \"_thaw_queue_event\",\n \"_resolved\",\n )\n\n def __init__(self, content=None, d_artifacts=None, extra_data=None):\n if not content:\n raise ValueError(_(\"DeclarativeContent must have a 'content'\"))\n self.content = content\n self.d_artifacts = d_artifacts or []\n self.extra_data = extra_data or {}\n self._future = None\n self._thaw_queue_event = None\n self._resolved = False\n\n @property\n def does_batch(self):\n \"\"\"Whether this content is being awaited on and must therefore not wait forever in batches.\n When overwritten in subclasses, a `True` value must never be turned into `False`.\n \"\"\"\n return self._resolved or self._future is None\n\n async def resolution(self):\n \"\"\"Coroutine that waits for the content to be saved to database.\n Returns the content unit.\"\"\"\n if self._resolved:\n # Already resolved ~> shortcut\n return self.content\n if self._future is None:\n # We do not yet have a future\n self._future = asyncio.get_event_loop().create_future()\n if self._thaw_queue_event:\n # We have a future now but are still stuck in a queue\n self._thaw_queue_event.set()\n # Now we wait\n return await self._future\n\n def resolve(self):\n \"\"\"Resolve this content unit and notify any waiting tasks.\"\"\"\n self._resolved = True\n if self._future:\n self._future.set_result(self.content)\n self._future = None\n\n def __str__(self):\n return str(self.content.__class__.__name__)\n", "path": "pulpcore/plugin/stages/models.py"}]} | 2,158 | 144 |
gh_patches_debug_2038 | rasdani/github-patches | git_diff | RedHatInsights__insights-core-1112 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
hostname parser doesn't handle sos_commands/general/hostname
sos_commands/general/hostname contains the hostname with a newline at the end, which results in a file with two lines. The hostname parser specifically checks for one line but doesn't gracefully handle the problem.
We can update the parser to handle this case and/or investigate whether proc/sys/kernel/hostname is a valid substitute to put in sos_archive.py instead.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `insights/parsers/hostname.py`
Content:
```
1 """
2 hostname - command ``/bin/hostname``
3 ====================================
4
5 This parser simply reads the output of ``/bin/hostname``, which is the
6 configured fully qualified domain name of the client system. It then
7 splits it into ``hostname`` and ``domain`` and stores these as attributes,
8 along with the unmodified name in the ``fqdn`` attribute.
9
10 Examples:
11
12 >>> hostname = shared[Hostname]
13 >>> hostname.fqdn
14 'www.example.com'
15 >>> hostname.hostname
16 'www'
17 >>> hostname.domain
18 'example.com'
19
20 """
21
22 from .. import Parser, parser
23 from insights.specs import Specs
24
25
26 @parser(Specs.hostname)
27 class Hostname(Parser):
28 """Class for parsing ``hostname`` command output.
29
30 Attributes:
31 fqdn: The fully qualified domain name of the host. The same to
32 ``hostname`` when domain part is not set.
33 hostname: The hostname.
34 domain: The domain get from the fqdn.
35 """
36 def parse_content(self, content):
37 raw = None
38 if len(content) == 1:
39 raw = content[0].strip()
40 self.fqdn = raw
41 self.hostname = raw.split(".")[0] if raw else None
42 self.domain = ".".join(raw.split(".")[1:]) if raw else None
43
44 def __str__(self):
45 return "<hostname: {h}, domain: {d}>".format(h=self.hostname, d=self.domain)
46
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/insights/parsers/hostname.py b/insights/parsers/hostname.py
--- a/insights/parsers/hostname.py
+++ b/insights/parsers/hostname.py
@@ -34,6 +34,7 @@
domain: The domain get from the fqdn.
"""
def parse_content(self, content):
+ content = filter(None, content)
raw = None
if len(content) == 1:
raw = content[0].strip()
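As a quick illustration of why dropping empty lines helps, here is a self-contained sketch (not the actual insights parser; `parse_hostname` is a made-up helper) that tolerates the trailing newline produced by `sos_commands/general/hostname`:

```python
def parse_hostname(content):
    """Return (fqdn, hostname, domain) from hostname command output lines.

    A trailing newline in the sos output shows up here as an empty second
    line, so blank lines are filtered out before the single-line check.
    """
    lines = [line for line in content if line.strip()]
    raw = lines[0].strip() if len(lines) == 1 else None
    hostname = raw.split(".")[0] if raw else None
    domain = ".".join(raw.split(".")[1:]) if raw else None
    return raw, hostname, domain


print(parse_hostname(["www.example.com"]))        # normal single-line output
print(parse_hostname(["www.example.com", ""]))    # sos-style output with blank second line
```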
| {"golden_diff": "diff --git a/insights/parsers/hostname.py b/insights/parsers/hostname.py\n--- a/insights/parsers/hostname.py\n+++ b/insights/parsers/hostname.py\n@@ -34,6 +34,7 @@\n domain: The domain get from the fqdn.\n \"\"\"\n def parse_content(self, content):\n+ content = filter(None, content)\n raw = None\n if len(content) == 1:\n raw = content[0].strip()\n", "issue": "hostname parser doesn't handle sos_commands/general/hostname\nsos_commands/general/hostname contains the hostname with a newline at the end, which results in a file with two lines. The hostname parser specifically checks for one line but doesn't gracefully handle the problem.\r\n\r\nWe can update the parser to handle this case and/or investigate whether proc/sys/kernel/hostname is a valid substitute to put in sos_archive.py instead.\n", "before_files": [{"content": "\"\"\"\nhostname - command ``/bin/hostname``\n====================================\n\nThis parser simply reads the output of ``/bin/hostname``, which is the\nconfigured fully qualified domain name of the client system. It then\nsplits it into ``hostname`` and ``domain`` and stores these as attributes,\nalong with the unmodified name in the ``fqdn`` attribute.\n\nExamples:\n\n >>> hostname = shared[Hostname]\n >>> hostname.fqdn\n 'www.example.com'\n >>> hostname.hostname\n 'www'\n >>> hostname.domain\n 'example.com'\n\n\"\"\"\n\nfrom .. import Parser, parser\nfrom insights.specs import Specs\n\n\n@parser(Specs.hostname)\nclass Hostname(Parser):\n \"\"\"Class for parsing ``hostname`` command output.\n\n Attributes:\n fqdn: The fully qualified domain name of the host. The same to\n ``hostname`` when domain part is not set.\n hostname: The hostname.\n domain: The domain get from the fqdn.\n \"\"\"\n def parse_content(self, content):\n raw = None\n if len(content) == 1:\n raw = content[0].strip()\n self.fqdn = raw\n self.hostname = raw.split(\".\")[0] if raw else None\n self.domain = \".\".join(raw.split(\".\")[1:]) if raw else None\n\n def __str__(self):\n return \"<hostname: {h}, domain: {d}>\".format(h=self.hostname, d=self.domain)\n", "path": "insights/parsers/hostname.py"}], "after_files": [{"content": "\"\"\"\nhostname - command ``/bin/hostname``\n====================================\n\nThis parser simply reads the output of ``/bin/hostname``, which is the\nconfigured fully qualified domain name of the client system. It then\nsplits it into ``hostname`` and ``domain`` and stores these as attributes,\nalong with the unmodified name in the ``fqdn`` attribute.\n\nExamples:\n\n >>> hostname = shared[Hostname]\n >>> hostname.fqdn\n 'www.example.com'\n >>> hostname.hostname\n 'www'\n >>> hostname.domain\n 'example.com'\n\n\"\"\"\n\nfrom .. import Parser, parser\nfrom insights.specs import Specs\n\n\n@parser(Specs.hostname)\nclass Hostname(Parser):\n \"\"\"Class for parsing ``hostname`` command output.\n\n Attributes:\n fqdn: The fully qualified domain name of the host. The same to\n ``hostname`` when domain part is not set.\n hostname: The hostname.\n domain: The domain get from the fqdn.\n \"\"\"\n def parse_content(self, content):\n content = filter(None, content)\n raw = None\n if len(content) == 1:\n raw = content[0].strip()\n self.fqdn = raw\n self.hostname = raw.split(\".\")[0] if raw else None\n self.domain = \".\".join(raw.split(\".\")[1:]) if raw else None\n\n def __str__(self):\n return \"<hostname: {h}, domain: {d}>\".format(h=self.hostname, d=self.domain)\n", "path": "insights/parsers/hostname.py"}]} | 740 | 109 |
gh_patches_debug_411 | rasdani/github-patches | git_diff | ocf__ocfweb-48 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Have next 7 days in hours sidebar
This means all our normal hours are visible on a normal week.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ocfweb/main/home.py`
Content:
```
1 from datetime import date
2 from datetime import timedelta
3
4 from django.shortcuts import render_to_response
5 from django.template import RequestContext
6 from django.utils import timezone
7 from ocflib.lab.hours import get_hours
8 from ocflib.lab.staff_hours import get_staff_hours_soonest_first
9
10 from ocfweb.component.blog import get_blog_posts
11 from ocfweb.component.lab_status import get_lab_status
12
13
14 def home(request):
15 hours = [
16 get_hours(date.today() + timedelta(days=i)) for i in range(7)
17 ]
18
19 blog_posts = [
20 post for post
21 in get_blog_posts()
22 if timezone.now() - post.published < timedelta(days=365)
23 ][:2]
24
25 return render_to_response(
26 'home.html',
27 {
28 'fulltitle': 'Open Computing Facility at UC Berkeley',
29 'description': (
30 'The Open Computing Facility is an all-volunteer student '
31 'organization dedicated to free and open-source computing for all UC '
32 'Berkeley students.'''
33 ),
34 'staff_hours': get_staff_hours_soonest_first()[:2],
35 'hours': hours,
36 'today': hours[0],
37 'blog_posts': blog_posts,
38 'lab_status': get_lab_status(),
39 },
40 context_instance=RequestContext(request),
41 )
42
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/ocfweb/main/home.py b/ocfweb/main/home.py
--- a/ocfweb/main/home.py
+++ b/ocfweb/main/home.py
@@ -13,7 +13,7 @@
def home(request):
hours = [
- get_hours(date.today() + timedelta(days=i)) for i in range(7)
+ get_hours(date.today() + timedelta(days=i)) for i in range(5)
]
blog_posts = [
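The hours list in the patch is just a rolling window that starts today. A standalone sketch of the same idea, with a dummy placeholder standing in for `ocflib.lab.hours.get_hours` (the placeholder's return shape is an assumption):

```python
from datetime import date, timedelta


def get_hours(day):
    # Placeholder for ocflib.lab.hours.get_hours; returns a dummy record.
    return {"date": day.isoformat(), "open": "09:00", "close": "18:00"}


def upcoming_hours(days=5):
    # One entry per day starting today, matching the range(5) window above.
    return [get_hours(date.today() + timedelta(days=i)) for i in range(days)]


for entry in upcoming_hours():
    print(entry["date"], entry["open"], "-", entry["close"])
```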
| {"golden_diff": "diff --git a/ocfweb/main/home.py b/ocfweb/main/home.py\n--- a/ocfweb/main/home.py\n+++ b/ocfweb/main/home.py\n@@ -13,7 +13,7 @@\n \n def home(request):\n hours = [\n- get_hours(date.today() + timedelta(days=i)) for i in range(7)\n+ get_hours(date.today() + timedelta(days=i)) for i in range(5)\n ]\n \n blog_posts = [\n", "issue": "Have next 7 days in hours sidebar\nThis means all our normal hours are visible on a normal week.\n\n", "before_files": [{"content": "from datetime import date\nfrom datetime import timedelta\n\nfrom django.shortcuts import render_to_response\nfrom django.template import RequestContext\nfrom django.utils import timezone\nfrom ocflib.lab.hours import get_hours\nfrom ocflib.lab.staff_hours import get_staff_hours_soonest_first\n\nfrom ocfweb.component.blog import get_blog_posts\nfrom ocfweb.component.lab_status import get_lab_status\n\n\ndef home(request):\n hours = [\n get_hours(date.today() + timedelta(days=i)) for i in range(7)\n ]\n\n blog_posts = [\n post for post\n in get_blog_posts()\n if timezone.now() - post.published < timedelta(days=365)\n ][:2]\n\n return render_to_response(\n 'home.html',\n {\n 'fulltitle': 'Open Computing Facility at UC Berkeley',\n 'description': (\n 'The Open Computing Facility is an all-volunteer student '\n 'organization dedicated to free and open-source computing for all UC '\n 'Berkeley students.'''\n ),\n 'staff_hours': get_staff_hours_soonest_first()[:2],\n 'hours': hours,\n 'today': hours[0],\n 'blog_posts': blog_posts,\n 'lab_status': get_lab_status(),\n },\n context_instance=RequestContext(request),\n )\n", "path": "ocfweb/main/home.py"}], "after_files": [{"content": "from datetime import date\nfrom datetime import timedelta\n\nfrom django.shortcuts import render_to_response\nfrom django.template import RequestContext\nfrom django.utils import timezone\nfrom ocflib.lab.hours import get_hours\nfrom ocflib.lab.staff_hours import get_staff_hours_soonest_first\n\nfrom ocfweb.component.blog import get_blog_posts\nfrom ocfweb.component.lab_status import get_lab_status\n\n\ndef home(request):\n hours = [\n get_hours(date.today() + timedelta(days=i)) for i in range(5)\n ]\n\n blog_posts = [\n post for post\n in get_blog_posts()\n if timezone.now() - post.published < timedelta(days=365)\n ][:2]\n\n return render_to_response(\n 'home.html',\n {\n 'fulltitle': 'Open Computing Facility at UC Berkeley',\n 'description': (\n 'The Open Computing Facility is an all-volunteer student '\n 'organization dedicated to free and open-source computing for all UC '\n 'Berkeley students.'''\n ),\n 'staff_hours': get_staff_hours_soonest_first()[:2],\n 'hours': hours,\n 'today': hours[0],\n 'blog_posts': blog_posts,\n 'lab_status': get_lab_status(),\n },\n context_instance=RequestContext(request),\n )\n", "path": "ocfweb/main/home.py"}]} | 633 | 107 |
gh_patches_debug_34747 | rasdani/github-patches | git_diff | Cloud-CV__EvalAI-1035 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add serializers for creating challenge using zip file.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `apps/challenges/serializers.py`
Content:
```
1 from rest_framework import serializers
2
3 from hosts.serializers import ChallengeHostTeamSerializer
4
5 from .models import (
6 Challenge,
7 ChallengePhase,
8 ChallengePhaseSplit,
9 DatasetSplit,)
10
11
12 class ChallengeSerializer(serializers.ModelSerializer):
13
14 is_active = serializers.ReadOnlyField()
15
16 def __init__(self, *args, **kwargs):
17 super(ChallengeSerializer, self).__init__(*args, **kwargs)
18 context = kwargs.get('context')
19 if context and context.get('request').method != 'GET':
20 challenge_host_team = context.get('challenge_host_team')
21 kwargs['data']['creator'] = challenge_host_team.pk
22 else:
23 self.fields['creator'] = ChallengeHostTeamSerializer()
24
25 class Meta:
26 model = Challenge
27 fields = ('id', 'title', 'short_description', 'description', 'terms_and_conditions',
28 'submission_guidelines', 'evaluation_details',
29 'image', 'start_date', 'end_date', 'creator',
30 'published', 'enable_forum', 'anonymous_leaderboard', 'is_active',)
31
32
33 class ChallengePhaseSerializer(serializers.ModelSerializer):
34
35 is_active = serializers.ReadOnlyField()
36
37 def __init__(self, *args, **kwargs):
38 super(ChallengePhaseSerializer, self).__init__(*args, **kwargs)
39 context = kwargs.get('context')
40 if context:
41 challenge = context.get('challenge')
42 kwargs['data']['challenge'] = challenge.pk
43
44 class Meta:
45 model = ChallengePhase
46 fields = ('id', 'name', 'description', 'leaderboard_public', 'start_date',
47 'end_date', 'challenge', 'max_submissions_per_day', 'max_submissions',
48 'is_public', 'is_active', 'codename')
49
50
51 class DatasetSplitSerializer(serializers.ModelSerializer):
52
53 class Meta:
54 model = DatasetSplit
55 fields = '__all__'
56
57
58 class ChallengePhaseSplitSerializer(serializers.ModelSerializer):
59 """Serialize the ChallengePhaseSplits Model"""
60
61 dataset_split_name = serializers.SerializerMethodField()
62 challenge_phase_name = serializers.SerializerMethodField()
63
64 class Meta:
65 model = ChallengePhaseSplit
66 fields = '__all__'
67 fields = ('id', 'dataset_split', 'challenge_phase', 'challenge_phase_name', 'dataset_split_name', 'visibility')
68
69 def get_dataset_split_name(self, obj):
70 return obj.dataset_split.name
71
72 def get_challenge_phase_name(self, obj):
73 return obj.challenge_phase.name
74
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/apps/challenges/serializers.py b/apps/challenges/serializers.py
--- a/apps/challenges/serializers.py
+++ b/apps/challenges/serializers.py
@@ -2,11 +2,12 @@
from hosts.serializers import ChallengeHostTeamSerializer
-from .models import (
- Challenge,
+from .models import (Challenge,
+ ChallengeConfiguration,
ChallengePhase,
ChallengePhaseSplit,
- DatasetSplit,)
+ DatasetSplit,
+ Leaderboard,)
class ChallengeSerializer(serializers.ModelSerializer):
@@ -63,7 +64,6 @@
class Meta:
model = ChallengePhaseSplit
- fields = '__all__'
fields = ('id', 'dataset_split', 'challenge_phase', 'challenge_phase_name', 'dataset_split_name', 'visibility')
def get_dataset_split_name(self, obj):
@@ -71,3 +71,48 @@
def get_challenge_phase_name(self, obj):
return obj.challenge_phase.name
+
+
+class ChallengeConfigSerializer(serializers.ModelSerializer):
+ """
+ Serialize the ChallengeConfiguration Model.
+ """
+ def __init__(self, *args, **kwargs):
+ super(ChallengeConfigSerializer, self).__init__(*args, **kwargs)
+ context = kwargs.get('context')
+ if context:
+ user = context.get('user')
+ kwargs['data']['user'] = user.pk
+
+ class Meta:
+ model = ChallengeConfiguration
+ fields = ('zip_configuration', 'user',)
+
+
+class LeaderboardSerializer(serializers.ModelSerializer):
+ """
+ Serialize the Leaderboard Model.
+ """
+ class Meta:
+ model = Leaderboard
+ fields = '__all__'
+
+
+class ZipChallengeSerializer(ChallengeSerializer):
+ """
+ Serializer used for creating challenge through zip file.
+ """
+ class Meta:
+ model = Challenge
+ fields = ('id', 'title', 'short_description', 'description', 'terms_and_conditions',
+ 'submission_guidelines', 'start_date', 'end_date', 'creator',
+ 'published', 'enable_forum', 'anonymous_leaderboard', 'is_active',)
+
+
+class ZipChallengePhaseSplitSerializer(serializers.ModelSerializer):
+ """
+ Serializer used for creating challenge phase split through zip file.
+ """
+ class Meta:
+ model = ChallengePhaseSplit
+ fields = '__all__'
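The new serializers inject the requesting user from the DRF context into the incoming data before validation. A minimal stand-in (plain Python, no Django required; `FakeConfigSerializer` and `FakeUser` are assumptions) that mimics that pattern:

```python
class FakeConfigSerializer:
    """Stand-in mimicking ChallengeConfigSerializer's context handling."""

    def __init__(self, *args, **kwargs):
        context = kwargs.get('context')
        if context:
            user = context.get('user')
            # Copy the requesting user's pk into the incoming data, as the
            # real serializer does before validation.
            kwargs['data']['user'] = user.pk if user else None
        self.initial_data = kwargs.get('data')


class FakeUser:
    pk = 42


serializer = FakeConfigSerializer(
    data={'zip_configuration': 'challenge_config.zip'},
    context={'user': FakeUser()},
)
print(serializer.initial_data)   # {'zip_configuration': 'challenge_config.zip', 'user': 42}
```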
| {"golden_diff": "diff --git a/apps/challenges/serializers.py b/apps/challenges/serializers.py\n--- a/apps/challenges/serializers.py\n+++ b/apps/challenges/serializers.py\n@@ -2,11 +2,12 @@\n \n from hosts.serializers import ChallengeHostTeamSerializer\n \n-from .models import (\n- Challenge,\n+from .models import (Challenge,\n+ ChallengeConfiguration,\n ChallengePhase,\n ChallengePhaseSplit,\n- DatasetSplit,)\n+ DatasetSplit,\n+ Leaderboard,)\n \n \n class ChallengeSerializer(serializers.ModelSerializer):\n@@ -63,7 +64,6 @@\n \n class Meta:\n model = ChallengePhaseSplit\n- fields = '__all__'\n fields = ('id', 'dataset_split', 'challenge_phase', 'challenge_phase_name', 'dataset_split_name', 'visibility')\n \n def get_dataset_split_name(self, obj):\n@@ -71,3 +71,48 @@\n \n def get_challenge_phase_name(self, obj):\n return obj.challenge_phase.name\n+\n+\n+class ChallengeConfigSerializer(serializers.ModelSerializer):\n+ \"\"\"\n+ Serialize the ChallengeConfiguration Model.\n+ \"\"\"\n+ def __init__(self, *args, **kwargs):\n+ super(ChallengeConfigSerializer, self).__init__(*args, **kwargs)\n+ context = kwargs.get('context')\n+ if context:\n+ user = context.get('user')\n+ kwargs['data']['user'] = user.pk\n+\n+ class Meta:\n+ model = ChallengeConfiguration\n+ fields = ('zip_configuration', 'user',)\n+\n+\n+class LeaderboardSerializer(serializers.ModelSerializer):\n+ \"\"\"\n+ Serialize the Leaderboard Model.\n+ \"\"\"\n+ class Meta:\n+ model = Leaderboard\n+ fields = '__all__'\n+\n+\n+class ZipChallengeSerializer(ChallengeSerializer):\n+ \"\"\"\n+ Serializer used for creating challenge through zip file.\n+ \"\"\"\n+ class Meta:\n+ model = Challenge\n+ fields = ('id', 'title', 'short_description', 'description', 'terms_and_conditions',\n+ 'submission_guidelines', 'start_date', 'end_date', 'creator',\n+ 'published', 'enable_forum', 'anonymous_leaderboard', 'is_active',)\n+\n+\n+class ZipChallengePhaseSplitSerializer(serializers.ModelSerializer):\n+ \"\"\"\n+ Serializer used for creating challenge phase split through zip file.\n+ \"\"\"\n+ class Meta:\n+ model = ChallengePhaseSplit\n+ fields = '__all__'\n", "issue": "Add serializers for creating challenge using zip file.\n\n", "before_files": [{"content": "from rest_framework import serializers\n\nfrom hosts.serializers import ChallengeHostTeamSerializer\n\nfrom .models import (\n Challenge,\n ChallengePhase,\n ChallengePhaseSplit,\n DatasetSplit,)\n\n\nclass ChallengeSerializer(serializers.ModelSerializer):\n\n is_active = serializers.ReadOnlyField()\n\n def __init__(self, *args, **kwargs):\n super(ChallengeSerializer, self).__init__(*args, **kwargs)\n context = kwargs.get('context')\n if context and context.get('request').method != 'GET':\n challenge_host_team = context.get('challenge_host_team')\n kwargs['data']['creator'] = challenge_host_team.pk\n else:\n self.fields['creator'] = ChallengeHostTeamSerializer()\n\n class Meta:\n model = Challenge\n fields = ('id', 'title', 'short_description', 'description', 'terms_and_conditions',\n 'submission_guidelines', 'evaluation_details',\n 'image', 'start_date', 'end_date', 'creator',\n 'published', 'enable_forum', 'anonymous_leaderboard', 'is_active',)\n\n\nclass ChallengePhaseSerializer(serializers.ModelSerializer):\n\n is_active = serializers.ReadOnlyField()\n\n def __init__(self, *args, **kwargs):\n super(ChallengePhaseSerializer, self).__init__(*args, **kwargs)\n context = kwargs.get('context')\n if context:\n challenge = context.get('challenge')\n kwargs['data']['challenge'] = challenge.pk\n\n 
class Meta:\n model = ChallengePhase\n fields = ('id', 'name', 'description', 'leaderboard_public', 'start_date',\n 'end_date', 'challenge', 'max_submissions_per_day', 'max_submissions',\n 'is_public', 'is_active', 'codename')\n\n\nclass DatasetSplitSerializer(serializers.ModelSerializer):\n\n class Meta:\n model = DatasetSplit\n fields = '__all__'\n\n\nclass ChallengePhaseSplitSerializer(serializers.ModelSerializer):\n \"\"\"Serialize the ChallengePhaseSplits Model\"\"\"\n\n dataset_split_name = serializers.SerializerMethodField()\n challenge_phase_name = serializers.SerializerMethodField()\n\n class Meta:\n model = ChallengePhaseSplit\n fields = '__all__'\n fields = ('id', 'dataset_split', 'challenge_phase', 'challenge_phase_name', 'dataset_split_name', 'visibility')\n\n def get_dataset_split_name(self, obj):\n return obj.dataset_split.name\n\n def get_challenge_phase_name(self, obj):\n return obj.challenge_phase.name\n", "path": "apps/challenges/serializers.py"}], "after_files": [{"content": "from rest_framework import serializers\n\nfrom hosts.serializers import ChallengeHostTeamSerializer\n\nfrom .models import (Challenge,\n ChallengeConfiguration,\n ChallengePhase,\n ChallengePhaseSplit,\n DatasetSplit,\n Leaderboard,)\n\n\nclass ChallengeSerializer(serializers.ModelSerializer):\n\n is_active = serializers.ReadOnlyField()\n\n def __init__(self, *args, **kwargs):\n super(ChallengeSerializer, self).__init__(*args, **kwargs)\n context = kwargs.get('context')\n if context and context.get('request').method != 'GET':\n challenge_host_team = context.get('challenge_host_team')\n kwargs['data']['creator'] = challenge_host_team.pk\n else:\n self.fields['creator'] = ChallengeHostTeamSerializer()\n\n class Meta:\n model = Challenge\n fields = ('id', 'title', 'short_description', 'description', 'terms_and_conditions',\n 'submission_guidelines', 'evaluation_details',\n 'image', 'start_date', 'end_date', 'creator',\n 'published', 'enable_forum', 'anonymous_leaderboard', 'is_active',)\n\n\nclass ChallengePhaseSerializer(serializers.ModelSerializer):\n\n is_active = serializers.ReadOnlyField()\n\n def __init__(self, *args, **kwargs):\n super(ChallengePhaseSerializer, self).__init__(*args, **kwargs)\n context = kwargs.get('context')\n if context:\n challenge = context.get('challenge')\n kwargs['data']['challenge'] = challenge.pk\n\n class Meta:\n model = ChallengePhase\n fields = ('id', 'name', 'description', 'leaderboard_public', 'start_date',\n 'end_date', 'challenge', 'max_submissions_per_day', 'max_submissions',\n 'is_public', 'is_active', 'codename')\n\n\nclass DatasetSplitSerializer(serializers.ModelSerializer):\n\n class Meta:\n model = DatasetSplit\n fields = '__all__'\n\n\nclass ChallengePhaseSplitSerializer(serializers.ModelSerializer):\n \"\"\"Serialize the ChallengePhaseSplits Model\"\"\"\n\n dataset_split_name = serializers.SerializerMethodField()\n challenge_phase_name = serializers.SerializerMethodField()\n\n class Meta:\n model = ChallengePhaseSplit\n fields = ('id', 'dataset_split', 'challenge_phase', 'challenge_phase_name', 'dataset_split_name', 'visibility')\n\n def get_dataset_split_name(self, obj):\n return obj.dataset_split.name\n\n def get_challenge_phase_name(self, obj):\n return obj.challenge_phase.name\n\n\nclass ChallengeConfigSerializer(serializers.ModelSerializer):\n \"\"\"\n Serialize the ChallengeConfiguration Model.\n \"\"\"\n def __init__(self, *args, **kwargs):\n super(ChallengeConfigSerializer, self).__init__(*args, **kwargs)\n context = kwargs.get('context')\n if 
context:\n user = context.get('user')\n kwargs['data']['user'] = user.pk\n\n class Meta:\n model = ChallengeConfiguration\n fields = ('zip_configuration', 'user',)\n\n\nclass LeaderboardSerializer(serializers.ModelSerializer):\n \"\"\"\n Serialize the Leaderboard Model.\n \"\"\"\n class Meta:\n model = Leaderboard\n fields = '__all__'\n\n\nclass ZipChallengeSerializer(ChallengeSerializer):\n \"\"\"\n Serializer used for creating challenge through zip file.\n \"\"\"\n class Meta:\n model = Challenge\n fields = ('id', 'title', 'short_description', 'description', 'terms_and_conditions',\n 'submission_guidelines', 'start_date', 'end_date', 'creator',\n 'published', 'enable_forum', 'anonymous_leaderboard', 'is_active',)\n\n\nclass ZipChallengePhaseSplitSerializer(serializers.ModelSerializer):\n \"\"\"\n Serializer used for creating challenge phase split through zip file.\n \"\"\"\n class Meta:\n model = ChallengePhaseSplit\n fields = '__all__'\n", "path": "apps/challenges/serializers.py"}]} | 924 | 525 |
gh_patches_debug_15659 | rasdani/github-patches | git_diff | spack__spack-2119 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Metis is broken
```
$ spack install [email protected]
==> Installing metis
==> cmake is already installed in /Users/davydden/spack/opt/spack/darwin-sierra-x86_64/clang-8.0.0-apple/cmake-3.6.1-knsjcg5wlbnsy27ja5ngp5xdftflnv4a
==> Trying to fetch from file:///Users/davydden/spack/var/spack/cache/metis/metis-5.0.2.tar.gz
curl: (37) Couldn't open file /Users/davydden/spack/var/spack/cache/metis/metis-5.0.2.tar.gz
==> Fetching from file:///Users/davydden/spack/var/spack/cache/metis/metis-5.0.2.tar.gz failed.
==> Trying to fetch from http://glaros.dtc.umn.edu/gkhome/fetch/sw/metis/metis-5.0.2.tar.gz
######################################################################## 100.0%
==> Staging archive: /Users/davydden/spack/var/spack/stage/metis-5.0.2-36gz5ygfj3o7ptkltsuh3t2cezy56g3y/metis-5.0.2.tar.gz
==> Created stage in /Users/davydden/spack/var/spack/stage/metis-5.0.2-36gz5ygfj3o7ptkltsuh3t2cezy56g3y
==> Applied patch install_gklib_defs_rename.patch
==> Ran patch() for metis
==> Building metis [Package]
==> Executing phase : 'install'
Traceback (most recent call last):
File "/Users/davydden/spack/bin/spack", line 202, in <module>
main()
File "/Users/davydden/spack/bin/spack", line 179, in main
return_val = command(parser, args)
File "/Users/davydden/spack/lib/spack/spack/cmd/install.py", line 117, in install
package.do_install(**kwargs)
File "/Users/davydden/spack/lib/spack/spack/package.py", line 1268, in do_install
spack.build_environment.fork(self, build_process, dirty=dirty)
File "/Users/davydden/spack/lib/spack/spack/build_environment.py", line 542, in fork
raise exception
AttributeError: 'functools.partial' object has no attribute '__module__'
```
@alalazo any guesses here? Maybe related to https://github.com/LLNL/spack/pull/1186. But I don't see any clues in `package.py` of `metis`.
--- END ISSUE ---
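The `AttributeError` in the traceback comes from code that inspects function metadata (such as `__module__`) on the callable produced by `SpecMultiMethod.__get__`, which returns a bare `functools.partial`. Below is a minimal, self-contained sketch of that Python behaviour and of the `functools.wraps` workaround — illustrative only, with invented function names, not Spack code.

```python
# Standalone illustration (not Spack code): a bare functools.partial does
# not carry the wrapped function's metadata, while functools.wraps copies
# it across. Names here are invented for the example.
import functools


def install(pkg, prefix):
    """Toy stand-in for a package's install() method."""
    return "%s installed into %s" % (pkg, prefix)


bare = functools.partial(install, "metis")
# __name__ is always missing on a partial; __module__ was also missing on
# the interpreter in the report above, hence the AttributeError.
print(getattr(bare, "__name__", "<missing>"))    # <missing>
print(getattr(bare, "__module__", "<missing>"))

# Copying metadata with functools.wraps makes the partial look like the
# original function to callers that introspect it.
wrapped = functools.wraps(install)(functools.partial(install, "metis"))
print(wrapped.__name__, wrapped.__module__)      # install __main__
print(wrapped("/opt/metis"))                     # metis installed into /opt/metis
```

The patch further down in this record applies the same `functools.wraps` pattern inside `SpecMultiMethod.__get__`.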
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `lib/spack/spack/multimethod.py`
Content:
```
1 ##############################################################################
2 # Copyright (c) 2013-2016, Lawrence Livermore National Security, LLC.
3 # Produced at the Lawrence Livermore National Laboratory.
4 #
5 # This file is part of Spack.
6 # Created by Todd Gamblin, [email protected], All rights reserved.
7 # LLNL-CODE-647188
8 #
9 # For details, see https://github.com/llnl/spack
10 # Please also see the LICENSE file for our notice and the LGPL.
11 #
12 # This program is free software; you can redistribute it and/or modify
13 # it under the terms of the GNU Lesser General Public License (as
14 # published by the Free Software Foundation) version 2.1, February 1999.
15 #
16 # This program is distributed in the hope that it will be useful, but
17 # WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
18 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
19 # conditions of the GNU Lesser General Public License for more details.
20 #
21 # You should have received a copy of the GNU Lesser General Public
22 # License along with this program; if not, write to the Free Software
23 # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
24 ##############################################################################
25 """This module contains utilities for using multi-methods in
26 spack. You can think of multi-methods like overloaded methods --
27 they're methods with the same name, and we need to select a version
28 of the method based on some criteria. e.g., for overloaded
29 methods, you would select a version of the method to call based on
30 the types of its arguments.
31
32 In spack, multi-methods are used to ease the life of package
33 authors. They allow methods like install() (or other methods
34 called by install()) to declare multiple versions to be called when
35 the package is instantiated with different specs. e.g., if the
36 package is built with OpenMPI on x86_64,, you might want to call a
37 different install method than if it was built for mpich2 on
38 BlueGene/Q. Likewise, you might want to do a different type of
39 install for different versions of the package.
40
41 Multi-methods provide a simple decorator-based syntax for this that
42 avoids overly complicated rat nests of if statements. Obviously,
43 depending on the scenario, regular old conditionals might be clearer,
44 so package authors should use their judgement.
45 """
46 import functools
47
48 from llnl.util.lang import *
49
50 import spack.architecture
51 import spack.error
52 from spack.spec import parse_anonymous_spec
53
54
55 class SpecMultiMethod(object):
56 """This implements a multi-method for Spack specs. Packages are
57 instantiated with a particular spec, and you may want to
58 execute different versions of methods based on what the spec
59 looks like. For example, you might want to call a different
60 version of install() for one platform than you call on another.
61
62 The SpecMultiMethod class implements a callable object that
63 handles method dispatch. When it is called, it looks through
64 registered methods and their associated specs, and it tries
65 to find one that matches the package's spec. If it finds one
66 (and only one), it will call that method.
67
68 The package author is responsible for ensuring that only one
69 condition on multi-methods ever evaluates to true. If
70 multiple methods evaluate to true, this will raise an
71 exception.
72
73 This is intended for use with decorators (see below). The
74 decorator (see docs below) creates SpecMultiMethods and
75 registers method versions with them.
76
77 To register a method, you can do something like this:
78 mm = SpecMultiMethod()
79 mm.register("^chaos_5_x86_64_ib", some_method)
80
81 The object registered needs to be a Spec or some string that
82 will parse to be a valid spec.
83
84 When the mm is actually called, it selects a version of the
85 method to call based on the sys_type of the object it is
86 called on.
87
88 See the docs for decorators below for more details.
89 """
90
91 def __init__(self, default=None):
92 self.method_list = []
93 self.default = default
94 if default:
95 functools.update_wrapper(self, default)
96
97 def register(self, spec, method):
98 """Register a version of a method for a particular sys_type."""
99 self.method_list.append((spec, method))
100
101 if not hasattr(self, '__name__'):
102 functools.update_wrapper(self, method)
103 else:
104 assert(self.__name__ == method.__name__)
105
106 def __get__(self, obj, objtype):
107 """This makes __call__ support instance methods."""
108 return functools.partial(self.__call__, obj)
109
110 def __call__(self, package_self, *args, **kwargs):
111 """Find the first method with a spec that matches the
112 package's spec. If none is found, call the default
113 or if there is none, then raise a NoSuchMethodError.
114 """
115 for spec, method in self.method_list:
116 if package_self.spec.satisfies(spec):
117 return method(package_self, *args, **kwargs)
118
119 if self.default:
120 return self.default(package_self, *args, **kwargs)
121 else:
122 raise NoSuchMethodError(
123 type(package_self), self.__name__, spec,
124 [m[0] for m in self.method_list])
125
126 def __str__(self):
127 return "SpecMultiMethod {\n\tdefault: %s,\n\tspecs: %s\n}" % (
128 self.default, self.method_list)
129
130
131 class when(object):
132 """This annotation lets packages declare multiple versions of
133 methods like install() that depend on the package's spec.
134 For example:
135
136 .. code-block:: python
137
138 class SomePackage(Package):
139 ...
140
141 def install(self, prefix):
142 # Do default install
143
144 @when('arch=chaos_5_x86_64_ib')
145 def install(self, prefix):
146 # This will be executed instead of the default install if
147 # the package's platform() is chaos_5_x86_64_ib.
148
149 @when('arch=bgqos_0")
150 def install(self, prefix):
151 # This will be executed if the package's sys_type is bgqos_0
152
153 This allows each package to have a default version of install() AND
154 specialized versions for particular platforms. The version that is
155 called depends on the architecutre of the instantiated package.
156
157 Note that this works for methods other than install, as well. So,
158 if you only have part of the install that is platform specific, you
159 could do this:
160
161 .. code-block:: python
162
163 class SomePackage(Package):
164 ...
165 # virtual dependence on MPI.
166 # could resolve to mpich, mpich2, OpenMPI
167 depends_on('mpi')
168
169 def setup(self):
170 # do nothing in the default case
171 pass
172
173 @when('^openmpi')
174 def setup(self):
175 # do something special when this is built with OpenMPI for
176 # its MPI implementations.
177
178
179 def install(self, prefix):
180 # Do common install stuff
181 self.setup()
182 # Do more common install stuff
183
184 There must be one (and only one) @when clause that matches the
185 package's spec. If there is more than one, or if none match,
186 then the method will raise an exception when it's called.
187
188 Note that the default version of decorated methods must
189 *always* come first. Otherwise it will override all of the
190 platform-specific versions. There's not much we can do to get
191 around this because of the way decorators work.
192 """
193
194 def __init__(self, spec):
195 pkg = get_calling_module_name()
196 if spec is True:
197 spec = pkg
198 self.spec = (parse_anonymous_spec(spec, pkg)
199 if spec is not False else None)
200
201 def __call__(self, method):
202 # Get the first definition of the method in the calling scope
203 original_method = caller_locals().get(method.__name__)
204
205 # Create a multimethod out of the original method if it
206 # isn't one already.
207 if not type(original_method) == SpecMultiMethod:
208 original_method = SpecMultiMethod(original_method)
209
210 if self.spec is not None:
211 original_method.register(self.spec, method)
212
213 return original_method
214
215
216 class MultiMethodError(spack.error.SpackError):
217 """Superclass for multimethod dispatch errors"""
218
219 def __init__(self, message):
220 super(MultiMethodError, self).__init__(message)
221
222
223 class NoSuchMethodError(spack.error.SpackError):
224 """Raised when we can't find a version of a multi-method."""
225
226 def __init__(self, cls, method_name, spec, possible_specs):
227 super(NoSuchMethodError, self).__init__(
228 "Package %s does not support %s called with %s. Options are: %s"
229 % (cls.__name__, method_name, spec,
230 ", ".join(str(s) for s in possible_specs)))
231
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/lib/spack/spack/multimethod.py b/lib/spack/spack/multimethod.py
--- a/lib/spack/spack/multimethod.py
+++ b/lib/spack/spack/multimethod.py
@@ -105,7 +105,17 @@
def __get__(self, obj, objtype):
"""This makes __call__ support instance methods."""
- return functools.partial(self.__call__, obj)
+ # Method_list is a list of tuples (constraint, method)
+ # Here we are going to assume that we have at least one
+ # element in the list. The first registered function
+ # will be the one 'wrapped'.
+ wrapped_method = self.method_list[0][1]
+ # Call functools.wraps manually to get all the attributes
+ # we need to be disguised as the wrapped_method
+ func = functools.wraps(wrapped_method)(
+ functools.partial(self.__call__, obj)
+ )
+ return func
def __call__(self, package_self, *args, **kwargs):
"""Find the first method with a spec that matches the
| {"golden_diff": "diff --git a/lib/spack/spack/multimethod.py b/lib/spack/spack/multimethod.py\n--- a/lib/spack/spack/multimethod.py\n+++ b/lib/spack/spack/multimethod.py\n@@ -105,7 +105,17 @@\n \n def __get__(self, obj, objtype):\n \"\"\"This makes __call__ support instance methods.\"\"\"\n- return functools.partial(self.__call__, obj)\n+ # Method_list is a list of tuples (constraint, method)\n+ # Here we are going to assume that we have at least one\n+ # element in the list. The first registered function\n+ # will be the one 'wrapped'.\n+ wrapped_method = self.method_list[0][1]\n+ # Call functools.wraps manually to get all the attributes\n+ # we need to be disguised as the wrapped_method\n+ func = functools.wraps(wrapped_method)(\n+ functools.partial(self.__call__, obj)\n+ )\n+ return func\n \n def __call__(self, package_self, *args, **kwargs):\n \"\"\"Find the first method with a spec that matches the\n", "issue": "Metis is broken\n```\n$ spack install [email protected]\n==> Installing metis\n==> cmake is already installed in /Users/davydden/spack/opt/spack/darwin-sierra-x86_64/clang-8.0.0-apple/cmake-3.6.1-knsjcg5wlbnsy27ja5ngp5xdftflnv4a\n==> Trying to fetch from file:///Users/davydden/spack/var/spack/cache/metis/metis-5.0.2.tar.gz\ncurl: (37) Couldn't open file /Users/davydden/spack/var/spack/cache/metis/metis-5.0.2.tar.gz\n==> Fetching from file:///Users/davydden/spack/var/spack/cache/metis/metis-5.0.2.tar.gz failed.\n==> Trying to fetch from http://glaros.dtc.umn.edu/gkhome/fetch/sw/metis/metis-5.0.2.tar.gz\n######################################################################## 100.0%\n==> Staging archive: /Users/davydden/spack/var/spack/stage/metis-5.0.2-36gz5ygfj3o7ptkltsuh3t2cezy56g3y/metis-5.0.2.tar.gz\n==> Created stage in /Users/davydden/spack/var/spack/stage/metis-5.0.2-36gz5ygfj3o7ptkltsuh3t2cezy56g3y\n==> Applied patch install_gklib_defs_rename.patch\n==> Ran patch() for metis\n==> Building metis [Package]\n==> Executing phase : 'install'\nTraceback (most recent call last):\n File \"/Users/davydden/spack/bin/spack\", line 202, in <module>\n main()\n File \"/Users/davydden/spack/bin/spack\", line 179, in main\n return_val = command(parser, args)\n File \"/Users/davydden/spack/lib/spack/spack/cmd/install.py\", line 117, in install\n package.do_install(**kwargs)\n File \"/Users/davydden/spack/lib/spack/spack/package.py\", line 1268, in do_install\n spack.build_environment.fork(self, build_process, dirty=dirty)\n File \"/Users/davydden/spack/lib/spack/spack/build_environment.py\", line 542, in fork\n raise exception\nAttributeError: 'functools.partial' object has no attribute '__module__'\n```\n\n@alalazo any guesses here? Maybe related to https://github.com/LLNL/spack/pull/1186. 
But I don't see any clues in `package.py` of `metis`.\n\n", "before_files": [{"content": "##############################################################################\n# Copyright (c) 2013-2016, Lawrence Livermore National Security, LLC.\n# Produced at the Lawrence Livermore National Laboratory.\n#\n# This file is part of Spack.\n# Created by Todd Gamblin, [email protected], All rights reserved.\n# LLNL-CODE-647188\n#\n# For details, see https://github.com/llnl/spack\n# Please also see the LICENSE file for our notice and the LGPL.\n#\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU Lesser General Public License (as\n# published by the Free Software Foundation) version 2.1, February 1999.\n#\n# This program is distributed in the hope that it will be useful, but\n# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and\n# conditions of the GNU Lesser General Public License for more details.\n#\n# You should have received a copy of the GNU Lesser General Public\n# License along with this program; if not, write to the Free Software\n# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA\n##############################################################################\n\"\"\"This module contains utilities for using multi-methods in\nspack. You can think of multi-methods like overloaded methods --\nthey're methods with the same name, and we need to select a version\nof the method based on some criteria. e.g., for overloaded\nmethods, you would select a version of the method to call based on\nthe types of its arguments.\n\nIn spack, multi-methods are used to ease the life of package\nauthors. They allow methods like install() (or other methods\ncalled by install()) to declare multiple versions to be called when\nthe package is instantiated with different specs. e.g., if the\npackage is built with OpenMPI on x86_64,, you might want to call a\ndifferent install method than if it was built for mpich2 on\nBlueGene/Q. Likewise, you might want to do a different type of\ninstall for different versions of the package.\n\nMulti-methods provide a simple decorator-based syntax for this that\navoids overly complicated rat nests of if statements. Obviously,\ndepending on the scenario, regular old conditionals might be clearer,\nso package authors should use their judgement.\n\"\"\"\nimport functools\n\nfrom llnl.util.lang import *\n\nimport spack.architecture\nimport spack.error\nfrom spack.spec import parse_anonymous_spec\n\n\nclass SpecMultiMethod(object):\n \"\"\"This implements a multi-method for Spack specs. Packages are\n instantiated with a particular spec, and you may want to\n execute different versions of methods based on what the spec\n looks like. For example, you might want to call a different\n version of install() for one platform than you call on another.\n\n The SpecMultiMethod class implements a callable object that\n handles method dispatch. When it is called, it looks through\n registered methods and their associated specs, and it tries\n to find one that matches the package's spec. If it finds one\n (and only one), it will call that method.\n\n The package author is responsible for ensuring that only one\n condition on multi-methods ever evaluates to true. If\n multiple methods evaluate to true, this will raise an\n exception.\n\n This is intended for use with decorators (see below). 
The\n decorator (see docs below) creates SpecMultiMethods and\n registers method versions with them.\n\n To register a method, you can do something like this:\n mm = SpecMultiMethod()\n mm.register(\"^chaos_5_x86_64_ib\", some_method)\n\n The object registered needs to be a Spec or some string that\n will parse to be a valid spec.\n\n When the mm is actually called, it selects a version of the\n method to call based on the sys_type of the object it is\n called on.\n\n See the docs for decorators below for more details.\n \"\"\"\n\n def __init__(self, default=None):\n self.method_list = []\n self.default = default\n if default:\n functools.update_wrapper(self, default)\n\n def register(self, spec, method):\n \"\"\"Register a version of a method for a particular sys_type.\"\"\"\n self.method_list.append((spec, method))\n\n if not hasattr(self, '__name__'):\n functools.update_wrapper(self, method)\n else:\n assert(self.__name__ == method.__name__)\n\n def __get__(self, obj, objtype):\n \"\"\"This makes __call__ support instance methods.\"\"\"\n return functools.partial(self.__call__, obj)\n\n def __call__(self, package_self, *args, **kwargs):\n \"\"\"Find the first method with a spec that matches the\n package's spec. If none is found, call the default\n or if there is none, then raise a NoSuchMethodError.\n \"\"\"\n for spec, method in self.method_list:\n if package_self.spec.satisfies(spec):\n return method(package_self, *args, **kwargs)\n\n if self.default:\n return self.default(package_self, *args, **kwargs)\n else:\n raise NoSuchMethodError(\n type(package_self), self.__name__, spec,\n [m[0] for m in self.method_list])\n\n def __str__(self):\n return \"SpecMultiMethod {\\n\\tdefault: %s,\\n\\tspecs: %s\\n}\" % (\n self.default, self.method_list)\n\n\nclass when(object):\n \"\"\"This annotation lets packages declare multiple versions of\n methods like install() that depend on the package's spec.\n For example:\n\n .. code-block:: python\n\n class SomePackage(Package):\n ...\n\n def install(self, prefix):\n # Do default install\n\n @when('arch=chaos_5_x86_64_ib')\n def install(self, prefix):\n # This will be executed instead of the default install if\n # the package's platform() is chaos_5_x86_64_ib.\n\n @when('arch=bgqos_0\")\n def install(self, prefix):\n # This will be executed if the package's sys_type is bgqos_0\n\n This allows each package to have a default version of install() AND\n specialized versions for particular platforms. The version that is\n called depends on the architecutre of the instantiated package.\n\n Note that this works for methods other than install, as well. So,\n if you only have part of the install that is platform specific, you\n could do this:\n\n .. code-block:: python\n\n class SomePackage(Package):\n ...\n # virtual dependence on MPI.\n # could resolve to mpich, mpich2, OpenMPI\n depends_on('mpi')\n\n def setup(self):\n # do nothing in the default case\n pass\n\n @when('^openmpi')\n def setup(self):\n # do something special when this is built with OpenMPI for\n # its MPI implementations.\n\n\n def install(self, prefix):\n # Do common install stuff\n self.setup()\n # Do more common install stuff\n\n There must be one (and only one) @when clause that matches the\n package's spec. If there is more than one, or if none match,\n then the method will raise an exception when it's called.\n\n Note that the default version of decorated methods must\n *always* come first. Otherwise it will override all of the\n platform-specific versions. 
There's not much we can do to get\n around this because of the way decorators work.\n \"\"\"\n\n def __init__(self, spec):\n pkg = get_calling_module_name()\n if spec is True:\n spec = pkg\n self.spec = (parse_anonymous_spec(spec, pkg)\n if spec is not False else None)\n\n def __call__(self, method):\n # Get the first definition of the method in the calling scope\n original_method = caller_locals().get(method.__name__)\n\n # Create a multimethod out of the original method if it\n # isn't one already.\n if not type(original_method) == SpecMultiMethod:\n original_method = SpecMultiMethod(original_method)\n\n if self.spec is not None:\n original_method.register(self.spec, method)\n\n return original_method\n\n\nclass MultiMethodError(spack.error.SpackError):\n \"\"\"Superclass for multimethod dispatch errors\"\"\"\n\n def __init__(self, message):\n super(MultiMethodError, self).__init__(message)\n\n\nclass NoSuchMethodError(spack.error.SpackError):\n \"\"\"Raised when we can't find a version of a multi-method.\"\"\"\n\n def __init__(self, cls, method_name, spec, possible_specs):\n super(NoSuchMethodError, self).__init__(\n \"Package %s does not support %s called with %s. Options are: %s\"\n % (cls.__name__, method_name, spec,\n \", \".join(str(s) for s in possible_specs)))\n", "path": "lib/spack/spack/multimethod.py"}], "after_files": [{"content": "##############################################################################\n# Copyright (c) 2013-2016, Lawrence Livermore National Security, LLC.\n# Produced at the Lawrence Livermore National Laboratory.\n#\n# This file is part of Spack.\n# Created by Todd Gamblin, [email protected], All rights reserved.\n# LLNL-CODE-647188\n#\n# For details, see https://github.com/llnl/spack\n# Please also see the LICENSE file for our notice and the LGPL.\n#\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU Lesser General Public License (as\n# published by the Free Software Foundation) version 2.1, February 1999.\n#\n# This program is distributed in the hope that it will be useful, but\n# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and\n# conditions of the GNU Lesser General Public License for more details.\n#\n# You should have received a copy of the GNU Lesser General Public\n# License along with this program; if not, write to the Free Software\n# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA\n##############################################################################\n\"\"\"This module contains utilities for using multi-methods in\nspack. You can think of multi-methods like overloaded methods --\nthey're methods with the same name, and we need to select a version\nof the method based on some criteria. e.g., for overloaded\nmethods, you would select a version of the method to call based on\nthe types of its arguments.\n\nIn spack, multi-methods are used to ease the life of package\nauthors. They allow methods like install() (or other methods\ncalled by install()) to declare multiple versions to be called when\nthe package is instantiated with different specs. e.g., if the\npackage is built with OpenMPI on x86_64,, you might want to call a\ndifferent install method than if it was built for mpich2 on\nBlueGene/Q. 
Likewise, you might want to do a different type of\ninstall for different versions of the package.\n\nMulti-methods provide a simple decorator-based syntax for this that\navoids overly complicated rat nests of if statements. Obviously,\ndepending on the scenario, regular old conditionals might be clearer,\nso package authors should use their judgement.\n\"\"\"\nimport functools\n\nfrom llnl.util.lang import *\n\nimport spack.architecture\nimport spack.error\nfrom spack.spec import parse_anonymous_spec\n\n\nclass SpecMultiMethod(object):\n \"\"\"This implements a multi-method for Spack specs. Packages are\n instantiated with a particular spec, and you may want to\n execute different versions of methods based on what the spec\n looks like. For example, you might want to call a different\n version of install() for one platform than you call on another.\n\n The SpecMultiMethod class implements a callable object that\n handles method dispatch. When it is called, it looks through\n registered methods and their associated specs, and it tries\n to find one that matches the package's spec. If it finds one\n (and only one), it will call that method.\n\n The package author is responsible for ensuring that only one\n condition on multi-methods ever evaluates to true. If\n multiple methods evaluate to true, this will raise an\n exception.\n\n This is intended for use with decorators (see below). The\n decorator (see docs below) creates SpecMultiMethods and\n registers method versions with them.\n\n To register a method, you can do something like this:\n mm = SpecMultiMethod()\n mm.register(\"^chaos_5_x86_64_ib\", some_method)\n\n The object registered needs to be a Spec or some string that\n will parse to be a valid spec.\n\n When the mm is actually called, it selects a version of the\n method to call based on the sys_type of the object it is\n called on.\n\n See the docs for decorators below for more details.\n \"\"\"\n\n def __init__(self, default=None):\n self.method_list = []\n self.default = default\n if default:\n functools.update_wrapper(self, default)\n\n def register(self, spec, method):\n \"\"\"Register a version of a method for a particular sys_type.\"\"\"\n self.method_list.append((spec, method))\n\n if not hasattr(self, '__name__'):\n functools.update_wrapper(self, method)\n else:\n assert(self.__name__ == method.__name__)\n\n def __get__(self, obj, objtype):\n \"\"\"This makes __call__ support instance methods.\"\"\"\n # Method_list is a list of tuples (constraint, method)\n # Here we are going to assume that we have at least one\n # element in the list. The first registered function\n # will be the one 'wrapped'.\n wrapped_method = self.method_list[0][1]\n # Call functools.wraps manually to get all the attributes\n # we need to be disguised as the wrapped_method\n func = functools.wraps(wrapped_method)(\n functools.partial(self.__call__, obj)\n )\n return func\n\n def __call__(self, package_self, *args, **kwargs):\n \"\"\"Find the first method with a spec that matches the\n package's spec. 
If none is found, call the default\n or if there is none, then raise a NoSuchMethodError.\n \"\"\"\n for spec, method in self.method_list:\n if package_self.spec.satisfies(spec):\n return method(package_self, *args, **kwargs)\n\n if self.default:\n return self.default(package_self, *args, **kwargs)\n else:\n raise NoSuchMethodError(\n type(package_self), self.__name__, spec,\n [m[0] for m in self.method_list])\n\n def __str__(self):\n return \"SpecMultiMethod {\\n\\tdefault: %s,\\n\\tspecs: %s\\n}\" % (\n self.default, self.method_list)\n\n\nclass when(object):\n \"\"\"This annotation lets packages declare multiple versions of\n methods like install() that depend on the package's spec.\n For example:\n\n .. code-block:: python\n\n class SomePackage(Package):\n ...\n\n def install(self, prefix):\n # Do default install\n\n @when('arch=chaos_5_x86_64_ib')\n def install(self, prefix):\n # This will be executed instead of the default install if\n # the package's platform() is chaos_5_x86_64_ib.\n\n @when('arch=bgqos_0\")\n def install(self, prefix):\n # This will be executed if the package's sys_type is bgqos_0\n\n This allows each package to have a default version of install() AND\n specialized versions for particular platforms. The version that is\n called depends on the architecutre of the instantiated package.\n\n Note that this works for methods other than install, as well. So,\n if you only have part of the install that is platform specific, you\n could do this:\n\n .. code-block:: python\n\n class SomePackage(Package):\n ...\n # virtual dependence on MPI.\n # could resolve to mpich, mpich2, OpenMPI\n depends_on('mpi')\n\n def setup(self):\n # do nothing in the default case\n pass\n\n @when('^openmpi')\n def setup(self):\n # do something special when this is built with OpenMPI for\n # its MPI implementations.\n\n\n def install(self, prefix):\n # Do common install stuff\n self.setup()\n # Do more common install stuff\n\n There must be one (and only one) @when clause that matches the\n package's spec. If there is more than one, or if none match,\n then the method will raise an exception when it's called.\n\n Note that the default version of decorated methods must\n *always* come first. Otherwise it will override all of the\n platform-specific versions. There's not much we can do to get\n around this because of the way decorators work.\n \"\"\"\n\n def __init__(self, spec):\n pkg = get_calling_module_name()\n if spec is True:\n spec = pkg\n self.spec = (parse_anonymous_spec(spec, pkg)\n if spec is not False else None)\n\n def __call__(self, method):\n # Get the first definition of the method in the calling scope\n original_method = caller_locals().get(method.__name__)\n\n # Create a multimethod out of the original method if it\n # isn't one already.\n if not type(original_method) == SpecMultiMethod:\n original_method = SpecMultiMethod(original_method)\n\n if self.spec is not None:\n original_method.register(self.spec, method)\n\n return original_method\n\n\nclass MultiMethodError(spack.error.SpackError):\n \"\"\"Superclass for multimethod dispatch errors\"\"\"\n\n def __init__(self, message):\n super(MultiMethodError, self).__init__(message)\n\n\nclass NoSuchMethodError(spack.error.SpackError):\n \"\"\"Raised when we can't find a version of a multi-method.\"\"\"\n\n def __init__(self, cls, method_name, spec, possible_specs):\n super(NoSuchMethodError, self).__init__(\n \"Package %s does not support %s called with %s. 
Options are: %s\"\n % (cls.__name__, method_name, spec,\n \", \".join(str(s) for s in possible_specs)))\n", "path": "lib/spack/spack/multimethod.py"}]} | 3,468 | 254 |
gh_patches_debug_39578 | rasdani/github-patches | git_diff | open-telemetry__opentelemetry-python-contrib-2066 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
grpc: trailing metadata can't be passed when aborting an AIO server RPC
**Describe your environment**
Python: 3.12.0
`opentelemetry-distro[otlp]==0.42b0`
`opentelemetry-instrumentation-grpc==0.42b0`
`grpcio==1.59.2`
**Steps to reproduce**
Full replication can be found [here](https://github.com/cookiefission/testbench/tree/main/otel/python-grpc-aio-abort-issue).
`server.py`:
```python
import asyncio
import logging
import grpc
import helloworld_pb2
import helloworld_pb2_grpc
class Greeter(helloworld_pb2_grpc.GreeterServicer):
async def SayHello(
self,
request: helloworld_pb2.HelloRequest,
context: grpc.aio.ServicerContext,
) -> helloworld_pb2.HelloReply:
metadata = (
("this", "should"),
("work", "nicely")
)
await context.abort(code=grpc.StatusCode.ABORTED, details="This is the actual error message", trailing_metadata=metadata)
async def serve() -> None:
server = grpc.aio.server()
helloworld_pb2_grpc.add_GreeterServicer_to_server(Greeter(), server)
listen_addr = "[::]:50051"
server.add_insecure_port(listen_addr)
logging.info("Starting server on %s", listen_addr)
await server.start()
await server.wait_for_termination()
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO)
asyncio.run(serve())
```
This server is just adopted from [the grpc examples](https://github.com/grpc/grpc/blob/master/examples/python/helloworld/async_greeter_server.py). The helloworld protobuf artefacts can be found [there](https://github.com/grpc/grpc/blob/master/examples/python/helloworld/helloworld_pb2_grpc.py).
For a client, the [`async_greeter_client.py`](https://github.com/grpc/grpc/blob/master/examples/python/helloworld/async_greeter_client.py) from grpc examples can be used.
This only happens when `trailing_metadata` is passed as an argument to `context.abort`.
**What is the expected behavior?**
Running the server without opentelemetry, the (expected) error from running the client is:
```
grpc.aio._call.AioRpcError: <AioRpcError of RPC that terminated with:
status = StatusCode.ABORTED
details = "This is the actual error message"
debug_error_string = "UNKNOWN:Error received from peer {grpc_message:"This is the actual error message", grpc_status:10, created_time:"2023-11-16T15:09:03.552303+00:00"}"
>
```
When running the server under `opentelemetry-instrument`, I expect the same output as when it is run without OTEL.
**What is the actual behavior?**
Instead, an exception is still raised but the underlying code, details, and trailing_metadata are missing. This makes any nuanced error handling on the client side impossible.
```
grpc.aio._call.AioRpcError: <AioRpcError of RPC that terminated with:
status = StatusCode.UNKNOWN
details = "Unexpected <class 'TypeError'>: _OpenTelemetryServicerContext.abort() got an unexpected keyword argument 'trailing_metadata'"
debug_error_string = "UNKNOWN:Error received from peer {created_time:"2023-11-16T15:30:02.017999+00:00", grpc_status:2, grpc_message:"Unexpected <class \'TypeError\'>: _OpenTelemetryServicerContext.abort() got an unexpected keyword argument \'trailing_metadata\'"}"
>
```
**Additional context**
The `abort` method for [`grpc.ServicerContext`](https://grpc.github.io/grpc/python/grpc.html#grpc.ServicerContext.abort) and [`grpc.aio.ServicerContext`](https://grpc.github.io/grpc/python/grpc_asyncio.html#grpc.aio.ServicerContext.abort) is different. It's unclear why. The AIO instrumentation [re-uses](https://github.com/open-telemetry/opentelemetry-python-contrib/blob/main/instrumentation/opentelemetry-instrumentation-grpc/src/opentelemetry/instrumentation/grpc/_aio_server.py#L19) the [`_OpenTelemetryServicerContext`](https://github.com/open-telemetry/opentelemetry-python-contrib/blob/main/instrumentation/opentelemetry-instrumentation-grpc/src/opentelemetry/instrumentation/grpc/_server.py#L68) from the non-async server instrumentation which is how this bug snuck in, despite there being a test in place for the abort.
--- END ISSUE ---
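As the issue notes, `grpc.aio.ServicerContext.abort` accepts a `trailing_metadata` argument that the shared synchronous context wrapper does not. Below is a minimal, self-contained sketch of a proxy whose `abort` keeps the async signature and forwards that argument — not the instrumentation's code; the classes here are invented purely to make the example runnable without gRPC installed.

```python
# Minimal sketch (not the instrumentation's code): a proxy context whose
# abort() keeps the async signature, including trailing_metadata.
import asyncio


class _ForwardingAioContext:
    """Toy proxy around a grpc.aio.ServicerContext-like object."""

    def __init__(self, wrapped):
        self._wrapped = wrapped

    async def abort(self, code, details="", trailing_metadata=tuple()):
        # Do any wrapper bookkeeping here (e.g. marking a span as errored),
        # then delegate with trailing_metadata intact instead of dropping it.
        return await self._wrapped.abort(code, details, trailing_metadata)

    def __getattr__(self, name):
        # Everything else is passed through to the real context untouched.
        return getattr(self._wrapped, name)


class _FakeAioContext:
    """Stand-in for the real async servicer context, used only to run the sketch."""

    async def abort(self, code, details, trailing_metadata):
        print("abort:", code, details, trailing_metadata)


asyncio.run(
    _ForwardingAioContext(_FakeAioContext()).abort(
        "ABORTED", "boom", (("this", "should"), ("work", "nicely"))
    )
)
```

The real async context's `abort` is a coroutine, which is why a wrapper must `await` the delegated call rather than reuse the synchronous wrapper's method.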
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `instrumentation/opentelemetry-instrumentation-grpc/src/opentelemetry/instrumentation/grpc/_aio_server.py`
Content:
```
1 # Copyright The OpenTelemetry Authors
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import grpc.aio
16
17 from ._server import (
18 OpenTelemetryServerInterceptor,
19 _OpenTelemetryServicerContext,
20 _wrap_rpc_behavior,
21 )
22
23
24 class OpenTelemetryAioServerInterceptor(
25 grpc.aio.ServerInterceptor, OpenTelemetryServerInterceptor
26 ):
27 """
28 An AsyncIO gRPC server interceptor, to add OpenTelemetry.
29 Usage::
30 tracer = some OpenTelemetry tracer
31 interceptors = [
32 AsyncOpenTelemetryServerInterceptor(tracer),
33 ]
34 server = aio.server(
35 futures.ThreadPoolExecutor(max_workers=concurrency),
36 interceptors = (interceptors,))
37 """
38
39 async def intercept_service(self, continuation, handler_call_details):
40 if self._filter is not None and not self._filter(handler_call_details):
41 return await continuation(handler_call_details)
42
43 def telemetry_wrapper(behavior, request_streaming, response_streaming):
44 # handle streaming responses specially
45 if response_streaming:
46 return self._intercept_aio_server_stream(
47 behavior,
48 handler_call_details,
49 )
50
51 return self._intercept_aio_server_unary(
52 behavior,
53 handler_call_details,
54 )
55
56 next_handler = await continuation(handler_call_details)
57
58 return _wrap_rpc_behavior(next_handler, telemetry_wrapper)
59
60 def _intercept_aio_server_unary(self, behavior, handler_call_details):
61 async def _unary_interceptor(request_or_iterator, context):
62 with self._set_remote_context(context):
63 with self._start_span(
64 handler_call_details,
65 context,
66 set_status_on_exception=False,
67 ) as span:
68 # wrap the context
69 context = _OpenTelemetryServicerContext(context, span)
70
71 # And now we run the actual RPC.
72 try:
73 return await behavior(request_or_iterator, context)
74
75 except Exception as error:
76 # Bare exceptions are likely to be gRPC aborts, which
77 # we handle in our context wrapper.
78 # Here, we're interested in uncaught exceptions.
79 # pylint:disable=unidiomatic-typecheck
80 if type(error) != Exception: # noqa: E721
81 span.record_exception(error)
82 raise error
83
84 return _unary_interceptor
85
86 def _intercept_aio_server_stream(self, behavior, handler_call_details):
87 async def _stream_interceptor(request_or_iterator, context):
88 with self._set_remote_context(context):
89 with self._start_span(
90 handler_call_details,
91 context,
92 set_status_on_exception=False,
93 ) as span:
94 context = _OpenTelemetryServicerContext(context, span)
95
96 try:
97 async for response in behavior(
98 request_or_iterator, context
99 ):
100 yield response
101
102 except Exception as error:
103 # pylint:disable=unidiomatic-typecheck
104 if type(error) != Exception: # noqa: E721
105 span.record_exception(error)
106 raise error
107
108 return _stream_interceptor
109
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/instrumentation/opentelemetry-instrumentation-grpc/src/opentelemetry/instrumentation/grpc/_aio_server.py b/instrumentation/opentelemetry-instrumentation-grpc/src/opentelemetry/instrumentation/grpc/_aio_server.py
--- a/instrumentation/opentelemetry-instrumentation-grpc/src/opentelemetry/instrumentation/grpc/_aio_server.py
+++ b/instrumentation/opentelemetry-instrumentation-grpc/src/opentelemetry/instrumentation/grpc/_aio_server.py
@@ -12,13 +12,63 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+import grpc
import grpc.aio
-
-from ._server import (
- OpenTelemetryServerInterceptor,
- _OpenTelemetryServicerContext,
- _wrap_rpc_behavior,
-)
+import wrapt
+
+from opentelemetry.semconv.trace import SpanAttributes
+from opentelemetry.trace.status import Status, StatusCode
+
+from ._server import OpenTelemetryServerInterceptor, _wrap_rpc_behavior
+
+
+# pylint:disable=abstract-method
+class _OpenTelemetryAioServicerContext(wrapt.ObjectProxy):
+ def __init__(self, servicer_context, active_span):
+ super().__init__(servicer_context)
+ self._self_active_span = active_span
+ self._self_code = grpc.StatusCode.OK
+ self._self_details = None
+
+ async def abort(self, code, details="", trailing_metadata=tuple()):
+ self._self_code = code
+ self._self_details = details
+ self._self_active_span.set_attribute(
+ SpanAttributes.RPC_GRPC_STATUS_CODE, code.value[0]
+ )
+ self._self_active_span.set_status(
+ Status(
+ status_code=StatusCode.ERROR,
+ description=f"{code}:{details}",
+ )
+ )
+ return await self.__wrapped__.abort(code, details, trailing_metadata)
+
+ def set_code(self, code):
+ self._self_code = code
+ details = self._self_details or code.value[1]
+ self._self_active_span.set_attribute(
+ SpanAttributes.RPC_GRPC_STATUS_CODE, code.value[0]
+ )
+ if code != grpc.StatusCode.OK:
+ self._self_active_span.set_status(
+ Status(
+ status_code=StatusCode.ERROR,
+ description=f"{code}:{details}",
+ )
+ )
+ return self.__wrapped__.set_code(code)
+
+ def set_details(self, details):
+ self._self_details = details
+ if self._self_code != grpc.StatusCode.OK:
+ self._self_active_span.set_status(
+ Status(
+ status_code=StatusCode.ERROR,
+ description=f"{self._self_code}:{details}",
+ )
+ )
+ return self.__wrapped__.set_details(details)
class OpenTelemetryAioServerInterceptor(
@@ -66,7 +116,7 @@
set_status_on_exception=False,
) as span:
# wrap the context
- context = _OpenTelemetryServicerContext(context, span)
+ context = _OpenTelemetryAioServicerContext(context, span)
# And now we run the actual RPC.
try:
@@ -91,7 +141,7 @@
context,
set_status_on_exception=False,
) as span:
- context = _OpenTelemetryServicerContext(context, span)
+ context = _OpenTelemetryAioServicerContext(context, span)
try:
async for response in behavior(
| {"golden_diff": "diff --git a/instrumentation/opentelemetry-instrumentation-grpc/src/opentelemetry/instrumentation/grpc/_aio_server.py b/instrumentation/opentelemetry-instrumentation-grpc/src/opentelemetry/instrumentation/grpc/_aio_server.py\n--- a/instrumentation/opentelemetry-instrumentation-grpc/src/opentelemetry/instrumentation/grpc/_aio_server.py\n+++ b/instrumentation/opentelemetry-instrumentation-grpc/src/opentelemetry/instrumentation/grpc/_aio_server.py\n@@ -12,13 +12,63 @@\n # See the License for the specific language governing permissions and\n # limitations under the License.\n \n+import grpc\n import grpc.aio\n-\n-from ._server import (\n- OpenTelemetryServerInterceptor,\n- _OpenTelemetryServicerContext,\n- _wrap_rpc_behavior,\n-)\n+import wrapt\n+\n+from opentelemetry.semconv.trace import SpanAttributes\n+from opentelemetry.trace.status import Status, StatusCode\n+\n+from ._server import OpenTelemetryServerInterceptor, _wrap_rpc_behavior\n+\n+\n+# pylint:disable=abstract-method\n+class _OpenTelemetryAioServicerContext(wrapt.ObjectProxy):\n+ def __init__(self, servicer_context, active_span):\n+ super().__init__(servicer_context)\n+ self._self_active_span = active_span\n+ self._self_code = grpc.StatusCode.OK\n+ self._self_details = None\n+\n+ async def abort(self, code, details=\"\", trailing_metadata=tuple()):\n+ self._self_code = code\n+ self._self_details = details\n+ self._self_active_span.set_attribute(\n+ SpanAttributes.RPC_GRPC_STATUS_CODE, code.value[0]\n+ )\n+ self._self_active_span.set_status(\n+ Status(\n+ status_code=StatusCode.ERROR,\n+ description=f\"{code}:{details}\",\n+ )\n+ )\n+ return await self.__wrapped__.abort(code, details, trailing_metadata)\n+\n+ def set_code(self, code):\n+ self._self_code = code\n+ details = self._self_details or code.value[1]\n+ self._self_active_span.set_attribute(\n+ SpanAttributes.RPC_GRPC_STATUS_CODE, code.value[0]\n+ )\n+ if code != grpc.StatusCode.OK:\n+ self._self_active_span.set_status(\n+ Status(\n+ status_code=StatusCode.ERROR,\n+ description=f\"{code}:{details}\",\n+ )\n+ )\n+ return self.__wrapped__.set_code(code)\n+\n+ def set_details(self, details):\n+ self._self_details = details\n+ if self._self_code != grpc.StatusCode.OK:\n+ self._self_active_span.set_status(\n+ Status(\n+ status_code=StatusCode.ERROR,\n+ description=f\"{self._self_code}:{details}\",\n+ )\n+ )\n+ return self.__wrapped__.set_details(details)\n \n \n class OpenTelemetryAioServerInterceptor(\n@@ -66,7 +116,7 @@\n set_status_on_exception=False,\n ) as span:\n # wrap the context\n- context = _OpenTelemetryServicerContext(context, span)\n+ context = _OpenTelemetryAioServicerContext(context, span)\n \n # And now we run the actual RPC.\n try:\n@@ -91,7 +141,7 @@\n context,\n set_status_on_exception=False,\n ) as span:\n- context = _OpenTelemetryServicerContext(context, span)\n+ context = _OpenTelemetryAioServicerContext(context, span)\n \n try:\n async for response in behavior(\n", "issue": "grpc: trailing metadata can't be passed when aborting an AIO server RPC\n**Describe your environment**\r\n\r\nPython: 3.12.0\r\n`opentelemetry-distro[otlp]==0.42b0`\r\n`opentelemetry-instrumentation-grpc==0.42b0`\r\n`grpcio==1.59.2`\r\n\r\n**Steps to reproduce**\r\nFull replication can be found [here](https://github.com/cookiefission/testbench/tree/main/otel/python-grpc-aio-abort-issue).\r\n\r\n`server.py`:\r\n\r\n```python\r\nimport asyncio\r\nimport logging\r\n\r\nimport grpc\r\nimport helloworld_pb2\r\nimport helloworld_pb2_grpc\r\n\r\n\r\nclass 
Greeter(helloworld_pb2_grpc.GreeterServicer):\r\n async def SayHello(\r\n self,\r\n request: helloworld_pb2.HelloRequest,\r\n context: grpc.aio.ServicerContext,\r\n ) -> helloworld_pb2.HelloReply:\r\n metadata = (\r\n (\"this\", \"should\"),\r\n (\"work\", \"nicely\")\r\n )\r\n await context.abort(code=grpc.StatusCode.ABORTED, details=\"This is the actual error message\", trailing_metadata=metadata)\r\n\r\n\r\nasync def serve() -> None:\r\n server = grpc.aio.server()\r\n helloworld_pb2_grpc.add_GreeterServicer_to_server(Greeter(), server)\r\n listen_addr = \"[::]:50051\"\r\n server.add_insecure_port(listen_addr)\r\n logging.info(\"Starting server on %s\", listen_addr)\r\n await server.start()\r\n await server.wait_for_termination()\r\n\r\n\r\nif __name__ == \"__main__\":\r\n logging.basicConfig(level=logging.INFO)\r\n asyncio.run(serve())\r\n```\r\nThis server is just adopted from [the grpc examples](https://github.com/grpc/grpc/blob/master/examples/python/helloworld/async_greeter_server.py). The helloworld protobuf artefacts can be found [there](https://github.com/grpc/grpc/blob/master/examples/python/helloworld/helloworld_pb2_grpc.py).\r\n\r\nFor a client, the [`async_greeter_client.py`](https://github.com/grpc/grpc/blob/master/examples/python/helloworld/async_greeter_client.py) from grpc examples can be used.\r\n\r\nThis only happens when `trailing_metadata` is passed as an argument to `context.abort`. \r\n\r\n**What is the expected behavior?**\r\n\r\nRunning the server without opentelemetry, the (expected) error from running the client is:\r\n\r\n```\r\ngrpc.aio._call.AioRpcError: <AioRpcError of RPC that terminated with:\r\n status = StatusCode.ABORTED\r\n details = \"This is the actual error message\"\r\n debug_error_string = \"UNKNOWN:Error received from peer {grpc_message:\"This is the actual error message\", grpc_status:10, created_time:\"2023-11-16T15:09:03.552303+00:00\"}\"\r\n>\r\n```\r\n\r\nWhen running the server under `opentelemetry-instrument`, I expect the same output as when it is run without OTEL. \r\n\r\n**What is the actual behavior?**\r\n\r\nInstead, an exception is still raised but the underlying code, details, and trailing_metadata are missing. This makes any nuanced error handling on the client side impossible. \r\n\r\n```\r\ngrpc.aio._call.AioRpcError: <AioRpcError of RPC that terminated with:\r\n status = StatusCode.UNKNOWN\r\n details = \"Unexpected <class 'TypeError'>: _OpenTelemetryServicerContext.abort() got an unexpected keyword argument 'trailing_metadata'\"\r\n debug_error_string = \"UNKNOWN:Error received from peer {created_time:\"2023-11-16T15:30:02.017999+00:00\", grpc_status:2, grpc_message:\"Unexpected <class \\'TypeError\\'>: _OpenTelemetryServicerContext.abort() got an unexpected keyword argument \\'trailing_metadata\\'\"}\"\r\n>\r\n```\r\n\r\n**Additional context**\r\n\r\nThe `abort` method for [`grpc.ServicerContext`](https://grpc.github.io/grpc/python/grpc.html#grpc.ServicerContext.abort) and [`grpc.aio.ServicerContext`](https://grpc.github.io/grpc/python/grpc_asyncio.html#grpc.aio.ServicerContext.abort) is different. It's unclear why. 
The AIO instrumentation [re-uses](https://github.com/open-telemetry/opentelemetry-python-contrib/blob/main/instrumentation/opentelemetry-instrumentation-grpc/src/opentelemetry/instrumentation/grpc/_aio_server.py#L19) the [`_OpenTelemetryServicerContext`](https://github.com/open-telemetry/opentelemetry-python-contrib/blob/main/instrumentation/opentelemetry-instrumentation-grpc/src/opentelemetry/instrumentation/grpc/_server.py#L68) from the non-async server instrumentation which is how this bug snuck in, despite there being a test in place for the abort. \r\n\n", "before_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport grpc.aio\n\nfrom ._server import (\n OpenTelemetryServerInterceptor,\n _OpenTelemetryServicerContext,\n _wrap_rpc_behavior,\n)\n\n\nclass OpenTelemetryAioServerInterceptor(\n grpc.aio.ServerInterceptor, OpenTelemetryServerInterceptor\n):\n \"\"\"\n An AsyncIO gRPC server interceptor, to add OpenTelemetry.\n Usage::\n tracer = some OpenTelemetry tracer\n interceptors = [\n AsyncOpenTelemetryServerInterceptor(tracer),\n ]\n server = aio.server(\n futures.ThreadPoolExecutor(max_workers=concurrency),\n interceptors = (interceptors,))\n \"\"\"\n\n async def intercept_service(self, continuation, handler_call_details):\n if self._filter is not None and not self._filter(handler_call_details):\n return await continuation(handler_call_details)\n\n def telemetry_wrapper(behavior, request_streaming, response_streaming):\n # handle streaming responses specially\n if response_streaming:\n return self._intercept_aio_server_stream(\n behavior,\n handler_call_details,\n )\n\n return self._intercept_aio_server_unary(\n behavior,\n handler_call_details,\n )\n\n next_handler = await continuation(handler_call_details)\n\n return _wrap_rpc_behavior(next_handler, telemetry_wrapper)\n\n def _intercept_aio_server_unary(self, behavior, handler_call_details):\n async def _unary_interceptor(request_or_iterator, context):\n with self._set_remote_context(context):\n with self._start_span(\n handler_call_details,\n context,\n set_status_on_exception=False,\n ) as span:\n # wrap the context\n context = _OpenTelemetryServicerContext(context, span)\n\n # And now we run the actual RPC.\n try:\n return await behavior(request_or_iterator, context)\n\n except Exception as error:\n # Bare exceptions are likely to be gRPC aborts, which\n # we handle in our context wrapper.\n # Here, we're interested in uncaught exceptions.\n # pylint:disable=unidiomatic-typecheck\n if type(error) != Exception: # noqa: E721\n span.record_exception(error)\n raise error\n\n return _unary_interceptor\n\n def _intercept_aio_server_stream(self, behavior, handler_call_details):\n async def _stream_interceptor(request_or_iterator, context):\n with self._set_remote_context(context):\n with self._start_span(\n handler_call_details,\n context,\n set_status_on_exception=False,\n ) as span:\n context = _OpenTelemetryServicerContext(context, span)\n\n try:\n async for response 
in behavior(\n request_or_iterator, context\n ):\n yield response\n\n except Exception as error:\n # pylint:disable=unidiomatic-typecheck\n if type(error) != Exception: # noqa: E721\n span.record_exception(error)\n raise error\n\n return _stream_interceptor\n", "path": "instrumentation/opentelemetry-instrumentation-grpc/src/opentelemetry/instrumentation/grpc/_aio_server.py"}], "after_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport grpc\nimport grpc.aio\nimport wrapt\n\nfrom opentelemetry.semconv.trace import SpanAttributes\nfrom opentelemetry.trace.status import Status, StatusCode\n\nfrom ._server import OpenTelemetryServerInterceptor, _wrap_rpc_behavior\n\n\n# pylint:disable=abstract-method\nclass _OpenTelemetryAioServicerContext(wrapt.ObjectProxy):\n def __init__(self, servicer_context, active_span):\n super().__init__(servicer_context)\n self._self_active_span = active_span\n self._self_code = grpc.StatusCode.OK\n self._self_details = None\n\n async def abort(self, code, details=\"\", trailing_metadata=tuple()):\n self._self_code = code\n self._self_details = details\n self._self_active_span.set_attribute(\n SpanAttributes.RPC_GRPC_STATUS_CODE, code.value[0]\n )\n self._self_active_span.set_status(\n Status(\n status_code=StatusCode.ERROR,\n description=f\"{code}:{details}\",\n )\n )\n return await self.__wrapped__.abort(code, details, trailing_metadata)\n\n def set_code(self, code):\n self._self_code = code\n details = self._self_details or code.value[1]\n self._self_active_span.set_attribute(\n SpanAttributes.RPC_GRPC_STATUS_CODE, code.value[0]\n )\n if code != grpc.StatusCode.OK:\n self._self_active_span.set_status(\n Status(\n status_code=StatusCode.ERROR,\n description=f\"{code}:{details}\",\n )\n )\n return self.__wrapped__.set_code(code)\n\n def set_details(self, details):\n self._self_details = details\n if self._self_code != grpc.StatusCode.OK:\n self._self_active_span.set_status(\n Status(\n status_code=StatusCode.ERROR,\n description=f\"{self._self_code}:{details}\",\n )\n )\n return self.__wrapped__.set_details(details)\n\n\nclass OpenTelemetryAioServerInterceptor(\n grpc.aio.ServerInterceptor, OpenTelemetryServerInterceptor\n):\n \"\"\"\n An AsyncIO gRPC server interceptor, to add OpenTelemetry.\n Usage::\n tracer = some OpenTelemetry tracer\n interceptors = [\n AsyncOpenTelemetryServerInterceptor(tracer),\n ]\n server = aio.server(\n futures.ThreadPoolExecutor(max_workers=concurrency),\n interceptors = (interceptors,))\n \"\"\"\n\n async def intercept_service(self, continuation, handler_call_details):\n if self._filter is not None and not self._filter(handler_call_details):\n return await continuation(handler_call_details)\n\n def telemetry_wrapper(behavior, request_streaming, response_streaming):\n # handle streaming responses specially\n if response_streaming:\n return self._intercept_aio_server_stream(\n behavior,\n handler_call_details,\n )\n\n return self._intercept_aio_server_unary(\n behavior,\n 
handler_call_details,\n )\n\n next_handler = await continuation(handler_call_details)\n\n return _wrap_rpc_behavior(next_handler, telemetry_wrapper)\n\n def _intercept_aio_server_unary(self, behavior, handler_call_details):\n async def _unary_interceptor(request_or_iterator, context):\n with self._set_remote_context(context):\n with self._start_span(\n handler_call_details,\n context,\n set_status_on_exception=False,\n ) as span:\n # wrap the context\n context = _OpenTelemetryAioServicerContext(context, span)\n\n # And now we run the actual RPC.\n try:\n return await behavior(request_or_iterator, context)\n\n except Exception as error:\n # Bare exceptions are likely to be gRPC aborts, which\n # we handle in our context wrapper.\n # Here, we're interested in uncaught exceptions.\n # pylint:disable=unidiomatic-typecheck\n if type(error) != Exception: # noqa: E721\n span.record_exception(error)\n raise error\n\n return _unary_interceptor\n\n def _intercept_aio_server_stream(self, behavior, handler_call_details):\n async def _stream_interceptor(request_or_iterator, context):\n with self._set_remote_context(context):\n with self._start_span(\n handler_call_details,\n context,\n set_status_on_exception=False,\n ) as span:\n context = _OpenTelemetryAioServicerContext(context, span)\n\n try:\n async for response in behavior(\n request_or_iterator, context\n ):\n yield response\n\n except Exception as error:\n # pylint:disable=unidiomatic-typecheck\n if type(error) != Exception: # noqa: E721\n span.record_exception(error)\n raise error\n\n return _stream_interceptor\n", "path": "instrumentation/opentelemetry-instrumentation-grpc/src/opentelemetry/instrumentation/grpc/_aio_server.py"}]} | 2,306 | 788 |
gh_patches_debug_9369 | rasdani/github-patches | git_diff | googleapis__python-bigquery-1930 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Retry requests.exceptions.ConnectionError
Hi Team,
Could you add a retry for this exception? We are running this code on Cloud Functions and GKE infrastructure, and from time to time we get these errors.
BigQuery SDK: google-cloud-bigquery==3.18.0
```
Error Type: <class 'requests.exceptions.ConnectionError'> error: ('Connection aborted.', ConnectionResetError(104, 'Connection reset by peer'))
Traceback (most recent call last): File "/workspace/visionCommon/common.py", line 117,
in wrapper return func(*args, **kwargs) File "/workspace/main.py", line 576, in controller current_step = check_eligibility() File "/workspace/main.py", line 314,
in check_eligibility total_rows = service.execute_query(query).result().total_rows File "/layers/google.python.pip/pip/lib/python3.9/site-packages/google/cloud/bigquery/job/query.py", line 1595,
in result do_get_result() File "/layers/google.python.pip/pip/lib/python3.9/site-packages/google/api_core/retry/retry_unary.py", line 293,
in retry_wrapped_func return retry_target( File "/layers/google.python.pip/pip/lib/python3.9/site-packages/google/api_core/retry/retry_unary.py", line 153,
in retry_target _retry_error_helper( File "/layers/google.python.pip/pip/lib/python3.9/site-packages/google/api_core/retry/retry_base.py", line 212,
in _retry_error_helper raise final_exc from source_exc File "/layers/google.python.pip/pip/lib/python3.9/site-packages/google/api_core/retry/retry_unary.py", line 144,
in retry_target result = target() File "/layers/google.python.pip/pip/lib/python3.9/site-packages/google/cloud/bigquery/job/query.py", line 1584,
in do_get_result super(QueryJob, self).result(retry=retry, timeout=timeout) File "/layers/google.python.pip/pip/lib/python3.9/site-packages/google/cloud/bigquery/job/base.py", line 971,
in result return super(_AsyncJob, self).result(timeout=timeout, **kwargs) File "/layers/google.python.pip/pip/lib/python3.9/site-packages/google/api_core/future/polling.py", line 256,
in result self._blocking_poll(timeout=timeout, retry=retry, polling=polling) File "/layers/google.python.pip/pip/lib/python3.9/site-packages/google/cloud/bigquery/job/query.py", line 1326,
in _blocking_poll super(QueryJob, self)._blocking_poll(timeout=timeout, **kwargs) File "/layers/google.python.pip/pip/lib/python3.9/site-packages/google/api_core/future/polling.py", line 137,
in _blocking_poll polling(self._done_or_raise)(retry=retry) File "/layers/google.python.pip/pip/lib/python3.9/site-packages/google/api_core/retry/retry_unary.py", line 293,
in retry_wrapped_func return retry_target( File "/layers/google.python.pip/pip/lib/python3.9/site-packages/google/api_core/retry/retry_unary.py", line 153,
in retry_target _retry_error_helper( File "/layers/google.python.pip/pip/lib/python3.9/site-packages/google/api_core/retry/retry_base.py", line 212,
in _retry_error_helper raise final_exc from source_exc File "/layers/google.python.pip/pip/lib/python3.9/site-packages/google/api_core/retry/retry_unary.py", line 144,
in retry_target result = target() File "/layers/google.python.pip/pip/lib/python3.9/site-packages/google/cloud/bigquery/job/query.py", line 1448,
in _done_or_raise self._reload_query_results(retry=retry, timeout=transport_timeout) File "/layers/google.python.pip/pip/lib/python3.9/site-packages/google/cloud/bigquery/job/query.py", line 1429,
in _reload_query_results self._query_results = self._client._get_query_results( File "/layers/google.python.pip/pip/lib/python3.9/site-packages/google/cloud/bigquery/client.py", line 1936,
in _get_query_results resource = self._call_api( File "/layers/google.python.pip/pip/lib/python3.9/site-packages/google/cloud/bigquery/client.py", line 827,
in _call_api return call() File "/layers/google.python.pip/pip/lib/python3.9/site-packages/google/cloud/_http/__init__.py", line 482,
in api_request response = self._make_request( File "/layers/google.python.pip/pip/lib/python3.9/site-packages/google/cloud/_http/__init__.py", line 341,
in _make_request return self._do_request( File "/layers/google.python.pip/pip/lib/python3.9/site-packages/google/cloud/_http/__init__.py", line 379,
in _do_request return self.http.request( File "/layers/google.python.pip/pip/lib/python3.9/site-packages/google/auth/transport/requests.py", line 541,
in request response = super(AuthorizedSession, self).request( File "/layers/google.python.pip/pip/lib/python3.9/site-packages/requests/sessions.py", line 589,
in request resp = self.send(prep, **send_kwargs) File "/layers/google.python.pip/pip/lib/python3.9/site-packages/requests/sessions.py", line 703,
in send r = adapter.send(request, **kwargs) File "/layers/google.python.pip/pip/lib/python3.9/site-packages/requests/adapters.py", line 501,
in send raise ConnectionError(err, request=request) requests.exceptions.ConnectionError: ('Connection aborted.', ConnectionResetError(104, 'Connection reset by peer'))
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `google/cloud/bigquery/retry.py`
Content:
```
1 # Copyright 2018 Google LLC
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 from google.api_core import exceptions
16 from google.api_core import retry
17 from google.auth import exceptions as auth_exceptions # type: ignore
18 import requests.exceptions
19
20
21 _RETRYABLE_REASONS = frozenset(
22 ["rateLimitExceeded", "backendError", "internalError", "badGateway"]
23 )
24
25 _UNSTRUCTURED_RETRYABLE_TYPES = (
26 ConnectionError,
27 exceptions.TooManyRequests,
28 exceptions.InternalServerError,
29 exceptions.BadGateway,
30 exceptions.ServiceUnavailable,
31 requests.exceptions.ChunkedEncodingError,
32 requests.exceptions.ConnectionError,
33 requests.exceptions.Timeout,
34 auth_exceptions.TransportError,
35 )
36
37 _DEFAULT_RETRY_DEADLINE = 10.0 * 60.0 # 10 minutes
38
39 # Ambiguous errors (e.g. internalError, backendError, rateLimitExceeded) retry
40 # until the full `_DEFAULT_RETRY_DEADLINE`. This is because the
41 # `jobs.getQueryResults` REST API translates a job failure into an HTTP error.
42 #
43 # TODO(https://github.com/googleapis/python-bigquery/issues/1903): Investigate
44 # if we can fail early for ambiguous errors in `QueryJob.result()`'s call to
45 # the `jobs.getQueryResult` API.
46 #
47 # We need `_DEFAULT_JOB_DEADLINE` to be some multiple of
48 # `_DEFAULT_RETRY_DEADLINE` to allow for a few retries after the retry
49 # timeout is reached.
50 #
51 # Note: This multiple should actually be a multiple of
52 # (2 * _DEFAULT_RETRY_DEADLINE). After an ambiguous exception, the first
53 # call from `job_retry()` refreshes the job state without actually restarting
54 # the query. The second `job_retry()` actually restarts the query. For a more
55 # detailed explanation, see the comments where we set `restart_query_job = True`
56 # in `QueryJob.result()`'s inner `is_job_done()` function.
57 _DEFAULT_JOB_DEADLINE = 2.0 * (2.0 * _DEFAULT_RETRY_DEADLINE)
58
59
60 def _should_retry(exc):
61 """Predicate for determining when to retry.
62
63 We retry if and only if the 'reason' is 'backendError'
64 or 'rateLimitExceeded'.
65 """
66 if not hasattr(exc, "errors") or len(exc.errors) == 0:
67 # Check for unstructured error returns, e.g. from GFE
68 return isinstance(exc, _UNSTRUCTURED_RETRYABLE_TYPES)
69
70 reason = exc.errors[0]["reason"]
71 return reason in _RETRYABLE_REASONS
72
73
74 DEFAULT_RETRY = retry.Retry(predicate=_should_retry, deadline=_DEFAULT_RETRY_DEADLINE)
75 """The default retry object.
76
77 Any method with a ``retry`` parameter will be retried automatically,
78 with reasonable defaults. To disable retry, pass ``retry=None``.
79 To modify the default retry behavior, call a ``with_XXX`` method
80 on ``DEFAULT_RETRY``. For example, to change the deadline to 30 seconds,
81 pass ``retry=bigquery.DEFAULT_RETRY.with_deadline(30)``.
82 """
83
84 # Note: Take care when updating DEFAULT_TIMEOUT to anything but None. We
85 # briefly had a default timeout, but even setting it at more than twice the
86 # theoretical server-side default timeout of 2 minutes was not enough for
87 # complex queries. See:
88 # https://github.com/googleapis/python-bigquery/issues/970#issuecomment-921934647
89 DEFAULT_TIMEOUT = None
90 """The default API timeout.
91
92 This is the time to wait per request. To adjust the total wait time, set a
93 deadline on the retry object.
94 """
95
96 job_retry_reasons = (
97 "rateLimitExceeded",
98 "backendError",
99 "internalError",
100 "jobRateLimitExceeded",
101 )
102
103
104 def _job_should_retry(exc):
105 # Sometimes we have ambiguous errors, such as 'backendError' which could
106 # be due to an API problem or a job problem. For these, make sure we retry
107 # our is_job_done() function.
108 #
109 # Note: This won't restart the job unless we know for sure it's because of
110 # the job status and set restart_query_job = True in that loop. This means
111 # that we might end up calling this predicate twice for the same job
112 # but from different paths: (1) from jobs.getQueryResults RetryError and
113 # (2) from translating the job error from the body of a jobs.get response.
114 #
115 # Note: If we start retrying job types other than queries where we don't
116 # call the problematic getQueryResults API to check the status, we need
117 # to provide a different predicate, as there shouldn't be ambiguous
118 # errors in those cases.
119 if isinstance(exc, exceptions.RetryError):
120 exc = exc.cause
121
122 if not hasattr(exc, "errors") or len(exc.errors) == 0:
123 return False
124
125 reason = exc.errors[0]["reason"]
126 return reason in job_retry_reasons
127
128
129 DEFAULT_JOB_RETRY = retry.Retry(
130 predicate=_job_should_retry, deadline=_DEFAULT_JOB_DEADLINE
131 )
132 """
133 The default job retry object.
134 """
135
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/google/cloud/bigquery/retry.py b/google/cloud/bigquery/retry.py
--- a/google/cloud/bigquery/retry.py
+++ b/google/cloud/bigquery/retry.py
@@ -119,6 +119,14 @@
if isinstance(exc, exceptions.RetryError):
exc = exc.cause
+ # Per https://github.com/googleapis/python-bigquery/issues/1929, sometimes
+ # retriable errors make their way here. Because of the separate
+ # `restart_query_job` logic to make sure we aren't restarting non-failed
+ # jobs, it should be safe to continue and not totally fail our attempt at
+ # waiting for the query to complete.
+ if _should_retry(exc):
+ return True
+
if not hasattr(exc, "errors") or len(exc.errors) == 0:
return False
| {"golden_diff": "diff --git a/google/cloud/bigquery/retry.py b/google/cloud/bigquery/retry.py\n--- a/google/cloud/bigquery/retry.py\n+++ b/google/cloud/bigquery/retry.py\n@@ -119,6 +119,14 @@\n if isinstance(exc, exceptions.RetryError):\n exc = exc.cause\n \n+ # Per https://github.com/googleapis/python-bigquery/issues/1929, sometimes\n+ # retriable errors make their way here. Because of the separate\n+ # `restart_query_job` logic to make sure we aren't restarting non-failed\n+ # jobs, it should be safe to continue and not totally fail our attempt at\n+ # waiting for the query to complete.\n+ if _should_retry(exc):\n+ return True\n+\n if not hasattr(exc, \"errors\") or len(exc.errors) == 0:\n return False\n", "issue": "Retry requests.exceptions.ConnectionError\nHi Team,\r\n\r\nCould you add a retry to this exception? We are running this code in Cloud Function and GKE infrastructure from time to time we get these errors\r\n\r\nBigquery SDK == google-cloud-bigquery==3.18.0\r\n\r\n```\r\nError Type: <class 'requests.exceptions.ConnectionError'> error: ('Connection aborted.', ConnectionResetError(104, 'Connection reset by peer')) \r\nTraceback (most recent call last): File \"/workspace/visionCommon/common.py\", line 117, \r\nin wrapper return func(*args, **kwargs) File \"/workspace/main.py\", line 576, in controller current_step = check_eligibility() File \"/workspace/main.py\", line 314, \r\nin check_eligibility total_rows = service.execute_query(query).result().total_rows File \"/layers/google.python.pip/pip/lib/python3.9/site-packages/google/cloud/bigquery/job/query.py\", line 1595, \r\nin result do_get_result() File \"/layers/google.python.pip/pip/lib/python3.9/site-packages/google/api_core/retry/retry_unary.py\", line 293, \r\nin retry_wrapped_func return retry_target( File \"/layers/google.python.pip/pip/lib/python3.9/site-packages/google/api_core/retry/retry_unary.py\", line 153, \r\nin retry_target _retry_error_helper( File \"/layers/google.python.pip/pip/lib/python3.9/site-packages/google/api_core/retry/retry_base.py\", line 212, \r\nin _retry_error_helper raise final_exc from source_exc File \"/layers/google.python.pip/pip/lib/python3.9/site-packages/google/api_core/retry/retry_unary.py\", line 144, \r\nin retry_target result = target() File \"/layers/google.python.pip/pip/lib/python3.9/site-packages/google/cloud/bigquery/job/query.py\", line 1584, \r\nin do_get_result super(QueryJob, self).result(retry=retry, timeout=timeout) File \"/layers/google.python.pip/pip/lib/python3.9/site-packages/google/cloud/bigquery/job/base.py\", line 971, \r\nin result return super(_AsyncJob, self).result(timeout=timeout, **kwargs) File \"/layers/google.python.pip/pip/lib/python3.9/site-packages/google/api_core/future/polling.py\", line 256, \r\nin result self._blocking_poll(timeout=timeout, retry=retry, polling=polling) File \"/layers/google.python.pip/pip/lib/python3.9/site-packages/google/cloud/bigquery/job/query.py\", line 1326, \r\nin _blocking_poll super(QueryJob, self)._blocking_poll(timeout=timeout, **kwargs) File \"/layers/google.python.pip/pip/lib/python3.9/site-packages/google/api_core/future/polling.py\", line 137, \r\nin _blocking_poll polling(self._done_or_raise)(retry=retry) File \"/layers/google.python.pip/pip/lib/python3.9/site-packages/google/api_core/retry/retry_unary.py\", line 293, \r\nin retry_wrapped_func return retry_target( File \"/layers/google.python.pip/pip/lib/python3.9/site-packages/google/api_core/retry/retry_unary.py\", line 153, \r\nin retry_target _retry_error_helper( 
File \"/layers/google.python.pip/pip/lib/python3.9/site-packages/google/api_core/retry/retry_base.py\", line 212, \r\nin _retry_error_helper raise final_exc from source_exc File \"/layers/google.python.pip/pip/lib/python3.9/site-packages/google/api_core/retry/retry_unary.py\", line 144, \r\nin retry_target result = target() File \"/layers/google.python.pip/pip/lib/python3.9/site-packages/google/cloud/bigquery/job/query.py\", line 1448, \r\nin _done_or_raise self._reload_query_results(retry=retry, timeout=transport_timeout) File \"/layers/google.python.pip/pip/lib/python3.9/site-packages/google/cloud/bigquery/job/query.py\", line 1429, \r\nin _reload_query_results self._query_results = self._client._get_query_results( File \"/layers/google.python.pip/pip/lib/python3.9/site-packages/google/cloud/bigquery/client.py\", line 1936, \r\nin _get_query_results resource = self._call_api( File \"/layers/google.python.pip/pip/lib/python3.9/site-packages/google/cloud/bigquery/client.py\", line 827, \r\nin _call_api return call() File \"/layers/google.python.pip/pip/lib/python3.9/site-packages/google/cloud/_http/__init__.py\", line 482, \r\nin api_request response = self._make_request( File \"/layers/google.python.pip/pip/lib/python3.9/site-packages/google/cloud/_http/__init__.py\", line 341, \r\nin _make_request return self._do_request( File \"/layers/google.python.pip/pip/lib/python3.9/site-packages/google/cloud/_http/__init__.py\", line 379, \r\nin _do_request return self.http.request( File \"/layers/google.python.pip/pip/lib/python3.9/site-packages/google/auth/transport/requests.py\", line 541, \r\nin request response = super(AuthorizedSession, self).request( File \"/layers/google.python.pip/pip/lib/python3.9/site-packages/requests/sessions.py\", line 589, \r\nin request resp = self.send(prep, **send_kwargs) File \"/layers/google.python.pip/pip/lib/python3.9/site-packages/requests/sessions.py\", line 703, \r\nin send r = adapter.send(request, **kwargs) File \"/layers/google.python.pip/pip/lib/python3.9/site-packages/requests/adapters.py\", line 501, \r\nin send raise ConnectionError(err, request=request) requests.exceptions.ConnectionError: ('Connection aborted.', ConnectionResetError(104, 'Connection reset by peer'))\r\n```\r\n\r\n \n", "before_files": [{"content": "# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom google.api_core import exceptions\nfrom google.api_core import retry\nfrom google.auth import exceptions as auth_exceptions # type: ignore\nimport requests.exceptions\n\n\n_RETRYABLE_REASONS = frozenset(\n [\"rateLimitExceeded\", \"backendError\", \"internalError\", \"badGateway\"]\n)\n\n_UNSTRUCTURED_RETRYABLE_TYPES = (\n ConnectionError,\n exceptions.TooManyRequests,\n exceptions.InternalServerError,\n exceptions.BadGateway,\n exceptions.ServiceUnavailable,\n requests.exceptions.ChunkedEncodingError,\n requests.exceptions.ConnectionError,\n requests.exceptions.Timeout,\n auth_exceptions.TransportError,\n)\n\n_DEFAULT_RETRY_DEADLINE = 10.0 * 60.0 # 10 
minutes\n\n# Ambiguous errors (e.g. internalError, backendError, rateLimitExceeded) retry\n# until the full `_DEFAULT_RETRY_DEADLINE`. This is because the\n# `jobs.getQueryResults` REST API translates a job failure into an HTTP error.\n#\n# TODO(https://github.com/googleapis/python-bigquery/issues/1903): Investigate\n# if we can fail early for ambiguous errors in `QueryJob.result()`'s call to\n# the `jobs.getQueryResult` API.\n#\n# We need `_DEFAULT_JOB_DEADLINE` to be some multiple of\n# `_DEFAULT_RETRY_DEADLINE` to allow for a few retries after the retry\n# timeout is reached.\n#\n# Note: This multiple should actually be a multiple of\n# (2 * _DEFAULT_RETRY_DEADLINE). After an ambiguous exception, the first\n# call from `job_retry()` refreshes the job state without actually restarting\n# the query. The second `job_retry()` actually restarts the query. For a more\n# detailed explanation, see the comments where we set `restart_query_job = True`\n# in `QueryJob.result()`'s inner `is_job_done()` function.\n_DEFAULT_JOB_DEADLINE = 2.0 * (2.0 * _DEFAULT_RETRY_DEADLINE)\n\n\ndef _should_retry(exc):\n \"\"\"Predicate for determining when to retry.\n\n We retry if and only if the 'reason' is 'backendError'\n or 'rateLimitExceeded'.\n \"\"\"\n if not hasattr(exc, \"errors\") or len(exc.errors) == 0:\n # Check for unstructured error returns, e.g. from GFE\n return isinstance(exc, _UNSTRUCTURED_RETRYABLE_TYPES)\n\n reason = exc.errors[0][\"reason\"]\n return reason in _RETRYABLE_REASONS\n\n\nDEFAULT_RETRY = retry.Retry(predicate=_should_retry, deadline=_DEFAULT_RETRY_DEADLINE)\n\"\"\"The default retry object.\n\nAny method with a ``retry`` parameter will be retried automatically,\nwith reasonable defaults. To disable retry, pass ``retry=None``.\nTo modify the default retry behavior, call a ``with_XXX`` method\non ``DEFAULT_RETRY``. For example, to change the deadline to 30 seconds,\npass ``retry=bigquery.DEFAULT_RETRY.with_deadline(30)``.\n\"\"\"\n\n# Note: Take care when updating DEFAULT_TIMEOUT to anything but None. We\n# briefly had a default timeout, but even setting it at more than twice the\n# theoretical server-side default timeout of 2 minutes was not enough for\n# complex queries. See:\n# https://github.com/googleapis/python-bigquery/issues/970#issuecomment-921934647\nDEFAULT_TIMEOUT = None\n\"\"\"The default API timeout.\n\nThis is the time to wait per request. To adjust the total wait time, set a\ndeadline on the retry object.\n\"\"\"\n\njob_retry_reasons = (\n \"rateLimitExceeded\",\n \"backendError\",\n \"internalError\",\n \"jobRateLimitExceeded\",\n)\n\n\ndef _job_should_retry(exc):\n # Sometimes we have ambiguous errors, such as 'backendError' which could\n # be due to an API problem or a job problem. For these, make sure we retry\n # our is_job_done() function.\n #\n # Note: This won't restart the job unless we know for sure it's because of\n # the job status and set restart_query_job = True in that loop. 
This means\n # that we might end up calling this predicate twice for the same job\n # but from different paths: (1) from jobs.getQueryResults RetryError and\n # (2) from translating the job error from the body of a jobs.get response.\n #\n # Note: If we start retrying job types other than queries where we don't\n # call the problematic getQueryResults API to check the status, we need\n # to provide a different predicate, as there shouldn't be ambiguous\n # errors in those cases.\n if isinstance(exc, exceptions.RetryError):\n exc = exc.cause\n\n if not hasattr(exc, \"errors\") or len(exc.errors) == 0:\n return False\n\n reason = exc.errors[0][\"reason\"]\n return reason in job_retry_reasons\n\n\nDEFAULT_JOB_RETRY = retry.Retry(\n predicate=_job_should_retry, deadline=_DEFAULT_JOB_DEADLINE\n)\n\"\"\"\nThe default job retry object.\n\"\"\"\n", "path": "google/cloud/bigquery/retry.py"}], "after_files": [{"content": "# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom google.api_core import exceptions\nfrom google.api_core import retry\nfrom google.auth import exceptions as auth_exceptions # type: ignore\nimport requests.exceptions\n\n\n_RETRYABLE_REASONS = frozenset(\n [\"rateLimitExceeded\", \"backendError\", \"internalError\", \"badGateway\"]\n)\n\n_UNSTRUCTURED_RETRYABLE_TYPES = (\n ConnectionError,\n exceptions.TooManyRequests,\n exceptions.InternalServerError,\n exceptions.BadGateway,\n exceptions.ServiceUnavailable,\n requests.exceptions.ChunkedEncodingError,\n requests.exceptions.ConnectionError,\n requests.exceptions.Timeout,\n auth_exceptions.TransportError,\n)\n\n_DEFAULT_RETRY_DEADLINE = 10.0 * 60.0 # 10 minutes\n\n# Ambiguous errors (e.g. internalError, backendError, rateLimitExceeded) retry\n# until the full `_DEFAULT_RETRY_DEADLINE`. This is because the\n# `jobs.getQueryResults` REST API translates a job failure into an HTTP error.\n#\n# TODO(https://github.com/googleapis/python-bigquery/issues/1903): Investigate\n# if we can fail early for ambiguous errors in `QueryJob.result()`'s call to\n# the `jobs.getQueryResult` API.\n#\n# We need `_DEFAULT_JOB_DEADLINE` to be some multiple of\n# `_DEFAULT_RETRY_DEADLINE` to allow for a few retries after the retry\n# timeout is reached.\n#\n# Note: This multiple should actually be a multiple of\n# (2 * _DEFAULT_RETRY_DEADLINE). After an ambiguous exception, the first\n# call from `job_retry()` refreshes the job state without actually restarting\n# the query. The second `job_retry()` actually restarts the query. For a more\n# detailed explanation, see the comments where we set `restart_query_job = True`\n# in `QueryJob.result()`'s inner `is_job_done()` function.\n_DEFAULT_JOB_DEADLINE = 2.0 * (2.0 * _DEFAULT_RETRY_DEADLINE)\n\n\ndef _should_retry(exc):\n \"\"\"Predicate for determining when to retry.\n\n We retry if and only if the 'reason' is 'backendError'\n or 'rateLimitExceeded'.\n \"\"\"\n if not hasattr(exc, \"errors\") or len(exc.errors) == 0:\n # Check for unstructured error returns, e.g. 
from GFE\n return isinstance(exc, _UNSTRUCTURED_RETRYABLE_TYPES)\n\n reason = exc.errors[0][\"reason\"]\n return reason in _RETRYABLE_REASONS\n\n\nDEFAULT_RETRY = retry.Retry(predicate=_should_retry, deadline=_DEFAULT_RETRY_DEADLINE)\n\"\"\"The default retry object.\n\nAny method with a ``retry`` parameter will be retried automatically,\nwith reasonable defaults. To disable retry, pass ``retry=None``.\nTo modify the default retry behavior, call a ``with_XXX`` method\non ``DEFAULT_RETRY``. For example, to change the deadline to 30 seconds,\npass ``retry=bigquery.DEFAULT_RETRY.with_deadline(30)``.\n\"\"\"\n\n# Note: Take care when updating DEFAULT_TIMEOUT to anything but None. We\n# briefly had a default timeout, but even setting it at more than twice the\n# theoretical server-side default timeout of 2 minutes was not enough for\n# complex queries. See:\n# https://github.com/googleapis/python-bigquery/issues/970#issuecomment-921934647\nDEFAULT_TIMEOUT = None\n\"\"\"The default API timeout.\n\nThis is the time to wait per request. To adjust the total wait time, set a\ndeadline on the retry object.\n\"\"\"\n\njob_retry_reasons = (\n \"rateLimitExceeded\",\n \"backendError\",\n \"internalError\",\n \"jobRateLimitExceeded\",\n)\n\n\ndef _job_should_retry(exc):\n # Sometimes we have ambiguous errors, such as 'backendError' which could\n # be due to an API problem or a job problem. For these, make sure we retry\n # our is_job_done() function.\n #\n # Note: This won't restart the job unless we know for sure it's because of\n # the job status and set restart_query_job = True in that loop. This means\n # that we might end up calling this predicate twice for the same job\n # but from different paths: (1) from jobs.getQueryResults RetryError and\n # (2) from translating the job error from the body of a jobs.get response.\n #\n # Note: If we start retrying job types other than queries where we don't\n # call the problematic getQueryResults API to check the status, we need\n # to provide a different predicate, as there shouldn't be ambiguous\n # errors in those cases.\n if isinstance(exc, exceptions.RetryError):\n exc = exc.cause\n\n # Per https://github.com/googleapis/python-bigquery/issues/1929, sometimes\n # retriable errors make their way here. Because of the separate\n # `restart_query_job` logic to make sure we aren't restarting non-failed\n # jobs, it should be safe to continue and not totally fail our attempt at\n # waiting for the query to complete.\n if _should_retry(exc):\n return True\n\n if not hasattr(exc, \"errors\") or len(exc.errors) == 0:\n return False\n\n reason = exc.errors[0][\"reason\"]\n return reason in job_retry_reasons\n\n\nDEFAULT_JOB_RETRY = retry.Retry(\n predicate=_job_should_retry, deadline=_DEFAULT_JOB_DEADLINE\n)\n\"\"\"\nThe default job retry object.\n\"\"\"\n", "path": "google/cloud/bigquery/retry.py"}]} | 3,098 | 195 |
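The golden diff above for `gh_patches_debug_9369` makes `_job_should_retry` fall back to the API-level `_should_retry` predicate, so retriable transport failures such as `requests.exceptions.ConnectionError` keep the job wait loop alive instead of surfacing to the caller. Below is a condensed, runnable sketch of that predicate logic — the constants are trimmed copies of the ones in the `retry.py` listing, and the `RetryError` unwrapping and auth-exception branches are omitted — not the real library module:

```python
# Condensed sketch of the patched predicates in google/cloud/bigquery/retry.py;
# constants are trimmed copies of the ones shown above, not the full library code.
import requests.exceptions

_RETRYABLE_REASONS = frozenset(
    ["rateLimitExceeded", "backendError", "internalError", "badGateway"]
)
_UNSTRUCTURED_RETRYABLE_TYPES = (
    ConnectionError,
    requests.exceptions.ConnectionError,
    requests.exceptions.Timeout,
)
job_retry_reasons = (
    "rateLimitExceeded",
    "backendError",
    "internalError",
    "jobRateLimitExceeded",
)


def _should_retry(exc):
    # API-level predicate: structured retryable reasons or known transport errors.
    if not hasattr(exc, "errors") or len(exc.errors) == 0:
        return isinstance(exc, _UNSTRUCTURED_RETRYABLE_TYPES)
    return exc.errors[0]["reason"] in _RETRYABLE_REASONS


def _job_should_retry(exc):
    # Fallback added by the patch: retriable transport errors no longer
    # abort the wait for the query to complete.
    if _should_retry(exc):
        return True
    if not hasattr(exc, "errors") or len(exc.errors) == 0:
        return False
    return exc.errors[0]["reason"] in job_retry_reasons


# Before the patch this kind of error returned False and was raised to the caller.
print(_job_should_retry(requests.exceptions.ConnectionError(
    "('Connection aborted.', ConnectionResetError(104, 'Connection reset by peer'))"
)))  # True
```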
gh_patches_debug_9413 | rasdani/github-patches | git_diff | cisagov__manage.get.gov-908 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Update Domain management permissions
### Issue Description
Domain management, specifically that of DNS-specific information, should be reserved for approved domain requests. Currently, if an item is in the list of registered domains and the status of the associated domain request switches to something like Withdrawn/Investigating, then the user can still edit the domain information AND the domain still appears in Active domains.
AC
1. In the Active Domain Request table, do not show domain applications that are approved.
Suggestion: check that, when an application's status changes, the domain management page no longer shows if the new status shouldn't allow it.
### Additional Context (optional)
see [slack thread](https://cisa-corp.slack.com/archives/C05BDEA3C11/p1689089565297719)
### Issue Links
may relate to #701
relates to: #818
_No response_
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/registrar/views/index.py`
Content:
```
1 from django.db.models import F
2 from django.shortcuts import render
3
4 from registrar.models import DomainApplication
5
6
7 def index(request):
8 """This page is available to anyone without logging in."""
9 context = {}
10 if request.user.is_authenticated:
11 applications = DomainApplication.objects.filter(creator=request.user)
12 context["domain_applications"] = applications
13
14 domains = request.user.permissions.values(
15 "role",
16 pk=F("domain__id"),
17 name=F("domain__name"),
18 created_time=F("domain__created_at"),
19 application_status=F("domain__domain_application__status"),
20 )
21 context["domains"] = domains
22 return render(request, "home.html", context)
23
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/registrar/views/index.py b/src/registrar/views/index.py
--- a/src/registrar/views/index.py
+++ b/src/registrar/views/index.py
@@ -9,7 +9,10 @@
context = {}
if request.user.is_authenticated:
applications = DomainApplication.objects.filter(creator=request.user)
- context["domain_applications"] = applications
+ # Let's exclude the approved applications since our
+ # domain_applications context will be used to populate
+ # the active applications table
+ context["domain_applications"] = applications.exclude(status="approved")
domains = request.user.permissions.values(
"role",
| {"golden_diff": "diff --git a/src/registrar/views/index.py b/src/registrar/views/index.py\n--- a/src/registrar/views/index.py\n+++ b/src/registrar/views/index.py\n@@ -9,7 +9,10 @@\n context = {}\n if request.user.is_authenticated:\n applications = DomainApplication.objects.filter(creator=request.user)\n- context[\"domain_applications\"] = applications\n+ # Let's exclude the approved applications since our\n+ # domain_applications context will be used to populate\n+ # the active applications table\n+ context[\"domain_applications\"] = applications.exclude(status=\"approved\")\n \n domains = request.user.permissions.values(\n \"role\",\n", "issue": "Update Domain management permissions \n### Issue Description\r\n\r\nDomain management, specifically that of DNS specific information, should be reserved for approved domain requests. Currently, if an item is in the list of registered domains and the status of the associated domain request switches to something like Withdrawn/Investigating, then the user can still edit the domain information AND the domain still appears in Active domains\r\n\r\nAC\r\n\r\n1. In Active Domain Request table, do not show domain applications that are approved. \r\n\r\n\r\nSuggestion: check to make sure when you change states on an application that the domain management page doesn't show when the status shouldn't allow it.\r\n\r\n\r\n### Additional Context (optional)\r\n\r\nsee [slack thread](https://cisa-corp.slack.com/archives/C05BDEA3C11/p1689089565297719)\r\n\r\n### Issue Links\r\nmay relate to #701 \r\nrelates to: #818 \r\n_No response_\n", "before_files": [{"content": "from django.db.models import F\nfrom django.shortcuts import render\n\nfrom registrar.models import DomainApplication\n\n\ndef index(request):\n \"\"\"This page is available to anyone without logging in.\"\"\"\n context = {}\n if request.user.is_authenticated:\n applications = DomainApplication.objects.filter(creator=request.user)\n context[\"domain_applications\"] = applications\n\n domains = request.user.permissions.values(\n \"role\",\n pk=F(\"domain__id\"),\n name=F(\"domain__name\"),\n created_time=F(\"domain__created_at\"),\n application_status=F(\"domain__domain_application__status\"),\n )\n context[\"domains\"] = domains\n return render(request, \"home.html\", context)\n", "path": "src/registrar/views/index.py"}], "after_files": [{"content": "from django.db.models import F\nfrom django.shortcuts import render\n\nfrom registrar.models import DomainApplication\n\n\ndef index(request):\n \"\"\"This page is available to anyone without logging in.\"\"\"\n context = {}\n if request.user.is_authenticated:\n applications = DomainApplication.objects.filter(creator=request.user)\n # Let's exclude the approved applications since our\n # domain_applications context will be used to populate\n # the active applications table\n context[\"domain_applications\"] = applications.exclude(status=\"approved\")\n\n domains = request.user.permissions.values(\n \"role\",\n pk=F(\"domain__id\"),\n name=F(\"domain__name\"),\n created_time=F(\"domain__created_at\"),\n application_status=F(\"domain__domain_application__status\"),\n )\n context[\"domains\"] = domains\n return render(request, \"home.html\", context)\n", "path": "src/registrar/views/index.py"}]} | 644 | 142 |
gh_patches_debug_30191 | rasdani/github-patches | git_diff | AUTOMATIC1111__stable-diffusion-webui-4371 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Problems with LDSR upscaling
When using the new LDSR upscaling feature, be it via SD upscaling in img2img or directly in "Extras", black rectangles appear in the outputs on the right and at the bottom when upscaling a 230x219 photo.

original

upscaled
no issues when running it on a 512x704 image

original

upscaled
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `modules/ldsr_model_arch.py`
Content:
```
1 import gc
2 import time
3 import warnings
4
5 import numpy as np
6 import torch
7 import torchvision
8 from PIL import Image
9 from einops import rearrange, repeat
10 from omegaconf import OmegaConf
11
12 from ldm.models.diffusion.ddim import DDIMSampler
13 from ldm.util import instantiate_from_config, ismap
14
15 warnings.filterwarnings("ignore", category=UserWarning)
16
17
18 # Create LDSR Class
19 class LDSR:
20 def load_model_from_config(self, half_attention):
21 print(f"Loading model from {self.modelPath}")
22 pl_sd = torch.load(self.modelPath, map_location="cpu")
23 sd = pl_sd["state_dict"]
24 config = OmegaConf.load(self.yamlPath)
25 model = instantiate_from_config(config.model)
26 model.load_state_dict(sd, strict=False)
27 model.cuda()
28 if half_attention:
29 model = model.half()
30
31 model.eval()
32 return {"model": model}
33
34 def __init__(self, model_path, yaml_path):
35 self.modelPath = model_path
36 self.yamlPath = yaml_path
37
38 @staticmethod
39 def run(model, selected_path, custom_steps, eta):
40 example = get_cond(selected_path)
41
42 n_runs = 1
43 guider = None
44 ckwargs = None
45 ddim_use_x0_pred = False
46 temperature = 1.
47 eta = eta
48 custom_shape = None
49
50 height, width = example["image"].shape[1:3]
51 split_input = height >= 128 and width >= 128
52
53 if split_input:
54 ks = 128
55 stride = 64
56 vqf = 4 #
57 model.split_input_params = {"ks": (ks, ks), "stride": (stride, stride),
58 "vqf": vqf,
59 "patch_distributed_vq": True,
60 "tie_braker": False,
61 "clip_max_weight": 0.5,
62 "clip_min_weight": 0.01,
63 "clip_max_tie_weight": 0.5,
64 "clip_min_tie_weight": 0.01}
65 else:
66 if hasattr(model, "split_input_params"):
67 delattr(model, "split_input_params")
68
69 x_t = None
70 logs = None
71 for n in range(n_runs):
72 if custom_shape is not None:
73 x_t = torch.randn(1, custom_shape[1], custom_shape[2], custom_shape[3]).to(model.device)
74 x_t = repeat(x_t, '1 c h w -> b c h w', b=custom_shape[0])
75
76 logs = make_convolutional_sample(example, model,
77 custom_steps=custom_steps,
78 eta=eta, quantize_x0=False,
79 custom_shape=custom_shape,
80 temperature=temperature, noise_dropout=0.,
81 corrector=guider, corrector_kwargs=ckwargs, x_T=x_t,
82 ddim_use_x0_pred=ddim_use_x0_pred
83 )
84 return logs
85
86 def super_resolution(self, image, steps=100, target_scale=2, half_attention=False):
87 model = self.load_model_from_config(half_attention)
88
89 # Run settings
90 diffusion_steps = int(steps)
91 eta = 1.0
92
93 down_sample_method = 'Lanczos'
94
95 gc.collect()
96 torch.cuda.empty_cache()
97
98 im_og = image
99 width_og, height_og = im_og.size
100 # If we can adjust the max upscale size, then the 4 below should be our variable
101 down_sample_rate = target_scale / 4
102 wd = width_og * down_sample_rate
103 hd = height_og * down_sample_rate
104 width_downsampled_pre = int(wd)
105 height_downsampled_pre = int(hd)
106
107 if down_sample_rate != 1:
108 print(
109 f'Downsampling from [{width_og}, {height_og}] to [{width_downsampled_pre}, {height_downsampled_pre}]')
110 im_og = im_og.resize((width_downsampled_pre, height_downsampled_pre), Image.LANCZOS)
111 else:
112 print(f"Down sample rate is 1 from {target_scale} / 4 (Not downsampling)")
113 logs = self.run(model["model"], im_og, diffusion_steps, eta)
114
115 sample = logs["sample"]
116 sample = sample.detach().cpu()
117 sample = torch.clamp(sample, -1., 1.)
118 sample = (sample + 1.) / 2. * 255
119 sample = sample.numpy().astype(np.uint8)
120 sample = np.transpose(sample, (0, 2, 3, 1))
121 a = Image.fromarray(sample[0])
122
123 del model
124 gc.collect()
125 torch.cuda.empty_cache()
126 return a
127
128
129 def get_cond(selected_path):
130 example = dict()
131 up_f = 4
132 c = selected_path.convert('RGB')
133 c = torch.unsqueeze(torchvision.transforms.ToTensor()(c), 0)
134 c_up = torchvision.transforms.functional.resize(c, size=[up_f * c.shape[2], up_f * c.shape[3]],
135 antialias=True)
136 c_up = rearrange(c_up, '1 c h w -> 1 h w c')
137 c = rearrange(c, '1 c h w -> 1 h w c')
138 c = 2. * c - 1.
139
140 c = c.to(torch.device("cuda"))
141 example["LR_image"] = c
142 example["image"] = c_up
143
144 return example
145
146
147 @torch.no_grad()
148 def convsample_ddim(model, cond, steps, shape, eta=1.0, callback=None, normals_sequence=None,
149 mask=None, x0=None, quantize_x0=False, temperature=1., score_corrector=None,
150 corrector_kwargs=None, x_t=None
151 ):
152 ddim = DDIMSampler(model)
153 bs = shape[0]
154 shape = shape[1:]
155 print(f"Sampling with eta = {eta}; steps: {steps}")
156 samples, intermediates = ddim.sample(steps, batch_size=bs, shape=shape, conditioning=cond, callback=callback,
157 normals_sequence=normals_sequence, quantize_x0=quantize_x0, eta=eta,
158 mask=mask, x0=x0, temperature=temperature, verbose=False,
159 score_corrector=score_corrector,
160 corrector_kwargs=corrector_kwargs, x_t=x_t)
161
162 return samples, intermediates
163
164
165 @torch.no_grad()
166 def make_convolutional_sample(batch, model, custom_steps=None, eta=1.0, quantize_x0=False, custom_shape=None, temperature=1., noise_dropout=0., corrector=None,
167 corrector_kwargs=None, x_T=None, ddim_use_x0_pred=False):
168 log = dict()
169
170 z, c, x, xrec, xc = model.get_input(batch, model.first_stage_key,
171 return_first_stage_outputs=True,
172 force_c_encode=not (hasattr(model, 'split_input_params')
173 and model.cond_stage_key == 'coordinates_bbox'),
174 return_original_cond=True)
175
176 if custom_shape is not None:
177 z = torch.randn(custom_shape)
178 print(f"Generating {custom_shape[0]} samples of shape {custom_shape[1:]}")
179
180 z0 = None
181
182 log["input"] = x
183 log["reconstruction"] = xrec
184
185 if ismap(xc):
186 log["original_conditioning"] = model.to_rgb(xc)
187 if hasattr(model, 'cond_stage_key'):
188 log[model.cond_stage_key] = model.to_rgb(xc)
189
190 else:
191 log["original_conditioning"] = xc if xc is not None else torch.zeros_like(x)
192 if model.cond_stage_model:
193 log[model.cond_stage_key] = xc if xc is not None else torch.zeros_like(x)
194 if model.cond_stage_key == 'class_label':
195 log[model.cond_stage_key] = xc[model.cond_stage_key]
196
197 with model.ema_scope("Plotting"):
198 t0 = time.time()
199
200 sample, intermediates = convsample_ddim(model, c, steps=custom_steps, shape=z.shape,
201 eta=eta,
202 quantize_x0=quantize_x0, mask=None, x0=z0,
203 temperature=temperature, score_corrector=corrector, corrector_kwargs=corrector_kwargs,
204 x_t=x_T)
205 t1 = time.time()
206
207 if ddim_use_x0_pred:
208 sample = intermediates['pred_x0'][-1]
209
210 x_sample = model.decode_first_stage(sample)
211
212 try:
213 x_sample_noquant = model.decode_first_stage(sample, force_not_quantize=True)
214 log["sample_noquant"] = x_sample_noquant
215 log["sample_diff"] = torch.abs(x_sample_noquant - x_sample)
216 except:
217 pass
218
219 log["sample"] = x_sample
220 log["time"] = t1 - t0
221
222 return log
223
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/modules/ldsr_model_arch.py b/modules/ldsr_model_arch.py
--- a/modules/ldsr_model_arch.py
+++ b/modules/ldsr_model_arch.py
@@ -101,8 +101,8 @@
down_sample_rate = target_scale / 4
wd = width_og * down_sample_rate
hd = height_og * down_sample_rate
- width_downsampled_pre = int(wd)
- height_downsampled_pre = int(hd)
+ width_downsampled_pre = int(np.ceil(wd))
+ height_downsampled_pre = int(np.ceil(hd))
if down_sample_rate != 1:
print(
@@ -110,7 +110,12 @@
im_og = im_og.resize((width_downsampled_pre, height_downsampled_pre), Image.LANCZOS)
else:
print(f"Down sample rate is 1 from {target_scale} / 4 (Not downsampling)")
- logs = self.run(model["model"], im_og, diffusion_steps, eta)
+
+ # pad width and height to multiples of 64, pads with the edge values of image to avoid artifacts
+ pad_w, pad_h = np.max(((2, 2), np.ceil(np.array(im_og.size) / 64).astype(int)), axis=0) * 64 - im_og.size
+ im_padded = Image.fromarray(np.pad(np.array(im_og), ((0, pad_h), (0, pad_w), (0, 0)), mode='edge'))
+
+ logs = self.run(model["model"], im_padded, diffusion_steps, eta)
sample = logs["sample"]
sample = sample.detach().cpu()
@@ -120,6 +125,9 @@
sample = np.transpose(sample, (0, 2, 3, 1))
a = Image.fromarray(sample[0])
+ # remove padding
+ a = a.crop((0, 0) + tuple(np.array(im_og.size) * 4))
+
del model
gc.collect()
torch.cuda.empty_cache()
| {"golden_diff": "diff --git a/modules/ldsr_model_arch.py b/modules/ldsr_model_arch.py\n--- a/modules/ldsr_model_arch.py\n+++ b/modules/ldsr_model_arch.py\n@@ -101,8 +101,8 @@\n down_sample_rate = target_scale / 4\n wd = width_og * down_sample_rate\n hd = height_og * down_sample_rate\n- width_downsampled_pre = int(wd)\n- height_downsampled_pre = int(hd)\n+ width_downsampled_pre = int(np.ceil(wd))\n+ height_downsampled_pre = int(np.ceil(hd))\n \n if down_sample_rate != 1:\n print(\n@@ -110,7 +110,12 @@\n im_og = im_og.resize((width_downsampled_pre, height_downsampled_pre), Image.LANCZOS)\n else:\n print(f\"Down sample rate is 1 from {target_scale} / 4 (Not downsampling)\")\n- logs = self.run(model[\"model\"], im_og, diffusion_steps, eta)\n+ \n+ # pad width and height to multiples of 64, pads with the edge values of image to avoid artifacts\n+ pad_w, pad_h = np.max(((2, 2), np.ceil(np.array(im_og.size) / 64).astype(int)), axis=0) * 64 - im_og.size\n+ im_padded = Image.fromarray(np.pad(np.array(im_og), ((0, pad_h), (0, pad_w), (0, 0)), mode='edge'))\n+ \n+ logs = self.run(model[\"model\"], im_padded, diffusion_steps, eta)\n \n sample = logs[\"sample\"]\n sample = sample.detach().cpu()\n@@ -120,6 +125,9 @@\n sample = np.transpose(sample, (0, 2, 3, 1))\n a = Image.fromarray(sample[0])\n \n+ # remove padding\n+ a = a.crop((0, 0) + tuple(np.array(im_og.size) * 4))\n+\n del model\n gc.collect()\n torch.cuda.empty_cache()\n", "issue": "Problems with LDSR upscaling\nWhen using the new LDSR upscaling feature, be it via SD upscaling in img2img or directly in \"Extras\", black rectangles appear in the outputs on the right and at the bottom when upscaling a 230x219 photo. \r\n\r\n\r\noriginal\r\n\r\n\r\nupscaled\r\n\r\nno issues when running it on a 512x704 image\r\n\r\n\r\noriginal\r\n\r\n\r\nupscaled\n", "before_files": [{"content": "import gc\nimport time\nimport warnings\n\nimport numpy as np\nimport torch\nimport torchvision\nfrom PIL import Image\nfrom einops import rearrange, repeat\nfrom omegaconf import OmegaConf\n\nfrom ldm.models.diffusion.ddim import DDIMSampler\nfrom ldm.util import instantiate_from_config, ismap\n\nwarnings.filterwarnings(\"ignore\", category=UserWarning)\n\n\n# Create LDSR Class\nclass LDSR:\n def load_model_from_config(self, half_attention):\n print(f\"Loading model from {self.modelPath}\")\n pl_sd = torch.load(self.modelPath, map_location=\"cpu\")\n sd = pl_sd[\"state_dict\"]\n config = OmegaConf.load(self.yamlPath)\n model = instantiate_from_config(config.model)\n model.load_state_dict(sd, strict=False)\n model.cuda()\n if half_attention:\n model = model.half()\n\n model.eval()\n return {\"model\": model}\n\n def __init__(self, model_path, yaml_path):\n self.modelPath = model_path\n self.yamlPath = yaml_path\n\n @staticmethod\n def run(model, selected_path, custom_steps, eta):\n example = get_cond(selected_path)\n\n n_runs = 1\n guider = None\n ckwargs = None\n ddim_use_x0_pred = False\n temperature = 1.\n eta = eta\n custom_shape = None\n\n height, width = example[\"image\"].shape[1:3]\n split_input = height >= 128 and width >= 128\n\n if split_input:\n ks = 128\n stride = 64\n vqf = 4 #\n model.split_input_params = {\"ks\": (ks, ks), \"stride\": (stride, stride),\n \"vqf\": vqf,\n \"patch_distributed_vq\": True,\n \"tie_braker\": False,\n \"clip_max_weight\": 0.5,\n \"clip_min_weight\": 0.01,\n \"clip_max_tie_weight\": 0.5,\n \"clip_min_tie_weight\": 0.01}\n else:\n if hasattr(model, \"split_input_params\"):\n delattr(model, \"split_input_params\")\n\n x_t = None\n 
logs = None\n for n in range(n_runs):\n if custom_shape is not None:\n x_t = torch.randn(1, custom_shape[1], custom_shape[2], custom_shape[3]).to(model.device)\n x_t = repeat(x_t, '1 c h w -> b c h w', b=custom_shape[0])\n\n logs = make_convolutional_sample(example, model,\n custom_steps=custom_steps,\n eta=eta, quantize_x0=False,\n custom_shape=custom_shape,\n temperature=temperature, noise_dropout=0.,\n corrector=guider, corrector_kwargs=ckwargs, x_T=x_t,\n ddim_use_x0_pred=ddim_use_x0_pred\n )\n return logs\n\n def super_resolution(self, image, steps=100, target_scale=2, half_attention=False):\n model = self.load_model_from_config(half_attention)\n\n # Run settings\n diffusion_steps = int(steps)\n eta = 1.0\n\n down_sample_method = 'Lanczos'\n\n gc.collect()\n torch.cuda.empty_cache()\n\n im_og = image\n width_og, height_og = im_og.size\n # If we can adjust the max upscale size, then the 4 below should be our variable\n down_sample_rate = target_scale / 4\n wd = width_og * down_sample_rate\n hd = height_og * down_sample_rate\n width_downsampled_pre = int(wd)\n height_downsampled_pre = int(hd)\n\n if down_sample_rate != 1:\n print(\n f'Downsampling from [{width_og}, {height_og}] to [{width_downsampled_pre}, {height_downsampled_pre}]')\n im_og = im_og.resize((width_downsampled_pre, height_downsampled_pre), Image.LANCZOS)\n else:\n print(f\"Down sample rate is 1 from {target_scale} / 4 (Not downsampling)\")\n logs = self.run(model[\"model\"], im_og, diffusion_steps, eta)\n\n sample = logs[\"sample\"]\n sample = sample.detach().cpu()\n sample = torch.clamp(sample, -1., 1.)\n sample = (sample + 1.) / 2. * 255\n sample = sample.numpy().astype(np.uint8)\n sample = np.transpose(sample, (0, 2, 3, 1))\n a = Image.fromarray(sample[0])\n\n del model\n gc.collect()\n torch.cuda.empty_cache()\n return a\n\n\ndef get_cond(selected_path):\n example = dict()\n up_f = 4\n c = selected_path.convert('RGB')\n c = torch.unsqueeze(torchvision.transforms.ToTensor()(c), 0)\n c_up = torchvision.transforms.functional.resize(c, size=[up_f * c.shape[2], up_f * c.shape[3]],\n antialias=True)\n c_up = rearrange(c_up, '1 c h w -> 1 h w c')\n c = rearrange(c, '1 c h w -> 1 h w c')\n c = 2. 
* c - 1.\n\n c = c.to(torch.device(\"cuda\"))\n example[\"LR_image\"] = c\n example[\"image\"] = c_up\n\n return example\n\n\[email protected]_grad()\ndef convsample_ddim(model, cond, steps, shape, eta=1.0, callback=None, normals_sequence=None,\n mask=None, x0=None, quantize_x0=False, temperature=1., score_corrector=None,\n corrector_kwargs=None, x_t=None\n ):\n ddim = DDIMSampler(model)\n bs = shape[0]\n shape = shape[1:]\n print(f\"Sampling with eta = {eta}; steps: {steps}\")\n samples, intermediates = ddim.sample(steps, batch_size=bs, shape=shape, conditioning=cond, callback=callback,\n normals_sequence=normals_sequence, quantize_x0=quantize_x0, eta=eta,\n mask=mask, x0=x0, temperature=temperature, verbose=False,\n score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs, x_t=x_t)\n\n return samples, intermediates\n\n\[email protected]_grad()\ndef make_convolutional_sample(batch, model, custom_steps=None, eta=1.0, quantize_x0=False, custom_shape=None, temperature=1., noise_dropout=0., corrector=None,\n corrector_kwargs=None, x_T=None, ddim_use_x0_pred=False):\n log = dict()\n\n z, c, x, xrec, xc = model.get_input(batch, model.first_stage_key,\n return_first_stage_outputs=True,\n force_c_encode=not (hasattr(model, 'split_input_params')\n and model.cond_stage_key == 'coordinates_bbox'),\n return_original_cond=True)\n\n if custom_shape is not None:\n z = torch.randn(custom_shape)\n print(f\"Generating {custom_shape[0]} samples of shape {custom_shape[1:]}\")\n\n z0 = None\n\n log[\"input\"] = x\n log[\"reconstruction\"] = xrec\n\n if ismap(xc):\n log[\"original_conditioning\"] = model.to_rgb(xc)\n if hasattr(model, 'cond_stage_key'):\n log[model.cond_stage_key] = model.to_rgb(xc)\n\n else:\n log[\"original_conditioning\"] = xc if xc is not None else torch.zeros_like(x)\n if model.cond_stage_model:\n log[model.cond_stage_key] = xc if xc is not None else torch.zeros_like(x)\n if model.cond_stage_key == 'class_label':\n log[model.cond_stage_key] = xc[model.cond_stage_key]\n\n with model.ema_scope(\"Plotting\"):\n t0 = time.time()\n\n sample, intermediates = convsample_ddim(model, c, steps=custom_steps, shape=z.shape,\n eta=eta,\n quantize_x0=quantize_x0, mask=None, x0=z0,\n temperature=temperature, score_corrector=corrector, corrector_kwargs=corrector_kwargs,\n x_t=x_T)\n t1 = time.time()\n\n if ddim_use_x0_pred:\n sample = intermediates['pred_x0'][-1]\n\n x_sample = model.decode_first_stage(sample)\n\n try:\n x_sample_noquant = model.decode_first_stage(sample, force_not_quantize=True)\n log[\"sample_noquant\"] = x_sample_noquant\n log[\"sample_diff\"] = torch.abs(x_sample_noquant - x_sample)\n except:\n pass\n\n log[\"sample\"] = x_sample\n log[\"time\"] = t1 - t0\n\n return log\n", "path": "modules/ldsr_model_arch.py"}], "after_files": [{"content": "import gc\nimport time\nimport warnings\n\nimport numpy as np\nimport torch\nimport torchvision\nfrom PIL import Image\nfrom einops import rearrange, repeat\nfrom omegaconf import OmegaConf\n\nfrom ldm.models.diffusion.ddim import DDIMSampler\nfrom ldm.util import instantiate_from_config, ismap\n\nwarnings.filterwarnings(\"ignore\", category=UserWarning)\n\n\n# Create LDSR Class\nclass LDSR:\n def load_model_from_config(self, half_attention):\n print(f\"Loading model from {self.modelPath}\")\n pl_sd = torch.load(self.modelPath, map_location=\"cpu\")\n sd = pl_sd[\"state_dict\"]\n config = OmegaConf.load(self.yamlPath)\n model = instantiate_from_config(config.model)\n model.load_state_dict(sd, strict=False)\n model.cuda()\n if 
half_attention:\n model = model.half()\n\n model.eval()\n return {\"model\": model}\n\n def __init__(self, model_path, yaml_path):\n self.modelPath = model_path\n self.yamlPath = yaml_path\n\n @staticmethod\n def run(model, selected_path, custom_steps, eta):\n example = get_cond(selected_path)\n\n n_runs = 1\n guider = None\n ckwargs = None\n ddim_use_x0_pred = False\n temperature = 1.\n eta = eta\n custom_shape = None\n\n height, width = example[\"image\"].shape[1:3]\n split_input = height >= 128 and width >= 128\n\n if split_input:\n ks = 128\n stride = 64\n vqf = 4 #\n model.split_input_params = {\"ks\": (ks, ks), \"stride\": (stride, stride),\n \"vqf\": vqf,\n \"patch_distributed_vq\": True,\n \"tie_braker\": False,\n \"clip_max_weight\": 0.5,\n \"clip_min_weight\": 0.01,\n \"clip_max_tie_weight\": 0.5,\n \"clip_min_tie_weight\": 0.01}\n else:\n if hasattr(model, \"split_input_params\"):\n delattr(model, \"split_input_params\")\n\n x_t = None\n logs = None\n for n in range(n_runs):\n if custom_shape is not None:\n x_t = torch.randn(1, custom_shape[1], custom_shape[2], custom_shape[3]).to(model.device)\n x_t = repeat(x_t, '1 c h w -> b c h w', b=custom_shape[0])\n\n logs = make_convolutional_sample(example, model,\n custom_steps=custom_steps,\n eta=eta, quantize_x0=False,\n custom_shape=custom_shape,\n temperature=temperature, noise_dropout=0.,\n corrector=guider, corrector_kwargs=ckwargs, x_T=x_t,\n ddim_use_x0_pred=ddim_use_x0_pred\n )\n return logs\n\n def super_resolution(self, image, steps=100, target_scale=2, half_attention=False):\n model = self.load_model_from_config(half_attention)\n\n # Run settings\n diffusion_steps = int(steps)\n eta = 1.0\n\n down_sample_method = 'Lanczos'\n\n gc.collect()\n torch.cuda.empty_cache()\n\n im_og = image\n width_og, height_og = im_og.size\n # If we can adjust the max upscale size, then the 4 below should be our variable\n down_sample_rate = target_scale / 4\n wd = width_og * down_sample_rate\n hd = height_og * down_sample_rate\n width_downsampled_pre = int(np.ceil(wd))\n height_downsampled_pre = int(np.ceil(hd))\n\n if down_sample_rate != 1:\n print(\n f'Downsampling from [{width_og}, {height_og}] to [{width_downsampled_pre}, {height_downsampled_pre}]')\n im_og = im_og.resize((width_downsampled_pre, height_downsampled_pre), Image.LANCZOS)\n else:\n print(f\"Down sample rate is 1 from {target_scale} / 4 (Not downsampling)\")\n \n # pad width and height to multiples of 64, pads with the edge values of image to avoid artifacts\n pad_w, pad_h = np.max(((2, 2), np.ceil(np.array(im_og.size) / 64).astype(int)), axis=0) * 64 - im_og.size\n im_padded = Image.fromarray(np.pad(np.array(im_og), ((0, pad_h), (0, pad_w), (0, 0)), mode='edge'))\n \n logs = self.run(model[\"model\"], im_padded, diffusion_steps, eta)\n\n sample = logs[\"sample\"]\n sample = sample.detach().cpu()\n sample = torch.clamp(sample, -1., 1.)\n sample = (sample + 1.) / 2. 
* 255\n sample = sample.numpy().astype(np.uint8)\n sample = np.transpose(sample, (0, 2, 3, 1))\n a = Image.fromarray(sample[0])\n\n # remove padding\n a = a.crop((0, 0) + tuple(np.array(im_og.size) * 4))\n\n del model\n gc.collect()\n torch.cuda.empty_cache()\n return a\n\n\ndef get_cond(selected_path):\n example = dict()\n up_f = 4\n c = selected_path.convert('RGB')\n c = torch.unsqueeze(torchvision.transforms.ToTensor()(c), 0)\n c_up = torchvision.transforms.functional.resize(c, size=[up_f * c.shape[2], up_f * c.shape[3]],\n antialias=True)\n c_up = rearrange(c_up, '1 c h w -> 1 h w c')\n c = rearrange(c, '1 c h w -> 1 h w c')\n c = 2. * c - 1.\n\n c = c.to(torch.device(\"cuda\"))\n example[\"LR_image\"] = c\n example[\"image\"] = c_up\n\n return example\n\n\[email protected]_grad()\ndef convsample_ddim(model, cond, steps, shape, eta=1.0, callback=None, normals_sequence=None,\n mask=None, x0=None, quantize_x0=False, temperature=1., score_corrector=None,\n corrector_kwargs=None, x_t=None\n ):\n ddim = DDIMSampler(model)\n bs = shape[0]\n shape = shape[1:]\n print(f\"Sampling with eta = {eta}; steps: {steps}\")\n samples, intermediates = ddim.sample(steps, batch_size=bs, shape=shape, conditioning=cond, callback=callback,\n normals_sequence=normals_sequence, quantize_x0=quantize_x0, eta=eta,\n mask=mask, x0=x0, temperature=temperature, verbose=False,\n score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs, x_t=x_t)\n\n return samples, intermediates\n\n\[email protected]_grad()\ndef make_convolutional_sample(batch, model, custom_steps=None, eta=1.0, quantize_x0=False, custom_shape=None, temperature=1., noise_dropout=0., corrector=None,\n corrector_kwargs=None, x_T=None, ddim_use_x0_pred=False):\n log = dict()\n\n z, c, x, xrec, xc = model.get_input(batch, model.first_stage_key,\n return_first_stage_outputs=True,\n force_c_encode=not (hasattr(model, 'split_input_params')\n and model.cond_stage_key == 'coordinates_bbox'),\n return_original_cond=True)\n\n if custom_shape is not None:\n z = torch.randn(custom_shape)\n print(f\"Generating {custom_shape[0]} samples of shape {custom_shape[1:]}\")\n\n z0 = None\n\n log[\"input\"] = x\n log[\"reconstruction\"] = xrec\n\n if ismap(xc):\n log[\"original_conditioning\"] = model.to_rgb(xc)\n if hasattr(model, 'cond_stage_key'):\n log[model.cond_stage_key] = model.to_rgb(xc)\n\n else:\n log[\"original_conditioning\"] = xc if xc is not None else torch.zeros_like(x)\n if model.cond_stage_model:\n log[model.cond_stage_key] = xc if xc is not None else torch.zeros_like(x)\n if model.cond_stage_key == 'class_label':\n log[model.cond_stage_key] = xc[model.cond_stage_key]\n\n with model.ema_scope(\"Plotting\"):\n t0 = time.time()\n\n sample, intermediates = convsample_ddim(model, c, steps=custom_steps, shape=z.shape,\n eta=eta,\n quantize_x0=quantize_x0, mask=None, x0=z0,\n temperature=temperature, score_corrector=corrector, corrector_kwargs=corrector_kwargs,\n x_t=x_T)\n t1 = time.time()\n\n if ddim_use_x0_pred:\n sample = intermediates['pred_x0'][-1]\n\n x_sample = model.decode_first_stage(sample)\n\n try:\n x_sample_noquant = model.decode_first_stage(sample, force_not_quantize=True)\n log[\"sample_noquant\"] = x_sample_noquant\n log[\"sample_diff\"] = torch.abs(x_sample_noquant - x_sample)\n except:\n pass\n\n log[\"sample\"] = x_sample\n log[\"time\"] = t1 - t0\n\n return log\n", "path": "modules/ldsr_model_arch.py"}]} | 3,166 | 480 |
gh_patches_debug_2812 | rasdani/github-patches | git_diff | comic__grand-challenge.org-827 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Update to Django 2.2
- [ ] Check that all Django apps support 2.2
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `app/grandchallenge/serving/views.py`
Content:
```
1 import os
2 import posixpath
3 import re
4
5 from django.conf import settings
6 from django.core.files import File
7 from django.core.files.storage import DefaultStorage
8 from django.http import Http404, HttpResponse
9 from django.shortcuts import get_object_or_404
10 from django.utils._os import safe_join
11 from rest_framework.authentication import TokenAuthentication
12 from rest_framework.exceptions import AuthenticationFailed
13
14 from grandchallenge.cases.models import Image
15 from grandchallenge.challenges.models import Challenge
16 from grandchallenge.core.storage import ProtectedS3Storage
17 from grandchallenge.evaluation.models import Submission
18 from grandchallenge.serving.api import serve_file
19 from grandchallenge.serving.permissions import (
20 can_access,
21 user_can_download_image,
22 user_can_download_submission,
23 )
24
25
26 def serve_fullpath(*, fullpath):
27 storage = DefaultStorage()
28
29 if not (os.path.abspath(fullpath) == fullpath) or not storage.exists(
30 fullpath
31 ):
32 raise Http404("File not found.")
33
34 try:
35 f = storage.open(fullpath, "rb")
36 file = File(f)
37 return serve_file(file, save_as=True)
38 except IOError:
39 raise Http404("File not found.")
40
41
42 def protected_storage_redirect(*, name):
43 # Get the storage with the internal redirect and auth. This will prepend
44 # settings.PROTECTED_S3_STORAGE_KWARGS['endpoint_url'] to the url
45 storage = ProtectedS3Storage(internal=True)
46
47 if not storage.exists(name=name):
48 raise Http404("File not found.")
49
50 url = storage.url(name=name)
51
52 # Now strip the endpoint_url
53 external_url = re.match(
54 f"^{settings.PROTECTED_S3_STORAGE_KWARGS['endpoint_url']}(.*)$", url
55 ).group(1)
56
57 response = HttpResponse()
58 response["X-Accel-Redirect"] = external_url
59
60 return response
61
62
63 def serve_folder(request, *, challenge_name=None, folder=None, path):
64 """
65 Serve static files in a folder.
66
67 If the file is in a challenge folder, then the subfolders of this challenge
68 will be checked for permissions, see `can_access`.
69
70 If the challenge_short_name is not set, then the folder must be set.
71 ALL FILES IN THIS FOLDER WILL BE AVAILABLE TO DOWNLOAD.
72 """
73 path = posixpath.normpath(path).lstrip("/")
74
75 if challenge_name:
76 if folder:
77 raise AttributeError("Only challenge_name or folder should be set")
78
79 challenge = get_object_or_404(
80 Challenge, short_name__iexact=challenge_name
81 )
82
83 document_root = safe_join(settings.MEDIA_ROOT, challenge.short_name)
84 fullpath = safe_join(document_root, path)
85 allowed = can_access(
86 request.user,
87 fullpath[len(document_root) :].lstrip("/"),
88 challenge=challenge,
89 )
90 elif folder:
91 document_root = safe_join(settings.MEDIA_ROOT, folder)
92 fullpath = safe_join(document_root, path)
93 allowed = True
94 else:
95 raise AttributeError("challenge_name or folder must be set")
96
97 if not allowed:
98 raise Http404("File not found.")
99
100 return serve_fullpath(fullpath=fullpath)
101
102
103 def serve_images(request, *, pk, path):
104 document_root = safe_join(f"/{settings.IMAGE_FILES_SUBDIRECTORY}", pk)
105 path = posixpath.normpath(path).lstrip("/")
106 name = safe_join(document_root, path)
107
108 try:
109 image = Image.objects.get(pk=pk)
110 except Image.DoesNotExist:
111 raise Http404("File not found.")
112
113 try:
114 user, _ = TokenAuthentication().authenticate(request)
115 except (AuthenticationFailed, TypeError):
116 user = request.user
117
118 if user_can_download_image(user=user, image=image):
119 return protected_storage_redirect(name=name)
120
121 raise Http404("File not found.")
122
123
124 def serve_submissions(request, *, submission_pk, **_):
125 try:
126 submission = Submission.objects.get(pk=submission_pk)
127 except Submission.DoesNotExist:
128 raise Http404("File not found.")
129
130 if user_can_download_submission(user=request.user, submission=submission):
131 return protected_storage_redirect(name=submission.file.name)
132
133 raise Http404("File not found.")
134
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/app/grandchallenge/serving/views.py b/app/grandchallenge/serving/views.py
--- a/app/grandchallenge/serving/views.py
+++ b/app/grandchallenge/serving/views.py
@@ -101,7 +101,7 @@
def serve_images(request, *, pk, path):
- document_root = safe_join(f"/{settings.IMAGE_FILES_SUBDIRECTORY}", pk)
+ document_root = safe_join(f"/{settings.IMAGE_FILES_SUBDIRECTORY}", str(pk))
path = posixpath.normpath(path).lstrip("/")
name = safe_join(document_root, path)
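
A hedged note on why the single change above is needed (assuming `pk` reaches the view as a `uuid.UUID` rather than a plain string): `safe_join` ultimately builds the path with `os.path.join`, which only accepts str, bytes or os.PathLike components, so the primary key has to be stringified explicitly; presumably the pre-2.2 helper coerced its arguments to text, which is why this only surfaced with the upgrade. A minimal sketch:

```python
import os
import uuid

pk = uuid.uuid4()

try:
    os.path.join("/protected/images", pk)            # UUID component -> TypeError
except TypeError as exc:
    print(exc)

print(os.path.join("/protected/images", str(pk)))   # stringified first, as in the patch
```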
| {"golden_diff": "diff --git a/app/grandchallenge/serving/views.py b/app/grandchallenge/serving/views.py\n--- a/app/grandchallenge/serving/views.py\n+++ b/app/grandchallenge/serving/views.py\n@@ -101,7 +101,7 @@\n \n \n def serve_images(request, *, pk, path):\n- document_root = safe_join(f\"/{settings.IMAGE_FILES_SUBDIRECTORY}\", pk)\n+ document_root = safe_join(f\"/{settings.IMAGE_FILES_SUBDIRECTORY}\", str(pk))\n path = posixpath.normpath(path).lstrip(\"/\")\n name = safe_join(document_root, path)\n", "issue": "Update to Django 2.2\n- [ ] Check that all Django apps support 2.2\n", "before_files": [{"content": "import os\nimport posixpath\nimport re\n\nfrom django.conf import settings\nfrom django.core.files import File\nfrom django.core.files.storage import DefaultStorage\nfrom django.http import Http404, HttpResponse\nfrom django.shortcuts import get_object_or_404\nfrom django.utils._os import safe_join\nfrom rest_framework.authentication import TokenAuthentication\nfrom rest_framework.exceptions import AuthenticationFailed\n\nfrom grandchallenge.cases.models import Image\nfrom grandchallenge.challenges.models import Challenge\nfrom grandchallenge.core.storage import ProtectedS3Storage\nfrom grandchallenge.evaluation.models import Submission\nfrom grandchallenge.serving.api import serve_file\nfrom grandchallenge.serving.permissions import (\n can_access,\n user_can_download_image,\n user_can_download_submission,\n)\n\n\ndef serve_fullpath(*, fullpath):\n storage = DefaultStorage()\n\n if not (os.path.abspath(fullpath) == fullpath) or not storage.exists(\n fullpath\n ):\n raise Http404(\"File not found.\")\n\n try:\n f = storage.open(fullpath, \"rb\")\n file = File(f)\n return serve_file(file, save_as=True)\n except IOError:\n raise Http404(\"File not found.\")\n\n\ndef protected_storage_redirect(*, name):\n # Get the storage with the internal redirect and auth. 
This will prepend\n # settings.PROTECTED_S3_STORAGE_KWARGS['endpoint_url'] to the url\n storage = ProtectedS3Storage(internal=True)\n\n if not storage.exists(name=name):\n raise Http404(\"File not found.\")\n\n url = storage.url(name=name)\n\n # Now strip the endpoint_url\n external_url = re.match(\n f\"^{settings.PROTECTED_S3_STORAGE_KWARGS['endpoint_url']}(.*)$\", url\n ).group(1)\n\n response = HttpResponse()\n response[\"X-Accel-Redirect\"] = external_url\n\n return response\n\n\ndef serve_folder(request, *, challenge_name=None, folder=None, path):\n \"\"\"\n Serve static files in a folder.\n\n If the file is in a challenge folder, then the subfolders of this challenge\n will be checked for permissions, see `can_access`.\n\n If the challenge_short_name is not set, then the folder must be set.\n ALL FILES IN THIS FOLDER WILL BE AVAILABLE TO DOWNLOAD.\n \"\"\"\n path = posixpath.normpath(path).lstrip(\"/\")\n\n if challenge_name:\n if folder:\n raise AttributeError(\"Only challenge_name or folder should be set\")\n\n challenge = get_object_or_404(\n Challenge, short_name__iexact=challenge_name\n )\n\n document_root = safe_join(settings.MEDIA_ROOT, challenge.short_name)\n fullpath = safe_join(document_root, path)\n allowed = can_access(\n request.user,\n fullpath[len(document_root) :].lstrip(\"/\"),\n challenge=challenge,\n )\n elif folder:\n document_root = safe_join(settings.MEDIA_ROOT, folder)\n fullpath = safe_join(document_root, path)\n allowed = True\n else:\n raise AttributeError(\"challenge_name or folder must be set\")\n\n if not allowed:\n raise Http404(\"File not found.\")\n\n return serve_fullpath(fullpath=fullpath)\n\n\ndef serve_images(request, *, pk, path):\n document_root = safe_join(f\"/{settings.IMAGE_FILES_SUBDIRECTORY}\", pk)\n path = posixpath.normpath(path).lstrip(\"/\")\n name = safe_join(document_root, path)\n\n try:\n image = Image.objects.get(pk=pk)\n except Image.DoesNotExist:\n raise Http404(\"File not found.\")\n\n try:\n user, _ = TokenAuthentication().authenticate(request)\n except (AuthenticationFailed, TypeError):\n user = request.user\n\n if user_can_download_image(user=user, image=image):\n return protected_storage_redirect(name=name)\n\n raise Http404(\"File not found.\")\n\n\ndef serve_submissions(request, *, submission_pk, **_):\n try:\n submission = Submission.objects.get(pk=submission_pk)\n except Submission.DoesNotExist:\n raise Http404(\"File not found.\")\n\n if user_can_download_submission(user=request.user, submission=submission):\n return protected_storage_redirect(name=submission.file.name)\n\n raise Http404(\"File not found.\")\n", "path": "app/grandchallenge/serving/views.py"}], "after_files": [{"content": "import os\nimport posixpath\nimport re\n\nfrom django.conf import settings\nfrom django.core.files import File\nfrom django.core.files.storage import DefaultStorage\nfrom django.http import Http404, HttpResponse\nfrom django.shortcuts import get_object_or_404\nfrom django.utils._os import safe_join\nfrom rest_framework.authentication import TokenAuthentication\nfrom rest_framework.exceptions import AuthenticationFailed\n\nfrom grandchallenge.cases.models import Image\nfrom grandchallenge.challenges.models import Challenge\nfrom grandchallenge.core.storage import ProtectedS3Storage\nfrom grandchallenge.evaluation.models import Submission\nfrom grandchallenge.serving.api import serve_file\nfrom grandchallenge.serving.permissions import (\n can_access,\n user_can_download_image,\n user_can_download_submission,\n)\n\n\ndef serve_fullpath(*, 
fullpath):\n storage = DefaultStorage()\n\n if not (os.path.abspath(fullpath) == fullpath) or not storage.exists(\n fullpath\n ):\n raise Http404(\"File not found.\")\n\n try:\n f = storage.open(fullpath, \"rb\")\n file = File(f)\n return serve_file(file, save_as=True)\n except IOError:\n raise Http404(\"File not found.\")\n\n\ndef protected_storage_redirect(*, name):\n # Get the storage with the internal redirect and auth. This will prepend\n # settings.PROTECTED_S3_STORAGE_KWARGS['endpoint_url'] to the url\n storage = ProtectedS3Storage(internal=True)\n\n if not storage.exists(name=name):\n raise Http404(\"File not found.\")\n\n url = storage.url(name=name)\n\n # Now strip the endpoint_url\n external_url = re.match(\n f\"^{settings.PROTECTED_S3_STORAGE_KWARGS['endpoint_url']}(.*)$\", url\n ).group(1)\n\n response = HttpResponse()\n response[\"X-Accel-Redirect\"] = external_url\n\n return response\n\n\ndef serve_folder(request, *, challenge_name=None, folder=None, path):\n \"\"\"\n Serve static files in a folder.\n\n If the file is in a challenge folder, then the subfolders of this challenge\n will be checked for permissions, see `can_access`.\n\n If the challenge_short_name is not set, then the folder must be set.\n ALL FILES IN THIS FOLDER WILL BE AVAILABLE TO DOWNLOAD.\n \"\"\"\n path = posixpath.normpath(path).lstrip(\"/\")\n\n if challenge_name:\n if folder:\n raise AttributeError(\"Only challenge_name or folder should be set\")\n\n challenge = get_object_or_404(\n Challenge, short_name__iexact=challenge_name\n )\n\n document_root = safe_join(settings.MEDIA_ROOT, challenge.short_name)\n fullpath = safe_join(document_root, path)\n allowed = can_access(\n request.user,\n fullpath[len(document_root) :].lstrip(\"/\"),\n challenge=challenge,\n )\n elif folder:\n document_root = safe_join(settings.MEDIA_ROOT, folder)\n fullpath = safe_join(document_root, path)\n allowed = True\n else:\n raise AttributeError(\"challenge_name or folder must be set\")\n\n if not allowed:\n raise Http404(\"File not found.\")\n\n return serve_fullpath(fullpath=fullpath)\n\n\ndef serve_images(request, *, pk, path):\n document_root = safe_join(f\"/{settings.IMAGE_FILES_SUBDIRECTORY}\", str(pk))\n path = posixpath.normpath(path).lstrip(\"/\")\n name = safe_join(document_root, path)\n\n try:\n image = Image.objects.get(pk=pk)\n except Image.DoesNotExist:\n raise Http404(\"File not found.\")\n\n try:\n user, _ = TokenAuthentication().authenticate(request)\n except (AuthenticationFailed, TypeError):\n user = request.user\n\n if user_can_download_image(user=user, image=image):\n return protected_storage_redirect(name=name)\n\n raise Http404(\"File not found.\")\n\n\ndef serve_submissions(request, *, submission_pk, **_):\n try:\n submission = Submission.objects.get(pk=submission_pk)\n except Submission.DoesNotExist:\n raise Http404(\"File not found.\")\n\n if user_can_download_submission(user=request.user, submission=submission):\n return protected_storage_redirect(name=submission.file.name)\n\n raise Http404(\"File not found.\")\n", "path": "app/grandchallenge/serving/views.py"}]} | 1,495 | 128 |
gh_patches_debug_13393 | rasdani/github-patches | git_diff | pulp__pulpcore-4310 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Import-check may return an error saying "directory does not exist" when it actually just lacks permissions
Endpoint /pulp/api/v3/importers/core/pulp/import-check/ returns an error saying "Directory does not exist" when the pulp user lacks permission to read said directory.
**To Reproduce**
Try importing content from a directory where the pulp user doesn't have read access.
**Expected behavior**
The error returned should indicate the permission error.
**Additional context**
Pulp is using the os.path.exists() method to verify whether the directory exists: https://github.com/pulp/pulpcore/blob/main/pulpcore/app/views/importer.py#L44-L45
However, the method can return False if permission is not granted to access the directory, even if the directory exists:
~~~
os.path.exists(path)
Return True if path refers to an existing path or an open file descriptor. Returns False for broken symbolic links. On some platforms, this function may return False if permission is not granted to execute os.stat() on the requested file, even if the path physically exists.
~~~
os.path method documentation -> https://docs.python.org/3/library/os.path.html
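
For illustration, here is a minimal sketch of the difference described above (an assumption-laden example, not Pulp code: it needs a POSIX system and a non-root user, since root bypasses permission checks):

```python
import os
import stat
import tempfile

with tempfile.TemporaryDirectory() as parent:
    locked = os.path.join(parent, "locked")
    os.mkdir(locked)
    os.chmod(parent, 0)  # drop all permissions on the parent directory
    try:
        print(os.path.exists(locked))   # False, even though the directory exists
        try:
            os.stat(locked)
        except OSError as exc:
            print(exc)                  # [Errno 13] Permission denied: '.../locked'
    finally:
        os.chmod(parent, stat.S_IRWXU)  # restore access so cleanup can remove it
```

os.path.exists() swallows the PermissionError and simply reports False, which is why the endpoint claims the directory does not exist, while os.stat() keeps the real reason.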
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pulpcore/app/views/importer.py`
Content:
```
1 from gettext import gettext as _
2 import json
3 import os
4 from drf_spectacular.utils import extend_schema
5 from rest_framework.views import APIView
6 from rest_framework.response import Response
7
8 from pulpcore.app import settings
9 from pulpcore.app.serializers import PulpImportCheckResponseSerializer, PulpImportCheckSerializer
10
11
12 def _check_allowed_import_path(a_path):
13 user_provided_realpath = os.path.realpath(a_path)
14 for allowed_path in settings.ALLOWED_IMPORT_PATHS:
15 if user_provided_realpath.startswith(allowed_path):
16 return True, None
17 return False, _(
18 "{} is not an allowed import path".format(os.path.dirname(os.path.realpath(a_path)))
19 )
20
21
22 def _validate_file(in_param, data):
23 """
24 Returns a (is-valid, msgs[]) tuple describing all problems found with data[in_param]
25
26 We check for a number of things, attempting to return all the errors we can find. We don't want
27 to give out information for files in arbitrary locations on the filesystem; if the check
28 for ALLOWED_IMPORT_PATHS fails, we report that and ignore any other problems.
29
30 If the directory containing the base-file doesn't exist, or isn't readable, or the specified
31 file doesn't exist, report and return.
32
33 Error-messages for all other checks are additive.
34 """
35 # check allowed, leave if failed
36 file = data[in_param]
37 real_file = os.path.realpath(file)
38 rc, msg = _check_allowed_import_path(real_file)
39 if not rc:
40 return rc, [msg]
41
42 # check directory-sanity, leave if failed
43 owning_dir = os.path.dirname(real_file)
44 if not os.path.exists(owning_dir):
45 return False, [_("directory {} does not exist").format(owning_dir)]
46 if not os.access(owning_dir, os.R_OK):
47 return False, [_("directory {} does not allow read-access").format(owning_dir)]
48
49 # check file-exists, leave if failed
50 if not os.path.exists(real_file):
51 return False, [_("file {} does not exist").format(real_file)]
52
53 # check file-sanity
54 msgs = []
55 isfile = os.path.isfile(real_file)
56 readable = os.access(real_file, os.R_OK)
57
58 rc = isfile and readable
59 if not isfile:
60 msgs.append(_("{} is not a file".format(real_file)))
61 if not readable:
62 msgs.append(_("{} exists but cannot be read".format(real_file)))
63
64 # extra check for toc-dir-write
65 if in_param == "toc":
66 if not os.access(owning_dir, os.W_OK):
67 rc = False
68 msgs.append(_("directory {} must allow pulp write-access".format(owning_dir)))
69
70 return rc, msgs
71
72
73 class PulpImporterImportCheckView(APIView):
74 """
75 Returns validity of proposed parameters for a PulpImport call.
76 """
77
78 @extend_schema(
79 summary="Validate the parameters to be used for a PulpImport call",
80 operation_id="pulp_import_check_post",
81 request=PulpImportCheckSerializer,
82 responses={200: PulpImportCheckResponseSerializer},
83 )
84 def post(self, request, format=None):
85 """
86 Evaluates validity of proposed PulpImport parameters 'toc', 'path', and 'repo_mapping'.
87
88 * Checks that toc, path are in ALLOWED_IMPORT_PATHS
89 * if ALLOWED:
90 * Checks that toc, path exist and are readable
91 * If toc specified, checks that containing dir is writeable
92 * Checks that repo_mapping is valid JSON
93 """
94 serializer = PulpImportCheckSerializer(data=request.data)
95 if serializer.is_valid():
96 data = {}
97 if "toc" in serializer.data:
98 data["toc"] = {}
99 data["toc"]["context"] = serializer.data["toc"]
100 data["toc"]["is_valid"], data["toc"]["messages"] = _validate_file(
101 "toc", serializer.data
102 )
103
104 if "path" in serializer.data:
105 data["path"] = {}
106 data["path"]["context"] = serializer.data["path"]
107 data["path"]["is_valid"], data["path"]["messages"] = _validate_file(
108 "path", serializer.data
109 )
110
111 if "repo_mapping" in serializer.data:
112 data["repo_mapping"] = {}
113 data["repo_mapping"]["context"] = serializer.data["repo_mapping"]
114 try:
115 json.loads(serializer.data["repo_mapping"])
116 data["repo_mapping"]["is_valid"] = True
117 data["repo_mapping"]["messages"] = []
118 except json.JSONDecodeError:
119 data["repo_mapping"]["is_valid"] = False
120 data["repo_mapping"]["messages"] = [_("invalid JSON")]
121
122 crs = PulpImportCheckResponseSerializer(data, context={"request": request})
123 return Response(crs.data)
124 return Response(serializer.errors, status=400)
125
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pulpcore/app/views/importer.py b/pulpcore/app/views/importer.py
--- a/pulpcore/app/views/importer.py
+++ b/pulpcore/app/views/importer.py
@@ -40,11 +40,14 @@
return rc, [msg]
# check directory-sanity, leave if failed
+ # use os.stat to ensure directory exists and pulp has read-access
+ # return any errors received from os.stat to the user
+
owning_dir = os.path.dirname(real_file)
- if not os.path.exists(owning_dir):
- return False, [_("directory {} does not exist").format(owning_dir)]
- if not os.access(owning_dir, os.R_OK):
- return False, [_("directory {} does not allow read-access").format(owning_dir)]
+ try:
+ os.stat(owning_dir)
+ except OSError as e:
+ return False, [_("{}").format(e)]
# check file-exists, leave if failed
if not os.path.exists(real_file):
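
A hedged reading of the patched check above: whatever OSError os.stat() raises for the owning directory is forwarded verbatim as the validation message, so a permission problem is reported as such instead of "directory does not exist". The path below is hypothetical:

```python
import os

try:
    os.stat("/var/lib/pulp/imports/locked")  # hypothetical import directory
except OSError as e:
    # The patched _validate_file returns [str(e)] to the caller, e.g.
    # "[Errno 13] Permission denied: ..." or "[Errno 2] No such file or directory: ..."
    print([str(e)])
```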
| {"golden_diff": "diff --git a/pulpcore/app/views/importer.py b/pulpcore/app/views/importer.py\n--- a/pulpcore/app/views/importer.py\n+++ b/pulpcore/app/views/importer.py\n@@ -40,11 +40,14 @@\n return rc, [msg]\n \n # check directory-sanity, leave if failed\n+ # use os.stat to ensure directory exists and pulp has read-access\n+ # return any errors received from os.stat to the user\n+\n owning_dir = os.path.dirname(real_file)\n- if not os.path.exists(owning_dir):\n- return False, [_(\"directory {} does not exist\").format(owning_dir)]\n- if not os.access(owning_dir, os.R_OK):\n- return False, [_(\"directory {} does not allow read-access\").format(owning_dir)]\n+ try:\n+ os.stat(owning_dir)\n+ except OSError as e:\n+ return False, [_(\"{}\").format(e)]\n \n # check file-exists, leave if failed\n if not os.path.exists(real_file):\n", "issue": "Import-check may return error saying \"director does not exist\" when it actually just lack permissions\nEndpoint /pulp/api/v3/importers/core/pulp/import-check/ returns error saying \"Directory does not exist\" when pulp user lack permissions to read said directory.\r\n\r\n**To Reproduce**\r\n\r\nTry importing content from a directory where pulp user doesn't have read access.\r\n\r\n**Expected behavior**\r\nError returned should indicate the permission error.\r\n\r\n**Additional context**\r\n\r\nPulp is using os.path.exists() method to verify if the directory exists: https://github.com/pulp/pulpcore/blob/main/pulpcore/app/views/importer.py#L44-L45\r\n\r\nHowever, the method can return false if permission is not granted to access the directory even if the directory exists\r\n\r\n~~~\r\nos.path.exists(path)\r\nReturn True if path refers to an existing path or an open file descriptor. Returns False for broken symbolic links. On some platforms, this function may return False if permission is not granted to execute os.stat() on the requested file, even if the path physically exists.\r\n~~~\r\n\r\nos.path method documentation -> https://docs.python.org/3/library/os.path.html\r\n\n", "before_files": [{"content": "from gettext import gettext as _\nimport json\nimport os\nfrom drf_spectacular.utils import extend_schema\nfrom rest_framework.views import APIView\nfrom rest_framework.response import Response\n\nfrom pulpcore.app import settings\nfrom pulpcore.app.serializers import PulpImportCheckResponseSerializer, PulpImportCheckSerializer\n\n\ndef _check_allowed_import_path(a_path):\n user_provided_realpath = os.path.realpath(a_path)\n for allowed_path in settings.ALLOWED_IMPORT_PATHS:\n if user_provided_realpath.startswith(allowed_path):\n return True, None\n return False, _(\n \"{} is not an allowed import path\".format(os.path.dirname(os.path.realpath(a_path)))\n )\n\n\ndef _validate_file(in_param, data):\n \"\"\"\n Returns a (is-valid, msgs[]) tuple describing all problems found with data[in_param]\n\n We check for a number of things, attempting to return all the errors we can find. 
We don't want\n to give out information for files in arbitrary locations on the filesystem; if the check\n for ALLOWED_IMPORT_PATHS fails, we report that and ignore any other problems.\n\n If the directory containing the base-file doesn't exist, or isn't readable, or the specified\n file doesn't exist, report and return.\n\n Error-messages for all other checks are additive.\n \"\"\"\n # check allowed, leave if failed\n file = data[in_param]\n real_file = os.path.realpath(file)\n rc, msg = _check_allowed_import_path(real_file)\n if not rc:\n return rc, [msg]\n\n # check directory-sanity, leave if failed\n owning_dir = os.path.dirname(real_file)\n if not os.path.exists(owning_dir):\n return False, [_(\"directory {} does not exist\").format(owning_dir)]\n if not os.access(owning_dir, os.R_OK):\n return False, [_(\"directory {} does not allow read-access\").format(owning_dir)]\n\n # check file-exists, leave if failed\n if not os.path.exists(real_file):\n return False, [_(\"file {} does not exist\").format(real_file)]\n\n # check file-sanity\n msgs = []\n isfile = os.path.isfile(real_file)\n readable = os.access(real_file, os.R_OK)\n\n rc = isfile and readable\n if not isfile:\n msgs.append(_(\"{} is not a file\".format(real_file)))\n if not readable:\n msgs.append(_(\"{} exists but cannot be read\".format(real_file)))\n\n # extra check for toc-dir-write\n if in_param == \"toc\":\n if not os.access(owning_dir, os.W_OK):\n rc = False\n msgs.append(_(\"directory {} must allow pulp write-access\".format(owning_dir)))\n\n return rc, msgs\n\n\nclass PulpImporterImportCheckView(APIView):\n \"\"\"\n Returns validity of proposed parameters for a PulpImport call.\n \"\"\"\n\n @extend_schema(\n summary=\"Validate the parameters to be used for a PulpImport call\",\n operation_id=\"pulp_import_check_post\",\n request=PulpImportCheckSerializer,\n responses={200: PulpImportCheckResponseSerializer},\n )\n def post(self, request, format=None):\n \"\"\"\n Evaluates validity of proposed PulpImport parameters 'toc', 'path', and 'repo_mapping'.\n\n * Checks that toc, path are in ALLOWED_IMPORT_PATHS\n * if ALLOWED:\n * Checks that toc, path exist and are readable\n * If toc specified, checks that containing dir is writeable\n * Checks that repo_mapping is valid JSON\n \"\"\"\n serializer = PulpImportCheckSerializer(data=request.data)\n if serializer.is_valid():\n data = {}\n if \"toc\" in serializer.data:\n data[\"toc\"] = {}\n data[\"toc\"][\"context\"] = serializer.data[\"toc\"]\n data[\"toc\"][\"is_valid\"], data[\"toc\"][\"messages\"] = _validate_file(\n \"toc\", serializer.data\n )\n\n if \"path\" in serializer.data:\n data[\"path\"] = {}\n data[\"path\"][\"context\"] = serializer.data[\"path\"]\n data[\"path\"][\"is_valid\"], data[\"path\"][\"messages\"] = _validate_file(\n \"path\", serializer.data\n )\n\n if \"repo_mapping\" in serializer.data:\n data[\"repo_mapping\"] = {}\n data[\"repo_mapping\"][\"context\"] = serializer.data[\"repo_mapping\"]\n try:\n json.loads(serializer.data[\"repo_mapping\"])\n data[\"repo_mapping\"][\"is_valid\"] = True\n data[\"repo_mapping\"][\"messages\"] = []\n except json.JSONDecodeError:\n data[\"repo_mapping\"][\"is_valid\"] = False\n data[\"repo_mapping\"][\"messages\"] = [_(\"invalid JSON\")]\n\n crs = PulpImportCheckResponseSerializer(data, context={\"request\": request})\n return Response(crs.data)\n return Response(serializer.errors, status=400)\n", "path": "pulpcore/app/views/importer.py"}], "after_files": [{"content": "from gettext import gettext as _\nimport 
json\nimport os\nfrom drf_spectacular.utils import extend_schema\nfrom rest_framework.views import APIView\nfrom rest_framework.response import Response\n\nfrom pulpcore.app import settings\nfrom pulpcore.app.serializers import PulpImportCheckResponseSerializer, PulpImportCheckSerializer\n\n\ndef _check_allowed_import_path(a_path):\n user_provided_realpath = os.path.realpath(a_path)\n for allowed_path in settings.ALLOWED_IMPORT_PATHS:\n if user_provided_realpath.startswith(allowed_path):\n return True, None\n return False, _(\n \"{} is not an allowed import path\".format(os.path.dirname(os.path.realpath(a_path)))\n )\n\n\ndef _validate_file(in_param, data):\n \"\"\"\n Returns a (is-valid, msgs[]) tuple describing all problems found with data[in_param]\n\n We check for a number of things, attempting to return all the errors we can find. We don't want\n to give out information for files in arbitrary locations on the filesystem; if the check\n for ALLOWED_IMPORT_PATHS fails, we report that and ignore any other problems.\n\n If the directory containing the base-file doesn't exist, or isn't readable, or the specified\n file doesn't exist, report and return.\n\n Error-messages for all other checks are additive.\n \"\"\"\n # check allowed, leave if failed\n file = data[in_param]\n real_file = os.path.realpath(file)\n rc, msg = _check_allowed_import_path(real_file)\n if not rc:\n return rc, [msg]\n\n # check directory-sanity, leave if failed\n # use os.stat to ensure directory exists and pulp has read-access\n # return any errors received from os.stat to the user\n\n owning_dir = os.path.dirname(real_file)\n try:\n os.stat(owning_dir)\n except OSError as e:\n return False, [_(\"{}\").format(e)]\n\n # check file-exists, leave if failed\n if not os.path.exists(real_file):\n return False, [_(\"file {} does not exist\").format(real_file)]\n\n # check file-sanity\n msgs = []\n isfile = os.path.isfile(real_file)\n readable = os.access(real_file, os.R_OK)\n\n rc = isfile and readable\n if not isfile:\n msgs.append(_(\"{} is not a file\".format(real_file)))\n if not readable:\n msgs.append(_(\"{} exists but cannot be read\".format(real_file)))\n\n # extra check for toc-dir-write\n if in_param == \"toc\":\n if not os.access(owning_dir, os.W_OK):\n rc = False\n msgs.append(_(\"directory {} must allow pulp write-access\".format(owning_dir)))\n\n return rc, msgs\n\n\nclass PulpImporterImportCheckView(APIView):\n \"\"\"\n Returns validity of proposed parameters for a PulpImport call.\n \"\"\"\n\n @extend_schema(\n summary=\"Validate the parameters to be used for a PulpImport call\",\n operation_id=\"pulp_import_check_post\",\n request=PulpImportCheckSerializer,\n responses={200: PulpImportCheckResponseSerializer},\n )\n def post(self, request, format=None):\n \"\"\"\n Evaluates validity of proposed PulpImport parameters 'toc', 'path', and 'repo_mapping'.\n\n * Checks that toc, path are in ALLOWED_IMPORT_PATHS\n * if ALLOWED:\n * Checks that toc, path exist and are readable\n * If toc specified, checks that containing dir is writeable\n * Checks that repo_mapping is valid JSON\n \"\"\"\n serializer = PulpImportCheckSerializer(data=request.data)\n if serializer.is_valid():\n data = {}\n if \"toc\" in serializer.data:\n data[\"toc\"] = {}\n data[\"toc\"][\"context\"] = serializer.data[\"toc\"]\n data[\"toc\"][\"is_valid\"], data[\"toc\"][\"messages\"] = _validate_file(\n \"toc\", serializer.data\n )\n\n if \"path\" in serializer.data:\n data[\"path\"] = {}\n data[\"path\"][\"context\"] = 
serializer.data[\"path\"]\n data[\"path\"][\"is_valid\"], data[\"path\"][\"messages\"] = _validate_file(\n \"path\", serializer.data\n )\n\n if \"repo_mapping\" in serializer.data:\n data[\"repo_mapping\"] = {}\n data[\"repo_mapping\"][\"context\"] = serializer.data[\"repo_mapping\"]\n try:\n json.loads(serializer.data[\"repo_mapping\"])\n data[\"repo_mapping\"][\"is_valid\"] = True\n data[\"repo_mapping\"][\"messages\"] = []\n except json.JSONDecodeError:\n data[\"repo_mapping\"][\"is_valid\"] = False\n data[\"repo_mapping\"][\"messages\"] = [_(\"invalid JSON\")]\n\n crs = PulpImportCheckResponseSerializer(data, context={\"request\": request})\n return Response(crs.data)\n return Response(serializer.errors, status=400)\n", "path": "pulpcore/app/views/importer.py"}]} | 1,824 | 237 |
gh_patches_debug_21413 | rasdani/github-patches | git_diff | biopython__biopython-3287 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
BioPython returns label of SnapGene Feature, not title
### Setup
I am reporting a problem with Biopython version, Python version, and operating
system as follows:
```
[GCC 6.3.0 20170516]
CPython
Linux-4.19.0-10-amd64-x86_64-with-debian-10.5
1.78
```
### Expected behaviour
I would like to get the names of all features found in >4000 SnapGene plasmid maps. I would expect the names of the features returned by BioPython to be the same as those returned by SnapGene Server's reportFeatures function.
### Actual behaviour
For some features, the name returned by BioPython is different from that returned by SnapGene Server's reportFeatures function.
The problematic features are those that have both a title and a label attribute, for instance:

In the example above, BioPython reports the label `lac operator` while SnapGene Server returns the
title `lac repressor encoded by lacI lac operator`. This would not be so much of a problem if I could find `lac repressor encoded by lacI lac operator` anywhere in the relevant SeqFeature object, but I cannot, unless, of course, I missed it.
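
For reference, a hedged sketch of the inspection being described; "example.dna" stands in for the attached SnapGene file and is not a path from this repository:

```python
from Bio import SeqIO

record = SeqIO.read("example.dna", "snapgene")
for feature in record.features:
    print(feature.type, feature.qualifiers.get("label"))
# With Biopython 1.78 this prints only the short labels (e.g. "lac operator");
# the longer SnapGene title is not stored in any qualifier of the SeqFeature.
```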
### Steps to reproduce
I am attaching a relevant example SnapGene file.
[example.zip](https://github.com/biopython/biopython/files/5267092/example.zip)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `Bio/SeqIO/SnapGeneIO.py`
Content:
```
1 # Copyright 2017-2019 Damien Goutte-Gattat. All rights reserved.
2 #
3 # This file is part of the Biopython distribution and governed by your
4 # choice of the "Biopython License Agreement" or the "BSD 3-Clause License".
5 # Please see the LICENSE file that should have been included as part of this
6 # package.
7
8 """Bio.SeqIO support for the SnapGene file format.
9
10 The SnapGene binary format is the native format used by the SnapGene program
11 from GSL Biotech LLC.
12 """
13
14 from datetime import datetime
15 from re import sub
16 from struct import unpack
17 from xml.dom.minidom import parseString
18
19 from Bio.Seq import Seq
20 from Bio.SeqFeature import SeqFeature, FeatureLocation
21 from Bio.SeqRecord import SeqRecord
22 from .Interfaces import SequenceIterator
23
24
25 def _iterate(handle):
26 """Iterate over the packets of a SnapGene file.
27
28 A SnapGene file is made of packets, each packet being a TLV-like
29 structure comprising:
30
31 - 1 single byte indicating the packet's type;
32 - 1 big-endian long integer (4 bytes) indicating the length of the
33 packet's data;
34 - the actual data.
35 """
36 while True:
37 packet_type = handle.read(1)
38 if len(packet_type) < 1: # No more packet
39 return
40 packet_type = unpack(">B", packet_type)[0]
41
42 length = handle.read(4)
43 if len(length) < 4:
44 raise ValueError("Unexpected end of packet")
45 length = unpack(">I", length)[0]
46
47 data = handle.read(length)
48 if len(data) < length:
49 raise ValueError("Unexpected end of packet")
50
51 yield (packet_type, length, data)
52
53
54 def _parse_dna_packet(length, data, record):
55 """Parse a DNA sequence packet.
56
57 A DNA sequence packet contains a single byte flag followed by the
58 sequence itself.
59 """
60 if record.seq:
61 raise ValueError("The file contains more than one DNA packet")
62
63 flags, sequence = unpack(">B%ds" % (length - 1), data)
64 record.seq = Seq(sequence.decode("ASCII"))
65 record.annotations["molecule_type"] = "DNA"
66 if flags & 0x01:
67 record.annotations["topology"] = "circular"
68 else:
69 record.annotations["topology"] = "linear"
70
71
72 def _parse_notes_packet(length, data, record):
73 """Parse a 'Notes' packet.
74
75 This type of packet contains some metadata about the sequence. They
76 are stored as a XML string with a 'Notes' root node.
77 """
78 xml = parseString(data.decode("UTF-8"))
79 type = _get_child_value(xml, "Type")
80 if type == "Synthetic":
81 record.annotations["data_file_division"] = "SYN"
82 else:
83 record.annotations["data_file_division"] = "UNC"
84
85 date = _get_child_value(xml, "LastModified")
86 if date:
87 record.annotations["date"] = datetime.strptime(date, "%Y.%m.%d")
88
89 acc = _get_child_value(xml, "AccessionNumber")
90 if acc:
91 record.id = acc
92
93 comment = _get_child_value(xml, "Comments")
94 if comment:
95 record.name = comment.split(" ", 1)[0]
96 record.description = comment
97 if not acc:
98 record.id = record.name
99
100
101 def _parse_cookie_packet(length, data, record):
102 """Parse a SnapGene cookie packet.
103
104 Every SnapGene file starts with a packet of this type. It acts as
105 a magic cookie identifying the file as a SnapGene file.
106 """
107 cookie, seq_type, exp_version, imp_version = unpack(">8sHHH", data)
108 if cookie.decode("ASCII") != "SnapGene":
109 raise ValueError("The file is not a valid SnapGene file")
110
111
112 def _parse_location(rangespec, strand, record):
113 start, end = [int(x) for x in rangespec.split("-")]
114 # Account for SnapGene's 1-based coordinates
115 start = start - 1
116 if start > end:
117 # Range wrapping the end of the sequence
118 l1 = FeatureLocation(start, len(record), strand=strand)
119 l2 = FeatureLocation(0, end, strand=strand)
120 location = l1 + l2
121 else:
122 location = FeatureLocation(start, end, strand=strand)
123 return location
124
125
126 def _parse_features_packet(length, data, record):
127 """Parse a sequence features packet.
128
129 This packet stores sequence features (except primer binding sites,
130 which are in a dedicated Primers packet). The data is a XML string
131 starting with a 'Features' root node.
132 """
133 xml = parseString(data.decode("UTF-8"))
134 for feature in xml.getElementsByTagName("Feature"):
135 quals = {}
136
137 type = _get_attribute_value(feature, "type", default="misc_feature")
138 label = _get_attribute_value(feature, "name")
139 if label:
140 quals["label"] = [label]
141
142 strand = +1
143 directionality = int(
144 _get_attribute_value(feature, "directionality", default="1")
145 )
146 if directionality == 2:
147 strand = -1
148
149 location = None
150 for segment in feature.getElementsByTagName("Segment"):
151 rng = _get_attribute_value(segment, "range")
152 if not location:
153 location = _parse_location(rng, strand, record)
154 else:
155 location = location + _parse_location(rng, strand, record)
156 if not location:
157 raise ValueError("Missing feature location")
158
159 for qualifier in feature.getElementsByTagName("Q"):
160 qname = _get_attribute_value(
161 qualifier, "name", error="Missing qualifier name"
162 )
163 qvalues = []
164 for value in qualifier.getElementsByTagName("V"):
165 if value.hasAttribute("text"):
166 qvalues.append(_decode(value.attributes["text"].value))
167 elif value.hasAttribute("predef"):
168 qvalues.append(_decode(value.attributes["predef"].value))
169 elif value.hasAttribute("int"):
170 qvalues.append(int(value.attributes["int"].value))
171 quals[qname] = qvalues
172
173 feature = SeqFeature(location, type=type, qualifiers=quals)
174 record.features.append(feature)
175
176
177 def _parse_primers_packet(length, data, record):
178 """Parse a Primers packet.
179
180 A Primers packet is similar to a Features packet but specifically
181 stores primer binding features. The data is a XML string starting
182 with a 'Primers' root node.
183 """
184 xml = parseString(data.decode("UTF-8"))
185 for primer in xml.getElementsByTagName("Primer"):
186 quals = {}
187
188 name = _get_attribute_value(primer, "name")
189 if name:
190 quals["label"] = [name]
191
192 for site in primer.getElementsByTagName("BindingSite"):
193 rng = _get_attribute_value(
194 site, "location", error="Missing binding site location"
195 )
196 strand = int(_get_attribute_value(site, "boundStrand", default="0"))
197 if strand == 1:
198 strand = -1
199 else:
200 strand = +1
201
202 feature = SeqFeature(
203 _parse_location(rng, strand, record),
204 type="primer_bind",
205 qualifiers=quals,
206 )
207 record.features.append(feature)
208
209
210 _packet_handlers = {
211 0x00: _parse_dna_packet,
212 0x05: _parse_primers_packet,
213 0x06: _parse_notes_packet,
214 0x0A: _parse_features_packet,
215 }
216
217
218 # Helper functions to process the XML data in
219 # some of the segments
220
221
222 def _decode(text):
223 # Get rid of HTML tags in some values
224 return sub("<[^>]+>", "", text)
225
226
227 def _get_attribute_value(node, name, default=None, error=None):
228 if node.hasAttribute(name):
229 return _decode(node.attributes[name].value)
230 elif error:
231 raise ValueError(error)
232 else:
233 return default
234
235
236 def _get_child_value(node, name, default=None, error=None):
237 children = node.getElementsByTagName(name)
238 if (
239 children
240 and children[0].childNodes
241 and children[0].firstChild.nodeType == node.TEXT_NODE
242 ):
243 return _decode(children[0].firstChild.data)
244 elif error:
245 raise ValueError(error)
246 else:
247 return default
248
249
250 class SnapGeneIterator(SequenceIterator):
251 """Parser for SnapGene files."""
252
253 def __init__(self, source):
254 """Parse a SnapGene file and return a SeqRecord object.
255
256 Argument source is a file-like object or a path to a file.
257
258 Note that a SnapGene file can only contain one sequence, so this
259 iterator will always return a single record.
260 """
261 super().__init__(source, mode="b", fmt="SnapGene")
262
263 def parse(self, handle):
264 """Start parsing the file, and return a SeqRecord generator."""
265 records = self.iterate(handle)
266 return records
267
268 def iterate(self, handle):
269 """Iterate over the records in the SnapGene file."""
270 record = SeqRecord(None)
271 packets = _iterate(handle)
272 try:
273 packet_type, length, data = next(packets)
274 except StopIteration:
275 raise ValueError("Empty file.") from None
276
277 if packet_type != 0x09:
278 raise ValueError("The file does not start with a SnapGene cookie packet")
279 _parse_cookie_packet(length, data, record)
280
281 for (packet_type, length, data) in packets:
282 handler = _packet_handlers.get(packet_type)
283 if handler is not None:
284 handler(length, data, record)
285
286 if not record.seq:
287 raise ValueError("No DNA packet in file")
288
289 yield record
290
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/Bio/SeqIO/SnapGeneIO.py b/Bio/SeqIO/SnapGeneIO.py
--- a/Bio/SeqIO/SnapGeneIO.py
+++ b/Bio/SeqIO/SnapGeneIO.py
@@ -135,9 +135,6 @@
quals = {}
type = _get_attribute_value(feature, "type", default="misc_feature")
- label = _get_attribute_value(feature, "name")
- if label:
- quals["label"] = [label]
strand = +1
directionality = int(
@@ -170,6 +167,16 @@
qvalues.append(int(value.attributes["int"].value))
quals[qname] = qvalues
+ name = _get_attribute_value(feature, "name")
+ if name:
+ if "label" not in quals:
+ # No explicit label attribute, use the SnapGene name
+ quals["label"] = [name]
+ elif name not in quals["label"]:
+ # The SnapGene name is different from the label,
+ # add a specific attribute to represent it
+ quals["name"] = [name]
+
feature = SeqFeature(location, type=type, qualifiers=quals)
record.features.append(feature)
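
A hedged illustration of the qualifier handling this patch introduces, using the lac-operator example from the issue and assuming the file stores the short text as the `label` qualifier and the long text as the feature's `name` attribute:

```python
quals = {"label": ["lac operator"]}                  # explicit <Q name="label"> value
name = "lac repressor encoded by lacI lac operator"  # SnapGene Feature "name" attribute

if "label" not in quals:
    quals["label"] = [name]      # no explicit label: fall back to the SnapGene name
elif name not in quals["label"]:
    quals["name"] = [name]       # name differs from the label: keep it separately

print(quals)
# {'label': ['lac operator'], 'name': ['lac repressor encoded by lacI lac operator']}
```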
| {"golden_diff": "diff --git a/Bio/SeqIO/SnapGeneIO.py b/Bio/SeqIO/SnapGeneIO.py\n--- a/Bio/SeqIO/SnapGeneIO.py\n+++ b/Bio/SeqIO/SnapGeneIO.py\n@@ -135,9 +135,6 @@\n quals = {}\n \n type = _get_attribute_value(feature, \"type\", default=\"misc_feature\")\n- label = _get_attribute_value(feature, \"name\")\n- if label:\n- quals[\"label\"] = [label]\n \n strand = +1\n directionality = int(\n@@ -170,6 +167,16 @@\n qvalues.append(int(value.attributes[\"int\"].value))\n quals[qname] = qvalues\n \n+ name = _get_attribute_value(feature, \"name\")\n+ if name:\n+ if \"label\" not in quals:\n+ # No explicit label attribute, use the SnapGene name\n+ quals[\"label\"] = [name]\n+ elif name not in quals[\"label\"]:\n+ # The SnapGene name is different from the label,\n+ # add a specific attribute to represent it\n+ quals[\"name\"] = [name]\n+\n feature = SeqFeature(location, type=type, qualifiers=quals)\n record.features.append(feature)\n", "issue": "BioPython returns label of SnapGene Feature, not title\n### Setup\r\n\r\nI am reporting a problem with Biopython version, Python version, and operating\r\nsystem as follows:\r\n\r\n```\r\n[GCC 6.3.0 20170516]\r\nCPython\r\nLinux-4.19.0-10-amd64-x86_64-with-debian-10.5\r\n1.78\r\n```\r\n### Expected behaviour\r\n\r\nI would like to get the names of all features found in >4000 SnapGene plasmid maps. I would expect the name of the features returned by BioPython to be the same as those returned by SnapGene Server's reportFeatures function.\r\n\r\n### Actual behaviour\r\n\r\nFor some features, the name returned by BioPython is different from that returned by SnapGene Server's reportFeatures function. \r\n\r\nThe problematic features are those that have both a title and a label attribute, for instance:\r\n\r\n\r\n\r\nIn the example above, BioPython reports the label `lac operator` while SnapGene Server returns the \r\ntitle `lac repressor encoded by lacI lac operator`. This would not be so much of a problem if I could find `lac repressor encoded by lacI lac operator` anywhere in the relevant SeqFeature object, but I cannot, unless, of course, I missed it.\r\n\r\n### Steps to reproduce\r\n\r\nI am attaching a relevant example SnapGene file.\r\n\r\n[example.zip](https://github.com/biopython/biopython/files/5267092/example.zip)\n", "before_files": [{"content": "# Copyright 2017-2019 Damien Goutte-Gattat. 
All rights reserved.\n#\n# This file is part of the Biopython distribution and governed by your\n# choice of the \"Biopython License Agreement\" or the \"BSD 3-Clause License\".\n# Please see the LICENSE file that should have been included as part of this\n# package.\n\n\"\"\"Bio.SeqIO support for the SnapGene file format.\n\nThe SnapGene binary format is the native format used by the SnapGene program\nfrom GSL Biotech LLC.\n\"\"\"\n\nfrom datetime import datetime\nfrom re import sub\nfrom struct import unpack\nfrom xml.dom.minidom import parseString\n\nfrom Bio.Seq import Seq\nfrom Bio.SeqFeature import SeqFeature, FeatureLocation\nfrom Bio.SeqRecord import SeqRecord\nfrom .Interfaces import SequenceIterator\n\n\ndef _iterate(handle):\n \"\"\"Iterate over the packets of a SnapGene file.\n\n A SnapGene file is made of packets, each packet being a TLV-like\n structure comprising:\n\n - 1 single byte indicating the packet's type;\n - 1 big-endian long integer (4 bytes) indicating the length of the\n packet's data;\n - the actual data.\n \"\"\"\n while True:\n packet_type = handle.read(1)\n if len(packet_type) < 1: # No more packet\n return\n packet_type = unpack(\">B\", packet_type)[0]\n\n length = handle.read(4)\n if len(length) < 4:\n raise ValueError(\"Unexpected end of packet\")\n length = unpack(\">I\", length)[0]\n\n data = handle.read(length)\n if len(data) < length:\n raise ValueError(\"Unexpected end of packet\")\n\n yield (packet_type, length, data)\n\n\ndef _parse_dna_packet(length, data, record):\n \"\"\"Parse a DNA sequence packet.\n\n A DNA sequence packet contains a single byte flag followed by the\n sequence itself.\n \"\"\"\n if record.seq:\n raise ValueError(\"The file contains more than one DNA packet\")\n\n flags, sequence = unpack(\">B%ds\" % (length - 1), data)\n record.seq = Seq(sequence.decode(\"ASCII\"))\n record.annotations[\"molecule_type\"] = \"DNA\"\n if flags & 0x01:\n record.annotations[\"topology\"] = \"circular\"\n else:\n record.annotations[\"topology\"] = \"linear\"\n\n\ndef _parse_notes_packet(length, data, record):\n \"\"\"Parse a 'Notes' packet.\n\n This type of packet contains some metadata about the sequence. They\n are stored as a XML string with a 'Notes' root node.\n \"\"\"\n xml = parseString(data.decode(\"UTF-8\"))\n type = _get_child_value(xml, \"Type\")\n if type == \"Synthetic\":\n record.annotations[\"data_file_division\"] = \"SYN\"\n else:\n record.annotations[\"data_file_division\"] = \"UNC\"\n\n date = _get_child_value(xml, \"LastModified\")\n if date:\n record.annotations[\"date\"] = datetime.strptime(date, \"%Y.%m.%d\")\n\n acc = _get_child_value(xml, \"AccessionNumber\")\n if acc:\n record.id = acc\n\n comment = _get_child_value(xml, \"Comments\")\n if comment:\n record.name = comment.split(\" \", 1)[0]\n record.description = comment\n if not acc:\n record.id = record.name\n\n\ndef _parse_cookie_packet(length, data, record):\n \"\"\"Parse a SnapGene cookie packet.\n\n Every SnapGene file starts with a packet of this type. 
It acts as\n a magic cookie identifying the file as a SnapGene file.\n \"\"\"\n cookie, seq_type, exp_version, imp_version = unpack(\">8sHHH\", data)\n if cookie.decode(\"ASCII\") != \"SnapGene\":\n raise ValueError(\"The file is not a valid SnapGene file\")\n\n\ndef _parse_location(rangespec, strand, record):\n start, end = [int(x) for x in rangespec.split(\"-\")]\n # Account for SnapGene's 1-based coordinates\n start = start - 1\n if start > end:\n # Range wrapping the end of the sequence\n l1 = FeatureLocation(start, len(record), strand=strand)\n l2 = FeatureLocation(0, end, strand=strand)\n location = l1 + l2\n else:\n location = FeatureLocation(start, end, strand=strand)\n return location\n\n\ndef _parse_features_packet(length, data, record):\n \"\"\"Parse a sequence features packet.\n\n This packet stores sequence features (except primer binding sites,\n which are in a dedicated Primers packet). The data is a XML string\n starting with a 'Features' root node.\n \"\"\"\n xml = parseString(data.decode(\"UTF-8\"))\n for feature in xml.getElementsByTagName(\"Feature\"):\n quals = {}\n\n type = _get_attribute_value(feature, \"type\", default=\"misc_feature\")\n label = _get_attribute_value(feature, \"name\")\n if label:\n quals[\"label\"] = [label]\n\n strand = +1\n directionality = int(\n _get_attribute_value(feature, \"directionality\", default=\"1\")\n )\n if directionality == 2:\n strand = -1\n\n location = None\n for segment in feature.getElementsByTagName(\"Segment\"):\n rng = _get_attribute_value(segment, \"range\")\n if not location:\n location = _parse_location(rng, strand, record)\n else:\n location = location + _parse_location(rng, strand, record)\n if not location:\n raise ValueError(\"Missing feature location\")\n\n for qualifier in feature.getElementsByTagName(\"Q\"):\n qname = _get_attribute_value(\n qualifier, \"name\", error=\"Missing qualifier name\"\n )\n qvalues = []\n for value in qualifier.getElementsByTagName(\"V\"):\n if value.hasAttribute(\"text\"):\n qvalues.append(_decode(value.attributes[\"text\"].value))\n elif value.hasAttribute(\"predef\"):\n qvalues.append(_decode(value.attributes[\"predef\"].value))\n elif value.hasAttribute(\"int\"):\n qvalues.append(int(value.attributes[\"int\"].value))\n quals[qname] = qvalues\n\n feature = SeqFeature(location, type=type, qualifiers=quals)\n record.features.append(feature)\n\n\ndef _parse_primers_packet(length, data, record):\n \"\"\"Parse a Primers packet.\n\n A Primers packet is similar to a Features packet but specifically\n stores primer binding features. 
The data is a XML string starting\n with a 'Primers' root node.\n \"\"\"\n xml = parseString(data.decode(\"UTF-8\"))\n for primer in xml.getElementsByTagName(\"Primer\"):\n quals = {}\n\n name = _get_attribute_value(primer, \"name\")\n if name:\n quals[\"label\"] = [name]\n\n for site in primer.getElementsByTagName(\"BindingSite\"):\n rng = _get_attribute_value(\n site, \"location\", error=\"Missing binding site location\"\n )\n strand = int(_get_attribute_value(site, \"boundStrand\", default=\"0\"))\n if strand == 1:\n strand = -1\n else:\n strand = +1\n\n feature = SeqFeature(\n _parse_location(rng, strand, record),\n type=\"primer_bind\",\n qualifiers=quals,\n )\n record.features.append(feature)\n\n\n_packet_handlers = {\n 0x00: _parse_dna_packet,\n 0x05: _parse_primers_packet,\n 0x06: _parse_notes_packet,\n 0x0A: _parse_features_packet,\n}\n\n\n# Helper functions to process the XML data in\n# some of the segments\n\n\ndef _decode(text):\n # Get rid of HTML tags in some values\n return sub(\"<[^>]+>\", \"\", text)\n\n\ndef _get_attribute_value(node, name, default=None, error=None):\n if node.hasAttribute(name):\n return _decode(node.attributes[name].value)\n elif error:\n raise ValueError(error)\n else:\n return default\n\n\ndef _get_child_value(node, name, default=None, error=None):\n children = node.getElementsByTagName(name)\n if (\n children\n and children[0].childNodes\n and children[0].firstChild.nodeType == node.TEXT_NODE\n ):\n return _decode(children[0].firstChild.data)\n elif error:\n raise ValueError(error)\n else:\n return default\n\n\nclass SnapGeneIterator(SequenceIterator):\n \"\"\"Parser for SnapGene files.\"\"\"\n\n def __init__(self, source):\n \"\"\"Parse a SnapGene file and return a SeqRecord object.\n\n Argument source is a file-like object or a path to a file.\n\n Note that a SnapGene file can only contain one sequence, so this\n iterator will always return a single record.\n \"\"\"\n super().__init__(source, mode=\"b\", fmt=\"SnapGene\")\n\n def parse(self, handle):\n \"\"\"Start parsing the file, and return a SeqRecord generator.\"\"\"\n records = self.iterate(handle)\n return records\n\n def iterate(self, handle):\n \"\"\"Iterate over the records in the SnapGene file.\"\"\"\n record = SeqRecord(None)\n packets = _iterate(handle)\n try:\n packet_type, length, data = next(packets)\n except StopIteration:\n raise ValueError(\"Empty file.\") from None\n\n if packet_type != 0x09:\n raise ValueError(\"The file does not start with a SnapGene cookie packet\")\n _parse_cookie_packet(length, data, record)\n\n for (packet_type, length, data) in packets:\n handler = _packet_handlers.get(packet_type)\n if handler is not None:\n handler(length, data, record)\n\n if not record.seq:\n raise ValueError(\"No DNA packet in file\")\n\n yield record\n", "path": "Bio/SeqIO/SnapGeneIO.py"}], "after_files": [{"content": "# Copyright 2017-2019 Damien Goutte-Gattat. 
All rights reserved.\n#\n# This file is part of the Biopython distribution and governed by your\n# choice of the \"Biopython License Agreement\" or the \"BSD 3-Clause License\".\n# Please see the LICENSE file that should have been included as part of this\n# package.\n\n\"\"\"Bio.SeqIO support for the SnapGene file format.\n\nThe SnapGene binary format is the native format used by the SnapGene program\nfrom GSL Biotech LLC.\n\"\"\"\n\nfrom datetime import datetime\nfrom re import sub\nfrom struct import unpack\nfrom xml.dom.minidom import parseString\n\nfrom Bio.Seq import Seq\nfrom Bio.SeqFeature import SeqFeature, FeatureLocation\nfrom Bio.SeqRecord import SeqRecord\nfrom .Interfaces import SequenceIterator\n\n\ndef _iterate(handle):\n \"\"\"Iterate over the packets of a SnapGene file.\n\n A SnapGene file is made of packets, each packet being a TLV-like\n structure comprising:\n\n - 1 single byte indicating the packet's type;\n - 1 big-endian long integer (4 bytes) indicating the length of the\n packet's data;\n - the actual data.\n \"\"\"\n while True:\n packet_type = handle.read(1)\n if len(packet_type) < 1: # No more packet\n return\n packet_type = unpack(\">B\", packet_type)[0]\n\n length = handle.read(4)\n if len(length) < 4:\n raise ValueError(\"Unexpected end of packet\")\n length = unpack(\">I\", length)[0]\n\n data = handle.read(length)\n if len(data) < length:\n raise ValueError(\"Unexpected end of packet\")\n\n yield (packet_type, length, data)\n\n\ndef _parse_dna_packet(length, data, record):\n \"\"\"Parse a DNA sequence packet.\n\n A DNA sequence packet contains a single byte flag followed by the\n sequence itself.\n \"\"\"\n if record.seq:\n raise ValueError(\"The file contains more than one DNA packet\")\n\n flags, sequence = unpack(\">B%ds\" % (length - 1), data)\n record.seq = Seq(sequence.decode(\"ASCII\"))\n record.annotations[\"molecule_type\"] = \"DNA\"\n if flags & 0x01:\n record.annotations[\"topology\"] = \"circular\"\n else:\n record.annotations[\"topology\"] = \"linear\"\n\n\ndef _parse_notes_packet(length, data, record):\n \"\"\"Parse a 'Notes' packet.\n\n This type of packet contains some metadata about the sequence. They\n are stored as a XML string with a 'Notes' root node.\n \"\"\"\n xml = parseString(data.decode(\"UTF-8\"))\n type = _get_child_value(xml, \"Type\")\n if type == \"Synthetic\":\n record.annotations[\"data_file_division\"] = \"SYN\"\n else:\n record.annotations[\"data_file_division\"] = \"UNC\"\n\n date = _get_child_value(xml, \"LastModified\")\n if date:\n record.annotations[\"date\"] = datetime.strptime(date, \"%Y.%m.%d\")\n\n acc = _get_child_value(xml, \"AccessionNumber\")\n if acc:\n record.id = acc\n\n comment = _get_child_value(xml, \"Comments\")\n if comment:\n record.name = comment.split(\" \", 1)[0]\n record.description = comment\n if not acc:\n record.id = record.name\n\n\ndef _parse_cookie_packet(length, data, record):\n \"\"\"Parse a SnapGene cookie packet.\n\n Every SnapGene file starts with a packet of this type. 
It acts as\n a magic cookie identifying the file as a SnapGene file.\n \"\"\"\n cookie, seq_type, exp_version, imp_version = unpack(\">8sHHH\", data)\n if cookie.decode(\"ASCII\") != \"SnapGene\":\n raise ValueError(\"The file is not a valid SnapGene file\")\n\n\ndef _parse_location(rangespec, strand, record):\n start, end = [int(x) for x in rangespec.split(\"-\")]\n # Account for SnapGene's 1-based coordinates\n start = start - 1\n if start > end:\n # Range wrapping the end of the sequence\n l1 = FeatureLocation(start, len(record), strand=strand)\n l2 = FeatureLocation(0, end, strand=strand)\n location = l1 + l2\n else:\n location = FeatureLocation(start, end, strand=strand)\n return location\n\n\ndef _parse_features_packet(length, data, record):\n \"\"\"Parse a sequence features packet.\n\n This packet stores sequence features (except primer binding sites,\n which are in a dedicated Primers packet). The data is a XML string\n starting with a 'Features' root node.\n \"\"\"\n xml = parseString(data.decode(\"UTF-8\"))\n for feature in xml.getElementsByTagName(\"Feature\"):\n quals = {}\n\n type = _get_attribute_value(feature, \"type\", default=\"misc_feature\")\n\n strand = +1\n directionality = int(\n _get_attribute_value(feature, \"directionality\", default=\"1\")\n )\n if directionality == 2:\n strand = -1\n\n location = None\n for segment in feature.getElementsByTagName(\"Segment\"):\n rng = _get_attribute_value(segment, \"range\")\n if not location:\n location = _parse_location(rng, strand, record)\n else:\n location = location + _parse_location(rng, strand, record)\n if not location:\n raise ValueError(\"Missing feature location\")\n\n for qualifier in feature.getElementsByTagName(\"Q\"):\n qname = _get_attribute_value(\n qualifier, \"name\", error=\"Missing qualifier name\"\n )\n qvalues = []\n for value in qualifier.getElementsByTagName(\"V\"):\n if value.hasAttribute(\"text\"):\n qvalues.append(_decode(value.attributes[\"text\"].value))\n elif value.hasAttribute(\"predef\"):\n qvalues.append(_decode(value.attributes[\"predef\"].value))\n elif value.hasAttribute(\"int\"):\n qvalues.append(int(value.attributes[\"int\"].value))\n quals[qname] = qvalues\n\n name = _get_attribute_value(feature, \"name\")\n if name:\n if \"label\" not in quals:\n # No explicit label attribute, use the SnapGene name\n quals[\"label\"] = [name]\n elif name not in quals[\"label\"]:\n # The SnapGene name is different from the label,\n # add a specific attribute to represent it\n quals[\"name\"] = [name]\n\n feature = SeqFeature(location, type=type, qualifiers=quals)\n record.features.append(feature)\n\n\ndef _parse_primers_packet(length, data, record):\n \"\"\"Parse a Primers packet.\n\n A Primers packet is similar to a Features packet but specifically\n stores primer binding features. 
The data is a XML string starting\n with a 'Primers' root node.\n \"\"\"\n xml = parseString(data.decode(\"UTF-8\"))\n for primer in xml.getElementsByTagName(\"Primer\"):\n quals = {}\n\n name = _get_attribute_value(primer, \"name\")\n if name:\n quals[\"label\"] = [name]\n\n for site in primer.getElementsByTagName(\"BindingSite\"):\n rng = _get_attribute_value(\n site, \"location\", error=\"Missing binding site location\"\n )\n strand = int(_get_attribute_value(site, \"boundStrand\", default=\"0\"))\n if strand == 1:\n strand = -1\n else:\n strand = +1\n\n feature = SeqFeature(\n _parse_location(rng, strand, record),\n type=\"primer_bind\",\n qualifiers=quals,\n )\n record.features.append(feature)\n\n\n_packet_handlers = {\n 0x00: _parse_dna_packet,\n 0x05: _parse_primers_packet,\n 0x06: _parse_notes_packet,\n 0x0A: _parse_features_packet,\n}\n\n\n# Helper functions to process the XML data in\n# some of the segments\n\n\ndef _decode(text):\n # Get rid of HTML tags in some values\n return sub(\"<[^>]+>\", \"\", text)\n\n\ndef _get_attribute_value(node, name, default=None, error=None):\n if node.hasAttribute(name):\n return _decode(node.attributes[name].value)\n elif error:\n raise ValueError(error)\n else:\n return default\n\n\ndef _get_child_value(node, name, default=None, error=None):\n children = node.getElementsByTagName(name)\n if (\n children\n and children[0].childNodes\n and children[0].firstChild.nodeType == node.TEXT_NODE\n ):\n return _decode(children[0].firstChild.data)\n elif error:\n raise ValueError(error)\n else:\n return default\n\n\nclass SnapGeneIterator(SequenceIterator):\n \"\"\"Parser for SnapGene files.\"\"\"\n\n def __init__(self, source):\n \"\"\"Parse a SnapGene file and return a SeqRecord object.\n\n Argument source is a file-like object or a path to a file.\n\n Note that a SnapGene file can only contain one sequence, so this\n iterator will always return a single record.\n \"\"\"\n super().__init__(source, mode=\"b\", fmt=\"SnapGene\")\n\n def parse(self, handle):\n \"\"\"Start parsing the file, and return a SeqRecord generator.\"\"\"\n records = self.iterate(handle)\n return records\n\n def iterate(self, handle):\n \"\"\"Iterate over the records in the SnapGene file.\"\"\"\n record = SeqRecord(None)\n packets = _iterate(handle)\n try:\n packet_type, length, data = next(packets)\n except StopIteration:\n raise ValueError(\"Empty file.\") from None\n\n if packet_type != 0x09:\n raise ValueError(\"The file does not start with a SnapGene cookie packet\")\n _parse_cookie_packet(length, data, record)\n\n for (packet_type, length, data) in packets:\n handler = _packet_handlers.get(packet_type)\n if handler is not None:\n handler(length, data, record)\n\n if not record.seq:\n raise ValueError(\"No DNA packet in file\")\n\n yield record\n", "path": "Bio/SeqIO/SnapGeneIO.py"}]} | 3,547 | 290 |
gh_patches_debug_27474 | rasdani/github-patches | git_diff | ESMCI__cime-2648 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
GET_REFCASE won't change from TRUE to FALSE
If the user has GET_REFCASE=TRUE, a file Buildconf/refcase.input_data_list is created; if in a subsequent run the user sets GET_REFCASE=FALSE, this file must be deleted or the model will still attempt to find a refcase.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `scripts/lib/CIME/case/check_input_data.py`
Content:
```
1 """
2 API for checking input for testcase
3 """
4 from CIME.XML.standard_module_setup import *
5 from CIME.utils import SharedArea, find_files, safe_copy, expect
6 from CIME.XML.inputdata import Inputdata
7 import CIME.Servers
8
9 import glob
10
11 logger = logging.getLogger(__name__)
12
13 def _download_if_in_repo(server, input_data_root, rel_path, isdirectory=False):
14 """
15 Return True if successfully downloaded
16 """
17 if not server.fileexists(rel_path):
18 return False
19
20 full_path = os.path.join(input_data_root, rel_path)
21 logging.info("Trying to download file: '{}' to path '{}'".format(rel_path, full_path))
22 # Make sure local path exists, create if it does not
23 if isdirectory or full_path.endswith(os.sep):
24 if not os.path.exists(full_path):
25 logger.info("Creating directory {}".format(full_path))
26 os.makedirs(full_path)
27 isdirectory = True
28 elif not os.path.exists(os.path.dirname(full_path)):
29 os.makedirs(os.path.dirname(full_path))
30
31 # Use umask to make sure files are group read/writable. As long as parent directories
32 # have +s, then everything should work.
33 with SharedArea():
34 if isdirectory:
35 return server.getdirectory(rel_path, full_path)
36 else:
37 return server.getfile(rel_path, full_path)
38
39 ###############################################################################
40 def check_all_input_data(self, protocol=None, address=None, input_data_root=None, data_list_dir="Buildconf", download=True):
41 ###############################################################################
42 success = False
43 if protocol is not None and address is not None:
44 success = self.check_input_data(protocol=protocol, address=address, download=download,
45 input_data_root=input_data_root, data_list_dir=data_list_dir)
46 else:
47 success = self.check_input_data(protocol=protocol, address=address, download=False,
48 input_data_root=input_data_root, data_list_dir=data_list_dir)
49 if download and not success:
50 success = _downloadfromserver(self, input_data_root, data_list_dir)
51
52 expect(not download or (download and success), "Could not find all inputdata on any server")
53 self.stage_refcase(input_data_root=input_data_root, data_list_dir=data_list_dir)
54 return success
55
56 def _downloadfromserver(case, input_data_root, data_list_dir):
57 # needs to be downloaded
58 success = False
59 protocol = 'svn'
60 inputdata = Inputdata()
61 while not success and protocol is not None:
62 protocol, address = inputdata.get_next_server()
63 logger.info("Checking server {} with protocol {}".format(address, protocol))
64 success = case.check_input_data(protocol=protocol, address=address, download=True,
65 input_data_root=input_data_root, data_list_dir=data_list_dir)
66 return success
67
68 def stage_refcase(self, input_data_root=None, data_list_dir=None):
69 get_refcase = self.get_value("GET_REFCASE")
70 run_type = self.get_value("RUN_TYPE")
71 continue_run = self.get_value("CONTINUE_RUN")
72
73 # We do not fully populate the inputdata directory on every
74 # machine and do not expect every user to download the 3TB+ of
75 # data in our inputdata repository. This code checks for the
76 # existence of inputdata in the local inputdata directory and
77 # attempts to download data from the server if it's needed and
78 # missing.
79 if get_refcase and run_type != "startup" and not continue_run:
80 din_loc_root = self.get_value("DIN_LOC_ROOT")
81 run_refdate = self.get_value("RUN_REFDATE")
82 run_refcase = self.get_value("RUN_REFCASE")
83 run_refdir = self.get_value("RUN_REFDIR")
84 rundir = self.get_value("RUNDIR")
85
86 if os.path.isabs(run_refdir):
87 refdir = run_refdir
88 else:
89 refdir = os.path.join(din_loc_root, run_refdir, run_refcase, run_refdate)
90 if not os.path.isdir(refdir):
91 logger.warning("Refcase not found in {}, will attempt to download from inputdata".format(refdir))
92 with open(os.path.join("Buildconf","refcase.input_data_list"),"w") as fd:
93 fd.write("refdir = {}{}".format(refdir, os.sep))
94 if input_data_root is None:
95 input_data_root = din_loc_root
96 if data_list_dir is None:
97 data_list_dir = "Buildconf"
98 success = _downloadfromserver(self, input_data_root=input_data_root, data_list_dir=data_list_dir)
99 expect(success, "Could not download refcase from any server")
100
101 logger.info(" - Prestaging REFCASE ({}) to {}".format(refdir, rundir))
102
103 # prestage the reference case's files.
104
105 if (not os.path.exists(rundir)):
106 logger.debug("Creating run directory: {}".format(rundir))
107 os.makedirs(rundir)
108
109 # copy the refcases' rpointer files to the run directory
110 for rpointerfile in glob.iglob(os.path.join("{}","*rpointer*").format(refdir)):
111 logger.info("Copy rpointer {}".format(rpointerfile))
112 safe_copy(rpointerfile, rundir)
113
114 # link everything else
115
116 for rcfile in glob.iglob(os.path.join(refdir,"*")):
117 rcbaseline = os.path.basename(rcfile)
118 if not os.path.exists("{}/{}".format(rundir, rcbaseline)):
119 logger.info("Staging file {}".format(rcfile))
120 os.symlink(rcfile, "{}/{}".format(rundir, rcbaseline))
121 # Backward compatibility, some old refcases have cam2 in the name
122 # link to local cam file.
123 for cam2file in glob.iglob(os.path.join("{}","*.cam2.*").format(rundir)):
124 camfile = cam2file.replace("cam2", "cam")
125 os.symlink(cam2file, camfile)
126
127 return True
128
129 def check_input_data(case, protocol="svn", address=None, input_data_root=None, data_list_dir="Buildconf", download=False):
130 """
131 Return True if no files missing
132 """
133 case.load_env(reset=True)
134 # Fill in defaults as needed
135 input_data_root = case.get_value("DIN_LOC_ROOT") if input_data_root is None else input_data_root
136
137 expect(os.path.isdir(input_data_root), "Invalid input_data_root directory: '{}'".format(input_data_root))
138 expect(os.path.isdir(data_list_dir), "Invalid data_list_dir directory: '{}'".format(data_list_dir))
139
140 data_list_files = find_files(data_list_dir, "*.input_data_list")
141 expect(data_list_files, "No .input_data_list files found in dir '{}'".format(data_list_dir))
142
143 no_files_missing = True
144
145 if download:
146 if protocol not in vars(CIME.Servers):
147 logger.warning("Client protocol {} not enabled".format(protocol))
148 return False
149
150 if protocol == "svn":
151 server = CIME.Servers.SVN(address)
152 elif protocol == "gftp":
153 server = CIME.Servers.GridFTP(address)
154 elif protocol == "ftp":
155 server = CIME.Servers.FTP(address)
156 elif protocol == "wget":
157 server = CIME.Servers.WGET(address)
158 else:
159 expect(False, "Unsupported inputdata protocol: {}".format(protocol))
160
161
162
163 for data_list_file in data_list_files:
164 logging.info("Loading input file list: '{}'".format(data_list_file))
165 with open(data_list_file, "r") as fd:
166 lines = fd.readlines()
167
168 for line in lines:
169 line = line.strip()
170 if (line and not line.startswith("#")):
171 tokens = line.split('=')
172 description, full_path = tokens[0].strip(), tokens[1].strip()
173 if description.endswith('datapath'):
174 continue
175 if(full_path):
176 # expand xml variables
177 full_path = case.get_resolved_value(full_path)
178 rel_path = full_path.replace(input_data_root, "")
179 model = os.path.basename(data_list_file).split('.')[0]
180
181 if ("/" in rel_path and rel_path == full_path):
182 # User pointing to a file outside of input_data_root, we cannot determine
183 # rel_path, and so cannot download the file. If it already exists, we can
184 # proceed
185 if not os.path.exists(full_path):
186 logging.warning("Model {} missing file {} = '{}'".format(model, description, full_path))
187 if download:
188 logging.warning(" Cannot download file since it lives outside of the input_data_root '{}'".format(input_data_root))
189 no_files_missing = False
190 else:
191 logging.debug(" Found input file: '{}'".format(full_path))
192
193 else:
194 # There are some special values of rel_path that
195 # we need to ignore - some of the component models
196 # set things like 'NULL' or 'same_as_TS' -
197 # basically if rel_path does not contain '/' (a
198 # directory tree) you can assume it's a special
199 # value and ignore it (perhaps with a warning)
200 if ("/" in rel_path and not os.path.exists(full_path)):
201 logging.warning(" Model {} missing file {} = '{}'".format(model, description, full_path))
202 no_files_missing = False
203
204 if (download):
205 no_files_missing = _download_if_in_repo(server, input_data_root, rel_path.strip(os.sep),
206 isdirectory=rel_path.endswith(os.sep))
207 else:
208 logging.debug(" Already had input file: '{}'".format(full_path))
209
210 else:
211 model = os.path.basename(data_list_file).split('.')[0]
212 logging.warning("Model {} no file specified for {}".format(model, description))
213
214 return no_files_missing
215
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/scripts/lib/CIME/case/check_input_data.py b/scripts/lib/CIME/case/check_input_data.py
--- a/scripts/lib/CIME/case/check_input_data.py
+++ b/scripts/lib/CIME/case/check_input_data.py
@@ -48,7 +48,7 @@
input_data_root=input_data_root, data_list_dir=data_list_dir)
if download and not success:
success = _downloadfromserver(self, input_data_root, data_list_dir)
-
+
expect(not download or (download and success), "Could not find all inputdata on any server")
self.stage_refcase(input_data_root=input_data_root, data_list_dir=data_list_dir)
return success
@@ -123,7 +123,10 @@
for cam2file in glob.iglob(os.path.join("{}","*.cam2.*").format(rundir)):
camfile = cam2file.replace("cam2", "cam")
os.symlink(cam2file, camfile)
-
+ elif not get_refcase and run_type != "startup":
+ logger.info("GET_REFCASE is false, the user is expected to stage the refcase to the run directory.")
+ if os.path.exists(os.path.join("Buildconf","refcase.input_data_list")):
+ os.remove(os.path.join("Buildconf","refcase.input_data_list"))
return True
def check_input_data(case, protocol="svn", address=None, input_data_root=None, data_list_dir="Buildconf", download=False):
| {"golden_diff": "diff --git a/scripts/lib/CIME/case/check_input_data.py b/scripts/lib/CIME/case/check_input_data.py\n--- a/scripts/lib/CIME/case/check_input_data.py\n+++ b/scripts/lib/CIME/case/check_input_data.py\n@@ -48,7 +48,7 @@\n input_data_root=input_data_root, data_list_dir=data_list_dir)\n if download and not success:\n success = _downloadfromserver(self, input_data_root, data_list_dir)\n- \n+\n expect(not download or (download and success), \"Could not find all inputdata on any server\")\n self.stage_refcase(input_data_root=input_data_root, data_list_dir=data_list_dir)\n return success\n@@ -123,7 +123,10 @@\n for cam2file in glob.iglob(os.path.join(\"{}\",\"*.cam2.*\").format(rundir)):\n camfile = cam2file.replace(\"cam2\", \"cam\")\n os.symlink(cam2file, camfile)\n-\n+ elif not get_refcase and run_type != \"startup\":\n+ logger.info(\"GET_REFCASE is false, the user is expected to stage the refcase to the run directory.\")\n+ if os.path.exists(os.path.join(\"Buildconf\",\"refcase.input_data_list\")):\n+ os.remove(os.path.join(\"Buildconf\",\"refcase.input_data_list\"))\n return True\n \n def check_input_data(case, protocol=\"svn\", address=None, input_data_root=None, data_list_dir=\"Buildconf\", download=False):\n", "issue": "GET_REFCASE wont change from TRUE to FALSE\nIf the user has GET_REFCASE=TRUE a file Buildconf/refcase.input_data_list is created, if in a subsequent run the user sets GET_REFCASE=FALSE this file must be deleted or the model will still attempt to find a refcase.\n", "before_files": [{"content": "\"\"\"\nAPI for checking input for testcase\n\"\"\"\nfrom CIME.XML.standard_module_setup import *\nfrom CIME.utils import SharedArea, find_files, safe_copy, expect\nfrom CIME.XML.inputdata import Inputdata\nimport CIME.Servers\n\nimport glob\n\nlogger = logging.getLogger(__name__)\n\ndef _download_if_in_repo(server, input_data_root, rel_path, isdirectory=False):\n \"\"\"\n Return True if successfully downloaded\n \"\"\"\n if not server.fileexists(rel_path):\n return False\n\n full_path = os.path.join(input_data_root, rel_path)\n logging.info(\"Trying to download file: '{}' to path '{}'\".format(rel_path, full_path))\n # Make sure local path exists, create if it does not\n if isdirectory or full_path.endswith(os.sep):\n if not os.path.exists(full_path):\n logger.info(\"Creating directory {}\".format(full_path))\n os.makedirs(full_path)\n isdirectory = True\n elif not os.path.exists(os.path.dirname(full_path)):\n os.makedirs(os.path.dirname(full_path))\n\n # Use umask to make sure files are group read/writable. 
As long as parent directories\n # have +s, then everything should work.\n with SharedArea():\n if isdirectory:\n return server.getdirectory(rel_path, full_path)\n else:\n return server.getfile(rel_path, full_path)\n\n###############################################################################\ndef check_all_input_data(self, protocol=None, address=None, input_data_root=None, data_list_dir=\"Buildconf\", download=True):\n###############################################################################\n success = False\n if protocol is not None and address is not None:\n success = self.check_input_data(protocol=protocol, address=address, download=download,\n input_data_root=input_data_root, data_list_dir=data_list_dir)\n else:\n success = self.check_input_data(protocol=protocol, address=address, download=False,\n input_data_root=input_data_root, data_list_dir=data_list_dir)\n if download and not success:\n success = _downloadfromserver(self, input_data_root, data_list_dir)\n \n expect(not download or (download and success), \"Could not find all inputdata on any server\")\n self.stage_refcase(input_data_root=input_data_root, data_list_dir=data_list_dir)\n return success\n\ndef _downloadfromserver(case, input_data_root, data_list_dir):\n # needs to be downloaded\n success = False\n protocol = 'svn'\n inputdata = Inputdata()\n while not success and protocol is not None:\n protocol, address = inputdata.get_next_server()\n logger.info(\"Checking server {} with protocol {}\".format(address, protocol))\n success = case.check_input_data(protocol=protocol, address=address, download=True,\n input_data_root=input_data_root, data_list_dir=data_list_dir)\n return success\n\ndef stage_refcase(self, input_data_root=None, data_list_dir=None):\n get_refcase = self.get_value(\"GET_REFCASE\")\n run_type = self.get_value(\"RUN_TYPE\")\n continue_run = self.get_value(\"CONTINUE_RUN\")\n\n # We do not fully populate the inputdata directory on every\n # machine and do not expect every user to download the 3TB+ of\n # data in our inputdata repository. 
This code checks for the\n # existence of inputdata in the local inputdata directory and\n # attempts to download data from the server if it's needed and\n # missing.\n if get_refcase and run_type != \"startup\" and not continue_run:\n din_loc_root = self.get_value(\"DIN_LOC_ROOT\")\n run_refdate = self.get_value(\"RUN_REFDATE\")\n run_refcase = self.get_value(\"RUN_REFCASE\")\n run_refdir = self.get_value(\"RUN_REFDIR\")\n rundir = self.get_value(\"RUNDIR\")\n\n if os.path.isabs(run_refdir):\n refdir = run_refdir\n else:\n refdir = os.path.join(din_loc_root, run_refdir, run_refcase, run_refdate)\n if not os.path.isdir(refdir):\n logger.warning(\"Refcase not found in {}, will attempt to download from inputdata\".format(refdir))\n with open(os.path.join(\"Buildconf\",\"refcase.input_data_list\"),\"w\") as fd:\n fd.write(\"refdir = {}{}\".format(refdir, os.sep))\n if input_data_root is None:\n input_data_root = din_loc_root\n if data_list_dir is None:\n data_list_dir = \"Buildconf\"\n success = _downloadfromserver(self, input_data_root=input_data_root, data_list_dir=data_list_dir)\n expect(success, \"Could not download refcase from any server\")\n\n logger.info(\" - Prestaging REFCASE ({}) to {}\".format(refdir, rundir))\n\n # prestage the reference case's files.\n\n if (not os.path.exists(rundir)):\n logger.debug(\"Creating run directory: {}\".format(rundir))\n os.makedirs(rundir)\n\n # copy the refcases' rpointer files to the run directory\n for rpointerfile in glob.iglob(os.path.join(\"{}\",\"*rpointer*\").format(refdir)):\n logger.info(\"Copy rpointer {}\".format(rpointerfile))\n safe_copy(rpointerfile, rundir)\n\n # link everything else\n\n for rcfile in glob.iglob(os.path.join(refdir,\"*\")):\n rcbaseline = os.path.basename(rcfile)\n if not os.path.exists(\"{}/{}\".format(rundir, rcbaseline)):\n logger.info(\"Staging file {}\".format(rcfile))\n os.symlink(rcfile, \"{}/{}\".format(rundir, rcbaseline))\n # Backward compatibility, some old refcases have cam2 in the name\n # link to local cam file.\n for cam2file in glob.iglob(os.path.join(\"{}\",\"*.cam2.*\").format(rundir)):\n camfile = cam2file.replace(\"cam2\", \"cam\")\n os.symlink(cam2file, camfile)\n\n return True\n\ndef check_input_data(case, protocol=\"svn\", address=None, input_data_root=None, data_list_dir=\"Buildconf\", download=False):\n \"\"\"\n Return True if no files missing\n \"\"\"\n case.load_env(reset=True)\n # Fill in defaults as needed\n input_data_root = case.get_value(\"DIN_LOC_ROOT\") if input_data_root is None else input_data_root\n\n expect(os.path.isdir(input_data_root), \"Invalid input_data_root directory: '{}'\".format(input_data_root))\n expect(os.path.isdir(data_list_dir), \"Invalid data_list_dir directory: '{}'\".format(data_list_dir))\n\n data_list_files = find_files(data_list_dir, \"*.input_data_list\")\n expect(data_list_files, \"No .input_data_list files found in dir '{}'\".format(data_list_dir))\n\n no_files_missing = True\n\n if download:\n if protocol not in vars(CIME.Servers):\n logger.warning(\"Client protocol {} not enabled\".format(protocol))\n return False\n\n if protocol == \"svn\":\n server = CIME.Servers.SVN(address)\n elif protocol == \"gftp\":\n server = CIME.Servers.GridFTP(address)\n elif protocol == \"ftp\":\n server = CIME.Servers.FTP(address)\n elif protocol == \"wget\":\n server = CIME.Servers.WGET(address)\n else:\n expect(False, \"Unsupported inputdata protocol: {}\".format(protocol))\n\n\n\n for data_list_file in data_list_files:\n logging.info(\"Loading input file list: 
'{}'\".format(data_list_file))\n with open(data_list_file, \"r\") as fd:\n lines = fd.readlines()\n\n for line in lines:\n line = line.strip()\n if (line and not line.startswith(\"#\")):\n tokens = line.split('=')\n description, full_path = tokens[0].strip(), tokens[1].strip()\n if description.endswith('datapath'):\n continue\n if(full_path):\n # expand xml variables\n full_path = case.get_resolved_value(full_path)\n rel_path = full_path.replace(input_data_root, \"\")\n model = os.path.basename(data_list_file).split('.')[0]\n\n if (\"/\" in rel_path and rel_path == full_path):\n # User pointing to a file outside of input_data_root, we cannot determine\n # rel_path, and so cannot download the file. If it already exists, we can\n # proceed\n if not os.path.exists(full_path):\n logging.warning(\"Model {} missing file {} = '{}'\".format(model, description, full_path))\n if download:\n logging.warning(\" Cannot download file since it lives outside of the input_data_root '{}'\".format(input_data_root))\n no_files_missing = False\n else:\n logging.debug(\" Found input file: '{}'\".format(full_path))\n\n else:\n # There are some special values of rel_path that\n # we need to ignore - some of the component models\n # set things like 'NULL' or 'same_as_TS' -\n # basically if rel_path does not contain '/' (a\n # directory tree) you can assume it's a special\n # value and ignore it (perhaps with a warning)\n if (\"/\" in rel_path and not os.path.exists(full_path)):\n logging.warning(\" Model {} missing file {} = '{}'\".format(model, description, full_path))\n no_files_missing = False\n\n if (download):\n no_files_missing = _download_if_in_repo(server, input_data_root, rel_path.strip(os.sep),\n isdirectory=rel_path.endswith(os.sep))\n else:\n logging.debug(\" Already had input file: '{}'\".format(full_path))\n\n else:\n model = os.path.basename(data_list_file).split('.')[0]\n logging.warning(\"Model {} no file specified for {}\".format(model, description))\n\n return no_files_missing\n", "path": "scripts/lib/CIME/case/check_input_data.py"}], "after_files": [{"content": "\"\"\"\nAPI for checking input for testcase\n\"\"\"\nfrom CIME.XML.standard_module_setup import *\nfrom CIME.utils import SharedArea, find_files, safe_copy, expect\nfrom CIME.XML.inputdata import Inputdata\nimport CIME.Servers\n\nimport glob\n\nlogger = logging.getLogger(__name__)\n\ndef _download_if_in_repo(server, input_data_root, rel_path, isdirectory=False):\n \"\"\"\n Return True if successfully downloaded\n \"\"\"\n if not server.fileexists(rel_path):\n return False\n\n full_path = os.path.join(input_data_root, rel_path)\n logging.info(\"Trying to download file: '{}' to path '{}'\".format(rel_path, full_path))\n # Make sure local path exists, create if it does not\n if isdirectory or full_path.endswith(os.sep):\n if not os.path.exists(full_path):\n logger.info(\"Creating directory {}\".format(full_path))\n os.makedirs(full_path)\n isdirectory = True\n elif not os.path.exists(os.path.dirname(full_path)):\n os.makedirs(os.path.dirname(full_path))\n\n # Use umask to make sure files are group read/writable. 
As long as parent directories\n # have +s, then everything should work.\n with SharedArea():\n if isdirectory:\n return server.getdirectory(rel_path, full_path)\n else:\n return server.getfile(rel_path, full_path)\n\n###############################################################################\ndef check_all_input_data(self, protocol=None, address=None, input_data_root=None, data_list_dir=\"Buildconf\", download=True):\n###############################################################################\n success = False\n if protocol is not None and address is not None:\n success = self.check_input_data(protocol=protocol, address=address, download=download,\n input_data_root=input_data_root, data_list_dir=data_list_dir)\n else:\n success = self.check_input_data(protocol=protocol, address=address, download=False,\n input_data_root=input_data_root, data_list_dir=data_list_dir)\n if download and not success:\n success = _downloadfromserver(self, input_data_root, data_list_dir)\n\n expect(not download or (download and success), \"Could not find all inputdata on any server\")\n self.stage_refcase(input_data_root=input_data_root, data_list_dir=data_list_dir)\n return success\n\ndef _downloadfromserver(case, input_data_root, data_list_dir):\n # needs to be downloaded\n success = False\n protocol = 'svn'\n inputdata = Inputdata()\n while not success and protocol is not None:\n protocol, address = inputdata.get_next_server()\n logger.info(\"Checking server {} with protocol {}\".format(address, protocol))\n success = case.check_input_data(protocol=protocol, address=address, download=True,\n input_data_root=input_data_root, data_list_dir=data_list_dir)\n return success\n\ndef stage_refcase(self, input_data_root=None, data_list_dir=None):\n get_refcase = self.get_value(\"GET_REFCASE\")\n run_type = self.get_value(\"RUN_TYPE\")\n continue_run = self.get_value(\"CONTINUE_RUN\")\n\n # We do not fully populate the inputdata directory on every\n # machine and do not expect every user to download the 3TB+ of\n # data in our inputdata repository. 
This code checks for the\n # existence of inputdata in the local inputdata directory and\n # attempts to download data from the server if it's needed and\n # missing.\n if get_refcase and run_type != \"startup\" and not continue_run:\n din_loc_root = self.get_value(\"DIN_LOC_ROOT\")\n run_refdate = self.get_value(\"RUN_REFDATE\")\n run_refcase = self.get_value(\"RUN_REFCASE\")\n run_refdir = self.get_value(\"RUN_REFDIR\")\n rundir = self.get_value(\"RUNDIR\")\n\n if os.path.isabs(run_refdir):\n refdir = run_refdir\n else:\n refdir = os.path.join(din_loc_root, run_refdir, run_refcase, run_refdate)\n if not os.path.isdir(refdir):\n logger.warning(\"Refcase not found in {}, will attempt to download from inputdata\".format(refdir))\n with open(os.path.join(\"Buildconf\",\"refcase.input_data_list\"),\"w\") as fd:\n fd.write(\"refdir = {}{}\".format(refdir, os.sep))\n if input_data_root is None:\n input_data_root = din_loc_root\n if data_list_dir is None:\n data_list_dir = \"Buildconf\"\n success = _downloadfromserver(self, input_data_root=input_data_root, data_list_dir=data_list_dir)\n expect(success, \"Could not download refcase from any server\")\n\n logger.info(\" - Prestaging REFCASE ({}) to {}\".format(refdir, rundir))\n\n # prestage the reference case's files.\n\n if (not os.path.exists(rundir)):\n logger.debug(\"Creating run directory: {}\".format(rundir))\n os.makedirs(rundir)\n\n # copy the refcases' rpointer files to the run directory\n for rpointerfile in glob.iglob(os.path.join(\"{}\",\"*rpointer*\").format(refdir)):\n logger.info(\"Copy rpointer {}\".format(rpointerfile))\n safe_copy(rpointerfile, rundir)\n\n # link everything else\n\n for rcfile in glob.iglob(os.path.join(refdir,\"*\")):\n rcbaseline = os.path.basename(rcfile)\n if not os.path.exists(\"{}/{}\".format(rundir, rcbaseline)):\n logger.info(\"Staging file {}\".format(rcfile))\n os.symlink(rcfile, \"{}/{}\".format(rundir, rcbaseline))\n # Backward compatibility, some old refcases have cam2 in the name\n # link to local cam file.\n for cam2file in glob.iglob(os.path.join(\"{}\",\"*.cam2.*\").format(rundir)):\n camfile = cam2file.replace(\"cam2\", \"cam\")\n os.symlink(cam2file, camfile)\n elif not get_refcase and run_type != \"startup\":\n logger.info(\"GET_REFCASE is false, the user is expected to stage the refcase to the run directory.\")\n if os.path.exists(os.path.join(\"Buildconf\",\"refcase.input_data_list\")):\n os.remove(os.path.join(\"Buildconf\",\"refcase.input_data_list\"))\n return True\n\ndef check_input_data(case, protocol=\"svn\", address=None, input_data_root=None, data_list_dir=\"Buildconf\", download=False):\n \"\"\"\n Return True if no files missing\n \"\"\"\n case.load_env(reset=True)\n # Fill in defaults as needed\n input_data_root = case.get_value(\"DIN_LOC_ROOT\") if input_data_root is None else input_data_root\n\n expect(os.path.isdir(input_data_root), \"Invalid input_data_root directory: '{}'\".format(input_data_root))\n expect(os.path.isdir(data_list_dir), \"Invalid data_list_dir directory: '{}'\".format(data_list_dir))\n\n data_list_files = find_files(data_list_dir, \"*.input_data_list\")\n expect(data_list_files, \"No .input_data_list files found in dir '{}'\".format(data_list_dir))\n\n no_files_missing = True\n\n if download:\n if protocol not in vars(CIME.Servers):\n logger.warning(\"Client protocol {} not enabled\".format(protocol))\n return False\n\n if protocol == \"svn\":\n server = CIME.Servers.SVN(address)\n elif protocol == \"gftp\":\n server = CIME.Servers.GridFTP(address)\n elif 
protocol == \"ftp\":\n server = CIME.Servers.FTP(address)\n elif protocol == \"wget\":\n server = CIME.Servers.WGET(address)\n else:\n expect(False, \"Unsupported inputdata protocol: {}\".format(protocol))\n\n\n\n for data_list_file in data_list_files:\n logging.info(\"Loading input file list: '{}'\".format(data_list_file))\n with open(data_list_file, \"r\") as fd:\n lines = fd.readlines()\n\n for line in lines:\n line = line.strip()\n if (line and not line.startswith(\"#\")):\n tokens = line.split('=')\n description, full_path = tokens[0].strip(), tokens[1].strip()\n if description.endswith('datapath'):\n continue\n if(full_path):\n # expand xml variables\n full_path = case.get_resolved_value(full_path)\n rel_path = full_path.replace(input_data_root, \"\")\n model = os.path.basename(data_list_file).split('.')[0]\n\n if (\"/\" in rel_path and rel_path == full_path):\n # User pointing to a file outside of input_data_root, we cannot determine\n # rel_path, and so cannot download the file. If it already exists, we can\n # proceed\n if not os.path.exists(full_path):\n logging.warning(\"Model {} missing file {} = '{}'\".format(model, description, full_path))\n if download:\n logging.warning(\" Cannot download file since it lives outside of the input_data_root '{}'\".format(input_data_root))\n no_files_missing = False\n else:\n logging.debug(\" Found input file: '{}'\".format(full_path))\n\n else:\n # There are some special values of rel_path that\n # we need to ignore - some of the component models\n # set things like 'NULL' or 'same_as_TS' -\n # basically if rel_path does not contain '/' (a\n # directory tree) you can assume it's a special\n # value and ignore it (perhaps with a warning)\n if (\"/\" in rel_path and not os.path.exists(full_path)):\n logging.warning(\" Model {} missing file {} = '{}'\".format(model, description, full_path))\n no_files_missing = False\n\n if (download):\n no_files_missing = _download_if_in_repo(server, input_data_root, rel_path.strip(os.sep),\n isdirectory=rel_path.endswith(os.sep))\n else:\n logging.debug(\" Already had input file: '{}'\".format(full_path))\n\n else:\n model = os.path.basename(data_list_file).split('.')[0]\n logging.warning(\"Model {} no file specified for {}\".format(model, description))\n\n return no_files_missing\n", "path": "scripts/lib/CIME/case/check_input_data.py"}]} | 2,981 | 327 |
gh_patches_debug_6377 | rasdani/github-patches | git_diff | scrapy__scrapy-4361 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[DOCS] Use HTTPS
Reference:
- https://httpsiseasy.com
- https://letsencrypt.org
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `docs/conf.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 #
3 # Scrapy documentation build configuration file, created by
4 # sphinx-quickstart on Mon Nov 24 12:02:52 2008.
5 #
6 # This file is execfile()d with the current directory set to its containing dir.
7 #
8 # The contents of this file are pickled, so don't put values in the namespace
9 # that aren't pickleable (module imports are okay, they're removed automatically).
10 #
11 # All configuration values have a default; values that are commented out
12 # serve to show the default.
13
14 import sys
15 from datetime import datetime
16 from os import path
17
18 # If your extensions are in another directory, add it here. If the directory
19 # is relative to the documentation root, use os.path.abspath to make it
20 # absolute, like shown here.
21 sys.path.append(path.join(path.dirname(__file__), "_ext"))
22 sys.path.insert(0, path.dirname(path.dirname(__file__)))
23
24
25 # General configuration
26 # ---------------------
27
28 # Add any Sphinx extension module names here, as strings. They can be extensions
29 # coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
30 extensions = [
31 'hoverxref.extension',
32 'notfound.extension',
33 'scrapydocs',
34 'sphinx.ext.autodoc',
35 'sphinx.ext.coverage',
36 'sphinx.ext.intersphinx',
37 'sphinx.ext.viewcode',
38 ]
39
40 # Add any paths that contain templates here, relative to this directory.
41 templates_path = ['_templates']
42
43 # The suffix of source filenames.
44 source_suffix = '.rst'
45
46 # The encoding of source files.
47 #source_encoding = 'utf-8'
48
49 # The master toctree document.
50 master_doc = 'index'
51
52 # General information about the project.
53 project = 'Scrapy'
54 copyright = '2008–{}, Scrapy developers'.format(datetime.now().year)
55
56 # The version info for the project you're documenting, acts as replacement for
57 # |version| and |release|, also used in various other places throughout the
58 # built documents.
59 #
60 # The short X.Y version.
61 try:
62 import scrapy
63 version = '.'.join(map(str, scrapy.version_info[:2]))
64 release = scrapy.__version__
65 except ImportError:
66 version = ''
67 release = ''
68
69 # The language for content autogenerated by Sphinx. Refer to documentation
70 # for a list of supported languages.
71 language = 'en'
72
73 # There are two options for replacing |today|: either, you set today to some
74 # non-false value, then it is used:
75 #today = ''
76 # Else, today_fmt is used as the format for a strftime call.
77 #today_fmt = '%B %d, %Y'
78
79 # List of documents that shouldn't be included in the build.
80 #unused_docs = []
81
82 exclude_patterns = ['build']
83
84 # List of directories, relative to source directory, that shouldn't be searched
85 # for source files.
86 exclude_trees = ['.build']
87
88 # The reST default role (used for this markup: `text`) to use for all documents.
89 #default_role = None
90
91 # If true, '()' will be appended to :func: etc. cross-reference text.
92 #add_function_parentheses = True
93
94 # If true, the current module name will be prepended to all description
95 # unit titles (such as .. function::).
96 #add_module_names = True
97
98 # If true, sectionauthor and moduleauthor directives will be shown in the
99 # output. They are ignored by default.
100 #show_authors = False
101
102 # The name of the Pygments (syntax highlighting) style to use.
103 pygments_style = 'sphinx'
104
105
106 # Options for HTML output
107 # -----------------------
108
109 # The theme to use for HTML and HTML Help pages. See the documentation for
110 # a list of builtin themes.
111 html_theme = 'sphinx_rtd_theme'
112
113 # Theme options are theme-specific and customize the look and feel of a theme
114 # further. For a list of options available for each theme, see the
115 # documentation.
116 #html_theme_options = {}
117
118 # Add any paths that contain custom themes here, relative to this directory.
119 # Add path to the RTD explicitly to robustify builds (otherwise might
120 # fail in a clean Debian build env)
121 import sphinx_rtd_theme
122 html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
123
124
125 # The style sheet to use for HTML and HTML Help pages. A file of that name
126 # must exist either in Sphinx' static/ path, or in one of the custom paths
127 # given in html_static_path.
128 # html_style = 'scrapydoc.css'
129
130 # The name for this set of Sphinx documents. If None, it defaults to
131 # "<project> v<release> documentation".
132 #html_title = None
133
134 # A shorter title for the navigation bar. Default is the same as html_title.
135 #html_short_title = None
136
137 # The name of an image file (relative to this directory) to place at the top
138 # of the sidebar.
139 #html_logo = None
140
141 # The name of an image file (within the static path) to use as favicon of the
142 # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
143 # pixels large.
144 #html_favicon = None
145
146 # Add any paths that contain custom static files (such as style sheets) here,
147 # relative to this directory. They are copied after the builtin static files,
148 # so a file named "default.css" will overwrite the builtin "default.css".
149 html_static_path = ['_static']
150
151 # If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
152 # using the given strftime format.
153 html_last_updated_fmt = '%b %d, %Y'
154
155 # Custom sidebar templates, maps document names to template names.
156 #html_sidebars = {}
157
158 # Additional templates that should be rendered to pages, maps page names to
159 # template names.
160 #html_additional_pages = {}
161
162 # If false, no module index is generated.
163 #html_use_modindex = True
164
165 # If false, no index is generated.
166 #html_use_index = True
167
168 # If true, the index is split into individual pages for each letter.
169 #html_split_index = False
170
171 # If true, the reST sources are included in the HTML build as _sources/<name>.
172 html_copy_source = True
173
174 # If true, an OpenSearch description file will be output, and all pages will
175 # contain a <link> tag referring to it. The value of this option must be the
176 # base URL from which the finished HTML is served.
177 #html_use_opensearch = ''
178
179 # If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
180 #html_file_suffix = ''
181
182 # Output file base name for HTML help builder.
183 htmlhelp_basename = 'Scrapydoc'
184
185
186 # Options for LaTeX output
187 # ------------------------
188
189 # The paper size ('letter' or 'a4').
190 #latex_paper_size = 'letter'
191
192 # The font size ('10pt', '11pt' or '12pt').
193 #latex_font_size = '10pt'
194
195 # Grouping the document tree into LaTeX files. List of tuples
196 # (source start file, target name, title, author, document class [howto/manual]).
197 latex_documents = [
198 ('index', 'Scrapy.tex', 'Scrapy Documentation',
199 'Scrapy developers', 'manual'),
200 ]
201
202 # The name of an image file (relative to this directory) to place at the top of
203 # the title page.
204 #latex_logo = None
205
206 # For "manual" documents, if this is true, then toplevel headings are parts,
207 # not chapters.
208 #latex_use_parts = False
209
210 # Additional stuff for the LaTeX preamble.
211 #latex_preamble = ''
212
213 # Documents to append as an appendix to all manuals.
214 #latex_appendices = []
215
216 # If false, no module index is generated.
217 #latex_use_modindex = True
218
219
220 # Options for the linkcheck builder
221 # ---------------------------------
222
223 # A list of regular expressions that match URIs that should not be checked when
224 # doing a linkcheck build.
225 linkcheck_ignore = [
226 'http://localhost:\d+', 'http://hg.scrapy.org',
227 'http://directory.google.com/'
228 ]
229
230
231 # Options for the Coverage extension
232 # ----------------------------------
233 coverage_ignore_pyobjects = [
234 # Contract’s add_pre_hook and add_post_hook are not documented because
235 # they should be transparent to contract developers, for whom pre_hook and
236 # post_hook should be the actual concern.
237 r'\bContract\.add_(pre|post)_hook$',
238
239 # ContractsManager is an internal class, developers are not expected to
240 # interact with it directly in any way.
241 r'\bContractsManager\b$',
242
243 # For default contracts we only want to document their general purpose in
244 # their __init__ method, the methods they reimplement to achieve that purpose
245 # should be irrelevant to developers using those contracts.
246 r'\w+Contract\.(adjust_request_args|(pre|post)_process)$',
247
248 # Methods of downloader middlewares are not documented, only the classes
249 # themselves, since downloader middlewares are controlled through Scrapy
250 # settings.
251 r'^scrapy\.downloadermiddlewares\.\w*?\.(\w*?Middleware|DownloaderStats)\.',
252
253 # Base classes of downloader middlewares are implementation details that
254 # are not meant for users.
255 r'^scrapy\.downloadermiddlewares\.\w*?\.Base\w*?Middleware',
256
257 # Private exception used by the command-line interface implementation.
258 r'^scrapy\.exceptions\.UsageError',
259
260 # Methods of BaseItemExporter subclasses are only documented in
261 # BaseItemExporter.
262 r'^scrapy\.exporters\.(?!BaseItemExporter\b)\w*?\.',
263
264 # Extension behavior is only modified through settings. Methods of
265 # extension classes, as well as helper functions, are implementation
266 # details that are not documented.
267 r'^scrapy\.extensions\.[a-z]\w*?\.[A-Z]\w*?\.', # methods
268 r'^scrapy\.extensions\.[a-z]\w*?\.[a-z]', # helper functions
269
270 # Never documented before, and deprecated now.
271 r'^scrapy\.item\.DictItem$',
272 r'^scrapy\.linkextractors\.FilteringLinkExtractor$',
273
274 # Implementation detail of LxmlLinkExtractor
275 r'^scrapy\.linkextractors\.lxmlhtml\.LxmlParserLinkExtractor',
276 ]
277
278
279 # Options for the InterSphinx extension
280 # -------------------------------------
281
282 intersphinx_mapping = {
283 'coverage': ('https://coverage.readthedocs.io/en/stable', None),
284 'pytest': ('https://docs.pytest.org/en/latest', None),
285 'python': ('https://docs.python.org/3', None),
286 'sphinx': ('https://www.sphinx-doc.org/en/master', None),
287 'tox': ('https://tox.readthedocs.io/en/latest', None),
288 'twisted': ('https://twistedmatrix.com/documents/current', None),
289 'twistedapi': ('https://twistedmatrix.com/documents/current/api', None),
290 }
291
292
293 # Options for sphinx-hoverxref options
294 # ------------------------------------
295
296 hoverxref_auto_ref = True
297
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/docs/conf.py b/docs/conf.py
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -281,6 +281,7 @@
intersphinx_mapping = {
'coverage': ('https://coverage.readthedocs.io/en/stable', None),
+ 'cssselect': ('https://cssselect.readthedocs.io/en/latest', None),
'pytest': ('https://docs.pytest.org/en/latest', None),
'python': ('https://docs.python.org/3', None),
'sphinx': ('https://www.sphinx-doc.org/en/master', None),
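The golden diff above only adds the cssselect inventory to intersphinx_mapping, already pointing at an HTTPS URL. The wider request in the issue is that documentation links prefer HTTPS wherever the host supports it; a small illustrative audit of Sphinx-style settings is sketched below (the helper and the example values are assumptions for demonstration, not code that ships with Scrapy).

```python
import re

# Flag plain-http URLs in Sphinx conf.py style settings so they can be
# reviewed and switched to HTTPS where the host supports it.
HTTP_URL = re.compile(r"http://[^\s'\",)]+")


def find_plain_http(settings):
    hits = []
    for name, value in settings.items():
        for url in HTTP_URL.findall(repr(value)):
            hits.append((name, url))
    return hits


if __name__ == "__main__":
    example = {
        "intersphinx_mapping": {
            "cssselect": ("https://cssselect.readthedocs.io/en/latest", None),
        },
        "linkcheck_ignore": ["http://hg.scrapy.org", "http://directory.google.com/"],
    }
    for setting, url in find_plain_http(example):
        print("{}: consider https for {}".format(setting, url))
```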
| {"golden_diff": "diff --git a/docs/conf.py b/docs/conf.py\n--- a/docs/conf.py\n+++ b/docs/conf.py\n@@ -281,6 +281,7 @@\n \n intersphinx_mapping = {\n 'coverage': ('https://coverage.readthedocs.io/en/stable', None),\n+ 'cssselect': ('https://cssselect.readthedocs.io/en/latest', None),\n 'pytest': ('https://docs.pytest.org/en/latest', None),\n 'python': ('https://docs.python.org/3', None),\n 'sphinx': ('https://www.sphinx-doc.org/en/master', None),\n", "issue": "[DOCS] Use HTTPS\nReference:\r\n - https://httpsiseasy.com\r\n - https://letsencrypt.org\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Scrapy documentation build configuration file, created by\n# sphinx-quickstart on Mon Nov 24 12:02:52 2008.\n#\n# This file is execfile()d with the current directory set to its containing dir.\n#\n# The contents of this file are pickled, so don't put values in the namespace\n# that aren't pickleable (module imports are okay, they're removed automatically).\n#\n# All configuration values have a default; values that are commented out\n# serve to show the default.\n\nimport sys\nfrom datetime import datetime\nfrom os import path\n\n# If your extensions are in another directory, add it here. If the directory\n# is relative to the documentation root, use os.path.abspath to make it\n# absolute, like shown here.\nsys.path.append(path.join(path.dirname(__file__), \"_ext\"))\nsys.path.insert(0, path.dirname(path.dirname(__file__)))\n\n\n# General configuration\n# ---------------------\n\n# Add any Sphinx extension module names here, as strings. They can be extensions\n# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.\nextensions = [\n 'hoverxref.extension',\n 'notfound.extension',\n 'scrapydocs',\n 'sphinx.ext.autodoc',\n 'sphinx.ext.coverage',\n 'sphinx.ext.intersphinx',\n 'sphinx.ext.viewcode',\n]\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\n# The suffix of source filenames.\nsource_suffix = '.rst'\n\n# The encoding of source files.\n#source_encoding = 'utf-8'\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# General information about the project.\nproject = 'Scrapy'\ncopyright = '2008\u2013{}, Scrapy developers'.format(datetime.now().year)\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n#\n# The short X.Y version.\ntry:\n import scrapy\n version = '.'.join(map(str, scrapy.version_info[:2]))\n release = scrapy.__version__\nexcept ImportError:\n version = ''\n release = ''\n\n# The language for content autogenerated by Sphinx. Refer to documentation\n# for a list of supported languages.\nlanguage = 'en'\n\n# There are two options for replacing |today|: either, you set today to some\n# non-false value, then it is used:\n#today = ''\n# Else, today_fmt is used as the format for a strftime call.\n#today_fmt = '%B %d, %Y'\n\n# List of documents that shouldn't be included in the build.\n#unused_docs = []\n\nexclude_patterns = ['build']\n\n# List of directories, relative to source directory, that shouldn't be searched\n# for source files.\nexclude_trees = ['.build']\n\n# The reST default role (used for this markup: `text`) to use for all documents.\n#default_role = None\n\n# If true, '()' will be appended to :func: etc. cross-reference text.\n#add_function_parentheses = True\n\n# If true, the current module name will be prepended to all description\n# unit titles (such as .. 
function::).\n#add_module_names = True\n\n# If true, sectionauthor and moduleauthor directives will be shown in the\n# output. They are ignored by default.\n#show_authors = False\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = 'sphinx'\n\n\n# Options for HTML output\n# -----------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\nhtml_theme = 'sphinx_rtd_theme'\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\n#html_theme_options = {}\n\n# Add any paths that contain custom themes here, relative to this directory.\n# Add path to the RTD explicitly to robustify builds (otherwise might\n# fail in a clean Debian build env)\nimport sphinx_rtd_theme\nhtml_theme_path = [sphinx_rtd_theme.get_html_theme_path()]\n\n\n# The style sheet to use for HTML and HTML Help pages. A file of that name\n# must exist either in Sphinx' static/ path, or in one of the custom paths\n# given in html_static_path.\n# html_style = 'scrapydoc.css'\n\n# The name for this set of Sphinx documents. If None, it defaults to\n# \"<project> v<release> documentation\".\n#html_title = None\n\n# A shorter title for the navigation bar. Default is the same as html_title.\n#html_short_title = None\n\n# The name of an image file (relative to this directory) to place at the top\n# of the sidebar.\n#html_logo = None\n\n# The name of an image file (within the static path) to use as favicon of the\n# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32\n# pixels large.\n#html_favicon = None\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = ['_static']\n\n# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,\n# using the given strftime format.\nhtml_last_updated_fmt = '%b %d, %Y'\n\n# Custom sidebar templates, maps document names to template names.\n#html_sidebars = {}\n\n# Additional templates that should be rendered to pages, maps page names to\n# template names.\n#html_additional_pages = {}\n\n# If false, no module index is generated.\n#html_use_modindex = True\n\n# If false, no index is generated.\n#html_use_index = True\n\n# If true, the index is split into individual pages for each letter.\n#html_split_index = False\n\n# If true, the reST sources are included in the HTML build as _sources/<name>.\nhtml_copy_source = True\n\n# If true, an OpenSearch description file will be output, and all pages will\n# contain a <link> tag referring to it. The value of this option must be the\n# base URL from which the finished HTML is served.\n#html_use_opensearch = ''\n\n# If nonempty, this is the file name suffix for HTML files (e.g. \".xhtml\").\n#html_file_suffix = ''\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = 'Scrapydoc'\n\n\n# Options for LaTeX output\n# ------------------------\n\n# The paper size ('letter' or 'a4').\n#latex_paper_size = 'letter'\n\n# The font size ('10pt', '11pt' or '12pt').\n#latex_font_size = '10pt'\n\n# Grouping the document tree into LaTeX files. 
List of tuples\n# (source start file, target name, title, author, document class [howto/manual]).\nlatex_documents = [\n ('index', 'Scrapy.tex', 'Scrapy Documentation',\n 'Scrapy developers', 'manual'),\n]\n\n# The name of an image file (relative to this directory) to place at the top of\n# the title page.\n#latex_logo = None\n\n# For \"manual\" documents, if this is true, then toplevel headings are parts,\n# not chapters.\n#latex_use_parts = False\n\n# Additional stuff for the LaTeX preamble.\n#latex_preamble = ''\n\n# Documents to append as an appendix to all manuals.\n#latex_appendices = []\n\n# If false, no module index is generated.\n#latex_use_modindex = True\n\n\n# Options for the linkcheck builder\n# ---------------------------------\n\n# A list of regular expressions that match URIs that should not be checked when\n# doing a linkcheck build.\nlinkcheck_ignore = [\n 'http://localhost:\\d+', 'http://hg.scrapy.org',\n 'http://directory.google.com/'\n]\n\n\n# Options for the Coverage extension\n# ----------------------------------\ncoverage_ignore_pyobjects = [\n # Contract\u2019s add_pre_hook and add_post_hook are not documented because\n # they should be transparent to contract developers, for whom pre_hook and\n # post_hook should be the actual concern.\n r'\\bContract\\.add_(pre|post)_hook$',\n\n # ContractsManager is an internal class, developers are not expected to\n # interact with it directly in any way.\n r'\\bContractsManager\\b$',\n\n # For default contracts we only want to document their general purpose in\n # their __init__ method, the methods they reimplement to achieve that purpose\n # should be irrelevant to developers using those contracts.\n r'\\w+Contract\\.(adjust_request_args|(pre|post)_process)$',\n\n # Methods of downloader middlewares are not documented, only the classes\n # themselves, since downloader middlewares are controlled through Scrapy\n # settings.\n r'^scrapy\\.downloadermiddlewares\\.\\w*?\\.(\\w*?Middleware|DownloaderStats)\\.',\n\n # Base classes of downloader middlewares are implementation details that\n # are not meant for users.\n r'^scrapy\\.downloadermiddlewares\\.\\w*?\\.Base\\w*?Middleware',\n\n # Private exception used by the command-line interface implementation.\n r'^scrapy\\.exceptions\\.UsageError',\n\n # Methods of BaseItemExporter subclasses are only documented in\n # BaseItemExporter.\n r'^scrapy\\.exporters\\.(?!BaseItemExporter\\b)\\w*?\\.',\n\n # Extension behavior is only modified through settings. 
Methods of\n # extension classes, as well as helper functions, are implementation\n # details that are not documented.\n r'^scrapy\\.extensions\\.[a-z]\\w*?\\.[A-Z]\\w*?\\.', # methods\n r'^scrapy\\.extensions\\.[a-z]\\w*?\\.[a-z]', # helper functions\n\n # Never documented before, and deprecated now.\n r'^scrapy\\.item\\.DictItem$',\n r'^scrapy\\.linkextractors\\.FilteringLinkExtractor$',\n\n # Implementation detail of LxmlLinkExtractor\n r'^scrapy\\.linkextractors\\.lxmlhtml\\.LxmlParserLinkExtractor',\n]\n\n\n# Options for the InterSphinx extension\n# -------------------------------------\n\nintersphinx_mapping = {\n 'coverage': ('https://coverage.readthedocs.io/en/stable', None),\n 'pytest': ('https://docs.pytest.org/en/latest', None),\n 'python': ('https://docs.python.org/3', None),\n 'sphinx': ('https://www.sphinx-doc.org/en/master', None),\n 'tox': ('https://tox.readthedocs.io/en/latest', None),\n 'twisted': ('https://twistedmatrix.com/documents/current', None),\n 'twistedapi': ('https://twistedmatrix.com/documents/current/api', None),\n}\n\n\n# Options for sphinx-hoverxref options\n# ------------------------------------\n\nhoverxref_auto_ref = True\n", "path": "docs/conf.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Scrapy documentation build configuration file, created by\n# sphinx-quickstart on Mon Nov 24 12:02:52 2008.\n#\n# This file is execfile()d with the current directory set to its containing dir.\n#\n# The contents of this file are pickled, so don't put values in the namespace\n# that aren't pickleable (module imports are okay, they're removed automatically).\n#\n# All configuration values have a default; values that are commented out\n# serve to show the default.\n\nimport sys\nfrom datetime import datetime\nfrom os import path\n\n# If your extensions are in another directory, add it here. If the directory\n# is relative to the documentation root, use os.path.abspath to make it\n# absolute, like shown here.\nsys.path.append(path.join(path.dirname(__file__), \"_ext\"))\nsys.path.insert(0, path.dirname(path.dirname(__file__)))\n\n\n# General configuration\n# ---------------------\n\n# Add any Sphinx extension module names here, as strings. They can be extensions\n# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.\nextensions = [\n 'hoverxref.extension',\n 'notfound.extension',\n 'scrapydocs',\n 'sphinx.ext.autodoc',\n 'sphinx.ext.coverage',\n 'sphinx.ext.intersphinx',\n 'sphinx.ext.viewcode',\n]\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\n# The suffix of source filenames.\nsource_suffix = '.rst'\n\n# The encoding of source files.\n#source_encoding = 'utf-8'\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# General information about the project.\nproject = 'Scrapy'\ncopyright = '2008\u2013{}, Scrapy developers'.format(datetime.now().year)\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n#\n# The short X.Y version.\ntry:\n import scrapy\n version = '.'.join(map(str, scrapy.version_info[:2]))\n release = scrapy.__version__\nexcept ImportError:\n version = ''\n release = ''\n\n# The language for content autogenerated by Sphinx. 
Refer to documentation\n# for a list of supported languages.\nlanguage = 'en'\n\n# There are two options for replacing |today|: either, you set today to some\n# non-false value, then it is used:\n#today = ''\n# Else, today_fmt is used as the format for a strftime call.\n#today_fmt = '%B %d, %Y'\n\n# List of documents that shouldn't be included in the build.\n#unused_docs = []\n\nexclude_patterns = ['build']\n\n# List of directories, relative to source directory, that shouldn't be searched\n# for source files.\nexclude_trees = ['.build']\n\n# The reST default role (used for this markup: `text`) to use for all documents.\n#default_role = None\n\n# If true, '()' will be appended to :func: etc. cross-reference text.\n#add_function_parentheses = True\n\n# If true, the current module name will be prepended to all description\n# unit titles (such as .. function::).\n#add_module_names = True\n\n# If true, sectionauthor and moduleauthor directives will be shown in the\n# output. They are ignored by default.\n#show_authors = False\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = 'sphinx'\n\n\n# Options for HTML output\n# -----------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\nhtml_theme = 'sphinx_rtd_theme'\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\n#html_theme_options = {}\n\n# Add any paths that contain custom themes here, relative to this directory.\n# Add path to the RTD explicitly to robustify builds (otherwise might\n# fail in a clean Debian build env)\nimport sphinx_rtd_theme\nhtml_theme_path = [sphinx_rtd_theme.get_html_theme_path()]\n\n\n# The style sheet to use for HTML and HTML Help pages. A file of that name\n# must exist either in Sphinx' static/ path, or in one of the custom paths\n# given in html_static_path.\n# html_style = 'scrapydoc.css'\n\n# The name for this set of Sphinx documents. If None, it defaults to\n# \"<project> v<release> documentation\".\n#html_title = None\n\n# A shorter title for the navigation bar. Default is the same as html_title.\n#html_short_title = None\n\n# The name of an image file (relative to this directory) to place at the top\n# of the sidebar.\n#html_logo = None\n\n# The name of an image file (within the static path) to use as favicon of the\n# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32\n# pixels large.\n#html_favicon = None\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. 
They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = ['_static']\n\n# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,\n# using the given strftime format.\nhtml_last_updated_fmt = '%b %d, %Y'\n\n# Custom sidebar templates, maps document names to template names.\n#html_sidebars = {}\n\n# Additional templates that should be rendered to pages, maps page names to\n# template names.\n#html_additional_pages = {}\n\n# If false, no module index is generated.\n#html_use_modindex = True\n\n# If false, no index is generated.\n#html_use_index = True\n\n# If true, the index is split into individual pages for each letter.\n#html_split_index = False\n\n# If true, the reST sources are included in the HTML build as _sources/<name>.\nhtml_copy_source = True\n\n# If true, an OpenSearch description file will be output, and all pages will\n# contain a <link> tag referring to it. The value of this option must be the\n# base URL from which the finished HTML is served.\n#html_use_opensearch = ''\n\n# If nonempty, this is the file name suffix for HTML files (e.g. \".xhtml\").\n#html_file_suffix = ''\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = 'Scrapydoc'\n\n\n# Options for LaTeX output\n# ------------------------\n\n# The paper size ('letter' or 'a4').\n#latex_paper_size = 'letter'\n\n# The font size ('10pt', '11pt' or '12pt').\n#latex_font_size = '10pt'\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title, author, document class [howto/manual]).\nlatex_documents = [\n ('index', 'Scrapy.tex', 'Scrapy Documentation',\n 'Scrapy developers', 'manual'),\n]\n\n# The name of an image file (relative to this directory) to place at the top of\n# the title page.\n#latex_logo = None\n\n# For \"manual\" documents, if this is true, then toplevel headings are parts,\n# not chapters.\n#latex_use_parts = False\n\n# Additional stuff for the LaTeX preamble.\n#latex_preamble = ''\n\n# Documents to append as an appendix to all manuals.\n#latex_appendices = []\n\n# If false, no module index is generated.\n#latex_use_modindex = True\n\n\n# Options for the linkcheck builder\n# ---------------------------------\n\n# A list of regular expressions that match URIs that should not be checked when\n# doing a linkcheck build.\nlinkcheck_ignore = [\n 'http://localhost:\\d+', 'http://hg.scrapy.org',\n 'http://directory.google.com/'\n]\n\n\n# Options for the Coverage extension\n# ----------------------------------\ncoverage_ignore_pyobjects = [\n # Contract\u2019s add_pre_hook and add_post_hook are not documented because\n # they should be transparent to contract developers, for whom pre_hook and\n # post_hook should be the actual concern.\n r'\\bContract\\.add_(pre|post)_hook$',\n\n # ContractsManager is an internal class, developers are not expected to\n # interact with it directly in any way.\n r'\\bContractsManager\\b$',\n\n # For default contracts we only want to document their general purpose in\n # their __init__ method, the methods they reimplement to achieve that purpose\n # should be irrelevant to developers using those contracts.\n r'\\w+Contract\\.(adjust_request_args|(pre|post)_process)$',\n\n # Methods of downloader middlewares are not documented, only the classes\n # themselves, since downloader middlewares are controlled through Scrapy\n # settings.\n 
r'^scrapy\\.downloadermiddlewares\\.\\w*?\\.(\\w*?Middleware|DownloaderStats)\\.',\n\n # Base classes of downloader middlewares are implementation details that\n # are not meant for users.\n r'^scrapy\\.downloadermiddlewares\\.\\w*?\\.Base\\w*?Middleware',\n\n # Private exception used by the command-line interface implementation.\n r'^scrapy\\.exceptions\\.UsageError',\n\n # Methods of BaseItemExporter subclasses are only documented in\n # BaseItemExporter.\n r'^scrapy\\.exporters\\.(?!BaseItemExporter\\b)\\w*?\\.',\n\n # Extension behavior is only modified through settings. Methods of\n # extension classes, as well as helper functions, are implementation\n # details that are not documented.\n r'^scrapy\\.extensions\\.[a-z]\\w*?\\.[A-Z]\\w*?\\.', # methods\n r'^scrapy\\.extensions\\.[a-z]\\w*?\\.[a-z]', # helper functions\n\n # Never documented before, and deprecated now.\n r'^scrapy\\.item\\.DictItem$',\n r'^scrapy\\.linkextractors\\.FilteringLinkExtractor$',\n\n # Implementation detail of LxmlLinkExtractor\n r'^scrapy\\.linkextractors\\.lxmlhtml\\.LxmlParserLinkExtractor',\n]\n\n\n# Options for the InterSphinx extension\n# -------------------------------------\n\nintersphinx_mapping = {\n 'coverage': ('https://coverage.readthedocs.io/en/stable', None),\n 'cssselect': ('https://cssselect.readthedocs.io/en/latest', None),\n 'pytest': ('https://docs.pytest.org/en/latest', None),\n 'python': ('https://docs.python.org/3', None),\n 'sphinx': ('https://www.sphinx-doc.org/en/master', None),\n 'tox': ('https://tox.readthedocs.io/en/latest', None),\n 'twisted': ('https://twistedmatrix.com/documents/current', None),\n 'twistedapi': ('https://twistedmatrix.com/documents/current/api', None),\n}\n\n\n# Options for sphinx-hoverxref options\n# ------------------------------------\n\nhoverxref_auto_ref = True\n", "path": "docs/conf.py"}]} | 3,525 | 131 |
gh_patches_debug_11032 | rasdani/github-patches | git_diff | spesmilo__electrum-1608 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
allow non-root installation on Linux
I want to install Electrum on my Ubuntu machine in my user's `$HOME` directory, i.e., not via `sudo`. If I run the command
```
pip install https://download.electrum.org/2.5.4/Electrum-2.5.4.tar.gz
```
I get an error when the installation tries to create a desktop shortcut:
```
copying electrum.desktop -> /usr/share/applications/
error: [Errno 13] Permission denied: '/usr/share/applications/electrum.desktop'
```
Maybe the installation script could `try` this copy operation and, if it fails, retry at `$HOME/.local/share/applications` (or, better, `$XDG_DATA_HOME/applications`).
If you agree this would make sense and point me to the file where I would need to do this, I'd be happy to provide a PR.
--- END ISSUE ---
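For orientation, a minimal sketch of the fallback the reporter suggests might look like the following. The helper name `pick_data_prefix` and the use of `os.access` are illustrative assumptions, not Electrum's actual code:
```python
import os
import sys


def pick_data_prefix():
    """Return a writable 'share' directory for desktop and icon files.

    Falls back to $XDG_DATA_HOME (or ~/.local/share) when the
    system-wide prefix is not writable, e.g. for non-root installs.
    """
    usr_share = os.path.join(sys.prefix, "share")
    if os.access(usr_share, os.W_OK):
        return usr_share
    return os.environ.get("XDG_DATA_HOME",
                          os.path.expanduser("~/.local/share"))


usr_share = pick_data_prefix()
data_files = [
    (os.path.join(usr_share, 'applications/'), ['electrum.desktop']),
    (os.path.join(usr_share, 'pixmaps/'), ['icons/electrum.png']),
]
```
The fallback branch points at the per-user XDG data directory, which a non-root install can normally write to.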
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 #!/usr/bin/env python2
2
3 # python setup.py sdist --format=zip,gztar
4
5 from setuptools import setup
6 import os
7 import sys
8 import platform
9 import imp
10
11 version = imp.load_source('version', 'lib/version.py')
12
13 if sys.version_info[:3] < (2, 7, 0):
14 sys.exit("Error: Electrum requires Python version >= 2.7.0...")
15
16 data_files = []
17
18 if platform.system() in ['Linux', 'FreeBSD', 'DragonFly']:
19 usr_share = os.path.join(sys.prefix, "share")
20 data_files += [
21 (os.path.join(usr_share, 'applications/'), ['electrum.desktop']),
22 (os.path.join(usr_share, 'pixmaps/'), ['icons/electrum.png'])
23 ]
24
25 setup(
26 name="Electrum",
27 version=version.ELECTRUM_VERSION,
28 install_requires=[
29 'slowaes>=0.1a1',
30 'ecdsa>=0.9',
31 'pbkdf2',
32 'requests',
33 'qrcode',
34 'protobuf',
35 'dnspython',
36 'jsonrpclib',
37 ],
38 packages=[
39 'electrum',
40 'electrum_gui',
41 'electrum_gui.qt',
42 'electrum_plugins',
43 'electrum_plugins.audio_modem',
44 'electrum_plugins.cosigner_pool',
45 'electrum_plugins.email_requests',
46 'electrum_plugins.exchange_rate',
47 'electrum_plugins.greenaddress_instant',
48 'electrum_plugins.keepkey',
49 'electrum_plugins.labels',
50 'electrum_plugins.ledger',
51 'electrum_plugins.plot',
52 'electrum_plugins.trezor',
53 'electrum_plugins.trustedcoin',
54 'electrum_plugins.virtualkeyboard',
55 ],
56 package_dir={
57 'electrum': 'lib',
58 'electrum_gui': 'gui',
59 'electrum_plugins': 'plugins',
60 },
61 package_data={
62 'electrum': [
63 'www/index.html',
64 'wordlist/*.txt',
65 'locale/*/LC_MESSAGES/electrum.mo',
66 ]
67 },
68 scripts=['electrum'],
69 data_files=data_files,
70 description="Lightweight Bitcoin Wallet",
71 author="Thomas Voegtlin",
72 author_email="[email protected]",
73 license="GNU GPLv3",
74 url="https://electrum.org",
75 long_description="""Lightweight Bitcoin Wallet"""
76 )
77
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
old mode 100644
new mode 100755
--- a/setup.py
+++ b/setup.py
@@ -17,6 +17,11 @@
if platform.system() in ['Linux', 'FreeBSD', 'DragonFly']:
usr_share = os.path.join(sys.prefix, "share")
+ if not os.access(usr_share, os.W_OK):
+ if 'XDG_DATA_HOME' in os.environ.keys():
+ usr_share = os.environ['$XDG_DATA_HOME']
+ else:
+ usr_share = os.path.expanduser('~/.local/share')
data_files += [
(os.path.join(usr_share, 'applications/'), ['electrum.desktop']),
(os.path.join(usr_share, 'pixmaps/'), ['icons/electrum.png'])
| {"golden_diff": "diff --git a/setup.py b/setup.py\nold mode 100644\nnew mode 100755\n--- a/setup.py\n+++ b/setup.py\n@@ -17,6 +17,11 @@\n \n if platform.system() in ['Linux', 'FreeBSD', 'DragonFly']:\n usr_share = os.path.join(sys.prefix, \"share\")\n+ if not os.access(usr_share, os.W_OK):\n+ if 'XDG_DATA_HOME' in os.environ.keys():\n+ usr_share = os.environ['$XDG_DATA_HOME']\n+ else:\n+ usr_share = os.path.expanduser('~/.local/share')\n data_files += [\n (os.path.join(usr_share, 'applications/'), ['electrum.desktop']),\n (os.path.join(usr_share, 'pixmaps/'), ['icons/electrum.png'])\n", "issue": "allow non-root installation on Linux\nI want to install Electrum on my Ubuntu machine in my user's `$HOME` directory, i.e., not via `sudo`. If I run the command\n\n```\npip install https://download.electrum.org/2.5.4/Electrum-2.5.4.tar.gz\n```\n\nI get an error when the installation tries to create a desktop shortcut:\n\n```\ncopying electrum.desktop -> /usr/share/applications/\n\nerror: [Errno 13] Permission denied: '/usr/share/applications/electrum.desktop'\n```\n\nMaybe the installation script could `try` if this copy operation works, and try again at `$HOME/.local/share/applications` (or, better, `$XDG_DATA_HOME/applications`) in case it doesn't.\n\nIf you agree this would make sense and point me to the file where I would need to do this, I'd be happy to provide a PR.\n\n", "before_files": [{"content": "#!/usr/bin/env python2\n\n# python setup.py sdist --format=zip,gztar\n\nfrom setuptools import setup\nimport os\nimport sys\nimport platform\nimport imp\n\nversion = imp.load_source('version', 'lib/version.py')\n\nif sys.version_info[:3] < (2, 7, 0):\n sys.exit(\"Error: Electrum requires Python version >= 2.7.0...\")\n\ndata_files = []\n\nif platform.system() in ['Linux', 'FreeBSD', 'DragonFly']:\n usr_share = os.path.join(sys.prefix, \"share\")\n data_files += [\n (os.path.join(usr_share, 'applications/'), ['electrum.desktop']),\n (os.path.join(usr_share, 'pixmaps/'), ['icons/electrum.png'])\n ]\n\nsetup(\n name=\"Electrum\",\n version=version.ELECTRUM_VERSION,\n install_requires=[\n 'slowaes>=0.1a1',\n 'ecdsa>=0.9',\n 'pbkdf2',\n 'requests',\n 'qrcode',\n 'protobuf',\n 'dnspython',\n 'jsonrpclib',\n ],\n packages=[\n 'electrum',\n 'electrum_gui',\n 'electrum_gui.qt',\n 'electrum_plugins',\n 'electrum_plugins.audio_modem',\n 'electrum_plugins.cosigner_pool',\n 'electrum_plugins.email_requests',\n 'electrum_plugins.exchange_rate',\n 'electrum_plugins.greenaddress_instant',\n 'electrum_plugins.keepkey',\n 'electrum_plugins.labels',\n 'electrum_plugins.ledger',\n 'electrum_plugins.plot',\n 'electrum_plugins.trezor',\n 'electrum_plugins.trustedcoin',\n 'electrum_plugins.virtualkeyboard',\n ],\n package_dir={\n 'electrum': 'lib',\n 'electrum_gui': 'gui',\n 'electrum_plugins': 'plugins',\n },\n package_data={\n 'electrum': [\n 'www/index.html',\n 'wordlist/*.txt',\n 'locale/*/LC_MESSAGES/electrum.mo',\n ]\n },\n scripts=['electrum'],\n data_files=data_files,\n description=\"Lightweight Bitcoin Wallet\",\n author=\"Thomas Voegtlin\",\n author_email=\"[email protected]\",\n license=\"GNU GPLv3\",\n url=\"https://electrum.org\",\n long_description=\"\"\"Lightweight Bitcoin Wallet\"\"\"\n)\n", "path": "setup.py"}], "after_files": [{"content": "#!/usr/bin/env python2\n\n# python setup.py sdist --format=zip,gztar\n\nfrom setuptools import setup\nimport os\nimport sys\nimport platform\nimport imp\n\nversion = imp.load_source('version', 'lib/version.py')\n\nif sys.version_info[:3] < (2, 7, 0):\n 
sys.exit(\"Error: Electrum requires Python version >= 2.7.0...\")\n\ndata_files = []\n\nif platform.system() in ['Linux', 'FreeBSD', 'DragonFly']:\n usr_share = os.path.join(sys.prefix, \"share\")\n if not os.access(usr_share, os.W_OK):\n if 'XDG_DATA_HOME' in os.environ.keys():\n usr_share = os.environ['$XDG_DATA_HOME']\n else:\n usr_share = os.path.expanduser('~/.local/share')\n data_files += [\n (os.path.join(usr_share, 'applications/'), ['electrum.desktop']),\n (os.path.join(usr_share, 'pixmaps/'), ['icons/electrum.png'])\n ]\n\nsetup(\n name=\"Electrum\",\n version=version.ELECTRUM_VERSION,\n install_requires=[\n 'slowaes>=0.1a1',\n 'ecdsa>=0.9',\n 'pbkdf2',\n 'requests',\n 'qrcode',\n 'protobuf',\n 'dnspython',\n 'jsonrpclib',\n ],\n packages=[\n 'electrum',\n 'electrum_gui',\n 'electrum_gui.qt',\n 'electrum_plugins',\n 'electrum_plugins.audio_modem',\n 'electrum_plugins.cosigner_pool',\n 'electrum_plugins.email_requests',\n 'electrum_plugins.exchange_rate',\n 'electrum_plugins.greenaddress_instant',\n 'electrum_plugins.keepkey',\n 'electrum_plugins.labels',\n 'electrum_plugins.ledger',\n 'electrum_plugins.plot',\n 'electrum_plugins.trezor',\n 'electrum_plugins.trustedcoin',\n 'electrum_plugins.virtualkeyboard',\n ],\n package_dir={\n 'electrum': 'lib',\n 'electrum_gui': 'gui',\n 'electrum_plugins': 'plugins',\n },\n package_data={\n 'electrum': [\n 'www/index.html',\n 'wordlist/*.txt',\n 'locale/*/LC_MESSAGES/electrum.mo',\n ]\n },\n scripts=['electrum'],\n data_files=data_files,\n description=\"Lightweight Bitcoin Wallet\",\n author=\"Thomas Voegtlin\",\n author_email=\"[email protected]\",\n license=\"GNU GPLv3\",\n url=\"https://electrum.org\",\n long_description=\"\"\"Lightweight Bitcoin Wallet\"\"\"\n)\n", "path": "setup.py"}]} | 1,117 | 187 |
gh_patches_debug_21116 | rasdani/github-patches | git_diff | pypa__pip-5159 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Pip fails to install package using git and short commit hash
* Pip version: 10.0.0b1
* Python version: 3.5.2
* Operating system: Ubuntu 16.04
### Description:
When installing a package from a git repository and specifying a commit hash abbreviated to 7 characters,
`pip` shows an error and does not install the package.
I expect `pip` to install the package using the specified commit.
### What I've run:
```
pip install git+git://git.myproject.org/MyProject@0abcdef
Collecting git+git://git.myproject.org/MyProject@0abcdef
Cloning git://git.myproject.org/MyProject (to revision 0abcdef) to /tmp/pip-req-build-xxxxxxxx
Did not find branch or tag '0abcdef', assuming revision or ref.
fatal: Couldn't find remote ref 0abcdef
Command "git fetch -q git+git://git.myproject.org/MyProject 0abcdef" failed with error code 128 in /tmp/pip-req-build-xxxxxxxxx
```
The issue seems to be related to this PR https://github.com/pypa/pip/pull/4674
--- END ISSUE ---
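For context, the failing command in the log is a `git fetch` of the abbreviated hash, which appears to fail because an abbreviated commit is not a remote ref. A hypothetical sketch of the distinction (the function name and the `run_command` callable are assumptions for illustration, not pip's real API):
```python
def checkout_revision(run_command, dest, url, rev):
    """Check out ``rev`` inside an existing clone at ``dest``.

    Named refs must be fetched from the remote first; a (possibly
    abbreviated) commit hash is already available after ``git clone``
    and can be checked out directly without a fetch.
    """
    if rev.startswith('refs/'):
        # e.g. refs/heads/feature or refs/tags/v1.0
        run_command(['fetch', '-q', url, rev], cwd=dest)
        rev = 'FETCH_HEAD'
    run_command(['checkout', '-q', rev], cwd=dest)
```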
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/pip/_internal/vcs/git.py`
Content:
```
1 from __future__ import absolute_import
2
3 import logging
4 import os.path
5 import re
6
7 from pip._vendor.packaging.version import parse as parse_version
8 from pip._vendor.six.moves.urllib import parse as urllib_parse
9 from pip._vendor.six.moves.urllib import request as urllib_request
10
11 from pip._internal.compat import samefile
12 from pip._internal.exceptions import BadCommand
13 from pip._internal.utils.misc import display_path
14 from pip._internal.utils.temp_dir import TempDirectory
15 from pip._internal.vcs import VersionControl, vcs
16
17 urlsplit = urllib_parse.urlsplit
18 urlunsplit = urllib_parse.urlunsplit
19
20
21 logger = logging.getLogger(__name__)
22
23
24 HASH_REGEX = re.compile('[a-fA-F0-9]{40}')
25
26
27 def looks_like_hash(sha):
28 return bool(HASH_REGEX.match(sha))
29
30
31 class Git(VersionControl):
32 name = 'git'
33 dirname = '.git'
34 repo_name = 'clone'
35 schemes = (
36 'git', 'git+http', 'git+https', 'git+ssh', 'git+git', 'git+file',
37 )
38 # Prevent the user's environment variables from interfering with pip:
39 # https://github.com/pypa/pip/issues/1130
40 unset_environ = ('GIT_DIR', 'GIT_WORK_TREE')
41 default_arg_rev = 'HEAD'
42
43 def __init__(self, url=None, *args, **kwargs):
44
45 # Works around an apparent Git bug
46 # (see http://article.gmane.org/gmane.comp.version-control.git/146500)
47 if url:
48 scheme, netloc, path, query, fragment = urlsplit(url)
49 if scheme.endswith('file'):
50 initial_slashes = path[:-len(path.lstrip('/'))]
51 newpath = (
52 initial_slashes +
53 urllib_request.url2pathname(path)
54 .replace('\\', '/').lstrip('/')
55 )
56 url = urlunsplit((scheme, netloc, newpath, query, fragment))
57 after_plus = scheme.find('+') + 1
58 url = scheme[:after_plus] + urlunsplit(
59 (scheme[after_plus:], netloc, newpath, query, fragment),
60 )
61
62 super(Git, self).__init__(url, *args, **kwargs)
63
64 def get_base_rev_args(self, rev):
65 return [rev]
66
67 def get_git_version(self):
68 VERSION_PFX = 'git version '
69 version = self.run_command(['version'], show_stdout=False)
70 if version.startswith(VERSION_PFX):
71 version = version[len(VERSION_PFX):].split()[0]
72 else:
73 version = ''
74 # get first 3 positions of the git version becasue
75 # on windows it is x.y.z.windows.t, and this parses as
76 # LegacyVersion which always smaller than a Version.
77 version = '.'.join(version.split('.')[:3])
78 return parse_version(version)
79
80 def export(self, location):
81 """Export the Git repository at the url to the destination location"""
82 if not location.endswith('/'):
83 location = location + '/'
84
85 with TempDirectory(kind="export") as temp_dir:
86 self.unpack(temp_dir.path)
87 self.run_command(
88 ['checkout-index', '-a', '-f', '--prefix', location],
89 show_stdout=False, cwd=temp_dir.path
90 )
91
92 def get_revision_sha(self, dest, rev):
93 """
94 Return a commit hash for the given revision if it names a remote
95 branch or tag. Otherwise, return None.
96
97 Args:
98 dest: the repository directory.
99 rev: the revision name.
100 """
101 # Pass rev to pre-filter the list.
102 output = self.run_command(['show-ref', rev], cwd=dest,
103 show_stdout=False, on_returncode='ignore')
104 refs = {}
105 for line in output.strip().splitlines():
106 try:
107 sha, ref = line.split()
108 except ValueError:
109 # Include the offending line to simplify troubleshooting if
110 # this error ever occurs.
111 raise ValueError('unexpected show-ref line: {!r}'.format(line))
112
113 refs[ref] = sha
114
115 branch_ref = 'refs/remotes/origin/{}'.format(rev)
116 tag_ref = 'refs/tags/{}'.format(rev)
117
118 return refs.get(branch_ref) or refs.get(tag_ref)
119
120 def check_rev_options(self, dest, rev_options):
121 """Check the revision options before checkout.
122
123 Returns a new RevOptions object for the SHA1 of the branch or tag
124 if found.
125
126 Args:
127 rev_options: a RevOptions object.
128 """
129 rev = rev_options.arg_rev
130 sha = self.get_revision_sha(dest, rev)
131
132 if sha is not None:
133 return rev_options.make_new(sha)
134
135 # Do not show a warning for the common case of something that has
136 # the form of a Git commit hash.
137 if not looks_like_hash(rev):
138 logger.warning(
139 "Did not find branch or tag '%s', assuming revision or ref.",
140 rev,
141 )
142 return rev_options
143
144 def is_commit_id_equal(self, dest, name):
145 """
146 Return whether the current commit hash equals the given name.
147
148 Args:
149 dest: the repository directory.
150 name: a string name.
151 """
152 if not name:
153 # Then avoid an unnecessary subprocess call.
154 return False
155
156 return self.get_revision(dest) == name
157
158 def switch(self, dest, url, rev_options):
159 self.run_command(['config', 'remote.origin.url', url], cwd=dest)
160 cmd_args = ['checkout', '-q'] + rev_options.to_args()
161 self.run_command(cmd_args, cwd=dest)
162
163 self.update_submodules(dest)
164
165 def update(self, dest, rev_options):
166 # First fetch changes from the default remote
167 if self.get_git_version() >= parse_version('1.9.0'):
168 # fetch tags in addition to everything else
169 self.run_command(['fetch', '-q', '--tags'], cwd=dest)
170 else:
171 self.run_command(['fetch', '-q'], cwd=dest)
172 # Then reset to wanted revision (maybe even origin/master)
173 rev_options = self.check_rev_options(dest, rev_options)
174 cmd_args = ['reset', '--hard', '-q'] + rev_options.to_args()
175 self.run_command(cmd_args, cwd=dest)
176 #: update submodules
177 self.update_submodules(dest)
178
179 def obtain(self, dest):
180 url, rev = self.get_url_rev()
181 rev_options = self.make_rev_options(rev)
182 if self.check_destination(dest, url, rev_options):
183 rev_display = rev_options.to_display()
184 logger.info(
185 'Cloning %s%s to %s', url, rev_display, display_path(dest),
186 )
187 self.run_command(['clone', '-q', url, dest])
188
189 if rev:
190 rev_options = self.check_rev_options(dest, rev_options)
191 # Only do a checkout if the current commit id doesn't match
192 # the requested revision.
193 if not self.is_commit_id_equal(dest, rev_options.rev):
194 cmd_args = ['fetch', '-q', url] + rev_options.to_args()
195 self.run_command(cmd_args, cwd=dest)
196 self.run_command(
197 ['checkout', '-q', 'FETCH_HEAD'],
198 cwd=dest,
199 )
200
201 #: repo may contain submodules
202 self.update_submodules(dest)
203
204 def get_url(self, location):
205 """Return URL of the first remote encountered."""
206 remotes = self.run_command(
207 ['config', '--get-regexp', r'remote\..*\.url'],
208 show_stdout=False, cwd=location,
209 )
210 remotes = remotes.splitlines()
211 found_remote = remotes[0]
212 for remote in remotes:
213 if remote.startswith('remote.origin.url '):
214 found_remote = remote
215 break
216 url = found_remote.split(' ')[1]
217 return url.strip()
218
219 def get_revision(self, location):
220 current_rev = self.run_command(
221 ['rev-parse', 'HEAD'], show_stdout=False, cwd=location,
222 )
223 return current_rev.strip()
224
225 def _get_subdirectory(self, location):
226 """Return the relative path of setup.py to the git repo root."""
227 # find the repo root
228 git_dir = self.run_command(['rev-parse', '--git-dir'],
229 show_stdout=False, cwd=location).strip()
230 if not os.path.isabs(git_dir):
231 git_dir = os.path.join(location, git_dir)
232 root_dir = os.path.join(git_dir, '..')
233 # find setup.py
234 orig_location = location
235 while not os.path.exists(os.path.join(location, 'setup.py')):
236 last_location = location
237 location = os.path.dirname(location)
238 if location == last_location:
239 # We've traversed up to the root of the filesystem without
240 # finding setup.py
241 logger.warning(
242 "Could not find setup.py for directory %s (tried all "
243 "parent directories)",
244 orig_location,
245 )
246 return None
247 # relative path of setup.py to repo root
248 if samefile(root_dir, location):
249 return None
250 return os.path.relpath(location, root_dir)
251
252 def get_src_requirement(self, dist, location):
253 repo = self.get_url(location)
254 if not repo.lower().startswith('git:'):
255 repo = 'git+' + repo
256 egg_project_name = dist.egg_name().split('-', 1)[0]
257 if not repo:
258 return None
259 current_rev = self.get_revision(location)
260 req = '%s@%s#egg=%s' % (repo, current_rev, egg_project_name)
261 subdirectory = self._get_subdirectory(location)
262 if subdirectory:
263 req += '&subdirectory=' + subdirectory
264 return req
265
266 def get_url_rev(self):
267 """
268 Prefixes stub URLs like 'user@hostname:user/repo.git' with 'ssh://'.
269 That's required because although they use SSH they sometimes doesn't
270 work with a ssh:// scheme (e.g. Github). But we need a scheme for
271 parsing. Hence we remove it again afterwards and return it as a stub.
272 """
273 if '://' not in self.url:
274 assert 'file:' not in self.url
275 self.url = self.url.replace('git+', 'git+ssh://')
276 url, rev = super(Git, self).get_url_rev()
277 url = url.replace('ssh://', '')
278 else:
279 url, rev = super(Git, self).get_url_rev()
280
281 return url, rev
282
283 def update_submodules(self, location):
284 if not os.path.exists(os.path.join(location, '.gitmodules')):
285 return
286 self.run_command(
287 ['submodule', 'update', '--init', '--recursive', '-q'],
288 cwd=location,
289 )
290
291 @classmethod
292 def controls_location(cls, location):
293 if super(Git, cls).controls_location(location):
294 return True
295 try:
296 r = cls().run_command(['rev-parse'],
297 cwd=location,
298 show_stdout=False,
299 on_returncode='ignore')
300 return not r
301 except BadCommand:
302 logger.debug("could not determine if %s is under git control "
303 "because git is not available", location)
304 return False
305
306
307 vcs.register(Git)
308
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/pip/_internal/vcs/git.py b/src/pip/_internal/vcs/git.py
--- a/src/pip/_internal/vcs/git.py
+++ b/src/pip/_internal/vcs/git.py
@@ -191,12 +191,16 @@
# Only do a checkout if the current commit id doesn't match
# the requested revision.
if not self.is_commit_id_equal(dest, rev_options.rev):
- cmd_args = ['fetch', '-q', url] + rev_options.to_args()
- self.run_command(cmd_args, cwd=dest)
- self.run_command(
- ['checkout', '-q', 'FETCH_HEAD'],
- cwd=dest,
- )
+ rev = rev_options.rev
+ # Only fetch the revision if it's a ref
+ if rev.startswith('refs/'):
+ self.run_command(
+ ['fetch', '-q', url] + rev_options.to_args(),
+ cwd=dest,
+ )
+ # Change the revision to the SHA of the ref we fetched
+ rev = 'FETCH_HEAD'
+ self.run_command(['checkout', '-q', rev], cwd=dest)
#: repo may contain submodules
self.update_submodules(dest)
| {"golden_diff": "diff --git a/src/pip/_internal/vcs/git.py b/src/pip/_internal/vcs/git.py\n--- a/src/pip/_internal/vcs/git.py\n+++ b/src/pip/_internal/vcs/git.py\n@@ -191,12 +191,16 @@\n # Only do a checkout if the current commit id doesn't match\n # the requested revision.\n if not self.is_commit_id_equal(dest, rev_options.rev):\n- cmd_args = ['fetch', '-q', url] + rev_options.to_args()\n- self.run_command(cmd_args, cwd=dest)\n- self.run_command(\n- ['checkout', '-q', 'FETCH_HEAD'],\n- cwd=dest,\n- )\n+ rev = rev_options.rev\n+ # Only fetch the revision if it's a ref\n+ if rev.startswith('refs/'):\n+ self.run_command(\n+ ['fetch', '-q', url] + rev_options.to_args(),\n+ cwd=dest,\n+ )\n+ # Change the revision to the SHA of the ref we fetched\n+ rev = 'FETCH_HEAD'\n+ self.run_command(['checkout', '-q', rev], cwd=dest)\n \n #: repo may contain submodules\n self.update_submodules(dest)\n", "issue": "Pip fails to install package using git and short commit hash \n* Pip version: 10.0.0b1\r\n* Python version: 3.5.2\r\n* Operating system: Ubuntu 16.04\r\n\r\n### Description:\r\n\r\nInstalling a package from a git repository specifying a commit hash using 7 characters.\r\n`pip` shows an error and do not install the package.\r\nI expect `pip` to install the package using the specified commit.\r\n \r\n### What I've run:\r\n\r\n```\r\npip install git+git://git.myproject.org/MyProject@0abcdef\r\nCollecting git+git://git.myproject.org/MyProject@0abcdef\r\n Cloning git://git.myproject.org/MyProject (to revision 0abcdef) to /tmp/pip-req-build-xxxxxxxx\r\n Did not find branch or tag '0abcdef', assuming revision or ref.\r\nfatal: Couldn't find remote ref 0abcdef\r\nCommand \"git fetch -q git+git://git.myproject.org/MyProject 0abcdef\" failed with error code 128 in /tmp/pip-req-build-xxxxxxxxx\r\n```\r\n\r\nThe issue seems to be related to this PR https://github.com/pypa/pip/pull/4674\n", "before_files": [{"content": "from __future__ import absolute_import\n\nimport logging\nimport os.path\nimport re\n\nfrom pip._vendor.packaging.version import parse as parse_version\nfrom pip._vendor.six.moves.urllib import parse as urllib_parse\nfrom pip._vendor.six.moves.urllib import request as urllib_request\n\nfrom pip._internal.compat import samefile\nfrom pip._internal.exceptions import BadCommand\nfrom pip._internal.utils.misc import display_path\nfrom pip._internal.utils.temp_dir import TempDirectory\nfrom pip._internal.vcs import VersionControl, vcs\n\nurlsplit = urllib_parse.urlsplit\nurlunsplit = urllib_parse.urlunsplit\n\n\nlogger = logging.getLogger(__name__)\n\n\nHASH_REGEX = re.compile('[a-fA-F0-9]{40}')\n\n\ndef looks_like_hash(sha):\n return bool(HASH_REGEX.match(sha))\n\n\nclass Git(VersionControl):\n name = 'git'\n dirname = '.git'\n repo_name = 'clone'\n schemes = (\n 'git', 'git+http', 'git+https', 'git+ssh', 'git+git', 'git+file',\n )\n # Prevent the user's environment variables from interfering with pip:\n # https://github.com/pypa/pip/issues/1130\n unset_environ = ('GIT_DIR', 'GIT_WORK_TREE')\n default_arg_rev = 'HEAD'\n\n def __init__(self, url=None, *args, **kwargs):\n\n # Works around an apparent Git bug\n # (see http://article.gmane.org/gmane.comp.version-control.git/146500)\n if url:\n scheme, netloc, path, query, fragment = urlsplit(url)\n if scheme.endswith('file'):\n initial_slashes = path[:-len(path.lstrip('/'))]\n newpath = (\n initial_slashes +\n urllib_request.url2pathname(path)\n .replace('\\\\', '/').lstrip('/')\n )\n url = urlunsplit((scheme, netloc, newpath, query, fragment))\n 
after_plus = scheme.find('+') + 1\n url = scheme[:after_plus] + urlunsplit(\n (scheme[after_plus:], netloc, newpath, query, fragment),\n )\n\n super(Git, self).__init__(url, *args, **kwargs)\n\n def get_base_rev_args(self, rev):\n return [rev]\n\n def get_git_version(self):\n VERSION_PFX = 'git version '\n version = self.run_command(['version'], show_stdout=False)\n if version.startswith(VERSION_PFX):\n version = version[len(VERSION_PFX):].split()[0]\n else:\n version = ''\n # get first 3 positions of the git version becasue\n # on windows it is x.y.z.windows.t, and this parses as\n # LegacyVersion which always smaller than a Version.\n version = '.'.join(version.split('.')[:3])\n return parse_version(version)\n\n def export(self, location):\n \"\"\"Export the Git repository at the url to the destination location\"\"\"\n if not location.endswith('/'):\n location = location + '/'\n\n with TempDirectory(kind=\"export\") as temp_dir:\n self.unpack(temp_dir.path)\n self.run_command(\n ['checkout-index', '-a', '-f', '--prefix', location],\n show_stdout=False, cwd=temp_dir.path\n )\n\n def get_revision_sha(self, dest, rev):\n \"\"\"\n Return a commit hash for the given revision if it names a remote\n branch or tag. Otherwise, return None.\n\n Args:\n dest: the repository directory.\n rev: the revision name.\n \"\"\"\n # Pass rev to pre-filter the list.\n output = self.run_command(['show-ref', rev], cwd=dest,\n show_stdout=False, on_returncode='ignore')\n refs = {}\n for line in output.strip().splitlines():\n try:\n sha, ref = line.split()\n except ValueError:\n # Include the offending line to simplify troubleshooting if\n # this error ever occurs.\n raise ValueError('unexpected show-ref line: {!r}'.format(line))\n\n refs[ref] = sha\n\n branch_ref = 'refs/remotes/origin/{}'.format(rev)\n tag_ref = 'refs/tags/{}'.format(rev)\n\n return refs.get(branch_ref) or refs.get(tag_ref)\n\n def check_rev_options(self, dest, rev_options):\n \"\"\"Check the revision options before checkout.\n\n Returns a new RevOptions object for the SHA1 of the branch or tag\n if found.\n\n Args:\n rev_options: a RevOptions object.\n \"\"\"\n rev = rev_options.arg_rev\n sha = self.get_revision_sha(dest, rev)\n\n if sha is not None:\n return rev_options.make_new(sha)\n\n # Do not show a warning for the common case of something that has\n # the form of a Git commit hash.\n if not looks_like_hash(rev):\n logger.warning(\n \"Did not find branch or tag '%s', assuming revision or ref.\",\n rev,\n )\n return rev_options\n\n def is_commit_id_equal(self, dest, name):\n \"\"\"\n Return whether the current commit hash equals the given name.\n\n Args:\n dest: the repository directory.\n name: a string name.\n \"\"\"\n if not name:\n # Then avoid an unnecessary subprocess call.\n return False\n\n return self.get_revision(dest) == name\n\n def switch(self, dest, url, rev_options):\n self.run_command(['config', 'remote.origin.url', url], cwd=dest)\n cmd_args = ['checkout', '-q'] + rev_options.to_args()\n self.run_command(cmd_args, cwd=dest)\n\n self.update_submodules(dest)\n\n def update(self, dest, rev_options):\n # First fetch changes from the default remote\n if self.get_git_version() >= parse_version('1.9.0'):\n # fetch tags in addition to everything else\n self.run_command(['fetch', '-q', '--tags'], cwd=dest)\n else:\n self.run_command(['fetch', '-q'], cwd=dest)\n # Then reset to wanted revision (maybe even origin/master)\n rev_options = self.check_rev_options(dest, rev_options)\n cmd_args = ['reset', '--hard', '-q'] + 
rev_options.to_args()\n self.run_command(cmd_args, cwd=dest)\n #: update submodules\n self.update_submodules(dest)\n\n def obtain(self, dest):\n url, rev = self.get_url_rev()\n rev_options = self.make_rev_options(rev)\n if self.check_destination(dest, url, rev_options):\n rev_display = rev_options.to_display()\n logger.info(\n 'Cloning %s%s to %s', url, rev_display, display_path(dest),\n )\n self.run_command(['clone', '-q', url, dest])\n\n if rev:\n rev_options = self.check_rev_options(dest, rev_options)\n # Only do a checkout if the current commit id doesn't match\n # the requested revision.\n if not self.is_commit_id_equal(dest, rev_options.rev):\n cmd_args = ['fetch', '-q', url] + rev_options.to_args()\n self.run_command(cmd_args, cwd=dest)\n self.run_command(\n ['checkout', '-q', 'FETCH_HEAD'],\n cwd=dest,\n )\n\n #: repo may contain submodules\n self.update_submodules(dest)\n\n def get_url(self, location):\n \"\"\"Return URL of the first remote encountered.\"\"\"\n remotes = self.run_command(\n ['config', '--get-regexp', r'remote\\..*\\.url'],\n show_stdout=False, cwd=location,\n )\n remotes = remotes.splitlines()\n found_remote = remotes[0]\n for remote in remotes:\n if remote.startswith('remote.origin.url '):\n found_remote = remote\n break\n url = found_remote.split(' ')[1]\n return url.strip()\n\n def get_revision(self, location):\n current_rev = self.run_command(\n ['rev-parse', 'HEAD'], show_stdout=False, cwd=location,\n )\n return current_rev.strip()\n\n def _get_subdirectory(self, location):\n \"\"\"Return the relative path of setup.py to the git repo root.\"\"\"\n # find the repo root\n git_dir = self.run_command(['rev-parse', '--git-dir'],\n show_stdout=False, cwd=location).strip()\n if not os.path.isabs(git_dir):\n git_dir = os.path.join(location, git_dir)\n root_dir = os.path.join(git_dir, '..')\n # find setup.py\n orig_location = location\n while not os.path.exists(os.path.join(location, 'setup.py')):\n last_location = location\n location = os.path.dirname(location)\n if location == last_location:\n # We've traversed up to the root of the filesystem without\n # finding setup.py\n logger.warning(\n \"Could not find setup.py for directory %s (tried all \"\n \"parent directories)\",\n orig_location,\n )\n return None\n # relative path of setup.py to repo root\n if samefile(root_dir, location):\n return None\n return os.path.relpath(location, root_dir)\n\n def get_src_requirement(self, dist, location):\n repo = self.get_url(location)\n if not repo.lower().startswith('git:'):\n repo = 'git+' + repo\n egg_project_name = dist.egg_name().split('-', 1)[0]\n if not repo:\n return None\n current_rev = self.get_revision(location)\n req = '%s@%s#egg=%s' % (repo, current_rev, egg_project_name)\n subdirectory = self._get_subdirectory(location)\n if subdirectory:\n req += '&subdirectory=' + subdirectory\n return req\n\n def get_url_rev(self):\n \"\"\"\n Prefixes stub URLs like 'user@hostname:user/repo.git' with 'ssh://'.\n That's required because although they use SSH they sometimes doesn't\n work with a ssh:// scheme (e.g. Github). But we need a scheme for\n parsing. 
Hence we remove it again afterwards and return it as a stub.\n \"\"\"\n if '://' not in self.url:\n assert 'file:' not in self.url\n self.url = self.url.replace('git+', 'git+ssh://')\n url, rev = super(Git, self).get_url_rev()\n url = url.replace('ssh://', '')\n else:\n url, rev = super(Git, self).get_url_rev()\n\n return url, rev\n\n def update_submodules(self, location):\n if not os.path.exists(os.path.join(location, '.gitmodules')):\n return\n self.run_command(\n ['submodule', 'update', '--init', '--recursive', '-q'],\n cwd=location,\n )\n\n @classmethod\n def controls_location(cls, location):\n if super(Git, cls).controls_location(location):\n return True\n try:\n r = cls().run_command(['rev-parse'],\n cwd=location,\n show_stdout=False,\n on_returncode='ignore')\n return not r\n except BadCommand:\n logger.debug(\"could not determine if %s is under git control \"\n \"because git is not available\", location)\n return False\n\n\nvcs.register(Git)\n", "path": "src/pip/_internal/vcs/git.py"}], "after_files": [{"content": "from __future__ import absolute_import\n\nimport logging\nimport os.path\nimport re\n\nfrom pip._vendor.packaging.version import parse as parse_version\nfrom pip._vendor.six.moves.urllib import parse as urllib_parse\nfrom pip._vendor.six.moves.urllib import request as urllib_request\n\nfrom pip._internal.compat import samefile\nfrom pip._internal.exceptions import BadCommand\nfrom pip._internal.utils.misc import display_path\nfrom pip._internal.utils.temp_dir import TempDirectory\nfrom pip._internal.vcs import VersionControl, vcs\n\nurlsplit = urllib_parse.urlsplit\nurlunsplit = urllib_parse.urlunsplit\n\n\nlogger = logging.getLogger(__name__)\n\n\nHASH_REGEX = re.compile('[a-fA-F0-9]{40}')\n\n\ndef looks_like_hash(sha):\n return bool(HASH_REGEX.match(sha))\n\n\nclass Git(VersionControl):\n name = 'git'\n dirname = '.git'\n repo_name = 'clone'\n schemes = (\n 'git', 'git+http', 'git+https', 'git+ssh', 'git+git', 'git+file',\n )\n # Prevent the user's environment variables from interfering with pip:\n # https://github.com/pypa/pip/issues/1130\n unset_environ = ('GIT_DIR', 'GIT_WORK_TREE')\n default_arg_rev = 'HEAD'\n\n def __init__(self, url=None, *args, **kwargs):\n\n # Works around an apparent Git bug\n # (see http://article.gmane.org/gmane.comp.version-control.git/146500)\n if url:\n scheme, netloc, path, query, fragment = urlsplit(url)\n if scheme.endswith('file'):\n initial_slashes = path[:-len(path.lstrip('/'))]\n newpath = (\n initial_slashes +\n urllib_request.url2pathname(path)\n .replace('\\\\', '/').lstrip('/')\n )\n url = urlunsplit((scheme, netloc, newpath, query, fragment))\n after_plus = scheme.find('+') + 1\n url = scheme[:after_plus] + urlunsplit(\n (scheme[after_plus:], netloc, newpath, query, fragment),\n )\n\n super(Git, self).__init__(url, *args, **kwargs)\n\n def get_base_rev_args(self, rev):\n return [rev]\n\n def get_git_version(self):\n VERSION_PFX = 'git version '\n version = self.run_command(['version'], show_stdout=False)\n if version.startswith(VERSION_PFX):\n version = version[len(VERSION_PFX):].split()[0]\n else:\n version = ''\n # get first 3 positions of the git version becasue\n # on windows it is x.y.z.windows.t, and this parses as\n # LegacyVersion which always smaller than a Version.\n version = '.'.join(version.split('.')[:3])\n return parse_version(version)\n\n def export(self, location):\n \"\"\"Export the Git repository at the url to the destination location\"\"\"\n if not location.endswith('/'):\n location = location + '/'\n\n 
with TempDirectory(kind=\"export\") as temp_dir:\n self.unpack(temp_dir.path)\n self.run_command(\n ['checkout-index', '-a', '-f', '--prefix', location],\n show_stdout=False, cwd=temp_dir.path\n )\n\n def get_revision_sha(self, dest, rev):\n \"\"\"\n Return a commit hash for the given revision if it names a remote\n branch or tag. Otherwise, return None.\n\n Args:\n dest: the repository directory.\n rev: the revision name.\n \"\"\"\n # Pass rev to pre-filter the list.\n output = self.run_command(['show-ref', rev], cwd=dest,\n show_stdout=False, on_returncode='ignore')\n refs = {}\n for line in output.strip().splitlines():\n try:\n sha, ref = line.split()\n except ValueError:\n # Include the offending line to simplify troubleshooting if\n # this error ever occurs.\n raise ValueError('unexpected show-ref line: {!r}'.format(line))\n\n refs[ref] = sha\n\n branch_ref = 'refs/remotes/origin/{}'.format(rev)\n tag_ref = 'refs/tags/{}'.format(rev)\n\n return refs.get(branch_ref) or refs.get(tag_ref)\n\n def check_rev_options(self, dest, rev_options):\n \"\"\"Check the revision options before checkout.\n\n Returns a new RevOptions object for the SHA1 of the branch or tag\n if found.\n\n Args:\n rev_options: a RevOptions object.\n \"\"\"\n rev = rev_options.arg_rev\n sha = self.get_revision_sha(dest, rev)\n\n if sha is not None:\n return rev_options.make_new(sha)\n\n # Do not show a warning for the common case of something that has\n # the form of a Git commit hash.\n if not looks_like_hash(rev):\n logger.warning(\n \"Did not find branch or tag '%s', assuming revision or ref.\",\n rev,\n )\n return rev_options\n\n def is_commit_id_equal(self, dest, name):\n \"\"\"\n Return whether the current commit hash equals the given name.\n\n Args:\n dest: the repository directory.\n name: a string name.\n \"\"\"\n if not name:\n # Then avoid an unnecessary subprocess call.\n return False\n\n return self.get_revision(dest) == name\n\n def switch(self, dest, url, rev_options):\n self.run_command(['config', 'remote.origin.url', url], cwd=dest)\n cmd_args = ['checkout', '-q'] + rev_options.to_args()\n self.run_command(cmd_args, cwd=dest)\n\n self.update_submodules(dest)\n\n def update(self, dest, rev_options):\n # First fetch changes from the default remote\n if self.get_git_version() >= parse_version('1.9.0'):\n # fetch tags in addition to everything else\n self.run_command(['fetch', '-q', '--tags'], cwd=dest)\n else:\n self.run_command(['fetch', '-q'], cwd=dest)\n # Then reset to wanted revision (maybe even origin/master)\n rev_options = self.check_rev_options(dest, rev_options)\n cmd_args = ['reset', '--hard', '-q'] + rev_options.to_args()\n self.run_command(cmd_args, cwd=dest)\n #: update submodules\n self.update_submodules(dest)\n\n def obtain(self, dest):\n url, rev = self.get_url_rev()\n rev_options = self.make_rev_options(rev)\n if self.check_destination(dest, url, rev_options):\n rev_display = rev_options.to_display()\n logger.info(\n 'Cloning %s%s to %s', url, rev_display, display_path(dest),\n )\n self.run_command(['clone', '-q', url, dest])\n\n if rev:\n rev_options = self.check_rev_options(dest, rev_options)\n # Only do a checkout if the current commit id doesn't match\n # the requested revision.\n if not self.is_commit_id_equal(dest, rev_options.rev):\n rev = rev_options.rev\n # Only fetch the revision if it's a ref\n if rev.startswith('refs/'):\n self.run_command(\n ['fetch', '-q', url] + rev_options.to_args(),\n cwd=dest,\n )\n # Change the revision to the SHA of the ref we fetched\n rev = 
'FETCH_HEAD'\n self.run_command(['checkout', '-q', rev], cwd=dest)\n\n #: repo may contain submodules\n self.update_submodules(dest)\n\n def get_url(self, location):\n \"\"\"Return URL of the first remote encountered.\"\"\"\n remotes = self.run_command(\n ['config', '--get-regexp', r'remote\\..*\\.url'],\n show_stdout=False, cwd=location,\n )\n remotes = remotes.splitlines()\n found_remote = remotes[0]\n for remote in remotes:\n if remote.startswith('remote.origin.url '):\n found_remote = remote\n break\n url = found_remote.split(' ')[1]\n return url.strip()\n\n def get_revision(self, location):\n current_rev = self.run_command(\n ['rev-parse', 'HEAD'], show_stdout=False, cwd=location,\n )\n return current_rev.strip()\n\n def _get_subdirectory(self, location):\n \"\"\"Return the relative path of setup.py to the git repo root.\"\"\"\n # find the repo root\n git_dir = self.run_command(['rev-parse', '--git-dir'],\n show_stdout=False, cwd=location).strip()\n if not os.path.isabs(git_dir):\n git_dir = os.path.join(location, git_dir)\n root_dir = os.path.join(git_dir, '..')\n # find setup.py\n orig_location = location\n while not os.path.exists(os.path.join(location, 'setup.py')):\n last_location = location\n location = os.path.dirname(location)\n if location == last_location:\n # We've traversed up to the root of the filesystem without\n # finding setup.py\n logger.warning(\n \"Could not find setup.py for directory %s (tried all \"\n \"parent directories)\",\n orig_location,\n )\n return None\n # relative path of setup.py to repo root\n if samefile(root_dir, location):\n return None\n return os.path.relpath(location, root_dir)\n\n def get_src_requirement(self, dist, location):\n repo = self.get_url(location)\n if not repo.lower().startswith('git:'):\n repo = 'git+' + repo\n egg_project_name = dist.egg_name().split('-', 1)[0]\n if not repo:\n return None\n current_rev = self.get_revision(location)\n req = '%s@%s#egg=%s' % (repo, current_rev, egg_project_name)\n subdirectory = self._get_subdirectory(location)\n if subdirectory:\n req += '&subdirectory=' + subdirectory\n return req\n\n def get_url_rev(self):\n \"\"\"\n Prefixes stub URLs like 'user@hostname:user/repo.git' with 'ssh://'.\n That's required because although they use SSH they sometimes doesn't\n work with a ssh:// scheme (e.g. Github). But we need a scheme for\n parsing. Hence we remove it again afterwards and return it as a stub.\n \"\"\"\n if '://' not in self.url:\n assert 'file:' not in self.url\n self.url = self.url.replace('git+', 'git+ssh://')\n url, rev = super(Git, self).get_url_rev()\n url = url.replace('ssh://', '')\n else:\n url, rev = super(Git, self).get_url_rev()\n\n return url, rev\n\n def update_submodules(self, location):\n if not os.path.exists(os.path.join(location, '.gitmodules')):\n return\n self.run_command(\n ['submodule', 'update', '--init', '--recursive', '-q'],\n cwd=location,\n )\n\n @classmethod\n def controls_location(cls, location):\n if super(Git, cls).controls_location(location):\n return True\n try:\n r = cls().run_command(['rev-parse'],\n cwd=location,\n show_stdout=False,\n on_returncode='ignore')\n return not r\n except BadCommand:\n logger.debug(\"could not determine if %s is under git control \"\n \"because git is not available\", location)\n return False\n\n\nvcs.register(Git)\n", "path": "src/pip/_internal/vcs/git.py"}]} | 3,801 | 274 |
gh_patches_debug_21674 | rasdani/github-patches | git_diff | fedora-infra__bodhi-2007 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
bodhi-dequeue-stable dies if any update in the queue is no longer eligible to go stable
QuLogic from Freenode reported today that batched updates didn't go stable at 03:00 UTC like they should have. I confirmed that the cron job ran, but I didn't see any notes about its output. I then ran the command by hand and received this output:
```
[bowlofeggs@bodhi-backend01 ~][PROD]$ sudo -u apache /usr/bin/bodhi-dequeue-stable
No handlers could be found for logger "bodhi.server"
This update has not yet met the minimum testing requirements defined in the <a href="https://fedoraproject.org/wiki/Package_update_acceptance_criteria">Package Update Acceptance Criteria</a>
```
The [```dequeue_stable()```](https://github.com/fedora-infra/bodhi/blob/3.0.0/bodhi/server/scripts/dequeue_stable.py#L28-L46) function runs a large transaction with only a single try/except. It seems that some update in the queue no longer meets testing requirements (probably due to receiving a -1 karma after going to batched) and is raising an Exception when the tool attempts to mark it for stable. Since there is only one try/except handler, this causes the whole transaction to be rolled back.
It should be easy to fix this - we just need a try/except around each update.
Thanks to QuLogic from Freenode for reporting this issue to me.
--- END ISSUE ---
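
A rough sketch of the per-update error handling the issue asks for (illustrative only — names such as `models.UpdateRequest` and `update.alias` follow the bodhi source and patch shown later in this problem, and the real fix may differ in detail):

```python
batched = db.query(models.Update).filter_by(request=models.UpdateRequest.batched).all()
for update in batched:
    try:
        update.set_request(db, models.UpdateRequest.stable, u'bodhi')
        db.commit()
    except Exception as e:
        # a single ineligible update should no longer roll back the whole batch
        print('Unable to stabilize {}: {}'.format(update.alias, e))
        db.rollback()
```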
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `bodhi/server/scripts/dequeue_stable.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 # Copyright © 2017 Caleigh Runge-Hottman
3 #
4 # This file is part of Bodhi.
5 #
6 # This program is free software; you can redistribute it and/or
7 # modify it under the terms of the GNU General Public License
8 # as published by the Free Software Foundation; either version 2
9 # of the License, or (at your option) any later version.
10 #
11 # This program is distributed in the hope that it will be useful,
12 # but WITHOUT ANY WARRANTY; without even the implied warranty of
13 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 # GNU General Public License for more details.
15 #
16 # You should have received a copy of the GNU General Public License
17 # along with this program; if not, write to the Free Software
18 # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
19 """This script is responsible for moving all updates with a batched request to a stable request."""
20
21 import sys
22
23 import click
24
25 from bodhi.server import buildsys, config, models, Session, initialize_db
26
27
28 @click.command()
29 @click.version_option(message='%(version)s')
30 def dequeue_stable():
31 """Convert all batched requests to stable requests."""
32 initialize_db(config.config)
33 buildsys.setup_buildsystem(config.config)
34 db = Session()
35
36 try:
37 batched = db.query(models.Update).filter_by(request=models.UpdateRequest.batched).all()
38 for update in batched:
39 update.set_request(db, models.UpdateRequest.stable, u'bodhi')
40 db.commit()
41
42 except Exception as e:
43 print(str(e))
44 db.rollback()
45 Session.remove()
46 sys.exit(1)
47
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/bodhi/server/scripts/dequeue_stable.py b/bodhi/server/scripts/dequeue_stable.py
--- a/bodhi/server/scripts/dequeue_stable.py
+++ b/bodhi/server/scripts/dequeue_stable.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-# Copyright © 2017 Caleigh Runge-Hottman
+# Copyright © 2017 Caleigh Runge-Hottman and Red Hat, Inc.
#
# This file is part of Bodhi.
#
@@ -36,11 +36,17 @@
try:
batched = db.query(models.Update).filter_by(request=models.UpdateRequest.batched).all()
for update in batched:
- update.set_request(db, models.UpdateRequest.stable, u'bodhi')
- db.commit()
-
+ try:
+ update.set_request(db, models.UpdateRequest.stable, u'bodhi')
+ db.commit()
+ except Exception as e:
+ print('Unable to stabilize {}: {}'.format(update.alias, str(e)))
+ db.rollback()
+ msg = u"Bodhi is unable to request this update for stabilization: {}"
+ update.comment(db, msg.format(str(e)), author=u'bodhi')
+ db.commit()
except Exception as e:
print(str(e))
- db.rollback()
- Session.remove()
sys.exit(1)
+ finally:
+ Session.remove()
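
One detail of the patch worth calling out: `Session.remove()` moves into a `finally` block, the usual cleanup pattern for a scoped session, so the connection is released whether or not an individual update fails. A generic, self-contained sketch of that shape (hypothetical per-item method, not bodhi code):

```python
from sqlalchemy.orm import scoped_session, sessionmaker

Session = scoped_session(sessionmaker())  # stand-in for bodhi.server.Session


def stabilize(updates):
    db = Session()
    try:
        for update in updates:
            try:
                update.set_request(db)  # hypothetical per-item work
                db.commit()
            except Exception:
                db.rollback()  # keep the session usable for the next item
    finally:
        Session.remove()  # always discard the thread-local session
```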
| {"golden_diff": "diff --git a/bodhi/server/scripts/dequeue_stable.py b/bodhi/server/scripts/dequeue_stable.py\n--- a/bodhi/server/scripts/dequeue_stable.py\n+++ b/bodhi/server/scripts/dequeue_stable.py\n@@ -1,5 +1,5 @@\n # -*- coding: utf-8 -*-\n-# Copyright \u00a9 2017 Caleigh Runge-Hottman\n+# Copyright \u00a9 2017 Caleigh Runge-Hottman and Red Hat, Inc.\n #\n # This file is part of Bodhi.\n #\n@@ -36,11 +36,17 @@\n try:\n batched = db.query(models.Update).filter_by(request=models.UpdateRequest.batched).all()\n for update in batched:\n- update.set_request(db, models.UpdateRequest.stable, u'bodhi')\n- db.commit()\n-\n+ try:\n+ update.set_request(db, models.UpdateRequest.stable, u'bodhi')\n+ db.commit()\n+ except Exception as e:\n+ print('Unable to stabilize {}: {}'.format(update.alias, str(e)))\n+ db.rollback()\n+ msg = u\"Bodhi is unable to request this update for stabilization: {}\"\n+ update.comment(db, msg.format(str(e)), author=u'bodhi')\n+ db.commit()\n except Exception as e:\n print(str(e))\n- db.rollback()\n- Session.remove()\n sys.exit(1)\n+ finally:\n+ Session.remove()\n", "issue": "bodhi-dequqe-stable dies if any update in the queue is no longer eligible to go stable\nQuLogic from Freenode reported today that batched updates didn't go stable at 03:00 UTC like they should have. I confirmed that the cron job ran, but I didn't see any notes about its output. I then ran the command by hand and received this output:\r\n\r\n```\r\n[bowlofeggs@bodhi-backend01 ~][PROD]$ sudo -u apache /usr/bin/bodhi-dequeue-stable\r\nNo handlers could be found for logger \"bodhi.server\"\r\nThis update has not yet met the minimum testing requirements defined in the <a href=\"https://fedoraproject.org/wiki/Package_update_acceptance_criteria\">Package Update Acceptance Criteria</a>\r\n```\r\n\r\nThe [```dequeue_stable()```](https://github.com/fedora-infra/bodhi/blob/3.0.0/bodhi/server/scripts/dequeue_stable.py#L28-L46) function runs a large transaction with only a single try/except. It seems that some update in the queue no longer meets testing requirements (probably due to receiving a -1 karma after going to batched) and is raising an Exception when the tool attempts to mark it for stable. Since there is only one try/except handler, this causes the whole transaction to be rolled back.\r\n\r\nIt should be easy to fix this - we just need a try/except around each update.\r\n\r\nThanks to QuLogic from Freenode for reporting this issue to me.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# Copyright \u00a9 2017 Caleigh Runge-Hottman\n#\n# This file is part of Bodhi.\n#\n# This program is free software; you can redistribute it and/or\n# modify it under the terms of the GNU General Public License\n# as published by the Free Software Foundation; either version 2\n# of the License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software\n# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.\n\"\"\"This script is responsible for moving all updates with a batched request to a stable request.\"\"\"\n\nimport sys\n\nimport click\n\nfrom bodhi.server import buildsys, config, models, Session, initialize_db\n\n\[email protected]()\[email protected]_option(message='%(version)s')\ndef dequeue_stable():\n \"\"\"Convert all batched requests to stable requests.\"\"\"\n initialize_db(config.config)\n buildsys.setup_buildsystem(config.config)\n db = Session()\n\n try:\n batched = db.query(models.Update).filter_by(request=models.UpdateRequest.batched).all()\n for update in batched:\n update.set_request(db, models.UpdateRequest.stable, u'bodhi')\n db.commit()\n\n except Exception as e:\n print(str(e))\n db.rollback()\n Session.remove()\n sys.exit(1)\n", "path": "bodhi/server/scripts/dequeue_stable.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n# Copyright \u00a9 2017 Caleigh Runge-Hottman and Red Hat, Inc.\n#\n# This file is part of Bodhi.\n#\n# This program is free software; you can redistribute it and/or\n# modify it under the terms of the GNU General Public License\n# as published by the Free Software Foundation; either version 2\n# of the License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software\n# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.\n\"\"\"This script is responsible for moving all updates with a batched request to a stable request.\"\"\"\n\nimport sys\n\nimport click\n\nfrom bodhi.server import buildsys, config, models, Session, initialize_db\n\n\[email protected]()\[email protected]_option(message='%(version)s')\ndef dequeue_stable():\n \"\"\"Convert all batched requests to stable requests.\"\"\"\n initialize_db(config.config)\n buildsys.setup_buildsystem(config.config)\n db = Session()\n\n try:\n batched = db.query(models.Update).filter_by(request=models.UpdateRequest.batched).all()\n for update in batched:\n try:\n update.set_request(db, models.UpdateRequest.stable, u'bodhi')\n db.commit()\n except Exception as e:\n print('Unable to stabilize {}: {}'.format(update.alias, str(e)))\n db.rollback()\n msg = u\"Bodhi is unable to request this update for stabilization: {}\"\n update.comment(db, msg.format(str(e)), author=u'bodhi')\n db.commit()\n except Exception as e:\n print(str(e))\n sys.exit(1)\n finally:\n Session.remove()\n", "path": "bodhi/server/scripts/dequeue_stable.py"}]} | 1,057 | 319 |
gh_patches_debug_57022 | rasdani/github-patches | git_diff | astronomer__astro-sdk-1401 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Doc: Cross link to API reference page from Operators page
Currently there is no way to jump from https://astro-sdk-python.readthedocs.io/en/stable/astro/sql/operators/get_value_list.html (and the other operators listed on https://astro-sdk-python.readthedocs.io/en/stable/guides/operators.html) to the corresponding function/operator definitions at https://astro-sdk-python.readthedocs.io/en/stable/autoapi/index.html
We should cross-link them using https://www.sphinx-doc.org/en/master/usage/restructuredtext/domains.html#cross-referencing-syntax
--- END ISSUE ---
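
For reference, the Sphinx cross-references the issue asks for look roughly like this in the operator pages' reST source (the exact object paths are assumptions based on the imports shown below, not verified against the built docs):

```rst
See :py:func:`astro.sql.get_value_list` and
:py:class:`astro.sql.operators.load_file.LoadFileOperator` for the generated API reference.
```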
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `python-sdk/src/astro/sql/__init__.py`
Content:
```
1 from airflow.configuration import conf
2 from airflow.decorators.base import get_unique_task_id
3 from airflow.models.xcom_arg import XComArg
4
5 from astro.sql.operators.append import AppendOperator, append
6 from astro.sql.operators.cleanup import CleanupOperator, cleanup
7 from astro.sql.operators.dataframe import DataframeOperator, dataframe
8 from astro.sql.operators.drop import DropTableOperator, drop_table
9 from astro.sql.operators.export_file import ExportFileOperator, export_file
10 from astro.sql.operators.load_file import LoadFileOperator, load_file
11 from astro.sql.operators.merge import MergeOperator, merge
12 from astro.sql.operators.raw_sql import RawSQLOperator, run_raw_sql
13 from astro.sql.operators.transform import TransformOperator, transform, transform_file
14 from astro.table import Metadata, Table
15
16 __all__ = [
17 "AppendOperator",
18 "append",
19 "CleanupOperator",
20 "cleanup",
21 "DataframeOperator",
22 "dataframe",
23 "DropTableOperator",
24 "drop_table",
25 "ExportFileOperator",
26 "export_file",
27 "LoadFileOperator",
28 "load_file",
29 "MergeOperator",
30 "merge",
31 "Metadata",
32 "run_raw_sql",
33 "Table",
34 "TransformOperator",
35 "transform_file",
36 "transform",
37 ]
38
39
40 def get_value_list(sql: str, conn_id: str, **kwargs) -> XComArg:
41 """
42 Execute a sql statement and return the result.
43 By default, the response size is less than equal to value of ``max_map_length`` conf.
44 You can call a callable handler to alter the response by default it call ``fetchall`` on database result set.
45
46
47 :param sql: sql query to execute.
48 If the sql query will return huge number of row then it can overload the XCOM.
49 also, If you are using output of this method to expand a task using dynamic task map then
50 it can create lots of parallel task. So it is advisable to limit your sql query statement.
51 :param conn_id: Airflow connection id. This connection id will be used to identify the database client
52 and connect with it at runtime
53 """
54 handler = kwargs.get("handler") or (lambda result_set: result_set.fetchall())
55 max_map_length = int(conf.get(section="core", key="max_map_length"))
56 op_kwargs = {
57 "handler": handler,
58 "response_limit": max_map_length,
59 }
60 task_id = kwargs.get("task_id") or get_unique_task_id(
61 "get_value_list", dag=kwargs.get("dag"), task_group=kwargs.get("task_group")
62 )
63 kwargs.update({"task_id": task_id})
64 return RawSQLOperator(
65 sql=sql, conn_id=conn_id, op_kwargs=op_kwargs, python_callable=(lambda *args: None), **kwargs
66 ).output
67
```
--- END FILES ---
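
To make the docstring above more concrete, here is a hedged usage sketch of `get_value_list` with dynamic task mapping. The DAG id, table, and connection id are invented, and the decorator API shown assumes a recent Airflow (>= 2.4, where `schedule=` is accepted):

```python
from datetime import datetime

from airflow.decorators import dag, task

from astro.sql import get_value_list


@dag(start_date=datetime(2023, 1, 1), schedule=None, catchup=False)
def value_list_example():
    @task
    def process(row):
        # each mapped task instance receives one row from the result set
        print(row)

    rows = get_value_list(sql="SELECT id FROM my_table LIMIT 5", conn_id="my_db")
    process.expand(row=rows)


value_list_example()
```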
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/python-sdk/src/astro/sql/__init__.py b/python-sdk/src/astro/sql/__init__.py
--- a/python-sdk/src/astro/sql/__init__.py
+++ b/python-sdk/src/astro/sql/__init__.py
@@ -24,6 +24,7 @@
"drop_table",
"ExportFileOperator",
"export_file",
+ "get_value_list",
"LoadFileOperator",
"load_file",
"MergeOperator",
| {"golden_diff": "diff --git a/python-sdk/src/astro/sql/__init__.py b/python-sdk/src/astro/sql/__init__.py\n--- a/python-sdk/src/astro/sql/__init__.py\n+++ b/python-sdk/src/astro/sql/__init__.py\n@@ -24,6 +24,7 @@\n \"drop_table\",\n \"ExportFileOperator\",\n \"export_file\",\n+ \"get_value_list\",\n \"LoadFileOperator\",\n \"load_file\",\n \"MergeOperator\",\n", "issue": "Doc: Cross link to API reference page from Operators page\nCurrently there is no way to jump to Func/Operator definition from https://astro-sdk-python.readthedocs.io/en/stable/astro/sql/operators/get_value_list.html (and other operators listed on https://astro-sdk-python.readthedocs.io/en/stable/guides/operators.html) to their definition https://astro-sdk-python.readthedocs.io/en/stable/autoapi/index.html\r\n\r\nWe should cross-link them using https://www.sphinx-doc.org/en/master/usage/restructuredtext/domains.html#cross-referencing-syntax\n", "before_files": [{"content": "from airflow.configuration import conf\nfrom airflow.decorators.base import get_unique_task_id\nfrom airflow.models.xcom_arg import XComArg\n\nfrom astro.sql.operators.append import AppendOperator, append\nfrom astro.sql.operators.cleanup import CleanupOperator, cleanup\nfrom astro.sql.operators.dataframe import DataframeOperator, dataframe\nfrom astro.sql.operators.drop import DropTableOperator, drop_table\nfrom astro.sql.operators.export_file import ExportFileOperator, export_file\nfrom astro.sql.operators.load_file import LoadFileOperator, load_file\nfrom astro.sql.operators.merge import MergeOperator, merge\nfrom astro.sql.operators.raw_sql import RawSQLOperator, run_raw_sql\nfrom astro.sql.operators.transform import TransformOperator, transform, transform_file\nfrom astro.table import Metadata, Table\n\n__all__ = [\n \"AppendOperator\",\n \"append\",\n \"CleanupOperator\",\n \"cleanup\",\n \"DataframeOperator\",\n \"dataframe\",\n \"DropTableOperator\",\n \"drop_table\",\n \"ExportFileOperator\",\n \"export_file\",\n \"LoadFileOperator\",\n \"load_file\",\n \"MergeOperator\",\n \"merge\",\n \"Metadata\",\n \"run_raw_sql\",\n \"Table\",\n \"TransformOperator\",\n \"transform_file\",\n \"transform\",\n]\n\n\ndef get_value_list(sql: str, conn_id: str, **kwargs) -> XComArg:\n \"\"\"\n Execute a sql statement and return the result.\n By default, the response size is less than equal to value of ``max_map_length`` conf.\n You can call a callable handler to alter the response by default it call ``fetchall`` on database result set.\n\n\n :param sql: sql query to execute.\n If the sql query will return huge number of row then it can overload the XCOM.\n also, If you are using output of this method to expand a task using dynamic task map then\n it can create lots of parallel task. So it is advisable to limit your sql query statement.\n :param conn_id: Airflow connection id. 
This connection id will be used to identify the database client\n and connect with it at runtime\n \"\"\"\n handler = kwargs.get(\"handler\") or (lambda result_set: result_set.fetchall())\n max_map_length = int(conf.get(section=\"core\", key=\"max_map_length\"))\n op_kwargs = {\n \"handler\": handler,\n \"response_limit\": max_map_length,\n }\n task_id = kwargs.get(\"task_id\") or get_unique_task_id(\n \"get_value_list\", dag=kwargs.get(\"dag\"), task_group=kwargs.get(\"task_group\")\n )\n kwargs.update({\"task_id\": task_id})\n return RawSQLOperator(\n sql=sql, conn_id=conn_id, op_kwargs=op_kwargs, python_callable=(lambda *args: None), **kwargs\n ).output\n", "path": "python-sdk/src/astro/sql/__init__.py"}], "after_files": [{"content": "from airflow.configuration import conf\nfrom airflow.decorators.base import get_unique_task_id\nfrom airflow.models.xcom_arg import XComArg\n\nfrom astro.sql.operators.append import AppendOperator, append\nfrom astro.sql.operators.cleanup import CleanupOperator, cleanup\nfrom astro.sql.operators.dataframe import DataframeOperator, dataframe\nfrom astro.sql.operators.drop import DropTableOperator, drop_table\nfrom astro.sql.operators.export_file import ExportFileOperator, export_file\nfrom astro.sql.operators.load_file import LoadFileOperator, load_file\nfrom astro.sql.operators.merge import MergeOperator, merge\nfrom astro.sql.operators.raw_sql import RawSQLOperator, run_raw_sql\nfrom astro.sql.operators.transform import TransformOperator, transform, transform_file\nfrom astro.table import Metadata, Table\n\n__all__ = [\n \"AppendOperator\",\n \"append\",\n \"CleanupOperator\",\n \"cleanup\",\n \"DataframeOperator\",\n \"dataframe\",\n \"DropTableOperator\",\n \"drop_table\",\n \"ExportFileOperator\",\n \"export_file\",\n \"get_value_list\",\n \"LoadFileOperator\",\n \"load_file\",\n \"MergeOperator\",\n \"merge\",\n \"Metadata\",\n \"run_raw_sql\",\n \"Table\",\n \"TransformOperator\",\n \"transform_file\",\n \"transform\",\n]\n\n\ndef get_value_list(sql: str, conn_id: str, **kwargs) -> XComArg:\n \"\"\"\n Execute a sql statement and return the result.\n By default, the response size is less than equal to value of ``max_map_length`` conf.\n You can call a callable handler to alter the response by default it call ``fetchall`` on database result set.\n\n\n :param sql: sql query to execute.\n If the sql query will return huge number of row then it can overload the XCOM.\n also, If you are using output of this method to expand a task using dynamic task map then\n it can create lots of parallel task. So it is advisable to limit your sql query statement.\n :param conn_id: Airflow connection id. This connection id will be used to identify the database client\n and connect with it at runtime\n \"\"\"\n handler = kwargs.get(\"handler\") or (lambda result_set: result_set.fetchall())\n max_map_length = int(conf.get(section=\"core\", key=\"max_map_length\"))\n op_kwargs = {\n \"handler\": handler,\n \"response_limit\": max_map_length,\n }\n task_id = kwargs.get(\"task_id\") or get_unique_task_id(\n \"get_value_list\", dag=kwargs.get(\"dag\"), task_group=kwargs.get(\"task_group\")\n )\n kwargs.update({\"task_id\": task_id})\n return RawSQLOperator(\n sql=sql, conn_id=conn_id, op_kwargs=op_kwargs, python_callable=(lambda *args: None), **kwargs\n ).output\n", "path": "python-sdk/src/astro/sql/__init__.py"}]} | 1,109 | 103 |
gh_patches_debug_5232 | rasdani/github-patches | git_diff | ckan__ckan-5937 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
sqlalchemy setting "pool_pre_ping"
**CKAN version**
Introduced in commit https://github.com/ckan/ckan/commit/ae9eb1360028a77b35506716eed2768349827806 which is related at least to 2.9.0, 2.9.1, 2.9.2.
**Describe the bug**
This is the ckan code:
```python
# Enable pessimistic disconnect handling (added in SQLAlchemy 1.2)
# to eliminate database errors due to stale pooled connections
config.setdefault('pool_pre_ping', True)
# Initialize SQLAlchemy
engine = sqlalchemy.engine_from_config(config)
```
This is the sqlalchemy function:
```python
def engine_from_config(configuration, prefix="sqlalchemy.", **kwargs):
```
I guess the `pool_pre_ping` config is not taken into account by sqlalchemy since it's missing the prefix.
--- END ISSUE ---
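
A minimal reproduction of the prefix behaviour described above (the SQLite URL is only there to keep the snippet self-contained):

```python
import sqlalchemy

conf = {
    "sqlalchemy.url": "sqlite://",
    "pool_pre_ping": True,              # silently ignored: engine_from_config only reads prefixed keys
    "sqlalchemy.pool_pre_ping": True,   # this spelling is what actually reaches create_engine()
}

engine = sqlalchemy.engine_from_config(conf)
```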
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ckan/config/environment.py`
Content:
```
1 # encoding: utf-8
2
3 '''CKAN environment configuration'''
4 import os
5 import logging
6 import warnings
7 import pytz
8
9 import six
10 import sqlalchemy
11
12 from six.moves.urllib.parse import urlparse
13
14 import ckan.model as model
15 import ckan.plugins as p
16 import ckan.lib.plugins as lib_plugins
17 import ckan.lib.helpers as helpers
18 import ckan.lib.app_globals as app_globals
19 from ckan.lib.redis import is_redis_available
20 import ckan.lib.render as render
21 import ckan.lib.search as search
22 import ckan.logic as logic
23 import ckan.authz as authz
24 import ckan.lib.jinja_extensions as jinja_extensions
25 from ckan.lib.webassets_tools import webassets_init
26 from ckan.lib.i18n import build_js_translations
27
28 from ckan.common import _, ungettext, config
29 from ckan.exceptions import CkanConfigurationException
30
31 if six.PY2:
32 from pylons import config as pylons_config
33
34
35 log = logging.getLogger(__name__)
36
37
38 # Suppress benign warning 'Unbuilt egg for setuptools'
39 warnings.simplefilter('ignore', UserWarning)
40
41
42 def load_environment(conf):
43 """
44 Configure the Pylons environment via the ``pylons.config`` object. This
45 code should only need to be run once.
46 """
47 if six.PY2:
48 # this must be run at a time when the env is semi-setup, thus inlined
49 # here. Required by the deliverance plugin and iATI
50 from pylons.wsgiapp import PylonsApp
51 import pkg_resources
52 find_controller_generic = getattr(
53 PylonsApp.find_controller,
54 '_old_find_controller',
55 PylonsApp.find_controller)
56
57 # This is from pylons 1.0 source, will monkey-patch into 0.9.7
58 def find_controller(self, controller):
59 if controller in self.controller_classes:
60 return self.controller_classes[controller]
61 # Check to see if its a dotted name
62 if '.' in controller or ':' in controller:
63 ep = pkg_resources.EntryPoint.parse('x={0}'.format(controller))
64
65 if hasattr(ep, 'resolve'):
66 # setuptools >= 10.2
67 mycontroller = ep.resolve()
68 else:
69 # setuptools >= 11.3
70 mycontroller = ep.load(False)
71
72 self.controller_classes[controller] = mycontroller
73 return mycontroller
74 return find_controller_generic(self, controller)
75 find_controller._old_find_controller = find_controller_generic
76 PylonsApp.find_controller = find_controller
77
78 os.environ['CKAN_CONFIG'] = conf['__file__']
79
80 # Pylons paths
81 root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
82
83 valid_base_public_folder_names = ['public']
84 static_files = conf.get('ckan.base_public_folder', 'public')
85 conf['ckan.base_public_folder'] = static_files
86
87 if static_files not in valid_base_public_folder_names:
88 raise CkanConfigurationException(
89 'You provided an invalid value for ckan.base_public_folder. '
90 'Possible values are: "public".'
91 )
92
93 log.info('Loading static files from %s' % static_files)
94 paths = dict(root=root,
95 controllers=os.path.join(root, 'controllers'),
96 static_files=os.path.join(root, static_files),
97 templates=[])
98
99 # Initialize main CKAN config object
100 config.update(conf)
101
102 if six.PY2:
103 # Initialize Pylons own config object
104 pylons_config.init_app(
105 conf['global_conf'], conf, package='ckan', paths=paths)
106
107 # Update the main CKAN config object with the Pylons specific stuff,
108 # as it is quite hard to keep them separated. This should be removed
109 # once Pylons support is dropped
110 config.update(pylons_config)
111
112 # Setup the SQLAlchemy database engine
113 # Suppress a couple of sqlalchemy warnings
114 msgs = ['^Unicode type received non-unicode bind param value',
115 "^Did not recognize type 'BIGINT' of column 'size'",
116 "^Did not recognize type 'tsvector' of column 'search_vector'"
117 ]
118 for msg in msgs:
119 warnings.filterwarnings('ignore', msg, sqlalchemy.exc.SAWarning)
120
121 # load all CKAN plugins
122 p.load_all()
123
124 # Check Redis availability
125 if not is_redis_available():
126 log.critical('Could not connect to Redis.')
127
128 app_globals.reset()
129
130 # issue #3260: remove idle transaction
131 # Session that was used for getting all config params nor committed,
132 # neither removed and we have idle connection as result
133 model.Session.commit()
134
135 # Build JavaScript translations. Must be done after plugins have
136 # been loaded.
137 build_js_translations()
138
139
140 # A mapping of config settings that can be overridden by env vars.
141 # Note: Do not remove the following lines, they are used in the docs
142 # Start CONFIG_FROM_ENV_VARS
143 CONFIG_FROM_ENV_VARS = {
144 'sqlalchemy.url': 'CKAN_SQLALCHEMY_URL',
145 'ckan.datastore.write_url': 'CKAN_DATASTORE_WRITE_URL',
146 'ckan.datastore.read_url': 'CKAN_DATASTORE_READ_URL',
147 'ckan.redis.url': 'CKAN_REDIS_URL',
148 'solr_url': 'CKAN_SOLR_URL',
149 'solr_user': 'CKAN_SOLR_USER',
150 'solr_password': 'CKAN_SOLR_PASSWORD',
151 'ckan.site_id': 'CKAN_SITE_ID',
152 'ckan.site_url': 'CKAN_SITE_URL',
153 'ckan.storage_path': 'CKAN_STORAGE_PATH',
154 'ckan.datapusher.url': 'CKAN_DATAPUSHER_URL',
155 'smtp.server': 'CKAN_SMTP_SERVER',
156 'smtp.starttls': 'CKAN_SMTP_STARTTLS',
157 'smtp.user': 'CKAN_SMTP_USER',
158 'smtp.password': 'CKAN_SMTP_PASSWORD',
159 'smtp.mail_from': 'CKAN_SMTP_MAIL_FROM',
160 'ckan.max_resource_size': 'CKAN_MAX_UPLOAD_SIZE_MB'
161 }
162 # End CONFIG_FROM_ENV_VARS
163
164
165 def update_config():
166 ''' This code needs to be run when the config is changed to take those
167 changes into account. It is called whenever a plugin is loaded as the
168 plugin might have changed the config values (for instance it might
169 change ckan.site_url) '''
170
171 webassets_init()
172
173 for plugin in p.PluginImplementations(p.IConfigurer):
174 # must do update in place as this does not work:
175 # config = plugin.update_config(config)
176 plugin.update_config(config)
177
178 # Set whitelisted env vars on config object
179 # This is set up before globals are initialized
180
181 ckan_db = os.environ.get('CKAN_DB', None)
182 if ckan_db:
183 msg = 'Setting CKAN_DB as an env var is deprecated and will be' \
184 ' removed in a future release. Use CKAN_SQLALCHEMY_URL instead.'
185 log.warn(msg)
186 config['sqlalchemy.url'] = ckan_db
187
188 for option in CONFIG_FROM_ENV_VARS:
189 from_env = os.environ.get(CONFIG_FROM_ENV_VARS[option], None)
190 if from_env:
191 config[option] = from_env
192
193 root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
194
195 site_url = config.get('ckan.site_url', '')
196 if not site_url:
197 raise RuntimeError(
198 'ckan.site_url is not configured and it must have a value.'
199 ' Please amend your .ini file.')
200 if not site_url.lower().startswith('http'):
201 raise RuntimeError(
202 'ckan.site_url should be a full URL, including the schema '
203 '(http or https)')
204
205 display_timezone = config.get('ckan.display_timezone', '')
206 if (display_timezone and
207 display_timezone != 'server' and
208 display_timezone not in pytz.all_timezones):
209 raise CkanConfigurationException(
210 "ckan.display_timezone is not 'server' or a valid timezone"
211 )
212
213 # Remove backslash from site_url if present
214 config['ckan.site_url'] = config['ckan.site_url'].rstrip('/')
215
216 ckan_host = config['ckan.host'] = urlparse(site_url).netloc
217 if config.get('ckan.site_id') is None:
218 if ':' in ckan_host:
219 ckan_host, port = ckan_host.split(':')
220 assert ckan_host, 'You need to configure ckan.site_url or ' \
221 'ckan.site_id for SOLR search-index rebuild to work.'
222 config['ckan.site_id'] = ckan_host
223
224 # ensure that a favicon has been set
225 favicon = config.get('ckan.favicon', '/base/images/ckan.ico')
226 config['ckan.favicon'] = favicon
227
228 # Init SOLR settings and check if the schema is compatible
229 # from ckan.lib.search import SolrSettings, check_solr_schema_version
230
231 # lib.search is imported here as we need the config enabled and parsed
232 search.SolrSettings.init(config.get('solr_url'),
233 config.get('solr_user'),
234 config.get('solr_password'))
235 search.check_solr_schema_version()
236
237 lib_plugins.reset_package_plugins()
238 lib_plugins.register_package_plugins()
239 lib_plugins.reset_group_plugins()
240 lib_plugins.register_group_plugins()
241
242 # initialise the globals
243 app_globals.app_globals._init()
244
245 helpers.load_plugin_helpers()
246 config['pylons.h'] = helpers.helper_functions
247
248 # Templates and CSS loading from configuration
249 valid_base_templates_folder_names = ['templates']
250 templates = config.get('ckan.base_templates_folder', 'templates')
251 config['ckan.base_templates_folder'] = templates
252
253 if templates not in valid_base_templates_folder_names:
254 raise CkanConfigurationException(
255 'You provided an invalid value for ckan.base_templates_folder. '
256 'Possible values are: "templates".'
257 )
258
259 jinja2_templates_path = os.path.join(root, templates)
260 log.info('Loading templates from %s' % jinja2_templates_path)
261 template_paths = [jinja2_templates_path]
262
263 extra_template_paths = config.get('extra_template_paths', '')
264 if extra_template_paths:
265 # must be first for them to override defaults
266 template_paths = extra_template_paths.split(',') + template_paths
267 config['computed_template_paths'] = template_paths
268
269 # Markdown ignores the logger config, so to get rid of excessive
270 # markdown debug messages in the log, set it to the level of the
271 # root logger.
272 logging.getLogger("MARKDOWN").setLevel(logging.getLogger().level)
273
274 if six.PY2:
275 # Create Jinja2 environment
276 env = jinja_extensions.Environment(
277 **jinja_extensions.get_jinja_env_options())
278 env.install_gettext_callables(_, ungettext, newstyle=True)
279 # custom filters
280 env.policies['ext.i18n.trimmed'] = True
281 env.filters['empty_and_escape'] = jinja_extensions.empty_and_escape
282 config['pylons.app_globals'].jinja_env = env
283
284 # CONFIGURATION OPTIONS HERE (note: all config options will override
285 # any Pylons config options)
286
287 # Enable pessimistic disconnect handling (added in SQLAlchemy 1.2)
288 # to eliminate database errors due to stale pooled connections
289 config.setdefault('pool_pre_ping', True)
290
291 # Initialize SQLAlchemy
292 engine = sqlalchemy.engine_from_config(config)
293 model.init_model(engine)
294
295 for plugin in p.PluginImplementations(p.IConfigurable):
296 plugin.configure(config)
297
298 # reset the template cache - we do this here so that when we load the
299 # environment it is clean
300 render.reset_template_info_cache()
301
302 # clear other caches
303 logic.clear_actions_cache()
304 logic.clear_validators_cache()
305 authz.clear_auth_functions_cache()
306
307 # Here we create the site user if they are not already in the database
308 try:
309 logic.get_action('get_site_user')({'ignore_auth': True}, None)
310 except (sqlalchemy.exc.ProgrammingError, sqlalchemy.exc.OperationalError):
311 # (ProgrammingError for Postgres, OperationalError for SQLite)
312 # The database is not initialised. This is a bit dirty. This occurs
313 # when running tests.
314 pass
315 except sqlalchemy.exc.InternalError:
316 # The database is not initialised. Travis hits this
317 pass
318
319 # Close current session and open database connections to ensure a clean
320 # clean environment even if an error occurs later on
321 model.Session.remove()
322 model.Session.bind.dispose()
323
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/ckan/config/environment.py b/ckan/config/environment.py
--- a/ckan/config/environment.py
+++ b/ckan/config/environment.py
@@ -286,7 +286,7 @@
# Enable pessimistic disconnect handling (added in SQLAlchemy 1.2)
# to eliminate database errors due to stale pooled connections
- config.setdefault('pool_pre_ping', True)
+ config.setdefault('sqlalchemy.pool_pre_ping', True)
# Initialize SQLAlchemy
engine = sqlalchemy.engine_from_config(config)
| {"golden_diff": "diff --git a/ckan/config/environment.py b/ckan/config/environment.py\n--- a/ckan/config/environment.py\n+++ b/ckan/config/environment.py\n@@ -286,7 +286,7 @@\n \n # Enable pessimistic disconnect handling (added in SQLAlchemy 1.2)\n # to eliminate database errors due to stale pooled connections\n- config.setdefault('pool_pre_ping', True)\n+ config.setdefault('sqlalchemy.pool_pre_ping', True)\n \n # Initialize SQLAlchemy\n engine = sqlalchemy.engine_from_config(config)\n", "issue": "sqlalchemy setting \"pool_pre_ping\"\n**CKAN version**\r\n\r\nIntroduced in commit https://github.com/ckan/ckan/commit/ae9eb1360028a77b35506716eed2768349827806 which is related at least to 2.9.0, 2.9.1, 2.9.2. \r\n\r\n**Describe the bug**\r\n\r\nThis is the ckan code:\r\n```python\r\n # Enable pessimistic disconnect handling (added in SQLAlchemy 1.2)\r\n # to eliminate database errors due to stale pooled connections\r\n config.setdefault('pool_pre_ping', True)\r\n\r\n # Initialize SQLAlchemy\r\n engine = sqlalchemy.engine_from_config(config)\r\n```\r\n\r\nThis is the sqlalchemy function:\r\n```python\r\n def engine_from_config(configuration, prefix=\"sqlalchemy.\", **kwargs):\r\n```\r\n\r\nI guess the `pool_pre_ping` config is not taken into account by sqlalchemy since it's missing the prefix.\r\n\n", "before_files": [{"content": "# encoding: utf-8\n\n'''CKAN environment configuration'''\nimport os\nimport logging\nimport warnings\nimport pytz\n\nimport six\nimport sqlalchemy\n\nfrom six.moves.urllib.parse import urlparse\n\nimport ckan.model as model\nimport ckan.plugins as p\nimport ckan.lib.plugins as lib_plugins\nimport ckan.lib.helpers as helpers\nimport ckan.lib.app_globals as app_globals\nfrom ckan.lib.redis import is_redis_available\nimport ckan.lib.render as render\nimport ckan.lib.search as search\nimport ckan.logic as logic\nimport ckan.authz as authz\nimport ckan.lib.jinja_extensions as jinja_extensions\nfrom ckan.lib.webassets_tools import webassets_init\nfrom ckan.lib.i18n import build_js_translations\n\nfrom ckan.common import _, ungettext, config\nfrom ckan.exceptions import CkanConfigurationException\n\nif six.PY2:\n from pylons import config as pylons_config\n\n\nlog = logging.getLogger(__name__)\n\n\n# Suppress benign warning 'Unbuilt egg for setuptools'\nwarnings.simplefilter('ignore', UserWarning)\n\n\ndef load_environment(conf):\n \"\"\"\n Configure the Pylons environment via the ``pylons.config`` object. This\n code should only need to be run once.\n \"\"\"\n if six.PY2:\n # this must be run at a time when the env is semi-setup, thus inlined\n # here. Required by the deliverance plugin and iATI\n from pylons.wsgiapp import PylonsApp\n import pkg_resources\n find_controller_generic = getattr(\n PylonsApp.find_controller,\n '_old_find_controller',\n PylonsApp.find_controller)\n\n # This is from pylons 1.0 source, will monkey-patch into 0.9.7\n def find_controller(self, controller):\n if controller in self.controller_classes:\n return self.controller_classes[controller]\n # Check to see if its a dotted name\n if '.' 
in controller or ':' in controller:\n ep = pkg_resources.EntryPoint.parse('x={0}'.format(controller))\n\n if hasattr(ep, 'resolve'):\n # setuptools >= 10.2\n mycontroller = ep.resolve()\n else:\n # setuptools >= 11.3\n mycontroller = ep.load(False)\n\n self.controller_classes[controller] = mycontroller\n return mycontroller\n return find_controller_generic(self, controller)\n find_controller._old_find_controller = find_controller_generic\n PylonsApp.find_controller = find_controller\n\n os.environ['CKAN_CONFIG'] = conf['__file__']\n\n # Pylons paths\n root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n\n valid_base_public_folder_names = ['public']\n static_files = conf.get('ckan.base_public_folder', 'public')\n conf['ckan.base_public_folder'] = static_files\n\n if static_files not in valid_base_public_folder_names:\n raise CkanConfigurationException(\n 'You provided an invalid value for ckan.base_public_folder. '\n 'Possible values are: \"public\".'\n )\n\n log.info('Loading static files from %s' % static_files)\n paths = dict(root=root,\n controllers=os.path.join(root, 'controllers'),\n static_files=os.path.join(root, static_files),\n templates=[])\n\n # Initialize main CKAN config object\n config.update(conf)\n\n if six.PY2:\n # Initialize Pylons own config object\n pylons_config.init_app(\n conf['global_conf'], conf, package='ckan', paths=paths)\n\n # Update the main CKAN config object with the Pylons specific stuff,\n # as it is quite hard to keep them separated. This should be removed\n # once Pylons support is dropped\n config.update(pylons_config)\n\n # Setup the SQLAlchemy database engine\n # Suppress a couple of sqlalchemy warnings\n msgs = ['^Unicode type received non-unicode bind param value',\n \"^Did not recognize type 'BIGINT' of column 'size'\",\n \"^Did not recognize type 'tsvector' of column 'search_vector'\"\n ]\n for msg in msgs:\n warnings.filterwarnings('ignore', msg, sqlalchemy.exc.SAWarning)\n\n # load all CKAN plugins\n p.load_all()\n\n # Check Redis availability\n if not is_redis_available():\n log.critical('Could not connect to Redis.')\n\n app_globals.reset()\n\n # issue #3260: remove idle transaction\n # Session that was used for getting all config params nor committed,\n # neither removed and we have idle connection as result\n model.Session.commit()\n\n # Build JavaScript translations. Must be done after plugins have\n # been loaded.\n build_js_translations()\n\n\n# A mapping of config settings that can be overridden by env vars.\n# Note: Do not remove the following lines, they are used in the docs\n# Start CONFIG_FROM_ENV_VARS\nCONFIG_FROM_ENV_VARS = {\n 'sqlalchemy.url': 'CKAN_SQLALCHEMY_URL',\n 'ckan.datastore.write_url': 'CKAN_DATASTORE_WRITE_URL',\n 'ckan.datastore.read_url': 'CKAN_DATASTORE_READ_URL',\n 'ckan.redis.url': 'CKAN_REDIS_URL',\n 'solr_url': 'CKAN_SOLR_URL',\n 'solr_user': 'CKAN_SOLR_USER',\n 'solr_password': 'CKAN_SOLR_PASSWORD',\n 'ckan.site_id': 'CKAN_SITE_ID',\n 'ckan.site_url': 'CKAN_SITE_URL',\n 'ckan.storage_path': 'CKAN_STORAGE_PATH',\n 'ckan.datapusher.url': 'CKAN_DATAPUSHER_URL',\n 'smtp.server': 'CKAN_SMTP_SERVER',\n 'smtp.starttls': 'CKAN_SMTP_STARTTLS',\n 'smtp.user': 'CKAN_SMTP_USER',\n 'smtp.password': 'CKAN_SMTP_PASSWORD',\n 'smtp.mail_from': 'CKAN_SMTP_MAIL_FROM',\n 'ckan.max_resource_size': 'CKAN_MAX_UPLOAD_SIZE_MB'\n}\n# End CONFIG_FROM_ENV_VARS\n\n\ndef update_config():\n ''' This code needs to be run when the config is changed to take those\n changes into account. 
It is called whenever a plugin is loaded as the\n plugin might have changed the config values (for instance it might\n change ckan.site_url) '''\n\n webassets_init()\n\n for plugin in p.PluginImplementations(p.IConfigurer):\n # must do update in place as this does not work:\n # config = plugin.update_config(config)\n plugin.update_config(config)\n\n # Set whitelisted env vars on config object\n # This is set up before globals are initialized\n\n ckan_db = os.environ.get('CKAN_DB', None)\n if ckan_db:\n msg = 'Setting CKAN_DB as an env var is deprecated and will be' \\\n ' removed in a future release. Use CKAN_SQLALCHEMY_URL instead.'\n log.warn(msg)\n config['sqlalchemy.url'] = ckan_db\n\n for option in CONFIG_FROM_ENV_VARS:\n from_env = os.environ.get(CONFIG_FROM_ENV_VARS[option], None)\n if from_env:\n config[option] = from_env\n\n root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n\n site_url = config.get('ckan.site_url', '')\n if not site_url:\n raise RuntimeError(\n 'ckan.site_url is not configured and it must have a value.'\n ' Please amend your .ini file.')\n if not site_url.lower().startswith('http'):\n raise RuntimeError(\n 'ckan.site_url should be a full URL, including the schema '\n '(http or https)')\n\n display_timezone = config.get('ckan.display_timezone', '')\n if (display_timezone and\n display_timezone != 'server' and\n display_timezone not in pytz.all_timezones):\n raise CkanConfigurationException(\n \"ckan.display_timezone is not 'server' or a valid timezone\"\n )\n\n # Remove backslash from site_url if present\n config['ckan.site_url'] = config['ckan.site_url'].rstrip('/')\n\n ckan_host = config['ckan.host'] = urlparse(site_url).netloc\n if config.get('ckan.site_id') is None:\n if ':' in ckan_host:\n ckan_host, port = ckan_host.split(':')\n assert ckan_host, 'You need to configure ckan.site_url or ' \\\n 'ckan.site_id for SOLR search-index rebuild to work.'\n config['ckan.site_id'] = ckan_host\n\n # ensure that a favicon has been set\n favicon = config.get('ckan.favicon', '/base/images/ckan.ico')\n config['ckan.favicon'] = favicon\n\n # Init SOLR settings and check if the schema is compatible\n # from ckan.lib.search import SolrSettings, check_solr_schema_version\n\n # lib.search is imported here as we need the config enabled and parsed\n search.SolrSettings.init(config.get('solr_url'),\n config.get('solr_user'),\n config.get('solr_password'))\n search.check_solr_schema_version()\n\n lib_plugins.reset_package_plugins()\n lib_plugins.register_package_plugins()\n lib_plugins.reset_group_plugins()\n lib_plugins.register_group_plugins()\n\n # initialise the globals\n app_globals.app_globals._init()\n\n helpers.load_plugin_helpers()\n config['pylons.h'] = helpers.helper_functions\n\n # Templates and CSS loading from configuration\n valid_base_templates_folder_names = ['templates']\n templates = config.get('ckan.base_templates_folder', 'templates')\n config['ckan.base_templates_folder'] = templates\n\n if templates not in valid_base_templates_folder_names:\n raise CkanConfigurationException(\n 'You provided an invalid value for ckan.base_templates_folder. 
'\n 'Possible values are: \"templates\".'\n )\n\n jinja2_templates_path = os.path.join(root, templates)\n log.info('Loading templates from %s' % jinja2_templates_path)\n template_paths = [jinja2_templates_path]\n\n extra_template_paths = config.get('extra_template_paths', '')\n if extra_template_paths:\n # must be first for them to override defaults\n template_paths = extra_template_paths.split(',') + template_paths\n config['computed_template_paths'] = template_paths\n\n # Markdown ignores the logger config, so to get rid of excessive\n # markdown debug messages in the log, set it to the level of the\n # root logger.\n logging.getLogger(\"MARKDOWN\").setLevel(logging.getLogger().level)\n\n if six.PY2:\n # Create Jinja2 environment\n env = jinja_extensions.Environment(\n **jinja_extensions.get_jinja_env_options())\n env.install_gettext_callables(_, ungettext, newstyle=True)\n # custom filters\n env.policies['ext.i18n.trimmed'] = True\n env.filters['empty_and_escape'] = jinja_extensions.empty_and_escape\n config['pylons.app_globals'].jinja_env = env\n\n # CONFIGURATION OPTIONS HERE (note: all config options will override\n # any Pylons config options)\n\n # Enable pessimistic disconnect handling (added in SQLAlchemy 1.2)\n # to eliminate database errors due to stale pooled connections\n config.setdefault('pool_pre_ping', True)\n\n # Initialize SQLAlchemy\n engine = sqlalchemy.engine_from_config(config)\n model.init_model(engine)\n\n for plugin in p.PluginImplementations(p.IConfigurable):\n plugin.configure(config)\n\n # reset the template cache - we do this here so that when we load the\n # environment it is clean\n render.reset_template_info_cache()\n\n # clear other caches\n logic.clear_actions_cache()\n logic.clear_validators_cache()\n authz.clear_auth_functions_cache()\n\n # Here we create the site user if they are not already in the database\n try:\n logic.get_action('get_site_user')({'ignore_auth': True}, None)\n except (sqlalchemy.exc.ProgrammingError, sqlalchemy.exc.OperationalError):\n # (ProgrammingError for Postgres, OperationalError for SQLite)\n # The database is not initialised. This is a bit dirty. This occurs\n # when running tests.\n pass\n except sqlalchemy.exc.InternalError:\n # The database is not initialised. 
Travis hits this\n pass\n\n # Close current session and open database connections to ensure a clean\n # clean environment even if an error occurs later on\n model.Session.remove()\n model.Session.bind.dispose()\n", "path": "ckan/config/environment.py"}], "after_files": [{"content": "# encoding: utf-8\n\n'''CKAN environment configuration'''\nimport os\nimport logging\nimport warnings\nimport pytz\n\nimport six\nimport sqlalchemy\n\nfrom six.moves.urllib.parse import urlparse\n\nimport ckan.model as model\nimport ckan.plugins as p\nimport ckan.lib.plugins as lib_plugins\nimport ckan.lib.helpers as helpers\nimport ckan.lib.app_globals as app_globals\nfrom ckan.lib.redis import is_redis_available\nimport ckan.lib.render as render\nimport ckan.lib.search as search\nimport ckan.logic as logic\nimport ckan.authz as authz\nimport ckan.lib.jinja_extensions as jinja_extensions\nfrom ckan.lib.webassets_tools import webassets_init\nfrom ckan.lib.i18n import build_js_translations\n\nfrom ckan.common import _, ungettext, config\nfrom ckan.exceptions import CkanConfigurationException\n\nif six.PY2:\n from pylons import config as pylons_config\n\n\nlog = logging.getLogger(__name__)\n\n\n# Suppress benign warning 'Unbuilt egg for setuptools'\nwarnings.simplefilter('ignore', UserWarning)\n\n\ndef load_environment(conf):\n \"\"\"\n Configure the Pylons environment via the ``pylons.config`` object. This\n code should only need to be run once.\n \"\"\"\n if six.PY2:\n # this must be run at a time when the env is semi-setup, thus inlined\n # here. Required by the deliverance plugin and iATI\n from pylons.wsgiapp import PylonsApp\n import pkg_resources\n find_controller_generic = getattr(\n PylonsApp.find_controller,\n '_old_find_controller',\n PylonsApp.find_controller)\n\n # This is from pylons 1.0 source, will monkey-patch into 0.9.7\n def find_controller(self, controller):\n if controller in self.controller_classes:\n return self.controller_classes[controller]\n # Check to see if its a dotted name\n if '.' in controller or ':' in controller:\n ep = pkg_resources.EntryPoint.parse('x={0}'.format(controller))\n\n if hasattr(ep, 'resolve'):\n # setuptools >= 10.2\n mycontroller = ep.resolve()\n else:\n # setuptools >= 11.3\n mycontroller = ep.load(False)\n\n self.controller_classes[controller] = mycontroller\n return mycontroller\n return find_controller_generic(self, controller)\n find_controller._old_find_controller = find_controller_generic\n PylonsApp.find_controller = find_controller\n\n os.environ['CKAN_CONFIG'] = conf['__file__']\n\n # Pylons paths\n root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n\n valid_base_public_folder_names = ['public']\n static_files = conf.get('ckan.base_public_folder', 'public')\n conf['ckan.base_public_folder'] = static_files\n\n if static_files not in valid_base_public_folder_names:\n raise CkanConfigurationException(\n 'You provided an invalid value for ckan.base_public_folder. '\n 'Possible values are: \"public\".'\n )\n\n log.info('Loading static files from %s' % static_files)\n paths = dict(root=root,\n controllers=os.path.join(root, 'controllers'),\n static_files=os.path.join(root, static_files),\n templates=[])\n\n # Initialize main CKAN config object\n config.update(conf)\n\n if six.PY2:\n # Initialize Pylons own config object\n pylons_config.init_app(\n conf['global_conf'], conf, package='ckan', paths=paths)\n\n # Update the main CKAN config object with the Pylons specific stuff,\n # as it is quite hard to keep them separated. 
This should be removed\n # once Pylons support is dropped\n config.update(pylons_config)\n\n # Setup the SQLAlchemy database engine\n # Suppress a couple of sqlalchemy warnings\n msgs = ['^Unicode type received non-unicode bind param value',\n \"^Did not recognize type 'BIGINT' of column 'size'\",\n \"^Did not recognize type 'tsvector' of column 'search_vector'\"\n ]\n for msg in msgs:\n warnings.filterwarnings('ignore', msg, sqlalchemy.exc.SAWarning)\n\n # load all CKAN plugins\n p.load_all()\n\n # Check Redis availability\n if not is_redis_available():\n log.critical('Could not connect to Redis.')\n\n app_globals.reset()\n\n # issue #3260: remove idle transaction\n # Session that was used for getting all config params nor committed,\n # neither removed and we have idle connection as result\n model.Session.commit()\n\n # Build JavaScript translations. Must be done after plugins have\n # been loaded.\n build_js_translations()\n\n\n# A mapping of config settings that can be overridden by env vars.\n# Note: Do not remove the following lines, they are used in the docs\n# Start CONFIG_FROM_ENV_VARS\nCONFIG_FROM_ENV_VARS = {\n 'sqlalchemy.url': 'CKAN_SQLALCHEMY_URL',\n 'ckan.datastore.write_url': 'CKAN_DATASTORE_WRITE_URL',\n 'ckan.datastore.read_url': 'CKAN_DATASTORE_READ_URL',\n 'ckan.redis.url': 'CKAN_REDIS_URL',\n 'solr_url': 'CKAN_SOLR_URL',\n 'solr_user': 'CKAN_SOLR_USER',\n 'solr_password': 'CKAN_SOLR_PASSWORD',\n 'ckan.site_id': 'CKAN_SITE_ID',\n 'ckan.site_url': 'CKAN_SITE_URL',\n 'ckan.storage_path': 'CKAN_STORAGE_PATH',\n 'ckan.datapusher.url': 'CKAN_DATAPUSHER_URL',\n 'smtp.server': 'CKAN_SMTP_SERVER',\n 'smtp.starttls': 'CKAN_SMTP_STARTTLS',\n 'smtp.user': 'CKAN_SMTP_USER',\n 'smtp.password': 'CKAN_SMTP_PASSWORD',\n 'smtp.mail_from': 'CKAN_SMTP_MAIL_FROM',\n 'ckan.max_resource_size': 'CKAN_MAX_UPLOAD_SIZE_MB'\n}\n# End CONFIG_FROM_ENV_VARS\n\n\ndef update_config():\n ''' This code needs to be run when the config is changed to take those\n changes into account. It is called whenever a plugin is loaded as the\n plugin might have changed the config values (for instance it might\n change ckan.site_url) '''\n\n webassets_init()\n\n for plugin in p.PluginImplementations(p.IConfigurer):\n # must do update in place as this does not work:\n # config = plugin.update_config(config)\n plugin.update_config(config)\n\n # Set whitelisted env vars on config object\n # This is set up before globals are initialized\n\n ckan_db = os.environ.get('CKAN_DB', None)\n if ckan_db:\n msg = 'Setting CKAN_DB as an env var is deprecated and will be' \\\n ' removed in a future release. 
Use CKAN_SQLALCHEMY_URL instead.'\n log.warn(msg)\n config['sqlalchemy.url'] = ckan_db\n\n for option in CONFIG_FROM_ENV_VARS:\n from_env = os.environ.get(CONFIG_FROM_ENV_VARS[option], None)\n if from_env:\n config[option] = from_env\n\n root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n\n site_url = config.get('ckan.site_url', '')\n if not site_url:\n raise RuntimeError(\n 'ckan.site_url is not configured and it must have a value.'\n ' Please amend your .ini file.')\n if not site_url.lower().startswith('http'):\n raise RuntimeError(\n 'ckan.site_url should be a full URL, including the schema '\n '(http or https)')\n\n display_timezone = config.get('ckan.display_timezone', '')\n if (display_timezone and\n display_timezone != 'server' and\n display_timezone not in pytz.all_timezones):\n raise CkanConfigurationException(\n \"ckan.display_timezone is not 'server' or a valid timezone\"\n )\n\n # Remove backslash from site_url if present\n config['ckan.site_url'] = config['ckan.site_url'].rstrip('/')\n\n ckan_host = config['ckan.host'] = urlparse(site_url).netloc\n if config.get('ckan.site_id') is None:\n if ':' in ckan_host:\n ckan_host, port = ckan_host.split(':')\n assert ckan_host, 'You need to configure ckan.site_url or ' \\\n 'ckan.site_id for SOLR search-index rebuild to work.'\n config['ckan.site_id'] = ckan_host\n\n # ensure that a favicon has been set\n favicon = config.get('ckan.favicon', '/base/images/ckan.ico')\n config['ckan.favicon'] = favicon\n\n # Init SOLR settings and check if the schema is compatible\n # from ckan.lib.search import SolrSettings, check_solr_schema_version\n\n # lib.search is imported here as we need the config enabled and parsed\n search.SolrSettings.init(config.get('solr_url'),\n config.get('solr_user'),\n config.get('solr_password'))\n search.check_solr_schema_version()\n\n lib_plugins.reset_package_plugins()\n lib_plugins.register_package_plugins()\n lib_plugins.reset_group_plugins()\n lib_plugins.register_group_plugins()\n\n # initialise the globals\n app_globals.app_globals._init()\n\n helpers.load_plugin_helpers()\n config['pylons.h'] = helpers.helper_functions\n\n # Templates and CSS loading from configuration\n valid_base_templates_folder_names = ['templates']\n templates = config.get('ckan.base_templates_folder', 'templates')\n config['ckan.base_templates_folder'] = templates\n\n if templates not in valid_base_templates_folder_names:\n raise CkanConfigurationException(\n 'You provided an invalid value for ckan.base_templates_folder. 
'\n 'Possible values are: \"templates\".'\n )\n\n jinja2_templates_path = os.path.join(root, templates)\n log.info('Loading templates from %s' % jinja2_templates_path)\n template_paths = [jinja2_templates_path]\n\n extra_template_paths = config.get('extra_template_paths', '')\n if extra_template_paths:\n # must be first for them to override defaults\n template_paths = extra_template_paths.split(',') + template_paths\n config['computed_template_paths'] = template_paths\n\n # Markdown ignores the logger config, so to get rid of excessive\n # markdown debug messages in the log, set it to the level of the\n # root logger.\n logging.getLogger(\"MARKDOWN\").setLevel(logging.getLogger().level)\n\n if six.PY2:\n # Create Jinja2 environment\n env = jinja_extensions.Environment(\n **jinja_extensions.get_jinja_env_options())\n env.install_gettext_callables(_, ungettext, newstyle=True)\n # custom filters\n env.policies['ext.i18n.trimmed'] = True\n env.filters['empty_and_escape'] = jinja_extensions.empty_and_escape\n config['pylons.app_globals'].jinja_env = env\n\n # CONFIGURATION OPTIONS HERE (note: all config options will override\n # any Pylons config options)\n\n # Enable pessimistic disconnect handling (added in SQLAlchemy 1.2)\n # to eliminate database errors due to stale pooled connections\n config.setdefault('sqlalchemy.pool_pre_ping', True)\n\n # Initialize SQLAlchemy\n engine = sqlalchemy.engine_from_config(config)\n model.init_model(engine)\n\n for plugin in p.PluginImplementations(p.IConfigurable):\n plugin.configure(config)\n\n # reset the template cache - we do this here so that when we load the\n # environment it is clean\n render.reset_template_info_cache()\n\n # clear other caches\n logic.clear_actions_cache()\n logic.clear_validators_cache()\n authz.clear_auth_functions_cache()\n\n # Here we create the site user if they are not already in the database\n try:\n logic.get_action('get_site_user')({'ignore_auth': True}, None)\n except (sqlalchemy.exc.ProgrammingError, sqlalchemy.exc.OperationalError):\n # (ProgrammingError for Postgres, OperationalError for SQLite)\n # The database is not initialised. This is a bit dirty. This occurs\n # when running tests.\n pass\n except sqlalchemy.exc.InternalError:\n # The database is not initialised. Travis hits this\n pass\n\n # Close current session and open database connections to ensure a clean\n # clean environment even if an error occurs later on\n model.Session.remove()\n model.Session.bind.dispose()\n", "path": "ckan/config/environment.py"}]} | 4,063 | 118 |
gh_patches_debug_5063 | rasdani/github-patches | git_diff | cltk__cltk-371 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Inconsistent case of data directory
[The latin module](https://github.com/cltk/cltk/blob/36b6fe7aa23a8c4f41ef694ddb64b4b02534146c/cltk/corpus/latin/__init__.py#L20) tries to load the latin library from "~/CLTK_DATA", which fails on a case-sensitive filesystem.
All other modules seem to use "~/cltk_data", so that should be standardized.
(It would probably be best if the path was factored out and also configurable, but fixing at least that one inconsistency lets me actually import stuff without errors.)
Inconsistent case of data directory
[The latin module](https://github.com/cltk/cltk/blob/36b6fe7aa23a8c4f41ef694ddb64b4b02534146c/cltk/corpus/latin/__init__.py#L20) tries to load the latin library from "~/CLTK_DATA", which fails on a case-sensitive filesystem.
All other modules seem to use "~/cltk_data", so that should be standardized.
(It would probably be best if the path was factored out and also configurable, but fixing at least that one inconsistency lets me actually import stuff without errors.)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `cltk/corpus/latin/__init__.py`
Content:
```
1 # CLTK: Latin Corpus Readers
2
3 __author__ = 'Patrick J. Burns <[email protected]>'
4 __license__ = 'MIT License. See LICENSE.'
5
6 """
7 CLTK Latin corpus readers
8 """
9
10 import os.path
11 from nltk.corpus.reader.plaintext import PlaintextCorpusReader
12 from nltk.tokenize.punkt import PunktSentenceTokenizer, PunktParameters
13
14 from cltk.tokenize.sentence import TokenizeSentence
15 from cltk.tokenize.word import WordTokenizer
16
17 # Would like to have this search through a CLTK_DATA environment variable
18 # Better to use something like make_cltk_path in cltk.utils.file_operations?
19 home = os.path.expanduser('~')
20 cltk_path = os.path.join(home, 'CLTK_DATA')
21
22 word_tokenizer = WordTokenizer('latin')
23
24 if os.path.exists(cltk_path + 'latin/model/latin_models_cltk/tokenizers/sentence'):
25 sent_tokenizer = TokenizeSentence('latin')
26 else:
27 punkt_param = PunktParameters()
28 abbreviations = ['c', 'l', 'm', 'p', 'q', 't', 'ti', 'sex', 'a', 'd', 'cn', 'sp', "m'", 'ser', 'ap', 'n', 'v', 'k', 'mam', 'post', 'f', 'oct', 'opet', 'paul', 'pro', 'sert', 'st', 'sta', 'v', 'vol', 'vop']
29 punkt_param.abbrev_types = set(abbreviations)
30 sent_tokenizer = PunktSentenceTokenizer(punkt_param)
31
32 # Latin Library
33 try:
34 latinlibrary = PlaintextCorpusReader(cltk_path + '/latin/text/latin_text_latin_library',
35 '.*\.txt',
36 word_tokenizer=word_tokenizer,
37 sent_tokenizer=sent_tokenizer,
38 encoding='utf-8')
39 pass
40 except IOError as e:
41 print("Corpus not found. Please check that the Latin Library is installed in CLTK_DATA.")
42
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/cltk/corpus/latin/__init__.py b/cltk/corpus/latin/__init__.py
--- a/cltk/corpus/latin/__init__.py
+++ b/cltk/corpus/latin/__init__.py
@@ -17,7 +17,7 @@
# Would like to have this search through a CLTK_DATA environment variable
# Better to use something like make_cltk_path in cltk.utils.file_operations?
home = os.path.expanduser('~')
-cltk_path = os.path.join(home, 'CLTK_DATA')
+cltk_path = os.path.join(home, 'cltk_data')
word_tokenizer = WordTokenizer('latin')
| {"golden_diff": "diff --git a/cltk/corpus/latin/__init__.py b/cltk/corpus/latin/__init__.py\n--- a/cltk/corpus/latin/__init__.py\n+++ b/cltk/corpus/latin/__init__.py\n@@ -17,7 +17,7 @@\n # Would like to have this search through a CLTK_DATA environment variable\n # Better to use something like make_cltk_path in cltk.utils.file_operations?\n home = os.path.expanduser('~')\n-cltk_path = os.path.join(home, 'CLTK_DATA')\n+cltk_path = os.path.join(home, 'cltk_data')\n \n word_tokenizer = WordTokenizer('latin')\n", "issue": "Inconsistent case of data directory\n[The latin module](https://github.com/cltk/cltk/blob/36b6fe7aa23a8c4f41ef694ddb64b4b02534146c/cltk/corpus/latin/__init__.py#L20) tries to load the latin library from \"~/CLTK_DATA\", which fails on a case-sensitive filesystem.\n\nAll other modules seem to use \"~/cltk_data\", so that should be standardized. \n\n(It would probably be best if the path was factored out and also configurable, but fixing at least that one inconsistency lets me actually import stuff without errors.)\n\nInconsistent case of data directory\n[The latin module](https://github.com/cltk/cltk/blob/36b6fe7aa23a8c4f41ef694ddb64b4b02534146c/cltk/corpus/latin/__init__.py#L20) tries to load the latin library from \"~/CLTK_DATA\", which fails on a case-sensitive filesystem.\n\nAll other modules seem to use \"~/cltk_data\", so that should be standardized. \n\n(It would probably be best if the path was factored out and also configurable, but fixing at least that one inconsistency lets me actually import stuff without errors.)\n\n", "before_files": [{"content": "# CLTK: Latin Corpus Readers\n\n__author__ = 'Patrick J. Burns <[email protected]>'\n__license__ = 'MIT License. See LICENSE.'\n\n\"\"\"\nCLTK Latin corpus readers\n\"\"\"\n\nimport os.path\nfrom nltk.corpus.reader.plaintext import PlaintextCorpusReader\nfrom nltk.tokenize.punkt import PunktSentenceTokenizer, PunktParameters\n\nfrom cltk.tokenize.sentence import TokenizeSentence\nfrom cltk.tokenize.word import WordTokenizer\n\n# Would like to have this search through a CLTK_DATA environment variable\n# Better to use something like make_cltk_path in cltk.utils.file_operations?\nhome = os.path.expanduser('~')\ncltk_path = os.path.join(home, 'CLTK_DATA')\n\nword_tokenizer = WordTokenizer('latin')\n\nif os.path.exists(cltk_path + 'latin/model/latin_models_cltk/tokenizers/sentence'):\n sent_tokenizer = TokenizeSentence('latin')\nelse:\n punkt_param = PunktParameters()\n abbreviations = ['c', 'l', 'm', 'p', 'q', 't', 'ti', 'sex', 'a', 'd', 'cn', 'sp', \"m'\", 'ser', 'ap', 'n', 'v', 'k', 'mam', 'post', 'f', 'oct', 'opet', 'paul', 'pro', 'sert', 'st', 'sta', 'v', 'vol', 'vop']\n punkt_param.abbrev_types = set(abbreviations)\n sent_tokenizer = PunktSentenceTokenizer(punkt_param)\n\n# Latin Library\ntry:\n latinlibrary = PlaintextCorpusReader(cltk_path + '/latin/text/latin_text_latin_library', \n '.*\\.txt',\n word_tokenizer=word_tokenizer, \n sent_tokenizer=sent_tokenizer, \n encoding='utf-8') \n pass\nexcept IOError as e:\n print(\"Corpus not found. Please check that the Latin Library is installed in CLTK_DATA.\")\n", "path": "cltk/corpus/latin/__init__.py"}], "after_files": [{"content": "# CLTK: Latin Corpus Readers\n\n__author__ = 'Patrick J. Burns <[email protected]>'\n__license__ = 'MIT License. 
See LICENSE.'\n\n\"\"\"\nCLTK Latin corpus readers\n\"\"\"\n\nimport os.path\nfrom nltk.corpus.reader.plaintext import PlaintextCorpusReader\nfrom nltk.tokenize.punkt import PunktSentenceTokenizer, PunktParameters\n\nfrom cltk.tokenize.sentence import TokenizeSentence\nfrom cltk.tokenize.word import WordTokenizer\n\n# Would like to have this search through a CLTK_DATA environment variable\n# Better to use something like make_cltk_path in cltk.utils.file_operations?\nhome = os.path.expanduser('~')\ncltk_path = os.path.join(home, 'cltk_data')\n\nword_tokenizer = WordTokenizer('latin')\n\nif os.path.exists(cltk_path + 'latin/model/latin_models_cltk/tokenizers/sentence'):\n sent_tokenizer = TokenizeSentence('latin')\nelse:\n punkt_param = PunktParameters()\n abbreviations = ['c', 'l', 'm', 'p', 'q', 't', 'ti', 'sex', 'a', 'd', 'cn', 'sp', \"m'\", 'ser', 'ap', 'n', 'v', 'k', 'mam', 'post', 'f', 'oct', 'opet', 'paul', 'pro', 'sert', 'st', 'sta', 'v', 'vol', 'vop']\n punkt_param.abbrev_types = set(abbreviations)\n sent_tokenizer = PunktSentenceTokenizer(punkt_param)\n\n# Latin Library\ntry:\n latinlibrary = PlaintextCorpusReader(cltk_path + '/latin/text/latin_text_latin_library', \n '.*\\.txt',\n word_tokenizer=word_tokenizer, \n sent_tokenizer=sent_tokenizer, \n encoding='utf-8') \n pass\nexcept IOError as e:\n print(\"Corpus not found. Please check that the Latin Library is installed in CLTK_DATA.\")\n", "path": "cltk/corpus/latin/__init__.py"}]} | 1,058 | 147 |
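The issue above also asks for the data path to be factored out and made configurable; a minimal sketch of that idea, assuming a hypothetical `get_cltk_data_dir` helper and a `CLTK_DATA` environment-variable override (neither is part of the patch above):

```python
import os


def get_cltk_data_dir() -> str:
    """Return the CLTK data directory.

    Prefers a CLTK_DATA environment variable if set; otherwise falls back to
    ~/cltk_data (lowercase), matching the other CLTK modules on
    case-sensitive filesystems.
    """
    default = os.path.join(os.path.expanduser("~"), "cltk_data")
    return os.environ.get("CLTK_DATA", default)


cltk_path = get_cltk_data_dir()
```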
gh_patches_debug_35230 | rasdani/github-patches | git_diff | ocadotechnology__aimmo-191 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Remove "staff required" for aimmo pages when URL viewing restrictions are merged
Will require https://github.com/ocadotechnology/codeforlife-deploy-appengine/pull/35
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `players/urls.py`
Content:
```
1 from django.conf.urls import url
2 from django.views.generic import TemplateView
3 from django.contrib.auth.decorators import login_required
4 from django.contrib.admin.views.decorators import staff_member_required
5
6 from players import views
7
8 urlpatterns = [
9 url(r'^$', staff_member_required(TemplateView.as_view(template_name='players/home.html')), name='aimmo/home'),
10
11 url(r'^program/$', staff_member_required(login_required(TemplateView.as_view(template_name='players/program.html'))), name='aimmo/program'),
12 url(r'^watch/$', staff_member_required(views.WatchView.as_view()), name='aimmo/watch'),
13 url(r'^statistics/$', staff_member_required(TemplateView.as_view(template_name='players/statistics.html')), name='aimmo/statistics'),
14
15 url(r'^api/code/$', staff_member_required(views.code), name='aimmo/code'),
16 url(r'^api/games/$', views.games, name='aimmo/games'),
17
18 url(r'^jsreverse/$', 'django_js_reverse.views.urls_js', name='aimmo/js_reverse'), # TODO: Pull request to make django_js_reverse.urls
19 ]
20
```
Path: `example_project/example_project/settings.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 # Code for Life
3 #
4 # Copyright (C) 2015, Ocado Innovation Limited
5 #
6 # This program is free software: you can redistribute it and/or modify
7 # it under the terms of the GNU Affero General Public License as
8 # published by the Free Software Foundation, either version 3 of the
9 # License, or (at your option) any later version.
10 #
11 # This program is distributed in the hope that it will be useful,
12 # but WITHOUT ANY WARRANTY; without even the implied warranty of
13 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 # GNU Affero General Public License for more details.
15 #
16 # You should have received a copy of the GNU Affero General Public License
17 # along with this program. If not, see <http://www.gnu.org/licenses/>.
18 #
19 # ADDITIONAL TERMS – Section 7 GNU General Public Licence
20 #
21 # This licence does not grant any right, title or interest in any “Ocado” logos,
22 # trade names or the trademark “Ocado” or any other trademarks or domain names
23 # owned by Ocado Innovation Limited or the Ocado group of companies or any other
24 # distinctive brand features of “Ocado” as may be secured from time to time. You
25 # must not distribute any modification of this program using the trademark
26 # “Ocado” or claim any affiliation or association with Ocado or its employees.
27 #
28 # You are not authorised to use the name Ocado (or any of its trade names) or
29 # the names of any author or contributor in advertising or for publicity purposes
30 # pertaining to the distribution of this program, without the prior written
31 # authorisation of Ocado.
32 #
33 # Any propagation, distribution or conveyance of this program must include this
34 # copyright notice and these terms. You must not misrepresent the origins of this
35 # program; modified versions of the program must be marked as such and not
36 # identified as the original program.
37 """Django settings for example_project project."""
38 import os
39 import subprocess
40
41 DEBUG = True
42
43 DATABASES = {
44 'default': {
45 'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
46 'NAME': os.path.join(os.path.abspath(os.path.dirname(__file__)), 'db.sqlite3'), # Or path to database file if using sqlite3.
47 }
48 }
49
50 USE_I18N = True
51 USE_L10N = True
52
53 TIME_ZONE = 'Europe/London'
54 LANGUAGE_CODE = 'en-gb'
55 STATIC_ROOT = os.path.join(os.path.dirname(__file__), 'static')
56 STATIC_URL = '/static/'
57 SECRET_KEY = 'not-a-secret'
58
59 ROOT_URLCONF = 'django_autoconfig.autourlconf'
60
61 WSGI_APPLICATION = 'example_project.wsgi.application'
62
63 INSTALLED_APPS = (
64 'django.contrib.admin',
65 'players',
66 )
67
68 LOGGING = {
69 'version': 1,
70 'disable_existing_loggers': False,
71 'filters': {
72 'require_debug_false': {
73 '()': 'django.utils.log.RequireDebugFalse'
74 }
75 },
76 'handlers': {
77 'console': {
78 'level': 'DEBUG',
79 'class': 'logging.StreamHandler'
80 },
81 },
82 'loggers': {
83 'views': {
84 'handlers': ['console'],
85 'level': 'DEBUG'
86 },
87 }
88 }
89
90
91 def get_url(game):
92 if os.environ.get('AIMMO_MODE', '') == 'minikube':
93 output = subprocess.check_output(['./test-bin/minikube', 'service', 'game-%s' % game, '--url'])
94 return (output.strip(), '/game/%s/socket.io' % game)
95 else:
96 return ('http://localhost:5000', '/socket.io')
97
98 AIMMO_GAME_SERVER_LOCATION_FUNCTION = get_url
99
100 try:
101 from example_project.local_settings import * # pylint: disable=E0611
102 except ImportError:
103 pass
104
105 from django_autoconfig import autoconfig
106 autoconfig.configure_settings(globals())
107
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/example_project/example_project/settings.py b/example_project/example_project/settings.py
--- a/example_project/example_project/settings.py
+++ b/example_project/example_project/settings.py
@@ -62,6 +62,8 @@
INSTALLED_APPS = (
'django.contrib.admin',
+ 'django.contrib.auth',
+ 'django.contrib.contenttypes',
'players',
)
@@ -87,6 +89,16 @@
}
}
+LOGIN_URL = '/players/accounts/login/'
+
+MIDDLEWARE_CLASSES = [
+ 'django.contrib.sessions.middleware.SessionMiddleware',
+ 'django.middleware.locale.LocaleMiddleware',
+ 'django.middleware.common.CommonMiddleware',
+ 'django.middleware.csrf.CsrfViewMiddleware',
+ 'django.contrib.auth.middleware.AuthenticationMiddleware',
+ 'django.contrib.messages.middleware.MessageMiddleware',
+]
def get_url(game):
if os.environ.get('AIMMO_MODE', '') == 'minikube':
diff --git a/players/urls.py b/players/urls.py
--- a/players/urls.py
+++ b/players/urls.py
@@ -1,18 +1,19 @@
from django.conf.urls import url
from django.views.generic import TemplateView
+from django.contrib.auth import views as auth_views
from django.contrib.auth.decorators import login_required
-from django.contrib.admin.views.decorators import staff_member_required
from players import views
urlpatterns = [
- url(r'^$', staff_member_required(TemplateView.as_view(template_name='players/home.html')), name='aimmo/home'),
+ url(r'^$', TemplateView.as_view(template_name='players/home.html'), name='aimmo/home'),
- url(r'^program/$', staff_member_required(login_required(TemplateView.as_view(template_name='players/program.html'))), name='aimmo/program'),
- url(r'^watch/$', staff_member_required(views.WatchView.as_view()), name='aimmo/watch'),
- url(r'^statistics/$', staff_member_required(TemplateView.as_view(template_name='players/statistics.html')), name='aimmo/statistics'),
+ url(r'^program/$', login_required(TemplateView.as_view(template_name='players/program.html')), name='aimmo/program'),
+ url(r'^accounts/login/$', auth_views.login),
+ url(r'^watch/$', views.WatchView.as_view(), name='aimmo/watch'),
+ url(r'^statistics/$', TemplateView.as_view(template_name='players/statistics.html'), name='aimmo/statistics'),
- url(r'^api/code/$', staff_member_required(views.code), name='aimmo/code'),
+ url(r'^api/code/$', views.code, name='aimmo/code'),
url(r'^api/games/$', views.games, name='aimmo/games'),
url(r'^jsreverse/$', 'django_js_reverse.views.urls_js', name='aimmo/js_reverse'), # TODO: Pull request to make django_js_reverse.urls
| {"golden_diff": "diff --git a/example_project/example_project/settings.py b/example_project/example_project/settings.py\n--- a/example_project/example_project/settings.py\n+++ b/example_project/example_project/settings.py\n@@ -62,6 +62,8 @@\n \n INSTALLED_APPS = (\n 'django.contrib.admin',\n+ 'django.contrib.auth',\n+ 'django.contrib.contenttypes',\n 'players',\n )\n \n@@ -87,6 +89,16 @@\n }\n }\n \n+LOGIN_URL = '/players/accounts/login/'\n+\n+MIDDLEWARE_CLASSES = [\n+ 'django.contrib.sessions.middleware.SessionMiddleware',\n+ 'django.middleware.locale.LocaleMiddleware',\n+ 'django.middleware.common.CommonMiddleware',\n+ 'django.middleware.csrf.CsrfViewMiddleware',\n+ 'django.contrib.auth.middleware.AuthenticationMiddleware',\n+ 'django.contrib.messages.middleware.MessageMiddleware',\n+]\n \n def get_url(game):\n if os.environ.get('AIMMO_MODE', '') == 'minikube':\ndiff --git a/players/urls.py b/players/urls.py\n--- a/players/urls.py\n+++ b/players/urls.py\n@@ -1,18 +1,19 @@\n from django.conf.urls import url\n from django.views.generic import TemplateView\n+from django.contrib.auth import views as auth_views\n from django.contrib.auth.decorators import login_required\n-from django.contrib.admin.views.decorators import staff_member_required\n \n from players import views\n \n urlpatterns = [\n- url(r'^$', staff_member_required(TemplateView.as_view(template_name='players/home.html')), name='aimmo/home'),\n+ url(r'^$', TemplateView.as_view(template_name='players/home.html'), name='aimmo/home'),\n \n- url(r'^program/$', staff_member_required(login_required(TemplateView.as_view(template_name='players/program.html'))), name='aimmo/program'),\n- url(r'^watch/$', staff_member_required(views.WatchView.as_view()), name='aimmo/watch'),\n- url(r'^statistics/$', staff_member_required(TemplateView.as_view(template_name='players/statistics.html')), name='aimmo/statistics'),\n+ url(r'^program/$', login_required(TemplateView.as_view(template_name='players/program.html')), name='aimmo/program'),\n+ url(r'^accounts/login/$', auth_views.login),\n+ url(r'^watch/$', views.WatchView.as_view(), name='aimmo/watch'),\n+ url(r'^statistics/$', TemplateView.as_view(template_name='players/statistics.html'), name='aimmo/statistics'),\n \n- url(r'^api/code/$', staff_member_required(views.code), name='aimmo/code'),\n+ url(r'^api/code/$', views.code, name='aimmo/code'),\n url(r'^api/games/$', views.games, name='aimmo/games'),\n \n url(r'^jsreverse/$', 'django_js_reverse.views.urls_js', name='aimmo/js_reverse'), # TODO: Pull request to make django_js_reverse.urls\n", "issue": "Remove \"staff required\" for aimmo pages when URL viewing restrictions are merged\nWill require https://github.com/ocadotechnology/codeforlife-deploy-appengine/pull/35\n", "before_files": [{"content": "from django.conf.urls import url\nfrom django.views.generic import TemplateView\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.admin.views.decorators import staff_member_required\n\nfrom players import views\n\nurlpatterns = [\n url(r'^$', staff_member_required(TemplateView.as_view(template_name='players/home.html')), name='aimmo/home'),\n\n url(r'^program/$', staff_member_required(login_required(TemplateView.as_view(template_name='players/program.html'))), name='aimmo/program'),\n url(r'^watch/$', staff_member_required(views.WatchView.as_view()), name='aimmo/watch'),\n url(r'^statistics/$', staff_member_required(TemplateView.as_view(template_name='players/statistics.html')), name='aimmo/statistics'),\n\n 
url(r'^api/code/$', staff_member_required(views.code), name='aimmo/code'),\n url(r'^api/games/$', views.games, name='aimmo/games'),\n\n url(r'^jsreverse/$', 'django_js_reverse.views.urls_js', name='aimmo/js_reverse'), # TODO: Pull request to make django_js_reverse.urls\n]\n", "path": "players/urls.py"}, {"content": "# -*- coding: utf-8 -*-\n# Code for Life\n#\n# Copyright (C) 2015, Ocado Innovation Limited\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as\n# published by the Free Software Foundation, either version 3 of the\n# License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Affero General Public License for more details.\n#\n# You should have received a copy of the GNU Affero General Public License\n# along with this program. If not, see <http://www.gnu.org/licenses/>.\n#\n# ADDITIONAL TERMS \u2013 Section 7 GNU General Public Licence\n#\n# This licence does not grant any right, title or interest in any \u201cOcado\u201d logos,\n# trade names or the trademark \u201cOcado\u201d or any other trademarks or domain names\n# owned by Ocado Innovation Limited or the Ocado group of companies or any other\n# distinctive brand features of \u201cOcado\u201d as may be secured from time to time. You\n# must not distribute any modification of this program using the trademark\n# \u201cOcado\u201d or claim any affiliation or association with Ocado or its employees.\n#\n# You are not authorised to use the name Ocado (or any of its trade names) or\n# the names of any author or contributor in advertising or for publicity purposes\n# pertaining to the distribution of this program, without the prior written\n# authorisation of Ocado.\n#\n# Any propagation, distribution or conveyance of this program must include this\n# copyright notice and these terms. 
You must not misrepresent the origins of this\n# program; modified versions of the program must be marked as such and not\n# identified as the original program.\n\"\"\"Django settings for example_project project.\"\"\"\nimport os\nimport subprocess\n\nDEBUG = True\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.\n 'NAME': os.path.join(os.path.abspath(os.path.dirname(__file__)), 'db.sqlite3'), # Or path to database file if using sqlite3.\n }\n}\n\nUSE_I18N = True\nUSE_L10N = True\n\nTIME_ZONE = 'Europe/London'\nLANGUAGE_CODE = 'en-gb'\nSTATIC_ROOT = os.path.join(os.path.dirname(__file__), 'static')\nSTATIC_URL = '/static/'\nSECRET_KEY = 'not-a-secret'\n\nROOT_URLCONF = 'django_autoconfig.autourlconf'\n\nWSGI_APPLICATION = 'example_project.wsgi.application'\n\nINSTALLED_APPS = (\n 'django.contrib.admin',\n 'players',\n)\n\nLOGGING = {\n 'version': 1,\n 'disable_existing_loggers': False,\n 'filters': {\n 'require_debug_false': {\n '()': 'django.utils.log.RequireDebugFalse'\n }\n },\n 'handlers': {\n 'console': {\n 'level': 'DEBUG',\n 'class': 'logging.StreamHandler'\n },\n },\n 'loggers': {\n 'views': {\n 'handlers': ['console'],\n 'level': 'DEBUG'\n },\n }\n}\n\n\ndef get_url(game):\n if os.environ.get('AIMMO_MODE', '') == 'minikube':\n output = subprocess.check_output(['./test-bin/minikube', 'service', 'game-%s' % game, '--url'])\n return (output.strip(), '/game/%s/socket.io' % game)\n else:\n return ('http://localhost:5000', '/socket.io')\n\nAIMMO_GAME_SERVER_LOCATION_FUNCTION = get_url\n\ntry:\n from example_project.local_settings import * # pylint: disable=E0611\nexcept ImportError:\n pass\n\nfrom django_autoconfig import autoconfig\nautoconfig.configure_settings(globals())\n", "path": "example_project/example_project/settings.py"}], "after_files": [{"content": "from django.conf.urls import url\nfrom django.views.generic import TemplateView\nfrom django.contrib.auth import views as auth_views\nfrom django.contrib.auth.decorators import login_required\n\nfrom players import views\n\nurlpatterns = [\n url(r'^$', TemplateView.as_view(template_name='players/home.html'), name='aimmo/home'),\n\n url(r'^program/$', login_required(TemplateView.as_view(template_name='players/program.html')), name='aimmo/program'),\n url(r'^accounts/login/$', auth_views.login),\n url(r'^watch/$', views.WatchView.as_view(), name='aimmo/watch'),\n url(r'^statistics/$', TemplateView.as_view(template_name='players/statistics.html'), name='aimmo/statistics'),\n\n url(r'^api/code/$', views.code, name='aimmo/code'),\n url(r'^api/games/$', views.games, name='aimmo/games'),\n\n url(r'^jsreverse/$', 'django_js_reverse.views.urls_js', name='aimmo/js_reverse'), # TODO: Pull request to make django_js_reverse.urls\n]\n", "path": "players/urls.py"}, {"content": "# -*- coding: utf-8 -*-\n# Code for Life\n#\n# Copyright (C) 2015, Ocado Innovation Limited\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as\n# published by the Free Software Foundation, either version 3 of the\n# License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\n# GNU Affero General Public License for more details.\n#\n# You should have received a copy of the GNU Affero General Public License\n# along with this program. If not, see <http://www.gnu.org/licenses/>.\n#\n# ADDITIONAL TERMS \u2013 Section 7 GNU General Public Licence\n#\n# This licence does not grant any right, title or interest in any \u201cOcado\u201d logos,\n# trade names or the trademark \u201cOcado\u201d or any other trademarks or domain names\n# owned by Ocado Innovation Limited or the Ocado group of companies or any other\n# distinctive brand features of \u201cOcado\u201d as may be secured from time to time. You\n# must not distribute any modification of this program using the trademark\n# \u201cOcado\u201d or claim any affiliation or association with Ocado or its employees.\n#\n# You are not authorised to use the name Ocado (or any of its trade names) or\n# the names of any author or contributor in advertising or for publicity purposes\n# pertaining to the distribution of this program, without the prior written\n# authorisation of Ocado.\n#\n# Any propagation, distribution or conveyance of this program must include this\n# copyright notice and these terms. You must not misrepresent the origins of this\n# program; modified versions of the program must be marked as such and not\n# identified as the original program.\n\"\"\"Django settings for example_project project.\"\"\"\nimport os\nimport subprocess\n\nDEBUG = True\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.\n 'NAME': os.path.join(os.path.abspath(os.path.dirname(__file__)), 'db.sqlite3'), # Or path to database file if using sqlite3.\n }\n}\n\nUSE_I18N = True\nUSE_L10N = True\n\nTIME_ZONE = 'Europe/London'\nLANGUAGE_CODE = 'en-gb'\nSTATIC_ROOT = os.path.join(os.path.dirname(__file__), 'static')\nSTATIC_URL = '/static/'\nSECRET_KEY = 'not-a-secret'\n\nROOT_URLCONF = 'django_autoconfig.autourlconf'\n\nWSGI_APPLICATION = 'example_project.wsgi.application'\n\nINSTALLED_APPS = (\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'players',\n)\n\nLOGGING = {\n 'version': 1,\n 'disable_existing_loggers': False,\n 'filters': {\n 'require_debug_false': {\n '()': 'django.utils.log.RequireDebugFalse'\n }\n },\n 'handlers': {\n 'console': {\n 'level': 'DEBUG',\n 'class': 'logging.StreamHandler'\n },\n },\n 'loggers': {\n 'views': {\n 'handlers': ['console'],\n 'level': 'DEBUG'\n },\n }\n}\n\nLOGIN_URL = '/players/accounts/login/'\n\nMIDDLEWARE_CLASSES = [\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.locale.LocaleMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n]\n\ndef get_url(game):\n if os.environ.get('AIMMO_MODE', '') == 'minikube':\n output = subprocess.check_output(['./test-bin/minikube', 'service', 'game-%s' % game, '--url'])\n return (output.strip(), '/game/%s/socket.io' % game)\n else:\n return ('http://localhost:5000', '/socket.io')\n\nAIMMO_GAME_SERVER_LOCATION_FUNCTION = get_url\n\ntry:\n from example_project.local_settings import * # pylint: disable=E0611\nexcept ImportError:\n pass\n\nfrom django_autoconfig import autoconfig\nautoconfig.configure_settings(globals())\n", "path": "example_project/example_project/settings.py"}]} | 1,657 | 612 |
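The patch above relies on Django's standard auth stack being wired up so that `login_required` can redirect anonymous users instead of erroring; a condensed, annotated sketch of the pieces the decorator depends on (fragments only, not the complete settings from the diff; comments are explanatory additions):

```python
# settings.py fragment -- what login_required needs to work
LOGIN_URL = '/players/accounts/login/'  # where anonymous users are redirected

INSTALLED_APPS = (
    'django.contrib.admin',
    'django.contrib.auth',           # users, groups, permissions
    'django.contrib.contenttypes',   # required by django.contrib.auth
    'players',
)

MIDDLEWARE_CLASSES = [
    'django.contrib.sessions.middleware.SessionMiddleware',     # session cookie
    'django.contrib.auth.middleware.AuthenticationMiddleware',  # sets request.user
    'django.contrib.messages.middleware.MessageMiddleware',
]

# urls.py fragment -- any authenticated user may open the page, not just staff
from django.conf.urls import url
from django.contrib.auth import views as auth_views
from django.contrib.auth.decorators import login_required
from django.views.generic import TemplateView

urlpatterns = [
    url(r'^program/$',
        login_required(TemplateView.as_view(template_name='players/program.html')),
        name='aimmo/program'),
    url(r'^accounts/login/$', auth_views.login),  # serves the login form
]
```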
gh_patches_debug_8683 | rasdani/github-patches | git_diff | zulip__zulip-23554 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Use consistent naming for user (profile) menus
As [discussed on CZO](https://chat.zulip.org/#narrow/stream/101-design/topic/terms.20for.20user.20profiles), we should develop consistent naming for the pop-up you seen when you click on a user avatar, as well as the detailed user profile.
Specifically, the naming we want to implement is:
1. "User card" for the initial user view.

2. "User profile" for the detailed profile.

Known places where changes need to be made:
- [ ] Tooltip on user avatar: "View user profile (u)" -> "View user card (u)"
- [ ] Menu option on user card: "View full profile" -> "View profile"
- Custom profile fields:
- [ ] "Summary" column name -> "Card"
- [ ] "Display in profile summary" -> "Display on user card"
We should also check whether any other places are affected.
Help center:
- [ ] Whenever instructions mention opening the profile card, we should name it explicitly, e.g.,:
> Click on the ellipsis () to the right of their name. -> Click on the ellipsis () to the right of their name to open their **user card**.
- [ ] Some pages known to be affected: https://zulip.com/help/view-someones-profile, https://zulip.com/help/custom-profile-fields, any pages that have "Via the user's profile" instructions.
We should do an overall audit to make sure the terminology is consistent (can be done as a follow-up).
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `zerver/lib/markdown/tabbed_sections.py`
Content:
```
1 import re
2 from typing import Any, Dict, List, Mapping, Optional
3
4 import markdown
5 from markdown.extensions import Extension
6 from markdown.preprocessors import Preprocessor
7
8 from zerver.lib.markdown.priorities import PREPROCESSOR_PRIORITES
9
10 START_TABBED_SECTION_REGEX = re.compile(r"^\{start_tabs\}$")
11 END_TABBED_SECTION_REGEX = re.compile(r"^\{end_tabs\}$")
12 TAB_CONTENT_REGEX = re.compile(r"^\{tab\|\s*(.+?)\s*\}$")
13
14 CODE_SECTION_TEMPLATE = """
15 <div class="code-section {tab_class}" markdown="1">
16 {nav_bar}
17 <div class="blocks">
18 {blocks}
19 </div>
20 </div>
21 """.strip()
22
23 NAV_BAR_TEMPLATE = """
24 <ul class="nav">
25 {tabs}
26 </ul>
27 """.strip()
28
29 NAV_LIST_ITEM_TEMPLATE = """
30 <li data-language="{data_language}" tabindex="0">{label}</li>
31 """.strip()
32
33 DIV_TAB_CONTENT_TEMPLATE = """
34 <div data-language="{data_language}" markdown="1">
35 {content}
36 </div>
37 """.strip()
38
39 # If adding new entries here, also check if you need to update
40 # tabbed-instructions.js
41 TAB_SECTION_LABELS = {
42 "desktop-web": "Desktop/Web",
43 "ios": "iOS",
44 "android": "Android",
45 "mac": "macOS",
46 "windows": "Windows",
47 "linux": "Linux",
48 "python": "Python",
49 "js": "JavaScript",
50 "curl": "curl",
51 "zulip-send": "zulip-send",
52 "web": "Web",
53 "desktop": "Desktop",
54 "mobile": "Mobile",
55 "mm-default": "Default installation",
56 "mm-docker": "Docker",
57 "mm-gitlab-omnibus": "GitLab Omnibus",
58 "require-invitations": "Require invitations",
59 "allow-anyone-to-join": "Allow anyone to join",
60 "restrict-by-email-domain": "Restrict by email domain",
61 "zoom": "Zoom",
62 "jitsi-meet": "Jitsi Meet",
63 "bigbluebutton": "BigBlueButton",
64 "disable": "Disabled",
65 "chrome": "Chrome",
66 "firefox": "Firefox",
67 "desktop-app": "Desktop app",
68 "system-proxy-settings": "System proxy settings",
69 "custom-proxy-settings": "Custom proxy settings",
70 "stream": "From a stream view",
71 "not-stream": "From other views",
72 "via-recent-conversations": "Via recent conversations",
73 "via-left-sidebar": "Via left sidebar",
74 "instructions-for-all-platforms": "Instructions for all platforms",
75 "public-streams": "Public streams",
76 "private-streams": "Private streams",
77 "web-public-streams": "Web-public streams",
78 "via-user-profile": "Via the user's profile",
79 "via-organization-settings": "Via organization settings",
80 "via-personal-settings": "Via personal settings",
81 "default-subdomain": "Default subdomain",
82 "custom-subdomain": "Custom subdomain",
83 "zulip-cloud": "Zulip Cloud",
84 "self-hosting": "Self hosting",
85 "okta": "Okta",
86 "onelogin": "OneLogin",
87 "azuread": "AzureAD",
88 "keycloak": "Keycloak",
89 "logged-in": "If you are logged in",
90 "logged-out": "If you are logged out",
91 }
92
93
94 class TabbedSectionsGenerator(Extension):
95 def extendMarkdown(self, md: markdown.Markdown) -> None:
96 md.preprocessors.register(
97 TabbedSectionsPreprocessor(md, self.getConfigs()),
98 "tabbed_sections",
99 PREPROCESSOR_PRIORITES["tabbed_sections"],
100 )
101
102
103 class TabbedSectionsPreprocessor(Preprocessor):
104 def __init__(self, md: markdown.Markdown, config: Mapping[str, Any]) -> None:
105 super().__init__(md)
106
107 def run(self, lines: List[str]) -> List[str]:
108 tab_section = self.parse_tabs(lines)
109 while tab_section:
110 if "tabs" in tab_section:
111 tab_class = "has-tabs"
112 else:
113 tab_class = "no-tabs"
114 tab_section["tabs"] = [
115 {
116 "tab_name": "instructions-for-all-platforms",
117 "start": tab_section["start_tabs_index"],
118 }
119 ]
120 nav_bar = self.generate_nav_bar(tab_section)
121 content_blocks = self.generate_content_blocks(tab_section, lines)
122 rendered_tabs = CODE_SECTION_TEMPLATE.format(
123 tab_class=tab_class, nav_bar=nav_bar, blocks=content_blocks
124 )
125
126 start = tab_section["start_tabs_index"]
127 end = tab_section["end_tabs_index"] + 1
128 lines = [*lines[:start], rendered_tabs, *lines[end:]]
129 tab_section = self.parse_tabs(lines)
130 return lines
131
132 def generate_content_blocks(self, tab_section: Dict[str, Any], lines: List[str]) -> str:
133 tab_content_blocks = []
134 for index, tab in enumerate(tab_section["tabs"]):
135 start_index = tab["start"] + 1
136 try:
137 # If there are more tabs, we can use the starting index
138 # of the next tab as the ending index of the previous one
139 end_index = tab_section["tabs"][index + 1]["start"]
140 except IndexError:
141 # Otherwise, just use the end of the entire section
142 end_index = tab_section["end_tabs_index"]
143
144 content = "\n".join(lines[start_index:end_index]).strip()
145 tab_content_block = DIV_TAB_CONTENT_TEMPLATE.format(
146 data_language=tab["tab_name"],
147 # Wrapping the content in two newlines is necessary here.
148 # If we don't do this, the inner Markdown does not get
149 # rendered properly.
150 content=f"\n{content}\n",
151 )
152 tab_content_blocks.append(tab_content_block)
153 return "\n".join(tab_content_blocks)
154
155 def generate_nav_bar(self, tab_section: Dict[str, Any]) -> str:
156 li_elements = []
157 for tab in tab_section["tabs"]:
158 tab_name = tab.get("tab_name")
159 tab_label = TAB_SECTION_LABELS.get(tab_name)
160 if tab_label is None:
161 raise ValueError(
162 f"Tab '{tab_name}' is not present in TAB_SECTION_LABELS in zerver/lib/markdown/tabbed_sections.py"
163 )
164
165 li = NAV_LIST_ITEM_TEMPLATE.format(data_language=tab_name, label=tab_label)
166 li_elements.append(li)
167
168 return NAV_BAR_TEMPLATE.format(tabs="\n".join(li_elements))
169
170 def parse_tabs(self, lines: List[str]) -> Optional[Dict[str, Any]]:
171 block: Dict[str, Any] = {}
172 for index, line in enumerate(lines):
173 start_match = START_TABBED_SECTION_REGEX.search(line)
174 if start_match:
175 block["start_tabs_index"] = index
176
177 tab_content_match = TAB_CONTENT_REGEX.search(line)
178 if tab_content_match:
179 block.setdefault("tabs", [])
180 tab = {"start": index, "tab_name": tab_content_match.group(1)}
181 block["tabs"].append(tab)
182
183 end_match = END_TABBED_SECTION_REGEX.search(line)
184 if end_match:
185 block["end_tabs_index"] = index
186 break
187 return block
188
189
190 def makeExtension(*args: Any, **kwargs: str) -> TabbedSectionsGenerator:
191 return TabbedSectionsGenerator(**kwargs)
192
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/zerver/lib/markdown/tabbed_sections.py b/zerver/lib/markdown/tabbed_sections.py
--- a/zerver/lib/markdown/tabbed_sections.py
+++ b/zerver/lib/markdown/tabbed_sections.py
@@ -75,7 +75,7 @@
"public-streams": "Public streams",
"private-streams": "Private streams",
"web-public-streams": "Web-public streams",
- "via-user-profile": "Via the user's profile",
+ "via-user-card": "Via user card",
"via-organization-settings": "Via organization settings",
"via-personal-settings": "Via personal settings",
"default-subdomain": "Default subdomain",
| {"golden_diff": "diff --git a/zerver/lib/markdown/tabbed_sections.py b/zerver/lib/markdown/tabbed_sections.py\n--- a/zerver/lib/markdown/tabbed_sections.py\n+++ b/zerver/lib/markdown/tabbed_sections.py\n@@ -75,7 +75,7 @@\n \"public-streams\": \"Public streams\",\n \"private-streams\": \"Private streams\",\n \"web-public-streams\": \"Web-public streams\",\n- \"via-user-profile\": \"Via the user's profile\",\n+ \"via-user-card\": \"Via user card\",\n \"via-organization-settings\": \"Via organization settings\",\n \"via-personal-settings\": \"Via personal settings\",\n \"default-subdomain\": \"Default subdomain\",\n", "issue": "Use consistent naming for user (profile) menus\nAs [discussed on CZO](https://chat.zulip.org/#narrow/stream/101-design/topic/terms.20for.20user.20profiles), we should develop consistent naming for the pop-up you seen when you click on a user avatar, as well as the detailed user profile.\r\n\r\nSpecifically, the naming we want to implement is:\r\n\r\n1. \"User card\" for the initial user view.\r\n\r\n\r\n2. \"User profile\" for the detailed profile.\r\n\r\n\r\nKnown places where changes need to be made:\r\n\r\n- [ ] Tooltip on user avatar: \"View user profile (u)\" -> \"View user card (u)\"\r\n- [ ] Menu option on user card: \"View full profile\" -> \"View profile\"\r\n- Custom profile fields:\r\n - [ ] \"Summary\" column name -> \"Card\"\r\n - [ ] \"Display in profile summary\" -> \"Display on user card\"\r\n \r\nWe should also check whether any other places are affected.\r\n\r\nHelp center:\r\n\r\n- [ ] Whenever instructions mention opening the profile card, we should name it explicitly, e.g.,:\r\n> Click on the ellipsis () to the right of their name. -> Click on the ellipsis () to the right of their name to open their **user card**.\r\n- [ ] Some pages known to be affected: https://zulip.com/help/view-someones-profile, https://zulip.com/help/custom-profile-fields, any pages that have \"Via the user's profile\" instructions.\r\n\r\nWe should do an overall audit to make sure the terminology is consistent (can be done as a follow-up).\r\n\r\n\n", "before_files": [{"content": "import re\nfrom typing import Any, Dict, List, Mapping, Optional\n\nimport markdown\nfrom markdown.extensions import Extension\nfrom markdown.preprocessors import Preprocessor\n\nfrom zerver.lib.markdown.priorities import PREPROCESSOR_PRIORITES\n\nSTART_TABBED_SECTION_REGEX = re.compile(r\"^\\{start_tabs\\}$\")\nEND_TABBED_SECTION_REGEX = re.compile(r\"^\\{end_tabs\\}$\")\nTAB_CONTENT_REGEX = re.compile(r\"^\\{tab\\|\\s*(.+?)\\s*\\}$\")\n\nCODE_SECTION_TEMPLATE = \"\"\"\n<div class=\"code-section {tab_class}\" markdown=\"1\">\n{nav_bar}\n<div class=\"blocks\">\n{blocks}\n</div>\n</div>\n\"\"\".strip()\n\nNAV_BAR_TEMPLATE = \"\"\"\n<ul class=\"nav\">\n{tabs}\n</ul>\n\"\"\".strip()\n\nNAV_LIST_ITEM_TEMPLATE = \"\"\"\n<li data-language=\"{data_language}\" tabindex=\"0\">{label}</li>\n\"\"\".strip()\n\nDIV_TAB_CONTENT_TEMPLATE = \"\"\"\n<div data-language=\"{data_language}\" markdown=\"1\">\n{content}\n</div>\n\"\"\".strip()\n\n# If adding new entries here, also check if you need to update\n# tabbed-instructions.js\nTAB_SECTION_LABELS = {\n \"desktop-web\": \"Desktop/Web\",\n \"ios\": \"iOS\",\n \"android\": \"Android\",\n \"mac\": \"macOS\",\n \"windows\": \"Windows\",\n \"linux\": \"Linux\",\n \"python\": \"Python\",\n \"js\": \"JavaScript\",\n \"curl\": \"curl\",\n \"zulip-send\": \"zulip-send\",\n \"web\": \"Web\",\n \"desktop\": \"Desktop\",\n \"mobile\": \"Mobile\",\n \"mm-default\": \"Default 
installation\",\n \"mm-docker\": \"Docker\",\n \"mm-gitlab-omnibus\": \"GitLab Omnibus\",\n \"require-invitations\": \"Require invitations\",\n \"allow-anyone-to-join\": \"Allow anyone to join\",\n \"restrict-by-email-domain\": \"Restrict by email domain\",\n \"zoom\": \"Zoom\",\n \"jitsi-meet\": \"Jitsi Meet\",\n \"bigbluebutton\": \"BigBlueButton\",\n \"disable\": \"Disabled\",\n \"chrome\": \"Chrome\",\n \"firefox\": \"Firefox\",\n \"desktop-app\": \"Desktop app\",\n \"system-proxy-settings\": \"System proxy settings\",\n \"custom-proxy-settings\": \"Custom proxy settings\",\n \"stream\": \"From a stream view\",\n \"not-stream\": \"From other views\",\n \"via-recent-conversations\": \"Via recent conversations\",\n \"via-left-sidebar\": \"Via left sidebar\",\n \"instructions-for-all-platforms\": \"Instructions for all platforms\",\n \"public-streams\": \"Public streams\",\n \"private-streams\": \"Private streams\",\n \"web-public-streams\": \"Web-public streams\",\n \"via-user-profile\": \"Via the user's profile\",\n \"via-organization-settings\": \"Via organization settings\",\n \"via-personal-settings\": \"Via personal settings\",\n \"default-subdomain\": \"Default subdomain\",\n \"custom-subdomain\": \"Custom subdomain\",\n \"zulip-cloud\": \"Zulip Cloud\",\n \"self-hosting\": \"Self hosting\",\n \"okta\": \"Okta\",\n \"onelogin\": \"OneLogin\",\n \"azuread\": \"AzureAD\",\n \"keycloak\": \"Keycloak\",\n \"logged-in\": \"If you are logged in\",\n \"logged-out\": \"If you are logged out\",\n}\n\n\nclass TabbedSectionsGenerator(Extension):\n def extendMarkdown(self, md: markdown.Markdown) -> None:\n md.preprocessors.register(\n TabbedSectionsPreprocessor(md, self.getConfigs()),\n \"tabbed_sections\",\n PREPROCESSOR_PRIORITES[\"tabbed_sections\"],\n )\n\n\nclass TabbedSectionsPreprocessor(Preprocessor):\n def __init__(self, md: markdown.Markdown, config: Mapping[str, Any]) -> None:\n super().__init__(md)\n\n def run(self, lines: List[str]) -> List[str]:\n tab_section = self.parse_tabs(lines)\n while tab_section:\n if \"tabs\" in tab_section:\n tab_class = \"has-tabs\"\n else:\n tab_class = \"no-tabs\"\n tab_section[\"tabs\"] = [\n {\n \"tab_name\": \"instructions-for-all-platforms\",\n \"start\": tab_section[\"start_tabs_index\"],\n }\n ]\n nav_bar = self.generate_nav_bar(tab_section)\n content_blocks = self.generate_content_blocks(tab_section, lines)\n rendered_tabs = CODE_SECTION_TEMPLATE.format(\n tab_class=tab_class, nav_bar=nav_bar, blocks=content_blocks\n )\n\n start = tab_section[\"start_tabs_index\"]\n end = tab_section[\"end_tabs_index\"] + 1\n lines = [*lines[:start], rendered_tabs, *lines[end:]]\n tab_section = self.parse_tabs(lines)\n return lines\n\n def generate_content_blocks(self, tab_section: Dict[str, Any], lines: List[str]) -> str:\n tab_content_blocks = []\n for index, tab in enumerate(tab_section[\"tabs\"]):\n start_index = tab[\"start\"] + 1\n try:\n # If there are more tabs, we can use the starting index\n # of the next tab as the ending index of the previous one\n end_index = tab_section[\"tabs\"][index + 1][\"start\"]\n except IndexError:\n # Otherwise, just use the end of the entire section\n end_index = tab_section[\"end_tabs_index\"]\n\n content = \"\\n\".join(lines[start_index:end_index]).strip()\n tab_content_block = DIV_TAB_CONTENT_TEMPLATE.format(\n data_language=tab[\"tab_name\"],\n # Wrapping the content in two newlines is necessary here.\n # If we don't do this, the inner Markdown does not get\n # rendered properly.\n content=f\"\\n{content}\\n\",\n )\n 
tab_content_blocks.append(tab_content_block)\n return \"\\n\".join(tab_content_blocks)\n\n def generate_nav_bar(self, tab_section: Dict[str, Any]) -> str:\n li_elements = []\n for tab in tab_section[\"tabs\"]:\n tab_name = tab.get(\"tab_name\")\n tab_label = TAB_SECTION_LABELS.get(tab_name)\n if tab_label is None:\n raise ValueError(\n f\"Tab '{tab_name}' is not present in TAB_SECTION_LABELS in zerver/lib/markdown/tabbed_sections.py\"\n )\n\n li = NAV_LIST_ITEM_TEMPLATE.format(data_language=tab_name, label=tab_label)\n li_elements.append(li)\n\n return NAV_BAR_TEMPLATE.format(tabs=\"\\n\".join(li_elements))\n\n def parse_tabs(self, lines: List[str]) -> Optional[Dict[str, Any]]:\n block: Dict[str, Any] = {}\n for index, line in enumerate(lines):\n start_match = START_TABBED_SECTION_REGEX.search(line)\n if start_match:\n block[\"start_tabs_index\"] = index\n\n tab_content_match = TAB_CONTENT_REGEX.search(line)\n if tab_content_match:\n block.setdefault(\"tabs\", [])\n tab = {\"start\": index, \"tab_name\": tab_content_match.group(1)}\n block[\"tabs\"].append(tab)\n\n end_match = END_TABBED_SECTION_REGEX.search(line)\n if end_match:\n block[\"end_tabs_index\"] = index\n break\n return block\n\n\ndef makeExtension(*args: Any, **kwargs: str) -> TabbedSectionsGenerator:\n return TabbedSectionsGenerator(**kwargs)\n", "path": "zerver/lib/markdown/tabbed_sections.py"}], "after_files": [{"content": "import re\nfrom typing import Any, Dict, List, Mapping, Optional\n\nimport markdown\nfrom markdown.extensions import Extension\nfrom markdown.preprocessors import Preprocessor\n\nfrom zerver.lib.markdown.priorities import PREPROCESSOR_PRIORITES\n\nSTART_TABBED_SECTION_REGEX = re.compile(r\"^\\{start_tabs\\}$\")\nEND_TABBED_SECTION_REGEX = re.compile(r\"^\\{end_tabs\\}$\")\nTAB_CONTENT_REGEX = re.compile(r\"^\\{tab\\|\\s*(.+?)\\s*\\}$\")\n\nCODE_SECTION_TEMPLATE = \"\"\"\n<div class=\"code-section {tab_class}\" markdown=\"1\">\n{nav_bar}\n<div class=\"blocks\">\n{blocks}\n</div>\n</div>\n\"\"\".strip()\n\nNAV_BAR_TEMPLATE = \"\"\"\n<ul class=\"nav\">\n{tabs}\n</ul>\n\"\"\".strip()\n\nNAV_LIST_ITEM_TEMPLATE = \"\"\"\n<li data-language=\"{data_language}\" tabindex=\"0\">{label}</li>\n\"\"\".strip()\n\nDIV_TAB_CONTENT_TEMPLATE = \"\"\"\n<div data-language=\"{data_language}\" markdown=\"1\">\n{content}\n</div>\n\"\"\".strip()\n\n# If adding new entries here, also check if you need to update\n# tabbed-instructions.js\nTAB_SECTION_LABELS = {\n \"desktop-web\": \"Desktop/Web\",\n \"ios\": \"iOS\",\n \"android\": \"Android\",\n \"mac\": \"macOS\",\n \"windows\": \"Windows\",\n \"linux\": \"Linux\",\n \"python\": \"Python\",\n \"js\": \"JavaScript\",\n \"curl\": \"curl\",\n \"zulip-send\": \"zulip-send\",\n \"web\": \"Web\",\n \"desktop\": \"Desktop\",\n \"mobile\": \"Mobile\",\n \"mm-default\": \"Default installation\",\n \"mm-docker\": \"Docker\",\n \"mm-gitlab-omnibus\": \"GitLab Omnibus\",\n \"require-invitations\": \"Require invitations\",\n \"allow-anyone-to-join\": \"Allow anyone to join\",\n \"restrict-by-email-domain\": \"Restrict by email domain\",\n \"zoom\": \"Zoom\",\n \"jitsi-meet\": \"Jitsi Meet\",\n \"bigbluebutton\": \"BigBlueButton\",\n \"disable\": \"Disabled\",\n \"chrome\": \"Chrome\",\n \"firefox\": \"Firefox\",\n \"desktop-app\": \"Desktop app\",\n \"system-proxy-settings\": \"System proxy settings\",\n \"custom-proxy-settings\": \"Custom proxy settings\",\n \"stream\": \"From a stream view\",\n \"not-stream\": \"From other views\",\n \"via-recent-conversations\": \"Via recent 
conversations\",\n \"via-left-sidebar\": \"Via left sidebar\",\n \"instructions-for-all-platforms\": \"Instructions for all platforms\",\n \"public-streams\": \"Public streams\",\n \"private-streams\": \"Private streams\",\n \"web-public-streams\": \"Web-public streams\",\n \"via-user-card\": \"Via user card\",\n \"via-organization-settings\": \"Via organization settings\",\n \"via-personal-settings\": \"Via personal settings\",\n \"default-subdomain\": \"Default subdomain\",\n \"custom-subdomain\": \"Custom subdomain\",\n \"zulip-cloud\": \"Zulip Cloud\",\n \"self-hosting\": \"Self hosting\",\n \"okta\": \"Okta\",\n \"onelogin\": \"OneLogin\",\n \"azuread\": \"AzureAD\",\n \"keycloak\": \"Keycloak\",\n \"logged-in\": \"If you are logged in\",\n \"logged-out\": \"If you are logged out\",\n}\n\n\nclass TabbedSectionsGenerator(Extension):\n def extendMarkdown(self, md: markdown.Markdown) -> None:\n md.preprocessors.register(\n TabbedSectionsPreprocessor(md, self.getConfigs()),\n \"tabbed_sections\",\n PREPROCESSOR_PRIORITES[\"tabbed_sections\"],\n )\n\n\nclass TabbedSectionsPreprocessor(Preprocessor):\n def __init__(self, md: markdown.Markdown, config: Mapping[str, Any]) -> None:\n super().__init__(md)\n\n def run(self, lines: List[str]) -> List[str]:\n tab_section = self.parse_tabs(lines)\n while tab_section:\n if \"tabs\" in tab_section:\n tab_class = \"has-tabs\"\n else:\n tab_class = \"no-tabs\"\n tab_section[\"tabs\"] = [\n {\n \"tab_name\": \"instructions-for-all-platforms\",\n \"start\": tab_section[\"start_tabs_index\"],\n }\n ]\n nav_bar = self.generate_nav_bar(tab_section)\n content_blocks = self.generate_content_blocks(tab_section, lines)\n rendered_tabs = CODE_SECTION_TEMPLATE.format(\n tab_class=tab_class, nav_bar=nav_bar, blocks=content_blocks\n )\n\n start = tab_section[\"start_tabs_index\"]\n end = tab_section[\"end_tabs_index\"] + 1\n lines = [*lines[:start], rendered_tabs, *lines[end:]]\n tab_section = self.parse_tabs(lines)\n return lines\n\n def generate_content_blocks(self, tab_section: Dict[str, Any], lines: List[str]) -> str:\n tab_content_blocks = []\n for index, tab in enumerate(tab_section[\"tabs\"]):\n start_index = tab[\"start\"] + 1\n try:\n # If there are more tabs, we can use the starting index\n # of the next tab as the ending index of the previous one\n end_index = tab_section[\"tabs\"][index + 1][\"start\"]\n except IndexError:\n # Otherwise, just use the end of the entire section\n end_index = tab_section[\"end_tabs_index\"]\n\n content = \"\\n\".join(lines[start_index:end_index]).strip()\n tab_content_block = DIV_TAB_CONTENT_TEMPLATE.format(\n data_language=tab[\"tab_name\"],\n # Wrapping the content in two newlines is necessary here.\n # If we don't do this, the inner Markdown does not get\n # rendered properly.\n content=f\"\\n{content}\\n\",\n )\n tab_content_blocks.append(tab_content_block)\n return \"\\n\".join(tab_content_blocks)\n\n def generate_nav_bar(self, tab_section: Dict[str, Any]) -> str:\n li_elements = []\n for tab in tab_section[\"tabs\"]:\n tab_name = tab.get(\"tab_name\")\n tab_label = TAB_SECTION_LABELS.get(tab_name)\n if tab_label is None:\n raise ValueError(\n f\"Tab '{tab_name}' is not present in TAB_SECTION_LABELS in zerver/lib/markdown/tabbed_sections.py\"\n )\n\n li = NAV_LIST_ITEM_TEMPLATE.format(data_language=tab_name, label=tab_label)\n li_elements.append(li)\n\n return NAV_BAR_TEMPLATE.format(tabs=\"\\n\".join(li_elements))\n\n def parse_tabs(self, lines: List[str]) -> Optional[Dict[str, Any]]:\n block: Dict[str, Any] = {}\n 
for index, line in enumerate(lines):\n start_match = START_TABBED_SECTION_REGEX.search(line)\n if start_match:\n block[\"start_tabs_index\"] = index\n\n tab_content_match = TAB_CONTENT_REGEX.search(line)\n if tab_content_match:\n block.setdefault(\"tabs\", [])\n tab = {\"start\": index, \"tab_name\": tab_content_match.group(1)}\n block[\"tabs\"].append(tab)\n\n end_match = END_TABBED_SECTION_REGEX.search(line)\n if end_match:\n block[\"end_tabs_index\"] = index\n break\n return block\n\n\ndef makeExtension(*args: Any, **kwargs: str) -> TabbedSectionsGenerator:\n return TabbedSectionsGenerator(**kwargs)\n", "path": "zerver/lib/markdown/tabbed_sections.py"}]} | 2,848 | 153 |
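One practical consequence of renaming the `via-user-profile` key in the patch above: any help-center article still declaring the old tab name will fail the Markdown build, because `generate_nav_bar` only accepts names present in `TAB_SECTION_LABELS`. A small sketch of that check in isolation (labels trimmed to the relevant entries):

```python
TAB_SECTION_LABELS = {
    "via-user-card": "Via user card",  # new key after the rename
    "via-personal-settings": "Via personal settings",
}


def label_for(tab_name: str) -> str:
    label = TAB_SECTION_LABELS.get(tab_name)
    if label is None:
        # mirrors the ValueError raised in generate_nav_bar()
        raise ValueError(
            f"Tab '{tab_name}' is not present in TAB_SECTION_LABELS "
            "in zerver/lib/markdown/tabbed_sections.py"
        )
    return label


print(label_for("via-user-card"))   # "Via user card"
# label_for("via-user-profile")     # would raise ValueError -- docs must use the new name
```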
gh_patches_debug_28420 | rasdani/github-patches | git_diff | huggingface__diffusers-4625 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Unable to load Automatic1111 LoRa due rank being higher than "min(in_features, out_features)}"
### Describe the bug
Hi! I'm unable to load a LoRa as I get the following error.
`ValueError: LoRA rank 384 must be less or equal than 320`
The LoRa works perfectly if the ValueError check is removed from the code. Is it possible to override this error? I don't understand why it must be less or equal. Thanks!
### Reproduction
Run the inference script with [this](https://civitai.com/models/7227/urushi) LoRa
Get error about the LoRa rank
### Logs
```
File "/home/wizard/pipelines/utils.py", line 465, in load_lora_weights_diffusers
loader.load_lora_into_unet(state_dict, network_alphas=network_alphas, unet=unet)
File "/home/wizard/lib/python3.9/site-packages/diffusers/loaders.py", line 1233, in load_lora_into_unet
unet.load_attn_procs(state_dict, network_alphas=network_alphas)
File "/home/wizardlib/python3.9/site-packages/diffusers/loaders.py", line 376, in load_attn_procs
lora = LoRAConv2dLayer(
File "/home/wizard/lib/python3.9/site-packages/diffusers/models/lora.py", line 58, in __init__
raise ValueError(f"LoRA rank {rank} must be less or equal than {min(in_features, out_features)}")
```
### System Info
- `diffusers` version: 0.19.3
- Platform: Linux-5.15.107+-x86_64-with-glibc2.31
- Python version: 3.9.16
- PyTorch version (GPU?): 2.0.1+cu118 (True)
- Huggingface_hub version: 0.16.4
- Transformers version: 4.29.2
- Accelerate version: 0.16.0
- xFormers version: 0.0.20
- Using GPU in script?: yes
- Using distributed or parallel set-up in script?: no
### Who can help?
@williamberman @patrickvonplaten @saya
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/diffusers/models/lora.py`
Content:
```
1 # Copyright 2023 The HuggingFace Team. All rights reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 from typing import Optional
16
17 import torch.nn.functional as F
18 from torch import nn
19
20
21 class LoRALinearLayer(nn.Module):
22 def __init__(self, in_features, out_features, rank=4, network_alpha=None, device=None, dtype=None):
23 super().__init__()
24
25 if rank > min(in_features, out_features):
26 raise ValueError(f"LoRA rank {rank} must be less or equal than {min(in_features, out_features)}")
27
28 self.down = nn.Linear(in_features, rank, bias=False, device=device, dtype=dtype)
29 self.up = nn.Linear(rank, out_features, bias=False, device=device, dtype=dtype)
30 # This value has the same meaning as the `--network_alpha` option in the kohya-ss trainer script.
31 # See https://github.com/darkstorm2150/sd-scripts/blob/main/docs/train_network_README-en.md#execute-learning
32 self.network_alpha = network_alpha
33 self.rank = rank
34
35 nn.init.normal_(self.down.weight, std=1 / rank)
36 nn.init.zeros_(self.up.weight)
37
38 def forward(self, hidden_states):
39 orig_dtype = hidden_states.dtype
40 dtype = self.down.weight.dtype
41
42 down_hidden_states = self.down(hidden_states.to(dtype))
43 up_hidden_states = self.up(down_hidden_states)
44
45 if self.network_alpha is not None:
46 up_hidden_states *= self.network_alpha / self.rank
47
48 return up_hidden_states.to(orig_dtype)
49
50
51 class LoRAConv2dLayer(nn.Module):
52 def __init__(
53 self, in_features, out_features, rank=4, kernel_size=(1, 1), stride=(1, 1), padding=0, network_alpha=None
54 ):
55 super().__init__()
56
57 if rank > min(in_features, out_features):
58 raise ValueError(f"LoRA rank {rank} must be less or equal than {min(in_features, out_features)}")
59
60 self.down = nn.Conv2d(in_features, rank, kernel_size=kernel_size, stride=stride, padding=padding, bias=False)
61 # according to the official kohya_ss trainer kernel_size are always fixed for the up layer
62 # # see: https://github.com/bmaltais/kohya_ss/blob/2accb1305979ba62f5077a23aabac23b4c37e935/networks/lora_diffusers.py#L129
63 self.up = nn.Conv2d(rank, out_features, kernel_size=(1, 1), stride=(1, 1), bias=False)
64
65 # This value has the same meaning as the `--network_alpha` option in the kohya-ss trainer script.
66 # See https://github.com/darkstorm2150/sd-scripts/blob/main/docs/train_network_README-en.md#execute-learning
67 self.network_alpha = network_alpha
68 self.rank = rank
69
70 nn.init.normal_(self.down.weight, std=1 / rank)
71 nn.init.zeros_(self.up.weight)
72
73 def forward(self, hidden_states):
74 orig_dtype = hidden_states.dtype
75 dtype = self.down.weight.dtype
76
77 down_hidden_states = self.down(hidden_states.to(dtype))
78 up_hidden_states = self.up(down_hidden_states)
79
80 if self.network_alpha is not None:
81 up_hidden_states *= self.network_alpha / self.rank
82
83 return up_hidden_states.to(orig_dtype)
84
85
86 class LoRACompatibleConv(nn.Conv2d):
87 """
88 A convolutional layer that can be used with LoRA.
89 """
90
91 def __init__(self, *args, lora_layer: Optional[LoRAConv2dLayer] = None, **kwargs):
92 super().__init__(*args, **kwargs)
93 self.lora_layer = lora_layer
94
95 def set_lora_layer(self, lora_layer: Optional[LoRAConv2dLayer]):
96 self.lora_layer = lora_layer
97
98 def forward(self, x):
99 if self.lora_layer is None:
100 # make sure to the functional Conv2D function as otherwise torch.compile's graph will break
101 # see: https://github.com/huggingface/diffusers/pull/4315
102 return F.conv2d(x, self.weight, self.bias, self.stride, self.padding, self.dilation, self.groups)
103 else:
104 return super().forward(x) + self.lora_layer(x)
105
106
107 class LoRACompatibleLinear(nn.Linear):
108 """
109 A Linear layer that can be used with LoRA.
110 """
111
112 def __init__(self, *args, lora_layer: Optional[LoRALinearLayer] = None, **kwargs):
113 super().__init__(*args, **kwargs)
114 self.lora_layer = lora_layer
115
116 def set_lora_layer(self, lora_layer: Optional[LoRAConv2dLayer]):
117 self.lora_layer = lora_layer
118
119 def forward(self, x):
120 if self.lora_layer is None:
121 return super().forward(x)
122 else:
123 return super().forward(x) + self.lora_layer(x)
124
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/diffusers/models/lora.py b/src/diffusers/models/lora.py
--- a/src/diffusers/models/lora.py
+++ b/src/diffusers/models/lora.py
@@ -22,9 +22,6 @@
def __init__(self, in_features, out_features, rank=4, network_alpha=None, device=None, dtype=None):
super().__init__()
- if rank > min(in_features, out_features):
- raise ValueError(f"LoRA rank {rank} must be less or equal than {min(in_features, out_features)}")
-
self.down = nn.Linear(in_features, rank, bias=False, device=device, dtype=dtype)
self.up = nn.Linear(rank, out_features, bias=False, device=device, dtype=dtype)
# This value has the same meaning as the `--network_alpha` option in the kohya-ss trainer script.
@@ -54,9 +51,6 @@
):
super().__init__()
- if rank > min(in_features, out_features):
- raise ValueError(f"LoRA rank {rank} must be less or equal than {min(in_features, out_features)}")
-
self.down = nn.Conv2d(in_features, rank, kernel_size=kernel_size, stride=stride, padding=padding, bias=False)
# according to the official kohya_ss trainer kernel_size are always fixed for the up layer
# # see: https://github.com/bmaltais/kohya_ss/blob/2accb1305979ba62f5077a23aabac23b4c37e935/networks/lora_diffusers.py#L129
| {"golden_diff": "diff --git a/src/diffusers/models/lora.py b/src/diffusers/models/lora.py\n--- a/src/diffusers/models/lora.py\n+++ b/src/diffusers/models/lora.py\n@@ -22,9 +22,6 @@\n def __init__(self, in_features, out_features, rank=4, network_alpha=None, device=None, dtype=None):\n super().__init__()\n \n- if rank > min(in_features, out_features):\n- raise ValueError(f\"LoRA rank {rank} must be less or equal than {min(in_features, out_features)}\")\n-\n self.down = nn.Linear(in_features, rank, bias=False, device=device, dtype=dtype)\n self.up = nn.Linear(rank, out_features, bias=False, device=device, dtype=dtype)\n # This value has the same meaning as the `--network_alpha` option in the kohya-ss trainer script.\n@@ -54,9 +51,6 @@\n ):\n super().__init__()\n \n- if rank > min(in_features, out_features):\n- raise ValueError(f\"LoRA rank {rank} must be less or equal than {min(in_features, out_features)}\")\n-\n self.down = nn.Conv2d(in_features, rank, kernel_size=kernel_size, stride=stride, padding=padding, bias=False)\n # according to the official kohya_ss trainer kernel_size are always fixed for the up layer\n # # see: https://github.com/bmaltais/kohya_ss/blob/2accb1305979ba62f5077a23aabac23b4c37e935/networks/lora_diffusers.py#L129\n", "issue": "Unable to load Automatic1111 LoRa due rank being higher than \"min(in_features, out_features)}\"\n### Describe the bug\r\n\r\nHi! I'm unable to load a LoRa as I get the following error.\r\n\r\n`ValueError: LoRA rank 384 must be less or equal than 320`\r\n\r\nThe LoRa works perfectly if the ValueError check is removed from the code. Is it possible to override this error? I don't understand why it must be less or equal. Thanks!\r\n\r\n### Reproduction\r\n\r\nRun the interference script with [this](https://civitai.com/models/7227/urushi) LoRa\r\nGet error about the LoRa rank\r\n\r\n### Logs\r\n\r\n```\r\n File \"/home/wizard/pipelines/utils.py\", line 465, in load_lora_weights_diffusers\r\n loader.load_lora_into_unet(state_dict, network_alphas=network_alphas, unet=unet)\r\n File \"/home/wizard/lib/python3.9/site-packages/diffusers/loaders.py\", line 1233, in load_lora_into_unet\r\n unet.load_attn_procs(state_dict, network_alphas=network_alphas)\r\n File \"/home/wizardlib/python3.9/site-packages/diffusers/loaders.py\", line 376, in load_attn_procs\r\n lora = LoRAConv2dLayer(\r\n File \"/home/wizard/lib/python3.9/site-packages/diffusers/models/lora.py\", line 58, in __init__\r\n raise ValueError(f\"LoRA rank {rank} must be less or equal than {min(in_features, out_features)}\")\r\n```\r\n\r\n\r\n### System Info\r\n\r\n- `diffusers` version: 0.19.3\r\n- Platform: Linux-5.15.107+-x86_64-with-glibc2.31\r\n- Python version: 3.9.16\r\n- PyTorch version (GPU?): 2.0.1+cu118 (True)\r\n- Huggingface_hub version: 0.16.4\r\n- Transformers version: 4.29.2\r\n- Accelerate version: 0.16.0\r\n- xFormers version: 0.0.20\r\n- Using GPU in script?: yes\r\n- Using distributed or parallel set-up in script?: no\r\n\r\n### Who can help?\r\n\r\n@williamberman @patrickvonplaten @saya\n", "before_files": [{"content": "# Copyright 2023 The HuggingFace Team. 
All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom typing import Optional\n\nimport torch.nn.functional as F\nfrom torch import nn\n\n\nclass LoRALinearLayer(nn.Module):\n def __init__(self, in_features, out_features, rank=4, network_alpha=None, device=None, dtype=None):\n super().__init__()\n\n if rank > min(in_features, out_features):\n raise ValueError(f\"LoRA rank {rank} must be less or equal than {min(in_features, out_features)}\")\n\n self.down = nn.Linear(in_features, rank, bias=False, device=device, dtype=dtype)\n self.up = nn.Linear(rank, out_features, bias=False, device=device, dtype=dtype)\n # This value has the same meaning as the `--network_alpha` option in the kohya-ss trainer script.\n # See https://github.com/darkstorm2150/sd-scripts/blob/main/docs/train_network_README-en.md#execute-learning\n self.network_alpha = network_alpha\n self.rank = rank\n\n nn.init.normal_(self.down.weight, std=1 / rank)\n nn.init.zeros_(self.up.weight)\n\n def forward(self, hidden_states):\n orig_dtype = hidden_states.dtype\n dtype = self.down.weight.dtype\n\n down_hidden_states = self.down(hidden_states.to(dtype))\n up_hidden_states = self.up(down_hidden_states)\n\n if self.network_alpha is not None:\n up_hidden_states *= self.network_alpha / self.rank\n\n return up_hidden_states.to(orig_dtype)\n\n\nclass LoRAConv2dLayer(nn.Module):\n def __init__(\n self, in_features, out_features, rank=4, kernel_size=(1, 1), stride=(1, 1), padding=0, network_alpha=None\n ):\n super().__init__()\n\n if rank > min(in_features, out_features):\n raise ValueError(f\"LoRA rank {rank} must be less or equal than {min(in_features, out_features)}\")\n\n self.down = nn.Conv2d(in_features, rank, kernel_size=kernel_size, stride=stride, padding=padding, bias=False)\n # according to the official kohya_ss trainer kernel_size are always fixed for the up layer\n # # see: https://github.com/bmaltais/kohya_ss/blob/2accb1305979ba62f5077a23aabac23b4c37e935/networks/lora_diffusers.py#L129\n self.up = nn.Conv2d(rank, out_features, kernel_size=(1, 1), stride=(1, 1), bias=False)\n\n # This value has the same meaning as the `--network_alpha` option in the kohya-ss trainer script.\n # See https://github.com/darkstorm2150/sd-scripts/blob/main/docs/train_network_README-en.md#execute-learning\n self.network_alpha = network_alpha\n self.rank = rank\n\n nn.init.normal_(self.down.weight, std=1 / rank)\n nn.init.zeros_(self.up.weight)\n\n def forward(self, hidden_states):\n orig_dtype = hidden_states.dtype\n dtype = self.down.weight.dtype\n\n down_hidden_states = self.down(hidden_states.to(dtype))\n up_hidden_states = self.up(down_hidden_states)\n\n if self.network_alpha is not None:\n up_hidden_states *= self.network_alpha / self.rank\n\n return up_hidden_states.to(orig_dtype)\n\n\nclass LoRACompatibleConv(nn.Conv2d):\n \"\"\"\n A convolutional layer that can be used with LoRA.\n \"\"\"\n\n def __init__(self, *args, lora_layer: Optional[LoRAConv2dLayer] = None, **kwargs):\n super().__init__(*args, **kwargs)\n 
self.lora_layer = lora_layer\n\n def set_lora_layer(self, lora_layer: Optional[LoRAConv2dLayer]):\n self.lora_layer = lora_layer\n\n def forward(self, x):\n if self.lora_layer is None:\n # make sure to the functional Conv2D function as otherwise torch.compile's graph will break\n # see: https://github.com/huggingface/diffusers/pull/4315\n return F.conv2d(x, self.weight, self.bias, self.stride, self.padding, self.dilation, self.groups)\n else:\n return super().forward(x) + self.lora_layer(x)\n\n\nclass LoRACompatibleLinear(nn.Linear):\n \"\"\"\n A Linear layer that can be used with LoRA.\n \"\"\"\n\n def __init__(self, *args, lora_layer: Optional[LoRALinearLayer] = None, **kwargs):\n super().__init__(*args, **kwargs)\n self.lora_layer = lora_layer\n\n def set_lora_layer(self, lora_layer: Optional[LoRAConv2dLayer]):\n self.lora_layer = lora_layer\n\n def forward(self, x):\n if self.lora_layer is None:\n return super().forward(x)\n else:\n return super().forward(x) + self.lora_layer(x)\n", "path": "src/diffusers/models/lora.py"}], "after_files": [{"content": "# Copyright 2023 The HuggingFace Team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom typing import Optional\n\nimport torch.nn.functional as F\nfrom torch import nn\n\n\nclass LoRALinearLayer(nn.Module):\n def __init__(self, in_features, out_features, rank=4, network_alpha=None, device=None, dtype=None):\n super().__init__()\n\n self.down = nn.Linear(in_features, rank, bias=False, device=device, dtype=dtype)\n self.up = nn.Linear(rank, out_features, bias=False, device=device, dtype=dtype)\n # This value has the same meaning as the `--network_alpha` option in the kohya-ss trainer script.\n # See https://github.com/darkstorm2150/sd-scripts/blob/main/docs/train_network_README-en.md#execute-learning\n self.network_alpha = network_alpha\n self.rank = rank\n\n nn.init.normal_(self.down.weight, std=1 / rank)\n nn.init.zeros_(self.up.weight)\n\n def forward(self, hidden_states):\n orig_dtype = hidden_states.dtype\n dtype = self.down.weight.dtype\n\n down_hidden_states = self.down(hidden_states.to(dtype))\n up_hidden_states = self.up(down_hidden_states)\n\n if self.network_alpha is not None:\n up_hidden_states *= self.network_alpha / self.rank\n\n return up_hidden_states.to(orig_dtype)\n\n\nclass LoRAConv2dLayer(nn.Module):\n def __init__(\n self, in_features, out_features, rank=4, kernel_size=(1, 1), stride=(1, 1), padding=0, network_alpha=None\n ):\n super().__init__()\n\n self.down = nn.Conv2d(in_features, rank, kernel_size=kernel_size, stride=stride, padding=padding, bias=False)\n # according to the official kohya_ss trainer kernel_size are always fixed for the up layer\n # # see: https://github.com/bmaltais/kohya_ss/blob/2accb1305979ba62f5077a23aabac23b4c37e935/networks/lora_diffusers.py#L129\n self.up = nn.Conv2d(rank, out_features, kernel_size=(1, 1), stride=(1, 1), bias=False)\n\n # This value has the same meaning as the `--network_alpha` option in the kohya-ss trainer script.\n # See 
https://github.com/darkstorm2150/sd-scripts/blob/main/docs/train_network_README-en.md#execute-learning\n self.network_alpha = network_alpha\n self.rank = rank\n\n nn.init.normal_(self.down.weight, std=1 / rank)\n nn.init.zeros_(self.up.weight)\n\n def forward(self, hidden_states):\n orig_dtype = hidden_states.dtype\n dtype = self.down.weight.dtype\n\n down_hidden_states = self.down(hidden_states.to(dtype))\n up_hidden_states = self.up(down_hidden_states)\n\n if self.network_alpha is not None:\n up_hidden_states *= self.network_alpha / self.rank\n\n return up_hidden_states.to(orig_dtype)\n\n\nclass LoRACompatibleConv(nn.Conv2d):\n \"\"\"\n A convolutional layer that can be used with LoRA.\n \"\"\"\n\n def __init__(self, *args, lora_layer: Optional[LoRAConv2dLayer] = None, **kwargs):\n super().__init__(*args, **kwargs)\n self.lora_layer = lora_layer\n\n def set_lora_layer(self, lora_layer: Optional[LoRAConv2dLayer]):\n self.lora_layer = lora_layer\n\n def forward(self, x):\n if self.lora_layer is None:\n # make sure to the functional Conv2D function as otherwise torch.compile's graph will break\n # see: https://github.com/huggingface/diffusers/pull/4315\n return F.conv2d(x, self.weight, self.bias, self.stride, self.padding, self.dilation, self.groups)\n else:\n return super().forward(x) + self.lora_layer(x)\n\n\nclass LoRACompatibleLinear(nn.Linear):\n \"\"\"\n A Linear layer that can be used with LoRA.\n \"\"\"\n\n def __init__(self, *args, lora_layer: Optional[LoRALinearLayer] = None, **kwargs):\n super().__init__(*args, **kwargs)\n self.lora_layer = lora_layer\n\n def set_lora_layer(self, lora_layer: Optional[LoRAConv2dLayer]):\n self.lora_layer = lora_layer\n\n def forward(self, x):\n if self.lora_layer is None:\n return super().forward(x)\n else:\n return super().forward(x) + self.lora_layer(x)\n", "path": "src/diffusers/models/lora.py"}]} | 2,295 | 374 |
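
The accepted fix in the row above is simply to drop the rank guard; nothing else in the LoRA layers changes. Below is a trimmed, stand-alone re-creation of the linear variant (not the actual diffusers class) showing that a rank larger than min(in_features, out_features), such as the 384-on-320 case from the report, now constructs and runs; the layer sizes are illustrative only.

```python
import torch
from torch import nn


class LoRALinearLayerSketch(nn.Module):
    """Minimal stand-in for the patched layer: note there is no rank check."""

    def __init__(self, in_features, out_features, rank=4, network_alpha=None):
        super().__init__()
        self.down = nn.Linear(in_features, rank, bias=False)
        self.up = nn.Linear(rank, out_features, bias=False)
        self.network_alpha = network_alpha
        self.rank = rank
        nn.init.normal_(self.down.weight, std=1 / rank)
        nn.init.zeros_(self.up.weight)

    def forward(self, hidden_states):
        out = self.up(self.down(hidden_states))
        if self.network_alpha is not None:
            out = out * (self.network_alpha / self.rank)
        return out


# Rank 384 on a 320-wide projection, as in the reported error, now works.
layer = LoRALinearLayerSketch(in_features=320, out_features=320, rank=384)
print(layer(torch.randn(2, 320)).shape)  # torch.Size([2, 320])
```
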
gh_patches_debug_18386 | rasdani/github-patches | git_diff | liqd__a4-meinberlin-1168 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
On creating a plan, "Berlin" may be selected as district
"Berlin" is part of the dropdown in the vorhaben create and update form
@MagdaN should I add this issue to the whiteboard or taiga, too?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `meinberlin/apps/plans/models.py`
Content:
```
1 from ckeditor.fields import RichTextField
2 from django.conf import settings
3 from django.core.urlresolvers import reverse
4 from django.db import models
5 from django.utils.translation import ugettext_lazy as _
6
7 from adhocracy4 import transforms
8 from adhocracy4.maps import fields as map_fields
9 from adhocracy4.models.base import UserGeneratedContentModel
10 from adhocracy4.projects import models as project_models
11 from meinberlin.apps.maps.models import MapPreset
12
13 STATUS_TODO = 0
14 STATUS_PLANNING = 1
15 STATUS_IMPLEMENTATION = 2
16 STATUS_DONE = 3
17 STATUS_STOPPED = 4
18
19 PARTICIPATION_NO = 0
20 PARTICIPATION_YES = 1
21 PARTICIPATION_UNDECIDED = 2
22
23
24 class Plan(UserGeneratedContentModel):
25 title = models.CharField(max_length=120, verbose_name=_('Title'))
26 organisation = models.ForeignKey(
27 settings.A4_ORGANISATIONS_MODEL,
28 on_delete=models.CASCADE)
29 project = models.ForeignKey(project_models.Project, blank=True, null=True)
30 point = map_fields.PointField(
31 verbose_name=_('Where can the plan be located on a map?'),
32 help_text=_('Click inside marked area on the map to set a marker. '
33 'Drag and drop the marker to change its place. '
34 'Alternatively you can use the search field to search '
35 'for an address.'))
36 point_label = models.CharField(
37 blank=True,
38 default='',
39 max_length=255,
40 verbose_name=_('Label of the location'),
41 help_text=_('This could be an address or the name of a landmark.'),
42 )
43 district = models.ForeignKey(
44 MapPreset,
45 limit_choices_to={'category__name': 'Berlin'})
46 contact = models.TextField(max_length=255, verbose_name=_('Contact'))
47 cost = models.PositiveIntegerField(blank=True, null=True,
48 verbose_name=_('Cost'))
49 description = RichTextField(verbose_name=_('Description'), blank=True)
50 category = models.CharField(max_length=255, verbose_name=_('Type of plan'))
51 status = models.SmallIntegerField(choices=(
52 (STATUS_TODO, _('Idea')),
53 (STATUS_PLANNING, _('Planning')),
54 (STATUS_IMPLEMENTATION, _('Implementation')),
55 (STATUS_DONE, _('Done')),
56 (STATUS_STOPPED, _('Stopped')),
57 ))
58 participation = models.SmallIntegerField(choices=(
59 (PARTICIPATION_YES, _('Yes')),
60 (PARTICIPATION_NO, _('No')),
61 (PARTICIPATION_UNDECIDED, _('Still undecided')),
62 ))
63
64 class Meta:
65 ordering = ['-created']
66
67 @property
68 def reference_number(self):
69 return '{:d}-{:05d}'.format(self.created.year, self.pk)
70
71 def __str__(self):
72 return self.title
73
74 def get_absolute_url(self):
75 return reverse('meinberlin_plans:plan-detail',
76 kwargs=dict(pk=self.pk, year=self.created.year))
77
78 def save(self, *args, **kwargs):
79 self.description = transforms.clean_html_field(self.description)
80 super().save(*args, **kwargs)
81
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/meinberlin/apps/plans/models.py b/meinberlin/apps/plans/models.py
--- a/meinberlin/apps/plans/models.py
+++ b/meinberlin/apps/plans/models.py
@@ -2,6 +2,7 @@
from django.conf import settings
from django.core.urlresolvers import reverse
from django.db import models
+from django.db.models import Q
from django.utils.translation import ugettext_lazy as _
from adhocracy4 import transforms
@@ -42,7 +43,7 @@
)
district = models.ForeignKey(
MapPreset,
- limit_choices_to={'category__name': 'Berlin'})
+ limit_choices_to=Q(category__name='Berlin') & ~Q(name='Berlin'))
contact = models.TextField(max_length=255, verbose_name=_('Contact'))
cost = models.PositiveIntegerField(blank=True, null=True,
verbose_name=_('Cost'))
| {"golden_diff": "diff --git a/meinberlin/apps/plans/models.py b/meinberlin/apps/plans/models.py\n--- a/meinberlin/apps/plans/models.py\n+++ b/meinberlin/apps/plans/models.py\n@@ -2,6 +2,7 @@\n from django.conf import settings\n from django.core.urlresolvers import reverse\n from django.db import models\n+from django.db.models import Q\n from django.utils.translation import ugettext_lazy as _\n \n from adhocracy4 import transforms\n@@ -42,7 +43,7 @@\n )\n district = models.ForeignKey(\n MapPreset,\n- limit_choices_to={'category__name': 'Berlin'})\n+ limit_choices_to=Q(category__name='Berlin') & ~Q(name='Berlin'))\n contact = models.TextField(max_length=255, verbose_name=_('Contact'))\n cost = models.PositiveIntegerField(blank=True, null=True,\n verbose_name=_('Cost'))\n", "issue": "On creating a plan, \"Berlin\" may be selected as district\n\"Berlin\" is part of the dropdown in the vorhaben create and update form\r\n\r\n@MagdaN should i add this issue to the whiteboard or taiga, too?\n", "before_files": [{"content": "from ckeditor.fields import RichTextField\nfrom django.conf import settings\nfrom django.core.urlresolvers import reverse\nfrom django.db import models\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom adhocracy4 import transforms\nfrom adhocracy4.maps import fields as map_fields\nfrom adhocracy4.models.base import UserGeneratedContentModel\nfrom adhocracy4.projects import models as project_models\nfrom meinberlin.apps.maps.models import MapPreset\n\nSTATUS_TODO = 0\nSTATUS_PLANNING = 1\nSTATUS_IMPLEMENTATION = 2\nSTATUS_DONE = 3\nSTATUS_STOPPED = 4\n\nPARTICIPATION_NO = 0\nPARTICIPATION_YES = 1\nPARTICIPATION_UNDECIDED = 2\n\n\nclass Plan(UserGeneratedContentModel):\n title = models.CharField(max_length=120, verbose_name=_('Title'))\n organisation = models.ForeignKey(\n settings.A4_ORGANISATIONS_MODEL,\n on_delete=models.CASCADE)\n project = models.ForeignKey(project_models.Project, blank=True, null=True)\n point = map_fields.PointField(\n verbose_name=_('Where can the plan be located on a map?'),\n help_text=_('Click inside marked area on the map to set a marker. '\n 'Drag and drop the marker to change its place. 
'\n 'Alternatively you can use the search field to search '\n 'for an address.'))\n point_label = models.CharField(\n blank=True,\n default='',\n max_length=255,\n verbose_name=_('Label of the location'),\n help_text=_('This could be an address or the name of a landmark.'),\n )\n district = models.ForeignKey(\n MapPreset,\n limit_choices_to={'category__name': 'Berlin'})\n contact = models.TextField(max_length=255, verbose_name=_('Contact'))\n cost = models.PositiveIntegerField(blank=True, null=True,\n verbose_name=_('Cost'))\n description = RichTextField(verbose_name=_('Description'), blank=True)\n category = models.CharField(max_length=255, verbose_name=_('Type of plan'))\n status = models.SmallIntegerField(choices=(\n (STATUS_TODO, _('Idea')),\n (STATUS_PLANNING, _('Planning')),\n (STATUS_IMPLEMENTATION, _('Implementation')),\n (STATUS_DONE, _('Done')),\n (STATUS_STOPPED, _('Stopped')),\n ))\n participation = models.SmallIntegerField(choices=(\n (PARTICIPATION_YES, _('Yes')),\n (PARTICIPATION_NO, _('No')),\n (PARTICIPATION_UNDECIDED, _('Still undecided')),\n ))\n\n class Meta:\n ordering = ['-created']\n\n @property\n def reference_number(self):\n return '{:d}-{:05d}'.format(self.created.year, self.pk)\n\n def __str__(self):\n return self.title\n\n def get_absolute_url(self):\n return reverse('meinberlin_plans:plan-detail',\n kwargs=dict(pk=self.pk, year=self.created.year))\n\n def save(self, *args, **kwargs):\n self.description = transforms.clean_html_field(self.description)\n super().save(*args, **kwargs)\n", "path": "meinberlin/apps/plans/models.py"}], "after_files": [{"content": "from ckeditor.fields import RichTextField\nfrom django.conf import settings\nfrom django.core.urlresolvers import reverse\nfrom django.db import models\nfrom django.db.models import Q\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom adhocracy4 import transforms\nfrom adhocracy4.maps import fields as map_fields\nfrom adhocracy4.models.base import UserGeneratedContentModel\nfrom adhocracy4.projects import models as project_models\nfrom meinberlin.apps.maps.models import MapPreset\n\nSTATUS_TODO = 0\nSTATUS_PLANNING = 1\nSTATUS_IMPLEMENTATION = 2\nSTATUS_DONE = 3\nSTATUS_STOPPED = 4\n\nPARTICIPATION_NO = 0\nPARTICIPATION_YES = 1\nPARTICIPATION_UNDECIDED = 2\n\n\nclass Plan(UserGeneratedContentModel):\n title = models.CharField(max_length=120, verbose_name=_('Title'))\n organisation = models.ForeignKey(\n settings.A4_ORGANISATIONS_MODEL,\n on_delete=models.CASCADE)\n project = models.ForeignKey(project_models.Project, blank=True, null=True)\n point = map_fields.PointField(\n verbose_name=_('Where can the plan be located on a map?'),\n help_text=_('Click inside marked area on the map to set a marker. '\n 'Drag and drop the marker to change its place. 
'\n 'Alternatively you can use the search field to search '\n 'for an address.'))\n point_label = models.CharField(\n blank=True,\n default='',\n max_length=255,\n verbose_name=_('Label of the location'),\n help_text=_('This could be an address or the name of a landmark.'),\n )\n district = models.ForeignKey(\n MapPreset,\n limit_choices_to=Q(category__name='Berlin') & ~Q(name='Berlin'))\n contact = models.TextField(max_length=255, verbose_name=_('Contact'))\n cost = models.PositiveIntegerField(blank=True, null=True,\n verbose_name=_('Cost'))\n description = RichTextField(verbose_name=_('Description'), blank=True)\n category = models.CharField(max_length=255, verbose_name=_('Type of plan'))\n status = models.SmallIntegerField(choices=(\n (STATUS_TODO, _('Idea')),\n (STATUS_PLANNING, _('Planning')),\n (STATUS_IMPLEMENTATION, _('Implementation')),\n (STATUS_DONE, _('Done')),\n (STATUS_STOPPED, _('Stopped')),\n ))\n participation = models.SmallIntegerField(choices=(\n (PARTICIPATION_YES, _('Yes')),\n (PARTICIPATION_NO, _('No')),\n (PARTICIPATION_UNDECIDED, _('Still undecided')),\n ))\n\n class Meta:\n ordering = ['-created']\n\n @property\n def reference_number(self):\n return '{:d}-{:05d}'.format(self.created.year, self.pk)\n\n def __str__(self):\n return self.title\n\n def get_absolute_url(self):\n return reverse('meinberlin_plans:plan-detail',\n kwargs=dict(pk=self.pk, year=self.created.year))\n\n def save(self, *args, **kwargs):\n self.description = transforms.clean_html_field(self.description)\n super().save(*args, **kwargs)\n", "path": "meinberlin/apps/plans/models.py"}]} | 1,124 | 199 |
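
The patch in the row above swaps the plain dict passed to `limit_choices_to` for a composed `Q` expression, so presets from the "Berlin" category are offered while the city-wide "Berlin" preset itself is excluded. The snippet below only demonstrates how that `Q` expression composes; it assumes Django is installed and does not touch the actual meinberlin models.

```python
from django.db.models import Q

# Presets in the "Berlin" category, minus the "Berlin" preset itself.
district_choices = Q(category__name='Berlin') & ~Q(name='Berlin')

# Prints the combined expression, roughly:
# (AND: ('category__name', 'Berlin'), (NOT (AND: ('name', 'Berlin'))))
print(district_choices)
```
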
gh_patches_debug_29253 | rasdani/github-patches | git_diff | mars-project__mars-2957 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Submit query condition to remote node instead of fetch to local then query
<!--
Thank you for your contribution!
Please review https://github.com/mars-project/mars/blob/master/CONTRIBUTING.rst before opening an issue.
-->
**Is your feature request related to a problem? Please describe.**
Currently, if the ray fetcher gets objects with conditions, it will fetch the objects to the local node and then filter them locally with the conditions. This incurs significant object-transfer cost and a high memory footprint on the local node. 
**Describe the solution you'd like**
We should submit the query to the node that holds the objects and then fetch only the queried result instead. 
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mars/services/task/execution/ray/fetcher.py`
Content:
```
1 # Copyright 1999-2021 Alibaba Group Holding Ltd.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import asyncio
16 from collections import namedtuple
17 from typing import Dict, List
18 from ..api import Fetcher, register_fetcher_cls
19
20
21 _FetchInfo = namedtuple("FetchInfo", ["key", "object_ref", "conditions"])
22
23
24 @register_fetcher_cls
25 class RayFetcher(Fetcher):
26 name = "ray"
27 required_meta_keys = ("object_refs",)
28
29 def __init__(self, **kwargs):
30 self._fetch_info_list = []
31 self._no_conditions = True
32
33 async def append(self, chunk_key: str, chunk_meta: Dict, conditions: List = None):
34 if conditions is not None:
35 self._no_conditions = False
36 self._fetch_info_list.append(
37 _FetchInfo(chunk_key, chunk_meta["object_refs"][0], conditions)
38 )
39
40 async def get(self):
41 objects = await asyncio.gather(
42 *(info.object_ref for info in self._fetch_info_list)
43 )
44 if self._no_conditions:
45 return objects
46 results = []
47 for o, fetch_info in zip(objects, self._fetch_info_list):
48 if fetch_info.conditions is None:
49 results.append(o)
50 else:
51 try:
52 results.append(o.iloc[fetch_info.conditions])
53 except AttributeError:
54 results.append(o[fetch_info.conditions])
55 return results
56
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/mars/services/task/execution/ray/fetcher.py b/mars/services/task/execution/ray/fetcher.py
--- a/mars/services/task/execution/ray/fetcher.py
+++ b/mars/services/task/execution/ray/fetcher.py
@@ -15,9 +15,10 @@
import asyncio
from collections import namedtuple
from typing import Dict, List
+from .....utils import lazy_import
from ..api import Fetcher, register_fetcher_cls
-
+ray = lazy_import("ray")
_FetchInfo = namedtuple("FetchInfo", ["key", "object_ref", "conditions"])
@@ -38,18 +39,27 @@
)
async def get(self):
- objects = await asyncio.gather(
- *(info.object_ref for info in self._fetch_info_list)
- )
if self._no_conditions:
- return objects
- results = []
- for o, fetch_info in zip(objects, self._fetch_info_list):
+ return await asyncio.gather(
+ *(info.object_ref for info in self._fetch_info_list)
+ )
+ refs = [None] * len(self._fetch_info_list)
+ for index, fetch_info in enumerate(self._fetch_info_list):
if fetch_info.conditions is None:
- results.append(o)
+ refs[index] = fetch_info.object_ref
else:
- try:
- results.append(o.iloc[fetch_info.conditions])
- except AttributeError:
- results.append(o[fetch_info.conditions])
- return results
+ refs[index] = query_object_with_condition.remote(
+ fetch_info.object_ref, fetch_info.conditions
+ )
+ return await asyncio.gather(*refs)
+
+
+def query_object_with_condition(o, conditions):
+ try:
+ return o.iloc[conditions]
+ except AttributeError:
+ return o[conditions]
+
+
+if ray:
+ query_object_with_condition = ray.remote(query_object_with_condition)
| {"golden_diff": "diff --git a/mars/services/task/execution/ray/fetcher.py b/mars/services/task/execution/ray/fetcher.py\n--- a/mars/services/task/execution/ray/fetcher.py\n+++ b/mars/services/task/execution/ray/fetcher.py\n@@ -15,9 +15,10 @@\n import asyncio\n from collections import namedtuple\n from typing import Dict, List\n+from .....utils import lazy_import\n from ..api import Fetcher, register_fetcher_cls\n \n-\n+ray = lazy_import(\"ray\")\n _FetchInfo = namedtuple(\"FetchInfo\", [\"key\", \"object_ref\", \"conditions\"])\n \n \n@@ -38,18 +39,27 @@\n )\n \n async def get(self):\n- objects = await asyncio.gather(\n- *(info.object_ref for info in self._fetch_info_list)\n- )\n if self._no_conditions:\n- return objects\n- results = []\n- for o, fetch_info in zip(objects, self._fetch_info_list):\n+ return await asyncio.gather(\n+ *(info.object_ref for info in self._fetch_info_list)\n+ )\n+ refs = [None] * len(self._fetch_info_list)\n+ for index, fetch_info in enumerate(self._fetch_info_list):\n if fetch_info.conditions is None:\n- results.append(o)\n+ refs[index] = fetch_info.object_ref\n else:\n- try:\n- results.append(o.iloc[fetch_info.conditions])\n- except AttributeError:\n- results.append(o[fetch_info.conditions])\n- return results\n+ refs[index] = query_object_with_condition.remote(\n+ fetch_info.object_ref, fetch_info.conditions\n+ )\n+ return await asyncio.gather(*refs)\n+\n+\n+def query_object_with_condition(o, conditions):\n+ try:\n+ return o.iloc[conditions]\n+ except AttributeError:\n+ return o[conditions]\n+\n+\n+if ray:\n+ query_object_with_condition = ray.remote(query_object_with_condition)\n", "issue": "Submit query condition to remote node instead of fetch to local then query\n<!--\r\nThank you for your contribution!\r\n\r\nPlease review https://github.com/mars-project/mars/blob/master/CONTRIBUTING.rst before opening an issue.\r\n-->\r\n\r\n**Is your feature request related to a problem? Please describe.**\r\nCurently if ray fetcher gets objects with condition, it will fetch objects to local, then filter local objects with conditions. This will incur much objects transfer cost and high memory footsprint in local node. \r\n\r\n**Describe the solution you'd like**\r\nWe should submit query to objects' node then fetch queried result instead. 
\r\n\r\n\n", "before_files": [{"content": "# Copyright 1999-2021 Alibaba Group Holding Ltd.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport asyncio\nfrom collections import namedtuple\nfrom typing import Dict, List\nfrom ..api import Fetcher, register_fetcher_cls\n\n\n_FetchInfo = namedtuple(\"FetchInfo\", [\"key\", \"object_ref\", \"conditions\"])\n\n\n@register_fetcher_cls\nclass RayFetcher(Fetcher):\n name = \"ray\"\n required_meta_keys = (\"object_refs\",)\n\n def __init__(self, **kwargs):\n self._fetch_info_list = []\n self._no_conditions = True\n\n async def append(self, chunk_key: str, chunk_meta: Dict, conditions: List = None):\n if conditions is not None:\n self._no_conditions = False\n self._fetch_info_list.append(\n _FetchInfo(chunk_key, chunk_meta[\"object_refs\"][0], conditions)\n )\n\n async def get(self):\n objects = await asyncio.gather(\n *(info.object_ref for info in self._fetch_info_list)\n )\n if self._no_conditions:\n return objects\n results = []\n for o, fetch_info in zip(objects, self._fetch_info_list):\n if fetch_info.conditions is None:\n results.append(o)\n else:\n try:\n results.append(o.iloc[fetch_info.conditions])\n except AttributeError:\n results.append(o[fetch_info.conditions])\n return results\n", "path": "mars/services/task/execution/ray/fetcher.py"}], "after_files": [{"content": "# Copyright 1999-2021 Alibaba Group Holding Ltd.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport asyncio\nfrom collections import namedtuple\nfrom typing import Dict, List\nfrom .....utils import lazy_import\nfrom ..api import Fetcher, register_fetcher_cls\n\nray = lazy_import(\"ray\")\n_FetchInfo = namedtuple(\"FetchInfo\", [\"key\", \"object_ref\", \"conditions\"])\n\n\n@register_fetcher_cls\nclass RayFetcher(Fetcher):\n name = \"ray\"\n required_meta_keys = (\"object_refs\",)\n\n def __init__(self, **kwargs):\n self._fetch_info_list = []\n self._no_conditions = True\n\n async def append(self, chunk_key: str, chunk_meta: Dict, conditions: List = None):\n if conditions is not None:\n self._no_conditions = False\n self._fetch_info_list.append(\n _FetchInfo(chunk_key, chunk_meta[\"object_refs\"][0], conditions)\n )\n\n async def get(self):\n if self._no_conditions:\n return await asyncio.gather(\n *(info.object_ref for info in self._fetch_info_list)\n )\n refs = [None] * len(self._fetch_info_list)\n for index, fetch_info in enumerate(self._fetch_info_list):\n if fetch_info.conditions is None:\n refs[index] = fetch_info.object_ref\n else:\n refs[index] = 
query_object_with_condition.remote(\n fetch_info.object_ref, fetch_info.conditions\n )\n return await asyncio.gather(*refs)\n\n\ndef query_object_with_condition(o, conditions):\n try:\n return o.iloc[conditions]\n except AttributeError:\n return o[conditions]\n\n\nif ray:\n query_object_with_condition = ray.remote(query_object_with_condition)\n", "path": "mars/services/task/execution/ray/fetcher.py"}]} | 918 | 432 |
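
The core of the fix in the row above is moving the conditional filtering into a Ray remote function so that only the filtered slice is transferred back instead of the whole object. The stand-alone sketch below illustrates that pattern outside the Mars codebase; it assumes `ray` and `pandas` are installed and starts a local Ray runtime.

```python
import pandas as pd
import ray

ray.init(ignore_reinit_error=True)


@ray.remote
def query_object_with_condition(obj, conditions):
    # Runs where the object lives, so only the selected rows travel back.
    try:
        return obj.iloc[conditions]
    except AttributeError:
        return obj[conditions]


big_ref = ray.put(pd.DataFrame({"value": range(1_000_000)}))
small_result = ray.get(query_object_with_condition.remote(big_ref, [0, 1, 2]))
print(small_result)
ray.shutdown()
```
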
gh_patches_debug_1383 | rasdani/github-patches | git_diff | vega__altair-150 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
to_dict() not in Chart.__dir__
All in title.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `altair/schema/baseobject.py`
Content:
```
1 import pandas as pd
2 import traitlets as T
3
4 from ..utils._py3k_compat import string_types
5
6 _attr_template = "Attribute not found: {0}. Valid keyword arguments for this class: {1}"
7
8
9 class BaseObject(T.HasTraits):
10
11 skip = []
12
13 def __init__(self, **kwargs):
14 all_traits = list(self.traits())
15 for k in kwargs:
16 if k not in all_traits:
17 raise KeyError(_attr_template.format(k, all_traits))
18 super(BaseObject, self).__init__(**kwargs)
19
20 @classmethod
21 def infer_keywords(cls, *args, **kwargs):
22 """Utility to initialize object from args and kwargs
23
24 Arguments are converted to keyword arguments by inferring the keyword
25 from their type.
26 Keyword arguments are converted to the correct Instance class
27 if required.
28 """
29 def get_class(trait):
30 # TODO: what do do with lists?
31 if isinstance(trait, T.Union):
32 for klass in map(get_class, trait.trait_types):
33 if klass:
34 return klass
35 elif isinstance(trait, T.Instance):
36 return trait.klass
37
38 traits = cls.class_traits()
39 classes = {n: get_class(t) for n, t in traits.items()}
40
41 # Turn all keyword arguments to the appropriate class
42 for name, arg in kwargs.items():
43 Trait = classes.get(name, None)
44 if Trait is not None and not isinstance(arg, Trait):
45 try:
46 kwargs[name] = Trait(arg)
47 except (TypeError, T.TraitError):
48 pass # errors will handled by traitlets below
49
50 # find forward/backward mapping among unique classes
51 name_to_trait = {}
52 while classes:
53 name, trait = classes.popitem()
54 if trait is None:
55 continue
56 if trait not in set.union(set(classes.values()),
57 set(name_to_trait.values())):
58 name_to_trait[name] = trait
59 trait_to_name = {t: n for n, t in name_to_trait.items()}
60
61 # Update all arguments
62 for arg in args:
63 name = trait_to_name.get(type(arg), None)
64 if name is None:
65 raise ValueError("{0}: Unable to infer argument name for {1}".format(cls, arg))
66 elif name in kwargs:
67 raise ValueError("{0}: {1} specified both by arg and kwarg".format(cls, name))
68 else:
69 kwargs[name] = arg
70 return kwargs
71
72 def update_traits(self, **kwargs):
73 for key, val in kwargs.items():
74 self.set_trait(key, val)
75 return self
76
77 def update_inferred_traits(self, *args, **kwargs):
78 kwargs = self.infer_keywords(*args, **kwargs)
79 return self.update_traits(**kwargs)
80
81 def update_subtraits(self, attrs, *args, **kwargs):
82 """Update sub-traits without overwriting other traits"""
83 if not (args or kwargs):
84 return self
85 if isinstance(attrs, string_types):
86 attrs = (attrs,)
87 if len(attrs) == 0:
88 self.update_inferred_traits(*args, **kwargs)
89 else:
90 attr = attrs[0]
91 if attr not in self.traits():
92 raise ValueError('{0} has no trait {1}'.format(self, attr))
93 trait = getattr(self, attr)
94 if trait is None:
95 trait = self.traits()[attr].klass()
96 setattr(self, attr, trait.update_subtraits(attrs[1:], *args, **kwargs))
97 return self
98
99 def __contains__(self, key):
100 try:
101 value = getattr(self, key)
102 except AttributeError:
103 return False
104
105 # comparison to None will break, so check DataFrame specifically
106 if isinstance(value, pd.DataFrame):
107 return True
108 elif value is not None:
109 if isinstance(value, (int, float, bool)):
110 return True
111 else:
112 return bool(value)
113 else:
114 return False
115
116 def __dir__(self):
117 """Customize tab completed attributes."""
118 return list(self.traits())
119
120 @classmethod
121 def from_dict(cls, dct):
122 """Instantiate the object from a valid JSON dictionary"""
123 from ..utils.visitors import FromDict
124 return FromDict().clsvisit(cls, dct)
125
126 def to_dict(self, data=True):
127 """Emit the JSON representation for this object as as dict."""
128 from ..utils.visitors import ToDict
129 self._finalize()
130 return ToDict().visit(self, data)
131
132 def _finalize(self, **kwargs):
133 """Finalize the object, and all contained objects, for export."""
134 def finalize_obj(obj):
135 if isinstance(obj, BaseObject):
136 obj._finalize(**kwargs)
137 elif isinstance(obj, list):
138 for item in obj:
139 finalize_obj(item)
140
141 for name in self.traits():
142 value = getattr(self, name)
143 finalize_obj(value)
144
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/altair/schema/baseobject.py b/altair/schema/baseobject.py
--- a/altair/schema/baseobject.py
+++ b/altair/schema/baseobject.py
@@ -115,7 +115,7 @@
def __dir__(self):
"""Customize tab completed attributes."""
- return list(self.traits())
+ return list(self.traits())+['to_dict', 'from_dict']
@classmethod
def from_dict(cls, dct):
| {"golden_diff": "diff --git a/altair/schema/baseobject.py b/altair/schema/baseobject.py\n--- a/altair/schema/baseobject.py\n+++ b/altair/schema/baseobject.py\n@@ -115,7 +115,7 @@\n \n def __dir__(self):\n \"\"\"Customize tab completed attributes.\"\"\"\n- return list(self.traits())\n+ return list(self.traits())+['to_dict', 'from_dict']\n \n @classmethod\n def from_dict(cls, dct):\n", "issue": "to_dict() not in Chart.__dir__\nAll in title.\n\n", "before_files": [{"content": "import pandas as pd\nimport traitlets as T\n\nfrom ..utils._py3k_compat import string_types\n\n_attr_template = \"Attribute not found: {0}. Valid keyword arguments for this class: {1}\"\n\n\nclass BaseObject(T.HasTraits):\n\n skip = []\n\n def __init__(self, **kwargs):\n all_traits = list(self.traits())\n for k in kwargs:\n if k not in all_traits:\n raise KeyError(_attr_template.format(k, all_traits))\n super(BaseObject, self).__init__(**kwargs)\n\n @classmethod\n def infer_keywords(cls, *args, **kwargs):\n \"\"\"Utility to initialize object from args and kwargs\n\n Arguments are converted to keyword arguments by inferring the keyword\n from their type.\n Keyword arguments are converted to the correct Instance class\n if required.\n \"\"\"\n def get_class(trait):\n # TODO: what do do with lists?\n if isinstance(trait, T.Union):\n for klass in map(get_class, trait.trait_types):\n if klass:\n return klass\n elif isinstance(trait, T.Instance):\n return trait.klass\n\n traits = cls.class_traits()\n classes = {n: get_class(t) for n, t in traits.items()}\n\n # Turn all keyword arguments to the appropriate class\n for name, arg in kwargs.items():\n Trait = classes.get(name, None)\n if Trait is not None and not isinstance(arg, Trait):\n try:\n kwargs[name] = Trait(arg)\n except (TypeError, T.TraitError):\n pass # errors will handled by traitlets below\n\n # find forward/backward mapping among unique classes\n name_to_trait = {}\n while classes:\n name, trait = classes.popitem()\n if trait is None:\n continue\n if trait not in set.union(set(classes.values()),\n set(name_to_trait.values())):\n name_to_trait[name] = trait\n trait_to_name = {t: n for n, t in name_to_trait.items()}\n\n # Update all arguments\n for arg in args:\n name = trait_to_name.get(type(arg), None)\n if name is None:\n raise ValueError(\"{0}: Unable to infer argument name for {1}\".format(cls, arg))\n elif name in kwargs:\n raise ValueError(\"{0}: {1} specified both by arg and kwarg\".format(cls, name))\n else:\n kwargs[name] = arg\n return kwargs\n\n def update_traits(self, **kwargs):\n for key, val in kwargs.items():\n self.set_trait(key, val)\n return self\n\n def update_inferred_traits(self, *args, **kwargs):\n kwargs = self.infer_keywords(*args, **kwargs)\n return self.update_traits(**kwargs)\n\n def update_subtraits(self, attrs, *args, **kwargs):\n \"\"\"Update sub-traits without overwriting other traits\"\"\"\n if not (args or kwargs):\n return self\n if isinstance(attrs, string_types):\n attrs = (attrs,)\n if len(attrs) == 0:\n self.update_inferred_traits(*args, **kwargs)\n else:\n attr = attrs[0]\n if attr not in self.traits():\n raise ValueError('{0} has no trait {1}'.format(self, attr))\n trait = getattr(self, attr)\n if trait is None:\n trait = self.traits()[attr].klass()\n setattr(self, attr, trait.update_subtraits(attrs[1:], *args, **kwargs))\n return self\n\n def __contains__(self, key):\n try:\n value = getattr(self, key)\n except AttributeError:\n return False\n\n # comparison to None will break, so check DataFrame specifically\n if 
isinstance(value, pd.DataFrame):\n return True\n elif value is not None:\n if isinstance(value, (int, float, bool)):\n return True\n else:\n return bool(value)\n else:\n return False\n\n def __dir__(self):\n \"\"\"Customize tab completed attributes.\"\"\"\n return list(self.traits())\n\n @classmethod\n def from_dict(cls, dct):\n \"\"\"Instantiate the object from a valid JSON dictionary\"\"\"\n from ..utils.visitors import FromDict\n return FromDict().clsvisit(cls, dct)\n\n def to_dict(self, data=True):\n \"\"\"Emit the JSON representation for this object as as dict.\"\"\"\n from ..utils.visitors import ToDict\n self._finalize()\n return ToDict().visit(self, data)\n\n def _finalize(self, **kwargs):\n \"\"\"Finalize the object, and all contained objects, for export.\"\"\"\n def finalize_obj(obj):\n if isinstance(obj, BaseObject):\n obj._finalize(**kwargs)\n elif isinstance(obj, list):\n for item in obj:\n finalize_obj(item)\n\n for name in self.traits():\n value = getattr(self, name)\n finalize_obj(value)\n", "path": "altair/schema/baseobject.py"}], "after_files": [{"content": "import pandas as pd\nimport traitlets as T\n\nfrom ..utils._py3k_compat import string_types\n\n_attr_template = \"Attribute not found: {0}. Valid keyword arguments for this class: {1}\"\n\n\nclass BaseObject(T.HasTraits):\n\n skip = []\n\n def __init__(self, **kwargs):\n all_traits = list(self.traits())\n for k in kwargs:\n if k not in all_traits:\n raise KeyError(_attr_template.format(k, all_traits))\n super(BaseObject, self).__init__(**kwargs)\n\n @classmethod\n def infer_keywords(cls, *args, **kwargs):\n \"\"\"Utility to initialize object from args and kwargs\n\n Arguments are converted to keyword arguments by inferring the keyword\n from their type.\n Keyword arguments are converted to the correct Instance class\n if required.\n \"\"\"\n def get_class(trait):\n # TODO: what do do with lists?\n if isinstance(trait, T.Union):\n for klass in map(get_class, trait.trait_types):\n if klass:\n return klass\n elif isinstance(trait, T.Instance):\n return trait.klass\n\n traits = cls.class_traits()\n classes = {n: get_class(t) for n, t in traits.items()}\n\n # Turn all keyword arguments to the appropriate class\n for name, arg in kwargs.items():\n Trait = classes.get(name, None)\n if Trait is not None and not isinstance(arg, Trait):\n try:\n kwargs[name] = Trait(arg)\n except (TypeError, T.TraitError):\n pass # errors will handled by traitlets below\n\n # find forward/backward mapping among unique classes\n name_to_trait = {}\n while classes:\n name, trait = classes.popitem()\n if trait is None:\n continue\n if trait not in set.union(set(classes.values()),\n set(name_to_trait.values())):\n name_to_trait[name] = trait\n trait_to_name = {t: n for n, t in name_to_trait.items()}\n\n # Update all arguments\n for arg in args:\n name = trait_to_name.get(type(arg), None)\n if name is None:\n raise ValueError(\"{0}: Unable to infer argument name for {1}\".format(cls, arg))\n elif name in kwargs:\n raise ValueError(\"{0}: {1} specified both by arg and kwarg\".format(cls, name))\n else:\n kwargs[name] = arg\n return kwargs\n\n def update_traits(self, **kwargs):\n for key, val in kwargs.items():\n self.set_trait(key, val)\n return self\n\n def update_inferred_traits(self, *args, **kwargs):\n kwargs = self.infer_keywords(*args, **kwargs)\n return self.update_traits(**kwargs)\n\n def update_subtraits(self, attrs, *args, **kwargs):\n \"\"\"Update sub-traits without overwriting other traits\"\"\"\n if not (args or kwargs):\n return self\n 
if isinstance(attrs, string_types):\n attrs = (attrs,)\n if len(attrs) == 0:\n self.update_inferred_traits(*args, **kwargs)\n else:\n attr = attrs[0]\n if attr not in self.traits():\n raise ValueError('{0} has no trait {1}'.format(self, attr))\n trait = getattr(self, attr)\n if trait is None:\n trait = self.traits()[attr].klass()\n setattr(self, attr, trait.update_subtraits(attrs[1:], *args, **kwargs))\n return self\n\n def __contains__(self, key):\n try:\n value = getattr(self, key)\n except AttributeError:\n return False\n\n # comparison to None will break, so check DataFrame specifically\n if isinstance(value, pd.DataFrame):\n return True\n elif value is not None:\n if isinstance(value, (int, float, bool)):\n return True\n else:\n return bool(value)\n else:\n return False\n\n def __dir__(self):\n \"\"\"Customize tab completed attributes.\"\"\"\n return list(self.traits())+['to_dict', 'from_dict']\n\n @classmethod\n def from_dict(cls, dct):\n \"\"\"Instantiate the object from a valid JSON dictionary\"\"\"\n from ..utils.visitors import FromDict\n return FromDict().clsvisit(cls, dct)\n\n def to_dict(self, data=True):\n \"\"\"Emit the JSON representation for this object as as dict.\"\"\"\n from ..utils.visitors import ToDict\n self._finalize()\n return ToDict().visit(self, data)\n\n def _finalize(self, **kwargs):\n \"\"\"Finalize the object, and all contained objects, for export.\"\"\"\n def finalize_obj(obj):\n if isinstance(obj, BaseObject):\n obj._finalize(**kwargs)\n elif isinstance(obj, list):\n for item in obj:\n finalize_obj(item)\n\n for name in self.traits():\n value = getattr(self, name)\n finalize_obj(value)\n", "path": "altair/schema/baseobject.py"}]} | 1,650 | 108 |
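
The one-line patch in the row above appends 'to_dict' and 'from_dict' to the list returned by `__dir__`, which is what tab completion consults. The toy class below shows the same mechanism in isolation; the trait names are hard-coded stand-ins rather than real traitlets.

```python
class ChartLike:
    def __dir__(self):
        traits = ['width', 'height']  # stand-ins for the traitlets-derived names
        return traits + ['to_dict', 'from_dict']

    def to_dict(self):
        return {'width': None, 'height': None}


print('to_dict' in dir(ChartLike()))  # True -> now discoverable via tab completion
```
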
gh_patches_debug_20770 | rasdani/github-patches | git_diff | hartwork__jawanndenn-45 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Error 500 when pasting JSON with single surrogate characters
Example:
```json
{
"equal_width": false,
"title": "Which fruit do *you* like?",
"options": [
"Apple",
"Banana",
"Orange",
"Papaya", "\ud800"
]
}
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `jawanndenn/views.py`
Content:
```
1 # Copyright (C) 2019 Sebastian Pipping <[email protected]>
2 # Licensed under GNU Affero GPL v3 or later
3
4 import json # lgtm [py/import-and-import-from]
5 from functools import wraps
6 from json import JSONDecodeError
7
8 from django.conf import settings
9 from django.db import transaction
10 from django.http import (Http404, HttpResponseBadRequest, HttpResponseNotFound,
11 JsonResponse)
12 from django.shortcuts import redirect
13 from django.template.response import TemplateResponse
14 from django.utils.module_loading import import_string
15 from django.views.decorators.http import require_GET, require_POST
16 from django.views.defaults import bad_request
17 from django.views.static import serve
18 from jawanndenn.markup import safe_html
19 from jawanndenn.models import Ballot, Poll, Vote
20 from jawanndenn.serializers import PollConfigSerializer
21 from rest_framework.exceptions import ValidationError
22
23 _staticfile_finders = [import_string(cls_string)()
24 for cls_string in settings.STATICFILES_FINDERS]
25
26
27 def _except_poll_does_not_exist(wrappee):
28 """Decorator that turns Poll.DoesNotExist into 404 Not Found"""
29 @wraps(wrappee)
30 def wrapper(*args, **kwargs):
31 try:
32 return wrappee(*args, **kwargs)
33 except Poll.DoesNotExist:
34 return HttpResponseNotFound('No such poll')
35
36 return wrapper
37
38
39 def _except_validation_error(wrappee):
40 """Decorator that turns ValidationError into 400 Bad Request"""
41 @wraps(wrappee)
42 def wrapper(request, *args, **kwargs):
43 try:
44 return wrappee(request, *args, **kwargs)
45 except ValidationError as exception:
46 return bad_request(request, exception)
47
48 return wrapper
49
50
51 @require_GET
52 def index_get_view(request):
53 return TemplateResponse(request,
54 template='html/setup.xhtml',
55 content_type='application/xhtml+xml')
56
57
58 @require_POST
59 @_except_validation_error
60 def poll_post_view(request):
61 config_json = request.POST.get('config', '{}')
62 try:
63 config = json.loads(config_json)
64 except JSONDecodeError:
65 raise ValidationError('Poll configuration is not well-formed JSON.')
66
67 serializer = PollConfigSerializer(data=config)
68 serializer.is_valid(raise_exception=True)
69
70 with transaction.atomic():
71 if Poll.objects.count() >= settings.JAWANNDENN_MAX_POLLS:
72 return HttpResponseBadRequest(
73 f'Maximum number of {settings.JAWANNDENN_MAX_POLLS} polls '
74 'reached, please contact the administrator.')
75
76 poll = serializer.save()
77
78 return redirect(poll)
79
80
81 @require_GET
82 @_except_poll_does_not_exist
83 def poll_data_get_view(request, poll_id):
84 with transaction.atomic():
85 poll = Poll.objects.get(slug=poll_id)
86 poll_config = {
87 'equal_width': poll.equal_width,
88 'title': poll.title,
89 'options': list(poll.options.order_by('position')
90 .values_list('name', flat=True)),
91 }
92 votes = [
93 [ballot.voter_name, [vote.yes for vote
94 in ballot.votes.order_by('option__position')]]
95 for ballot
96 in poll.ballots.order_by('created', 'id')
97 ]
98
99 data = {
100 'config': poll_config,
101 'votes': votes,
102 }
103
104 return JsonResponse(data)
105
106
107 @require_GET
108 @_except_poll_does_not_exist
109 def poll_get_view(request, poll_id):
110 Poll.objects.get(slug=poll_id)
111
112 return TemplateResponse(request,
113 template='html/poll.xhtml',
114 content_type='application/xhtml+xml')
115
116
117 @require_POST
118 @_except_poll_does_not_exist
119 def vote_post_view(request, poll_id):
120 with transaction.atomic():
121 poll = Poll.objects.get(slug=poll_id)
122
123 if poll.ballots.count() >= settings.JAWANNDENN_MAX_VOTES_PER_POLL:
124 return HttpResponseBadRequest(
125 f'Maximum number of {settings.JAWANNDENN_MAX_VOTES_PER_POLL} '
126 'votes reached for this poll'
127 ', please contact the administrator.')
128
129 voter_name = safe_html(request.POST.get('voterName'))
130 votes = [
131 request.POST.get(f'option{i}', 'off') == 'on'
132 for i
133 in range(poll.options.count())
134 ]
135
136 ballot = Ballot.objects.create(poll=poll, voter_name=voter_name)
137 for option, vote in zip(poll.options.order_by('position'), votes):
138 Vote.objects.create(ballot=ballot, option=option, yes=vote)
139
140 return redirect(poll)
141
142
143 @require_GET
144 def serve_using_finders(request, path, show_indexes=False):
145 """
146 Wrapper around django.views.static.serve that uses
147 settings.STATICFILES_FINDERS rather than a single document_root
148 """
149 for finder in _staticfile_finders:
150 fullpath = finder.find(path)
151 if fullpath:
152 document_root = fullpath[:-len(path)] if path else fullpath
153 return serve(request, path, document_root=document_root,
154 show_indexes=show_indexes)
155 else:
156 raise Http404
157
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/jawanndenn/views.py b/jawanndenn/views.py
--- a/jawanndenn/views.py
+++ b/jawanndenn/views.py
@@ -1,10 +1,9 @@
# Copyright (C) 2019 Sebastian Pipping <[email protected]>
# Licensed under GNU Affero GPL v3 or later
-import json # lgtm [py/import-and-import-from]
from functools import wraps
-from json import JSONDecodeError
+import rapidjson as json # lgtm [py/import-and-import-from]
from django.conf import settings
from django.db import transaction
from django.http import (Http404, HttpResponseBadRequest, HttpResponseNotFound,
@@ -18,6 +17,7 @@
from jawanndenn.markup import safe_html
from jawanndenn.models import Ballot, Poll, Vote
from jawanndenn.serializers import PollConfigSerializer
+from rapidjson import JSONDecodeError
from rest_framework.exceptions import ValidationError
_staticfile_finders = [import_string(cls_string)()
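With the patch applied, the payload from the issue never reaches the database: `json.loads` is now `rapidjson.loads`, which raises `JSONDecodeError` on the unpaired surrogate; `poll_post_view` converts that into a DRF `ValidationError`, and the `_except_validation_error` decorator returns a 400. A rough parse-time sketch (illustrative, not taken from the project's test suite):

```python
import rapidjson

try:
    rapidjson.loads('{"options": ["Papaya", "\\ud800"]}')
except rapidjson.JSONDecodeError as exc:
    print("rejected at parse time:", exc)  # handled as a validation error, not a 500
```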
| {"golden_diff": "diff --git a/jawanndenn/views.py b/jawanndenn/views.py\n--- a/jawanndenn/views.py\n+++ b/jawanndenn/views.py\n@@ -1,10 +1,9 @@\n # Copyright (C) 2019 Sebastian Pipping <[email protected]>\n # Licensed under GNU Affero GPL v3 or later\n \n-import json # lgtm [py/import-and-import-from]\n from functools import wraps\n-from json import JSONDecodeError\n \n+import rapidjson as json # lgtm [py/import-and-import-from]\n from django.conf import settings\n from django.db import transaction\n from django.http import (Http404, HttpResponseBadRequest, HttpResponseNotFound,\n@@ -18,6 +17,7 @@\n from jawanndenn.markup import safe_html\n from jawanndenn.models import Ballot, Poll, Vote\n from jawanndenn.serializers import PollConfigSerializer\n+from rapidjson import JSONDecodeError\n from rest_framework.exceptions import ValidationError\n \n _staticfile_finders = [import_string(cls_string)()\n", "issue": "Error 500 when pasting JSON with single surrogate characters\nExample:\r\n```json\r\n{\r\n \"equal_width\": false,\r\n \"title\": \"Which fruit do *you* like?\",\r\n \"options\": [\r\n \"Apple\",\r\n \"Banana\",\r\n \"Orange\",\r\n \"Papaya\", \"\\ud800\"\r\n ]\r\n}\r\n```\nError 500 when pasting JSON with single surrogate characters\nExample:\r\n```json\r\n{\r\n \"equal_width\": false,\r\n \"title\": \"Which fruit do *you* like?\",\r\n \"options\": [\r\n \"Apple\",\r\n \"Banana\",\r\n \"Orange\",\r\n \"Papaya\", \"\\ud800\"\r\n ]\r\n}\r\n```\n", "before_files": [{"content": "# Copyright (C) 2019 Sebastian Pipping <[email protected]>\n# Licensed under GNU Affero GPL v3 or later\n\nimport json # lgtm [py/import-and-import-from]\nfrom functools import wraps\nfrom json import JSONDecodeError\n\nfrom django.conf import settings\nfrom django.db import transaction\nfrom django.http import (Http404, HttpResponseBadRequest, HttpResponseNotFound,\n JsonResponse)\nfrom django.shortcuts import redirect\nfrom django.template.response import TemplateResponse\nfrom django.utils.module_loading import import_string\nfrom django.views.decorators.http import require_GET, require_POST\nfrom django.views.defaults import bad_request\nfrom django.views.static import serve\nfrom jawanndenn.markup import safe_html\nfrom jawanndenn.models import Ballot, Poll, Vote\nfrom jawanndenn.serializers import PollConfigSerializer\nfrom rest_framework.exceptions import ValidationError\n\n_staticfile_finders = [import_string(cls_string)()\n for cls_string in settings.STATICFILES_FINDERS]\n\n\ndef _except_poll_does_not_exist(wrappee):\n \"\"\"Decorator that turns Poll.DoesNotExist into 404 Not Found\"\"\"\n @wraps(wrappee)\n def wrapper(*args, **kwargs):\n try:\n return wrappee(*args, **kwargs)\n except Poll.DoesNotExist:\n return HttpResponseNotFound('No such poll')\n\n return wrapper\n\n\ndef _except_validation_error(wrappee):\n \"\"\"Decorator that turns ValidationError into 400 Bad Request\"\"\"\n @wraps(wrappee)\n def wrapper(request, *args, **kwargs):\n try:\n return wrappee(request, *args, **kwargs)\n except ValidationError as exception:\n return bad_request(request, exception)\n\n return wrapper\n\n\n@require_GET\ndef index_get_view(request):\n return TemplateResponse(request,\n template='html/setup.xhtml',\n content_type='application/xhtml+xml')\n\n\n@require_POST\n@_except_validation_error\ndef poll_post_view(request):\n config_json = request.POST.get('config', '{}')\n try:\n config = json.loads(config_json)\n except JSONDecodeError:\n raise ValidationError('Poll configuration is not well-formed JSON.')\n\n 
serializer = PollConfigSerializer(data=config)\n serializer.is_valid(raise_exception=True)\n\n with transaction.atomic():\n if Poll.objects.count() >= settings.JAWANNDENN_MAX_POLLS:\n return HttpResponseBadRequest(\n f'Maximum number of {settings.JAWANNDENN_MAX_POLLS} polls '\n 'reached, please contact the administrator.')\n\n poll = serializer.save()\n\n return redirect(poll)\n\n\n@require_GET\n@_except_poll_does_not_exist\ndef poll_data_get_view(request, poll_id):\n with transaction.atomic():\n poll = Poll.objects.get(slug=poll_id)\n poll_config = {\n 'equal_width': poll.equal_width,\n 'title': poll.title,\n 'options': list(poll.options.order_by('position')\n .values_list('name', flat=True)),\n }\n votes = [\n [ballot.voter_name, [vote.yes for vote\n in ballot.votes.order_by('option__position')]]\n for ballot\n in poll.ballots.order_by('created', 'id')\n ]\n\n data = {\n 'config': poll_config,\n 'votes': votes,\n }\n\n return JsonResponse(data)\n\n\n@require_GET\n@_except_poll_does_not_exist\ndef poll_get_view(request, poll_id):\n Poll.objects.get(slug=poll_id)\n\n return TemplateResponse(request,\n template='html/poll.xhtml',\n content_type='application/xhtml+xml')\n\n\n@require_POST\n@_except_poll_does_not_exist\ndef vote_post_view(request, poll_id):\n with transaction.atomic():\n poll = Poll.objects.get(slug=poll_id)\n\n if poll.ballots.count() >= settings.JAWANNDENN_MAX_VOTES_PER_POLL:\n return HttpResponseBadRequest(\n f'Maximum number of {settings.JAWANNDENN_MAX_VOTES_PER_POLL} '\n 'votes reached for this poll'\n ', please contact the administrator.')\n\n voter_name = safe_html(request.POST.get('voterName'))\n votes = [\n request.POST.get(f'option{i}', 'off') == 'on'\n for i\n in range(poll.options.count())\n ]\n\n ballot = Ballot.objects.create(poll=poll, voter_name=voter_name)\n for option, vote in zip(poll.options.order_by('position'), votes):\n Vote.objects.create(ballot=ballot, option=option, yes=vote)\n\n return redirect(poll)\n\n\n@require_GET\ndef serve_using_finders(request, path, show_indexes=False):\n \"\"\"\n Wrapper around django.views.static.serve that uses\n settings.STATICFILES_FINDERS rather than a single document_root\n \"\"\"\n for finder in _staticfile_finders:\n fullpath = finder.find(path)\n if fullpath:\n document_root = fullpath[:-len(path)] if path else fullpath\n return serve(request, path, document_root=document_root,\n show_indexes=show_indexes)\n else:\n raise Http404\n", "path": "jawanndenn/views.py"}], "after_files": [{"content": "# Copyright (C) 2019 Sebastian Pipping <[email protected]>\n# Licensed under GNU Affero GPL v3 or later\n\nfrom functools import wraps\n\nimport rapidjson as json # lgtm [py/import-and-import-from]\nfrom django.conf import settings\nfrom django.db import transaction\nfrom django.http import (Http404, HttpResponseBadRequest, HttpResponseNotFound,\n JsonResponse)\nfrom django.shortcuts import redirect\nfrom django.template.response import TemplateResponse\nfrom django.utils.module_loading import import_string\nfrom django.views.decorators.http import require_GET, require_POST\nfrom django.views.defaults import bad_request\nfrom django.views.static import serve\nfrom jawanndenn.markup import safe_html\nfrom jawanndenn.models import Ballot, Poll, Vote\nfrom jawanndenn.serializers import PollConfigSerializer\nfrom rapidjson import JSONDecodeError\nfrom rest_framework.exceptions import ValidationError\n\n_staticfile_finders = [import_string(cls_string)()\n for cls_string in settings.STATICFILES_FINDERS]\n\n\ndef 
_except_poll_does_not_exist(wrappee):\n \"\"\"Decorator that turns Poll.DoesNotExist into 404 Not Found\"\"\"\n @wraps(wrappee)\n def wrapper(*args, **kwargs):\n try:\n return wrappee(*args, **kwargs)\n except Poll.DoesNotExist:\n return HttpResponseNotFound('No such poll')\n\n return wrapper\n\n\ndef _except_validation_error(wrappee):\n \"\"\"Decorator that turns ValidationError into 400 Bad Request\"\"\"\n @wraps(wrappee)\n def wrapper(request, *args, **kwargs):\n try:\n return wrappee(request, *args, **kwargs)\n except ValidationError as exception:\n return bad_request(request, exception)\n\n return wrapper\n\n\n@require_GET\ndef index_get_view(request):\n return TemplateResponse(request,\n template='html/setup.xhtml',\n content_type='application/xhtml+xml')\n\n\n@require_POST\n@_except_validation_error\ndef poll_post_view(request):\n config_json = request.POST.get('config', '{}')\n try:\n config = json.loads(config_json)\n except JSONDecodeError:\n raise ValidationError('Poll configuration is not well-formed JSON.')\n\n serializer = PollConfigSerializer(data=config)\n serializer.is_valid(raise_exception=True)\n\n with transaction.atomic():\n if Poll.objects.count() >= settings.JAWANNDENN_MAX_POLLS:\n return HttpResponseBadRequest(\n f'Maximum number of {settings.JAWANNDENN_MAX_POLLS} polls '\n 'reached, please contact the administrator.')\n\n poll = serializer.save()\n\n return redirect(poll)\n\n\n@require_GET\n@_except_poll_does_not_exist\ndef poll_data_get_view(request, poll_id):\n with transaction.atomic():\n poll = Poll.objects.get(slug=poll_id)\n poll_config = {\n 'equal_width': poll.equal_width,\n 'title': poll.title,\n 'options': list(poll.options.order_by('position')\n .values_list('name', flat=True)),\n }\n votes = [\n [ballot.voter_name, [vote.yes for vote\n in ballot.votes.order_by('option__position')]]\n for ballot\n in poll.ballots.order_by('created', 'id')\n ]\n\n data = {\n 'config': poll_config,\n 'votes': votes,\n }\n\n return JsonResponse(data)\n\n\n@require_GET\n@_except_poll_does_not_exist\ndef poll_get_view(request, poll_id):\n Poll.objects.get(slug=poll_id)\n\n return TemplateResponse(request,\n template='html/poll.xhtml',\n content_type='application/xhtml+xml')\n\n\n@require_POST\n@_except_poll_does_not_exist\ndef vote_post_view(request, poll_id):\n with transaction.atomic():\n poll = Poll.objects.get(slug=poll_id)\n\n if poll.ballots.count() >= settings.JAWANNDENN_MAX_VOTES_PER_POLL:\n return HttpResponseBadRequest(\n f'Maximum number of {settings.JAWANNDENN_MAX_VOTES_PER_POLL} '\n 'votes reached for this poll'\n ', please contact the administrator.')\n\n voter_name = safe_html(request.POST.get('voterName'))\n votes = [\n request.POST.get(f'option{i}', 'off') == 'on'\n for i\n in range(poll.options.count())\n ]\n\n ballot = Ballot.objects.create(poll=poll, voter_name=voter_name)\n for option, vote in zip(poll.options.order_by('position'), votes):\n Vote.objects.create(ballot=ballot, option=option, yes=vote)\n\n return redirect(poll)\n\n\n@require_GET\ndef serve_using_finders(request, path, show_indexes=False):\n \"\"\"\n Wrapper around django.views.static.serve that uses\n settings.STATICFILES_FINDERS rather than a single document_root\n \"\"\"\n for finder in _staticfile_finders:\n fullpath = finder.find(path)\n if fullpath:\n document_root = fullpath[:-len(path)] if path else fullpath\n return serve(request, path, document_root=document_root,\n show_indexes=show_indexes)\n else:\n raise Http404\n", "path": "jawanndenn/views.py"}]} | 1,865 | 229 |
gh_patches_debug_435 | rasdani/github-patches | git_diff | mitmproxy__mitmproxy-1864 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Missing documentation about unit testing inline scripts
At the company I work for, we are developing some inline scripts to use internally.

We are struggling to create tests for our scripts, because the only code we found that tests scripts is at [/mitmproxy/mitmproxy/blob/v0.17/test/mitmproxy/test_examples.py](/mitmproxy/mitmproxy/blob/v0.17/test/mitmproxy/test_examples.py).

The examples are quite good, but real documentation is missing for those who want to unit test.

Is there another place where I can find better (maybe more advanced) test examples?
--- END ISSUE ---
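One workable pattern for unit-testing an inline script without running a proxy is to drive its handlers with `event_sequence` from the `mitmproxy/events.py` listing below, using a canned flow from mitmproxy's test helpers. A rough sketch; `my_script` is a hypothetical module under test, and the `mitmproxy.test.tflow` import location can differ between mitmproxy versions:

```python
from mitmproxy import events
from mitmproxy.test import tflow  # test helpers; location/API may vary by version

import my_script  # hypothetical inline script exposing request(flow), response(flow), ...


def test_request_handler():
    flow = tflow.tflow()  # canned HTTPFlow with a request attached
    for name, f in events.event_sequence(flow):
        handler = getattr(my_script, name, None)
        if handler is not None:
            handler(f)
    # hypothetical assertion about the script's effect
    assert "x-my-script" in flow.request.headers
```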
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mitmproxy/events.py`
Content:
```
1 from mitmproxy import controller
2 from mitmproxy import http
3 from mitmproxy import tcp
4 from mitmproxy import websocket
5
6 Events = frozenset([
7 "clientconnect",
8 "clientdisconnect",
9 "serverconnect",
10 "serverdisconnect",
11
12 "tcp_start",
13 "tcp_message",
14 "tcp_error",
15 "tcp_end",
16
17 "http_connect",
18 "request",
19 "requestheaders",
20 "response",
21 "responseheaders",
22 "error",
23
24 "intercept",
25 "resume",
26
27 "websocket_handshake",
28 "websocket_start",
29 "websocket_message",
30 "websocket_error",
31 "websocket_end",
32
33 "next_layer",
34
35 "configure",
36 "done",
37 "log",
38 "start",
39 "tick",
40 ])
41
42
43 def event_sequence(f):
44 if isinstance(f, http.HTTPFlow):
45 if f.request:
46 yield "requestheaders", f
47 yield "request", f
48 if f.response:
49 yield "responseheaders", f
50 yield "response", f
51 if f.error:
52 yield "error", f
53 elif isinstance(f, websocket.WebSocketFlow):
54 messages = f.messages
55 f.messages = []
56 f.reply = controller.DummyReply()
57 yield "websocket_start", f
58 while messages:
59 f.messages.append(messages.pop(0))
60 yield "websocket_message", f
61 if f.error:
62 yield "websocket_error", f
63 yield "websocket_end", f
64 elif isinstance(f, tcp.TCPFlow):
65 messages = f.messages
66 f.messages = []
67 f.reply = controller.DummyReply()
68 yield "tcp_start", f
69 while messages:
70 f.messages.append(messages.pop(0))
71 yield "tcp_message", f
72 if f.error:
73 yield "tcp_error", f
74 yield "tcp_end", f
75 else:
76 raise NotImplementedError
77
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/mitmproxy/events.py b/mitmproxy/events.py
--- a/mitmproxy/events.py
+++ b/mitmproxy/events.py
@@ -21,9 +21,6 @@
"responseheaders",
"error",
- "intercept",
- "resume",
-
"websocket_handshake",
"websocket_start",
"websocket_message",
| {"golden_diff": "diff --git a/mitmproxy/events.py b/mitmproxy/events.py\n--- a/mitmproxy/events.py\n+++ b/mitmproxy/events.py\n@@ -21,9 +21,6 @@\n \"responseheaders\",\n \"error\",\n \n- \"intercept\",\n- \"resume\",\n-\n \"websocket_handshake\",\n \"websocket_start\",\n \"websocket_message\",\n", "issue": "Missing documentation about unit testing inline scripts\nAt the company I work on we are developing some inline scripts to use internally.\n\nWe are struggling to create test for our scripts because the only code we found to test scripts are the ones at [/mitmproxy/mitmproxy/blob/v0.17/test/mitmproxy/test_examples.py](/mitmproxy/mitmproxy/blob/v0.17/test/mitmproxy/test_examples.py).\n\nThe examples are quite good but real documentation is missing for the ones who wants to unit test.\n\nIs there another place where I can find better (maybe more advanced) test examples ?\n\n", "before_files": [{"content": "from mitmproxy import controller\nfrom mitmproxy import http\nfrom mitmproxy import tcp\nfrom mitmproxy import websocket\n\nEvents = frozenset([\n \"clientconnect\",\n \"clientdisconnect\",\n \"serverconnect\",\n \"serverdisconnect\",\n\n \"tcp_start\",\n \"tcp_message\",\n \"tcp_error\",\n \"tcp_end\",\n\n \"http_connect\",\n \"request\",\n \"requestheaders\",\n \"response\",\n \"responseheaders\",\n \"error\",\n\n \"intercept\",\n \"resume\",\n\n \"websocket_handshake\",\n \"websocket_start\",\n \"websocket_message\",\n \"websocket_error\",\n \"websocket_end\",\n\n \"next_layer\",\n\n \"configure\",\n \"done\",\n \"log\",\n \"start\",\n \"tick\",\n])\n\n\ndef event_sequence(f):\n if isinstance(f, http.HTTPFlow):\n if f.request:\n yield \"requestheaders\", f\n yield \"request\", f\n if f.response:\n yield \"responseheaders\", f\n yield \"response\", f\n if f.error:\n yield \"error\", f\n elif isinstance(f, websocket.WebSocketFlow):\n messages = f.messages\n f.messages = []\n f.reply = controller.DummyReply()\n yield \"websocket_start\", f\n while messages:\n f.messages.append(messages.pop(0))\n yield \"websocket_message\", f\n if f.error:\n yield \"websocket_error\", f\n yield \"websocket_end\", f\n elif isinstance(f, tcp.TCPFlow):\n messages = f.messages\n f.messages = []\n f.reply = controller.DummyReply()\n yield \"tcp_start\", f\n while messages:\n f.messages.append(messages.pop(0))\n yield \"tcp_message\", f\n if f.error:\n yield \"tcp_error\", f\n yield \"tcp_end\", f\n else:\n raise NotImplementedError\n", "path": "mitmproxy/events.py"}], "after_files": [{"content": "from mitmproxy import controller\nfrom mitmproxy import http\nfrom mitmproxy import tcp\nfrom mitmproxy import websocket\n\nEvents = frozenset([\n \"clientconnect\",\n \"clientdisconnect\",\n \"serverconnect\",\n \"serverdisconnect\",\n\n \"tcp_start\",\n \"tcp_message\",\n \"tcp_error\",\n \"tcp_end\",\n\n \"http_connect\",\n \"request\",\n \"requestheaders\",\n \"response\",\n \"responseheaders\",\n \"error\",\n\n \"websocket_handshake\",\n \"websocket_start\",\n \"websocket_message\",\n \"websocket_error\",\n \"websocket_end\",\n\n \"next_layer\",\n\n \"configure\",\n \"done\",\n \"log\",\n \"start\",\n \"tick\",\n])\n\n\ndef event_sequence(f):\n if isinstance(f, http.HTTPFlow):\n if f.request:\n yield \"requestheaders\", f\n yield \"request\", f\n if f.response:\n yield \"responseheaders\", f\n yield \"response\", f\n if f.error:\n yield \"error\", f\n elif isinstance(f, websocket.WebSocketFlow):\n messages = f.messages\n f.messages = []\n f.reply = controller.DummyReply()\n yield \"websocket_start\", f\n 
while messages:\n f.messages.append(messages.pop(0))\n yield \"websocket_message\", f\n if f.error:\n yield \"websocket_error\", f\n yield \"websocket_end\", f\n elif isinstance(f, tcp.TCPFlow):\n messages = f.messages\n f.messages = []\n f.reply = controller.DummyReply()\n yield \"tcp_start\", f\n while messages:\n f.messages.append(messages.pop(0))\n yield \"tcp_message\", f\n if f.error:\n yield \"tcp_error\", f\n yield \"tcp_end\", f\n else:\n raise NotImplementedError\n", "path": "mitmproxy/events.py"}]} | 933 | 82 |
gh_patches_debug_30774 | rasdani/github-patches | git_diff | ietf-tools__datatracker-5914 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Draft submission with empty strings in date element fails with generic "Meta-Data errors found!" message
### Describe the issue
I tried submitting an XML draft and it failed hard with a "Meta-Data error found" message. Very odd, since it passes the submission checks.

There is a closed issue that sounds similar, but apparently it was not fixed.

I can send the XML, but this issue form doesn't allow that file type.
<img width="1009" alt="Image" src="https://github.com/ietf-tools/datatracker/assets/6423658/fb3e7616-ea04-46e8-a6d2-0d359888f7b7">
### Code of Conduct
- [X] I agree to follow the [IETF's Code of Conduct](https://github.com/ietf-tools/.github/blob/main/CODE_OF_CONDUCT.md)
--- END ISSUE ---
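The generic failure in the screenshot is consistent with a falsy-but-not-None value slipping out of the date handling in `get_creation_date` (see the file below) and reaching `datetime.date`, whose constructor rejects anything that is not a valid day number. A minimal illustration (what xml2rfc actually returns for empty date attributes is an assumption here):

```python
import datetime

for bad_day in ("", 0):
    try:
        datetime.date(2023, 6, bad_day)
    except (TypeError, ValueError) as exc:
        print(f"datetime.date(2023, 6, {bad_day!r}) raises: {exc}")
```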
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ietf/utils/xmldraft.py`
Content:
```
1 # Copyright The IETF Trust 2022, All Rights Reserved
2 # -*- coding: utf-8 -*-
3 import datetime
4 import io
5 import re
6 import xml2rfc
7
8 import debug # pyflakes: ignore
9
10 from contextlib import ExitStack
11 from xml2rfc.util.date import augment_date, extract_date
12 from ietf.utils.timezone import date_today
13
14 from .draft import Draft
15
16
17 class XMLDraft(Draft):
18 """Draft from XML source
19
20 Not all methods from the superclass are implemented yet.
21 """
22 def __init__(self, xml_file):
23 """Initialize XMLDraft instance
24
25 :parameter xml_file: path to file containing XML source
26 """
27 super().__init__()
28 # cast xml_file to str so, e.g., this will work with a Path
29 self.xmltree, self.xml_version = self.parse_xml(str(xml_file))
30 self.xmlroot = self.xmltree.getroot()
31 self.filename, self.revision = self._parse_docname()
32
33 @staticmethod
34 def parse_xml(filename):
35 """Parse XML draft
36
37 Converts to xml2rfc v3 schema, then returns the root of the v3 tree and the original
38 xml version.
39 """
40 orig_write_out = xml2rfc.log.write_out
41 orig_write_err = xml2rfc.log.write_err
42 parser_out = io.StringIO()
43 parser_err = io.StringIO()
44
45 with ExitStack() as stack:
46 @stack.callback
47 def cleanup(): # called when context exited, even if there's an exception
48 xml2rfc.log.write_out = orig_write_out
49 xml2rfc.log.write_err = orig_write_err
50
51 xml2rfc.log.write_out = parser_out
52 xml2rfc.log.write_err = parser_err
53
54 parser = xml2rfc.XmlRfcParser(filename, quiet=True)
55 try:
56 tree = parser.parse()
57 except Exception as e:
58 raise XMLParseError(parser_out.getvalue(), parser_err.getvalue()) from e
59
60 xml_version = tree.getroot().get('version', '2')
61 if xml_version == '2':
62 v2v3 = xml2rfc.V2v3XmlWriter(tree)
63 tree.tree = v2v3.convert2to3()
64 return tree, xml_version
65
66 def _document_name(self, ref):
67 """Get document name from reference."""
68 series = ["rfc", "bcp", "fyi", "std"]
69 # handle xinclude first
70 # FIXME: this assumes the xinclude is a bibxml href; if it isn't, there can
71 # still be false negatives. it would be better to expand the xinclude and parse
72 # its seriesInfo.
73 if ref.tag.endswith("}include"):
74 name = re.search(
75 rf"reference\.({'|'.join(series).upper()})\.(\d{{4}})\.xml",
76 ref.attrib["href"],
77 )
78 if name:
79 return f"{name.group(1)}{int(name.group(2))}".lower()
80 name = re.search(
81 r"reference\.I-D\.(?:draft-)?(.*)\.xml", ref.attrib["href"]
82 )
83 if name:
84 return f"draft-{name.group(1)}"
85 # can't extract the name, give up
86 return ""
87
88 # check the anchor next
89 anchor = ref.get("anchor").lower() # always give back lowercase
90 label = anchor.rstrip("0123456789") # remove trailing digits
91 if label in series:
92 number = int(anchor[len(label) :])
93 return f"{label}{number}"
94
95 # if we couldn't find a match so far, try the seriesInfo
96 series_query = " or ".join(f"@name='{x.upper()}'" for x in series)
97 for info in ref.xpath(
98 f"./seriesInfo[{series_query} or @name='Internet-Draft']"
99 ):
100 if not info.attrib["value"]:
101 continue
102 if info.attrib["name"] == "Internet-Draft":
103 return info.attrib["value"]
104 else:
105 return f'{info.attrib["name"].lower()}{info.attrib["value"]}'
106 return ""
107
108 def _reference_section_type(self, section_name):
109 """Determine reference type from name of references section"""
110 if section_name:
111 section_name = section_name.lower()
112 if 'normative' in section_name:
113 return self.REF_TYPE_NORMATIVE
114 elif 'informative' in section_name:
115 return self.REF_TYPE_INFORMATIVE
116 return self.REF_TYPE_UNKNOWN
117
118 def _reference_section_name(self, section_elt):
119 section_name = section_elt.findtext('name')
120 if section_name is None and 'title' in section_elt.keys():
121 section_name = section_elt.get('title') # fall back to title if we have it
122 return section_name
123
124 def _parse_docname(self):
125 docname = self.xmlroot.attrib.get('docName')
126 revmatch = re.match(
127 r'^(?P<filename>.+?)(?:-(?P<rev>[0-9][0-9]))?$',
128 docname,
129
130 )
131 if revmatch is None:
132 raise ValueError('Unable to parse docName')
133 # If a group had no match it is None
134 return revmatch.group('filename'), revmatch.group('rev')
135
136 def get_title(self):
137 return self.xmlroot.findtext('front/title').strip()
138
139 def get_creation_date(self):
140 date_elt = self.xmlroot.find("front/date")
141 if date_elt is not None:
142 # ths mimics handling of date elements in the xml2rfc text/html writers
143 today = date_today()
144 year, month, day = extract_date(date_elt, today)
145 year, month, day = augment_date(year, month, day, today)
146 if day is None:
147 # Must choose a day for a datetime.date. Per RFC 7991 sect 2.17, we use
148 # today's date if it is consistent with the rest of the date. Otherwise,
149 # arbitrariy (and consistent with the text parser) assume the 15th.
150 if year == today.year and month == today.month:
151 day = today.day
152 else:
153 day = 15
154 return datetime.date(year, month, day)
155 return None
156
157 # todo fix the implementation of XMLDraft.get_abstract()
158 #
159 # This code was pulled from ietf.submit.forms where it existed for some time.
160 # It does not work, at least with modern xml2rfc. This assumes that the abstract
161 # is simply text in the front/abstract node, but the XML schema wraps the actual
162 # abstract text in <t> elements (and allows <dl>, <ol>, and <ul> as well). As a
163 # result, this method normally returns an empty string, which is later replaced by
164 # the abstract parsed from the rendered text. For now, I a commenting this out
165 # and making it explicit that the abstract always comes from the text format.
166 #
167 # def get_abstract(self):
168 # """Extract the abstract"""
169 # abstract = self.xmlroot.findtext('front/abstract')
170 # return abstract.strip() if abstract else ''
171
172 def get_author_list(self):
173 """Get detailed author list
174
175 Returns a list of dicts with the following keys:
176 name, first_name, middle_initial, last_name,
177 name_suffix, email, country, affiliation
178 Values will be None if not available
179 """
180 result = []
181 empty_author = {
182 k: None for k in [
183 'name', 'first_name', 'middle_initial', 'last_name',
184 'name_suffix', 'email', 'country', 'affiliation',
185 ]
186 }
187
188 for author in self.xmlroot.findall('front/author'):
189 info = {
190 'name': author.attrib.get('fullname'),
191 'email': author.findtext('address/email'),
192 'affiliation': author.findtext('organization'),
193 }
194 elem = author.find('address/postal/country')
195 if elem is not None:
196 ascii_country = elem.get('ascii', None)
197 info['country'] = ascii_country if ascii_country else elem.text
198 for item in info:
199 if info[item]:
200 info[item] = info[item].strip()
201 result.append(empty_author | info) # merge, preferring info
202 return result
203
204 def get_refs(self):
205 """Extract references from the draft"""
206 refs = {}
207 # accept nested <references> sections
208 for section in self.xmlroot.findall("back//references"):
209 ref_type = self._reference_section_type(
210 self._reference_section_name(section)
211 )
212 for ref in (
213 section.findall("./reference")
214 + section.findall("./referencegroup")
215 + section.findall(
216 "./xi:include", {"xi": "http://www.w3.org/2001/XInclude"}
217 )
218 ):
219 name = self._document_name(ref)
220 if name:
221 refs[name] = ref_type
222 return refs
223
224
225 class XMLParseError(Exception):
226 """An error occurred while parsing"""
227 def __init__(self, out: str, err: str, *args):
228 super().__init__(*args)
229 self._out = out
230 self._err = err
231
232 def parser_msgs(self):
233 return self._out.splitlines() + self._err.splitlines()
234
```
--- END FILES ---
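For orientation, a minimal usage sketch of the class above; the draft filename is hypothetical and must point at a parseable XML draft on disk:

```python
from ietf.utils.xmldraft import XMLDraft

draft = XMLDraft("draft-example-00.xml")  # hypothetical local file
print(draft.filename, draft.revision)
print(draft.get_creation_date())  # the method at the heart of this issue
```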
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/ietf/utils/xmldraft.py b/ietf/utils/xmldraft.py
--- a/ietf/utils/xmldraft.py
+++ b/ietf/utils/xmldraft.py
@@ -136,24 +136,27 @@
def get_title(self):
return self.xmlroot.findtext('front/title').strip()
+ @staticmethod
+ def parse_creation_date(date_elt):
+ if date_elt is None:
+ return None
+ today = date_today()
+ # ths mimics handling of date elements in the xml2rfc text/html writers
+ year, month, day = extract_date(date_elt, today)
+ year, month, day = augment_date(year, month, day, today)
+ if not day:
+ # Must choose a day for a datetime.date. Per RFC 7991 sect 2.17, we use
+ # today's date if it is consistent with the rest of the date. Otherwise,
+ # arbitrariy (and consistent with the text parser) assume the 15th.
+ if year == today.year and month == today.month:
+ day = today.day
+ else:
+ day = 15
+ return datetime.date(year, month, day)
+
def get_creation_date(self):
- date_elt = self.xmlroot.find("front/date")
- if date_elt is not None:
- # ths mimics handling of date elements in the xml2rfc text/html writers
- today = date_today()
- year, month, day = extract_date(date_elt, today)
- year, month, day = augment_date(year, month, day, today)
- if day is None:
- # Must choose a day for a datetime.date. Per RFC 7991 sect 2.17, we use
- # today's date if it is consistent with the rest of the date. Otherwise,
- # arbitrariy (and consistent with the text parser) assume the 15th.
- if year == today.year and month == today.month:
- day = today.day
- else:
- day = 15
- return datetime.date(year, month, day)
- return None
-
+ return self.parse_creation_date(self.xmlroot.find("front/date"))
+
# todo fix the implementation of XMLDraft.get_abstract()
#
# This code was pulled from ietf.submit.forms where it existed for some time.
| {"golden_diff": "diff --git a/ietf/utils/xmldraft.py b/ietf/utils/xmldraft.py\n--- a/ietf/utils/xmldraft.py\n+++ b/ietf/utils/xmldraft.py\n@@ -136,24 +136,27 @@\n def get_title(self):\n return self.xmlroot.findtext('front/title').strip()\n \n+ @staticmethod\n+ def parse_creation_date(date_elt):\n+ if date_elt is None:\n+ return None\n+ today = date_today()\n+ # ths mimics handling of date elements in the xml2rfc text/html writers\n+ year, month, day = extract_date(date_elt, today)\n+ year, month, day = augment_date(year, month, day, today)\n+ if not day:\n+ # Must choose a day for a datetime.date. Per RFC 7991 sect 2.17, we use\n+ # today's date if it is consistent with the rest of the date. Otherwise,\n+ # arbitrariy (and consistent with the text parser) assume the 15th.\n+ if year == today.year and month == today.month:\n+ day = today.day\n+ else:\n+ day = 15\n+ return datetime.date(year, month, day)\n+\n def get_creation_date(self):\n- date_elt = self.xmlroot.find(\"front/date\")\n- if date_elt is not None:\n- # ths mimics handling of date elements in the xml2rfc text/html writers\n- today = date_today()\n- year, month, day = extract_date(date_elt, today)\n- year, month, day = augment_date(year, month, day, today)\n- if day is None:\n- # Must choose a day for a datetime.date. Per RFC 7991 sect 2.17, we use\n- # today's date if it is consistent with the rest of the date. Otherwise,\n- # arbitrariy (and consistent with the text parser) assume the 15th.\n- if year == today.year and month == today.month:\n- day = today.day\n- else:\n- day = 15\n- return datetime.date(year, month, day)\n- return None\n- \n+ return self.parse_creation_date(self.xmlroot.find(\"front/date\"))\n+\n # todo fix the implementation of XMLDraft.get_abstract()\n #\n # This code was pulled from ietf.submit.forms where it existed for some time.\n", "issue": "Draft submission with empty strings in date element fails with generic \"Meta-Data errors found!\" message\n### Describe the issue\n\nI tried submitting an xml draft, hard fail due to \"Meta-Data error found\". 
Very odd since it passes the submission checks.\r\n\r\nThere is a closed issue that sounds similar, but apparently not fixed.\r\n\r\nI can send the xml, but this doesn't allow that file type.\r\n\r\n<img width=\"1009\" alt=\"Image\" src=\"https://github.com/ietf-tools/datatracker/assets/6423658/fb3e7616-ea04-46e8-a6d2-0d359888f7b7\">\r\n\r\n\n\n### Code of Conduct\n\n- [X] I agree to follow the [IETF's Code of Conduct](https://github.com/ietf-tools/.github/blob/main/CODE_OF_CONDUCT.md)\n", "before_files": [{"content": "# Copyright The IETF Trust 2022, All Rights Reserved\n# -*- coding: utf-8 -*-\nimport datetime\nimport io\nimport re\nimport xml2rfc\n\nimport debug # pyflakes: ignore\n\nfrom contextlib import ExitStack\nfrom xml2rfc.util.date import augment_date, extract_date\nfrom ietf.utils.timezone import date_today\n\nfrom .draft import Draft\n\n\nclass XMLDraft(Draft):\n \"\"\"Draft from XML source\n\n Not all methods from the superclass are implemented yet.\n \"\"\"\n def __init__(self, xml_file):\n \"\"\"Initialize XMLDraft instance\n\n :parameter xml_file: path to file containing XML source\n \"\"\"\n super().__init__()\n # cast xml_file to str so, e.g., this will work with a Path\n self.xmltree, self.xml_version = self.parse_xml(str(xml_file))\n self.xmlroot = self.xmltree.getroot()\n self.filename, self.revision = self._parse_docname()\n\n @staticmethod\n def parse_xml(filename):\n \"\"\"Parse XML draft\n\n Converts to xml2rfc v3 schema, then returns the root of the v3 tree and the original\n xml version.\n \"\"\"\n orig_write_out = xml2rfc.log.write_out\n orig_write_err = xml2rfc.log.write_err\n parser_out = io.StringIO()\n parser_err = io.StringIO()\n\n with ExitStack() as stack:\n @stack.callback\n def cleanup(): # called when context exited, even if there's an exception\n xml2rfc.log.write_out = orig_write_out\n xml2rfc.log.write_err = orig_write_err\n\n xml2rfc.log.write_out = parser_out\n xml2rfc.log.write_err = parser_err\n\n parser = xml2rfc.XmlRfcParser(filename, quiet=True)\n try:\n tree = parser.parse()\n except Exception as e:\n raise XMLParseError(parser_out.getvalue(), parser_err.getvalue()) from e\n\n xml_version = tree.getroot().get('version', '2')\n if xml_version == '2':\n v2v3 = xml2rfc.V2v3XmlWriter(tree)\n tree.tree = v2v3.convert2to3()\n return tree, xml_version\n\n def _document_name(self, ref):\n \"\"\"Get document name from reference.\"\"\"\n series = [\"rfc\", \"bcp\", \"fyi\", \"std\"]\n # handle xinclude first\n # FIXME: this assumes the xinclude is a bibxml href; if it isn't, there can\n # still be false negatives. 
it would be better to expand the xinclude and parse\n # its seriesInfo.\n if ref.tag.endswith(\"}include\"):\n name = re.search(\n rf\"reference\\.({'|'.join(series).upper()})\\.(\\d{{4}})\\.xml\",\n ref.attrib[\"href\"],\n )\n if name:\n return f\"{name.group(1)}{int(name.group(2))}\".lower()\n name = re.search(\n r\"reference\\.I-D\\.(?:draft-)?(.*)\\.xml\", ref.attrib[\"href\"]\n )\n if name:\n return f\"draft-{name.group(1)}\"\n # can't extract the name, give up\n return \"\"\n\n # check the anchor next\n anchor = ref.get(\"anchor\").lower() # always give back lowercase\n label = anchor.rstrip(\"0123456789\") # remove trailing digits\n if label in series:\n number = int(anchor[len(label) :])\n return f\"{label}{number}\"\n\n # if we couldn't find a match so far, try the seriesInfo\n series_query = \" or \".join(f\"@name='{x.upper()}'\" for x in series)\n for info in ref.xpath(\n f\"./seriesInfo[{series_query} or @name='Internet-Draft']\"\n ):\n if not info.attrib[\"value\"]:\n continue\n if info.attrib[\"name\"] == \"Internet-Draft\":\n return info.attrib[\"value\"]\n else:\n return f'{info.attrib[\"name\"].lower()}{info.attrib[\"value\"]}'\n return \"\"\n\n def _reference_section_type(self, section_name):\n \"\"\"Determine reference type from name of references section\"\"\"\n if section_name:\n section_name = section_name.lower()\n if 'normative' in section_name:\n return self.REF_TYPE_NORMATIVE\n elif 'informative' in section_name:\n return self.REF_TYPE_INFORMATIVE\n return self.REF_TYPE_UNKNOWN\n\n def _reference_section_name(self, section_elt):\n section_name = section_elt.findtext('name')\n if section_name is None and 'title' in section_elt.keys():\n section_name = section_elt.get('title') # fall back to title if we have it\n return section_name\n\n def _parse_docname(self):\n docname = self.xmlroot.attrib.get('docName')\n revmatch = re.match(\n r'^(?P<filename>.+?)(?:-(?P<rev>[0-9][0-9]))?$',\n docname,\n\n )\n if revmatch is None:\n raise ValueError('Unable to parse docName')\n # If a group had no match it is None\n return revmatch.group('filename'), revmatch.group('rev')\n\n def get_title(self):\n return self.xmlroot.findtext('front/title').strip()\n\n def get_creation_date(self):\n date_elt = self.xmlroot.find(\"front/date\")\n if date_elt is not None:\n # ths mimics handling of date elements in the xml2rfc text/html writers\n today = date_today()\n year, month, day = extract_date(date_elt, today)\n year, month, day = augment_date(year, month, day, today)\n if day is None:\n # Must choose a day for a datetime.date. Per RFC 7991 sect 2.17, we use\n # today's date if it is consistent with the rest of the date. Otherwise,\n # arbitrariy (and consistent with the text parser) assume the 15th.\n if year == today.year and month == today.month:\n day = today.day\n else:\n day = 15\n return datetime.date(year, month, day)\n return None\n \n # todo fix the implementation of XMLDraft.get_abstract()\n #\n # This code was pulled from ietf.submit.forms where it existed for some time.\n # It does not work, at least with modern xml2rfc. This assumes that the abstract\n # is simply text in the front/abstract node, but the XML schema wraps the actual\n # abstract text in <t> elements (and allows <dl>, <ol>, and <ul> as well). As a\n # result, this method normally returns an empty string, which is later replaced by\n # the abstract parsed from the rendered text. 
For now, I a commenting this out\n # and making it explicit that the abstract always comes from the text format.\n #\n # def get_abstract(self):\n # \"\"\"Extract the abstract\"\"\"\n # abstract = self.xmlroot.findtext('front/abstract')\n # return abstract.strip() if abstract else ''\n\n def get_author_list(self):\n \"\"\"Get detailed author list\n\n Returns a list of dicts with the following keys:\n name, first_name, middle_initial, last_name,\n name_suffix, email, country, affiliation\n Values will be None if not available\n \"\"\"\n result = []\n empty_author = {\n k: None for k in [\n 'name', 'first_name', 'middle_initial', 'last_name',\n 'name_suffix', 'email', 'country', 'affiliation',\n ]\n }\n\n for author in self.xmlroot.findall('front/author'):\n info = {\n 'name': author.attrib.get('fullname'),\n 'email': author.findtext('address/email'),\n 'affiliation': author.findtext('organization'),\n }\n elem = author.find('address/postal/country')\n if elem is not None:\n ascii_country = elem.get('ascii', None)\n info['country'] = ascii_country if ascii_country else elem.text\n for item in info:\n if info[item]:\n info[item] = info[item].strip()\n result.append(empty_author | info) # merge, preferring info\n return result\n\n def get_refs(self):\n \"\"\"Extract references from the draft\"\"\"\n refs = {}\n # accept nested <references> sections\n for section in self.xmlroot.findall(\"back//references\"):\n ref_type = self._reference_section_type(\n self._reference_section_name(section)\n )\n for ref in (\n section.findall(\"./reference\")\n + section.findall(\"./referencegroup\")\n + section.findall(\n \"./xi:include\", {\"xi\": \"http://www.w3.org/2001/XInclude\"}\n )\n ):\n name = self._document_name(ref)\n if name:\n refs[name] = ref_type\n return refs\n\n\nclass XMLParseError(Exception):\n \"\"\"An error occurred while parsing\"\"\"\n def __init__(self, out: str, err: str, *args):\n super().__init__(*args)\n self._out = out\n self._err = err\n\n def parser_msgs(self):\n return self._out.splitlines() + self._err.splitlines()\n", "path": "ietf/utils/xmldraft.py"}], "after_files": [{"content": "# Copyright The IETF Trust 2022, All Rights Reserved\n# -*- coding: utf-8 -*-\nimport datetime\nimport io\nimport re\nimport xml2rfc\n\nimport debug # pyflakes: ignore\n\nfrom contextlib import ExitStack\nfrom xml2rfc.util.date import augment_date, extract_date\nfrom ietf.utils.timezone import date_today\n\nfrom .draft import Draft\n\n\nclass XMLDraft(Draft):\n \"\"\"Draft from XML source\n\n Not all methods from the superclass are implemented yet.\n \"\"\"\n def __init__(self, xml_file):\n \"\"\"Initialize XMLDraft instance\n\n :parameter xml_file: path to file containing XML source\n \"\"\"\n super().__init__()\n # cast xml_file to str so, e.g., this will work with a Path\n self.xmltree, self.xml_version = self.parse_xml(str(xml_file))\n self.xmlroot = self.xmltree.getroot()\n self.filename, self.revision = self._parse_docname()\n\n @staticmethod\n def parse_xml(filename):\n \"\"\"Parse XML draft\n\n Converts to xml2rfc v3 schema, then returns the root of the v3 tree and the original\n xml version.\n \"\"\"\n orig_write_out = xml2rfc.log.write_out\n orig_write_err = xml2rfc.log.write_err\n parser_out = io.StringIO()\n parser_err = io.StringIO()\n\n with ExitStack() as stack:\n @stack.callback\n def cleanup(): # called when context exited, even if there's an exception\n xml2rfc.log.write_out = orig_write_out\n xml2rfc.log.write_err = orig_write_err\n\n xml2rfc.log.write_out = parser_out\n 
xml2rfc.log.write_err = parser_err\n\n parser = xml2rfc.XmlRfcParser(filename, quiet=True)\n try:\n tree = parser.parse()\n except Exception as e:\n raise XMLParseError(parser_out.getvalue(), parser_err.getvalue()) from e\n\n xml_version = tree.getroot().get('version', '2')\n if xml_version == '2':\n v2v3 = xml2rfc.V2v3XmlWriter(tree)\n tree.tree = v2v3.convert2to3()\n return tree, xml_version\n\n def _document_name(self, ref):\n \"\"\"Get document name from reference.\"\"\"\n series = [\"rfc\", \"bcp\", \"fyi\", \"std\"]\n # handle xinclude first\n # FIXME: this assumes the xinclude is a bibxml href; if it isn't, there can\n # still be false negatives. it would be better to expand the xinclude and parse\n # its seriesInfo.\n if ref.tag.endswith(\"}include\"):\n name = re.search(\n rf\"reference\\.({'|'.join(series).upper()})\\.(\\d{{4}})\\.xml\",\n ref.attrib[\"href\"],\n )\n if name:\n return f\"{name.group(1)}{int(name.group(2))}\".lower()\n name = re.search(\n r\"reference\\.I-D\\.(?:draft-)?(.*)\\.xml\", ref.attrib[\"href\"]\n )\n if name:\n return f\"draft-{name.group(1)}\"\n # can't extract the name, give up\n return \"\"\n\n # check the anchor next\n anchor = ref.get(\"anchor\").lower() # always give back lowercase\n label = anchor.rstrip(\"0123456789\") # remove trailing digits\n if label in series:\n number = int(anchor[len(label) :])\n return f\"{label}{number}\"\n\n # if we couldn't find a match so far, try the seriesInfo\n series_query = \" or \".join(f\"@name='{x.upper()}'\" for x in series)\n for info in ref.xpath(\n f\"./seriesInfo[{series_query} or @name='Internet-Draft']\"\n ):\n if not info.attrib[\"value\"]:\n continue\n if info.attrib[\"name\"] == \"Internet-Draft\":\n return info.attrib[\"value\"]\n else:\n return f'{info.attrib[\"name\"].lower()}{info.attrib[\"value\"]}'\n return \"\"\n\n def _reference_section_type(self, section_name):\n \"\"\"Determine reference type from name of references section\"\"\"\n if section_name:\n section_name = section_name.lower()\n if 'normative' in section_name:\n return self.REF_TYPE_NORMATIVE\n elif 'informative' in section_name:\n return self.REF_TYPE_INFORMATIVE\n return self.REF_TYPE_UNKNOWN\n\n def _reference_section_name(self, section_elt):\n section_name = section_elt.findtext('name')\n if section_name is None and 'title' in section_elt.keys():\n section_name = section_elt.get('title') # fall back to title if we have it\n return section_name\n\n def _parse_docname(self):\n docname = self.xmlroot.attrib.get('docName')\n revmatch = re.match(\n r'^(?P<filename>.+?)(?:-(?P<rev>[0-9][0-9]))?$',\n docname,\n\n )\n if revmatch is None:\n raise ValueError('Unable to parse docName')\n # If a group had no match it is None\n return revmatch.group('filename'), revmatch.group('rev')\n\n def get_title(self):\n return self.xmlroot.findtext('front/title').strip()\n\n @staticmethod\n def parse_creation_date(date_elt):\n if date_elt is None:\n return None\n today = date_today()\n # ths mimics handling of date elements in the xml2rfc text/html writers\n year, month, day = extract_date(date_elt, today)\n year, month, day = augment_date(year, month, day, today)\n if not day:\n # Must choose a day for a datetime.date. Per RFC 7991 sect 2.17, we use\n # today's date if it is consistent with the rest of the date. 
Otherwise,\n # arbitrariy (and consistent with the text parser) assume the 15th.\n if year == today.year and month == today.month:\n day = today.day\n else:\n day = 15\n return datetime.date(year, month, day)\n\n def get_creation_date(self):\n return self.parse_creation_date(self.xmlroot.find(\"front/date\"))\n\n # todo fix the implementation of XMLDraft.get_abstract()\n #\n # This code was pulled from ietf.submit.forms where it existed for some time.\n # It does not work, at least with modern xml2rfc. This assumes that the abstract\n # is simply text in the front/abstract node, but the XML schema wraps the actual\n # abstract text in <t> elements (and allows <dl>, <ol>, and <ul> as well). As a\n # result, this method normally returns an empty string, which is later replaced by\n # the abstract parsed from the rendered text. For now, I a commenting this out\n # and making it explicit that the abstract always comes from the text format.\n #\n # def get_abstract(self):\n # \"\"\"Extract the abstract\"\"\"\n # abstract = self.xmlroot.findtext('front/abstract')\n # return abstract.strip() if abstract else ''\n\n def get_author_list(self):\n \"\"\"Get detailed author list\n\n Returns a list of dicts with the following keys:\n name, first_name, middle_initial, last_name,\n name_suffix, email, country, affiliation\n Values will be None if not available\n \"\"\"\n result = []\n empty_author = {\n k: None for k in [\n 'name', 'first_name', 'middle_initial', 'last_name',\n 'name_suffix', 'email', 'country', 'affiliation',\n ]\n }\n\n for author in self.xmlroot.findall('front/author'):\n info = {\n 'name': author.attrib.get('fullname'),\n 'email': author.findtext('address/email'),\n 'affiliation': author.findtext('organization'),\n }\n elem = author.find('address/postal/country')\n if elem is not None:\n ascii_country = elem.get('ascii', None)\n info['country'] = ascii_country if ascii_country else elem.text\n for item in info:\n if info[item]:\n info[item] = info[item].strip()\n result.append(empty_author | info) # merge, preferring info\n return result\n\n def get_refs(self):\n \"\"\"Extract references from the draft\"\"\"\n refs = {}\n # accept nested <references> sections\n for section in self.xmlroot.findall(\"back//references\"):\n ref_type = self._reference_section_type(\n self._reference_section_name(section)\n )\n for ref in (\n section.findall(\"./reference\")\n + section.findall(\"./referencegroup\")\n + section.findall(\n \"./xi:include\", {\"xi\": \"http://www.w3.org/2001/XInclude\"}\n )\n ):\n name = self._document_name(ref)\n if name:\n refs[name] = ref_type\n return refs\n\n\nclass XMLParseError(Exception):\n \"\"\"An error occurred while parsing\"\"\"\n def __init__(self, out: str, err: str, *args):\n super().__init__(*args)\n self._out = out\n self._err = err\n\n def parser_msgs(self):\n return self._out.splitlines() + self._err.splitlines()\n", "path": "ietf/utils/xmldraft.py"}]} | 3,101 | 552 |
gh_patches_debug_13355 | rasdani/github-patches | git_diff | huggingface__text-generation-inference-1260 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
PEFT support does not work with local directories
### System Info
text-generation-inference version: main branch
The rest of the system info is probably not relevant.
### Information
- [X] Docker
- [X] The CLI directly
### Tasks
- [X] An officially supported command
- [ ] My own modifications
### Reproduction
Steps to reproduce:
1. Create a directory that contains PEFT weights, for example: `mypeft` which contains two files: `adapter_config.json` and `adapter_model.bin`
2. Try to download the base model using the following command: `text-generation-server download-weights mypeft`
### Expected behavior
It should be able to download the base model weights and merge the PEFT weights into the base model.
However, as mentioned in [this comment](https://github.com/huggingface/text-generation-inference/pull/762#issuecomment-1728042436):
> it seems the checks for PEFT model is only in the block checking if the model is not a local model
There is a workaround as documented in [this comment](https://github.com/huggingface/text-generation-inference/pull/762#issuecomment-1729689087)
--- END ISSUE ---
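The flaw described above is that the `adapter_config.json` probe in `download_weights` (see the CLI listing below) only runs on the `not is_local_model` branch, so a local directory containing nothing but adapter weights never triggers the base-model download and merge. A rough sketch of the kind of local check that closes the gap (illustrative only, not the merged patch):

```python
import os


def has_local_peft_adapter(model_id: str) -> bool:
    """True if model_id is a local directory shipping PEFT adapter weights."""
    return os.path.isdir(model_id) and os.path.isfile(
        os.path.join(model_id, "adapter_config.json")
    )


# Inside download_weights() this would gate the same merge step the hub branch
# already performs:
#   if has_local_peft_adapter(model_id):
#       utils.download_and_unload_peft(model_id, revision, trust_remote_code=trust_remote_code)
```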
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `server/text_generation_server/cli.py`
Content:
```
1 import os
2 import sys
3 import typer
4
5 from pathlib import Path
6 from loguru import logger
7 from typing import Optional
8 from enum import Enum
9 from huggingface_hub import hf_hub_download
10
11
12 app = typer.Typer()
13
14
15 class Quantization(str, Enum):
16 bitsandbytes = "bitsandbytes"
17 bitsandbytes_nf4 = "bitsandbytes-nf4"
18 bitsandbytes_fp4 = "bitsandbytes-fp4"
19 gptq = "gptq"
20 awq = "awq"
21 eetq = "eetq"
22
23
24 class Dtype(str, Enum):
25 float16 = "float16"
26 bloat16 = "bfloat16"
27
28
29 @app.command()
30 def serve(
31 model_id: str,
32 revision: Optional[str] = None,
33 sharded: bool = False,
34 quantize: Optional[Quantization] = None,
35 dtype: Optional[Dtype] = None,
36 trust_remote_code: bool = False,
37 uds_path: Path = "/tmp/text-generation-server",
38 logger_level: str = "INFO",
39 json_output: bool = False,
40 otlp_endpoint: Optional[str] = None,
41 ):
42 if sharded:
43 assert (
44 os.getenv("RANK", None) is not None
45 ), "RANK must be set when sharded is True"
46 assert (
47 os.getenv("WORLD_SIZE", None) is not None
48 ), "WORLD_SIZE must be set when sharded is True"
49 assert (
50 os.getenv("MASTER_ADDR", None) is not None
51 ), "MASTER_ADDR must be set when sharded is True"
52 assert (
53 os.getenv("MASTER_PORT", None) is not None
54 ), "MASTER_PORT must be set when sharded is True"
55
56 # Remove default handler
57 logger.remove()
58 logger.add(
59 sys.stdout,
60 format="{message}",
61 filter="text_generation_server",
62 level=logger_level,
63 serialize=json_output,
64 backtrace=True,
65 diagnose=False,
66 )
67
68 # Import here after the logger is added to log potential import exceptions
69 from text_generation_server import server
70 from text_generation_server.tracing import setup_tracing
71
72 # Setup OpenTelemetry distributed tracing
73 if otlp_endpoint is not None:
74 setup_tracing(shard=os.getenv("RANK", 0), otlp_endpoint=otlp_endpoint)
75
76 # Downgrade enum into str for easier management later on
77 quantize = None if quantize is None else quantize.value
78 dtype = None if dtype is None else dtype.value
79 if dtype is not None and quantize is not None:
80 raise RuntimeError(
81 "Only 1 can be set between `dtype` and `quantize`, as they both decide how goes the final model."
82 )
83 server.serve(
84 model_id, revision, sharded, quantize, dtype, trust_remote_code, uds_path
85 )
86
87
88 @app.command()
89 def download_weights(
90 model_id: str,
91 revision: Optional[str] = None,
92 extension: str = ".safetensors",
93 auto_convert: bool = True,
94 logger_level: str = "INFO",
95 json_output: bool = False,
96 trust_remote_code: bool = False,
97 ):
98 # Remove default handler
99 logger.remove()
100 logger.add(
101 sys.stdout,
102 format="{message}",
103 filter="text_generation_server",
104 level=logger_level,
105 serialize=json_output,
106 backtrace=True,
107 diagnose=False,
108 )
109
110 # Import here after the logger is added to log potential import exceptions
111 from text_generation_server import utils
112
113 # Test if files were already download
114 try:
115 utils.weight_files(model_id, revision, extension)
116 logger.info("Files are already present on the host. " "Skipping download.")
117 return
118 # Local files not found
119 except (utils.LocalEntryNotFoundError, FileNotFoundError):
120 pass
121
122 is_local_model = (Path(model_id).exists() and Path(model_id).is_dir()) or os.getenv(
123 "WEIGHTS_CACHE_OVERRIDE", None
124 ) is not None
125
126 if not is_local_model:
127 try:
128 adapter_config_filename = hf_hub_download(
129 model_id, revision=revision, filename="adapter_config.json"
130 )
131 utils.download_and_unload_peft(
132 model_id, revision, trust_remote_code=trust_remote_code
133 )
134 is_local_model = True
135 utils.weight_files(model_id, revision, extension)
136 return
137 except (utils.LocalEntryNotFoundError, utils.EntryNotFoundError):
138 pass
139
140 # Try to download weights from the hub
141 try:
142 filenames = utils.weight_hub_files(model_id, revision, extension)
143 utils.download_weights(filenames, model_id, revision)
144 # Successfully downloaded weights
145 return
146
147 # No weights found on the hub with this extension
148 except utils.EntryNotFoundError as e:
149 # Check if we want to automatically convert to safetensors or if we can use .bin weights instead
150 if not extension == ".safetensors" or not auto_convert:
151 raise e
152
153 # Try to see if there are local pytorch weights
154 try:
155 # Get weights for a local model, a hub cached model and inside the WEIGHTS_CACHE_OVERRIDE
156 local_pt_files = utils.weight_files(model_id, revision, ".bin")
157
158 # No local pytorch weights
159 except utils.LocalEntryNotFoundError:
160 if extension == ".safetensors":
161 logger.warning(
162 f"No safetensors weights found for model {model_id} at revision {revision}. "
163 f"Downloading PyTorch weights."
164 )
165
166 # Try to see if there are pytorch weights on the hub
167 pt_filenames = utils.weight_hub_files(model_id, revision, ".bin")
168 # Download pytorch weights
169 local_pt_files = utils.download_weights(pt_filenames, model_id, revision)
170
171 if auto_convert:
172 logger.warning(
173 f"No safetensors weights found for model {model_id} at revision {revision}. "
174 f"Converting PyTorch weights to safetensors."
175 )
176
177 # Safetensors final filenames
178 local_st_files = [
179 p.parent / f"{p.stem.lstrip('pytorch_')}.safetensors"
180 for p in local_pt_files
181 ]
182 try:
183 import transformers
184 import json
185
186 if is_local_model:
187 config_filename = os.path.join(model_id, "config.json")
188 else:
189 config_filename = hf_hub_download(
190 model_id, revision=revision, filename="config.json"
191 )
192 with open(config_filename, "r") as f:
193 config = json.load(f)
194 architecture = config["architectures"][0]
195
196 class_ = getattr(transformers, architecture)
197
198 # Name for this varible depends on transformers version.
199 discard_names = getattr(class_, "_tied_weights_keys", [])
200
201 except Exception as e:
202 discard_names = []
203 # Convert pytorch weights to safetensors
204 utils.convert_files(local_pt_files, local_st_files, discard_names)
205
206
207 @app.command()
208 def quantize(
209 model_id: str,
210 output_dir: str,
211 revision: Optional[str] = None,
212 logger_level: str = "INFO",
213 json_output: bool = False,
214 trust_remote_code: bool = False,
215 upload_to_model_id: Optional[str] = None,
216 percdamp: float = 0.01,
217 act_order: bool = False,
218 ):
219 if revision is None:
220 revision = "main"
221 download_weights(
222 model_id=model_id,
223 revision=revision,
224 logger_level=logger_level,
225 json_output=json_output,
226 )
227 from text_generation_server.utils.gptq.quantize import quantize
228
229 quantize(
230 model_id=model_id,
231 bits=4,
232 groupsize=128,
233 output_dir=output_dir,
234 revision=revision,
235 trust_remote_code=trust_remote_code,
236 upload_to_model_id=upload_to_model_id,
237 percdamp=percdamp,
238 act_order=act_order,
239 )
240
241
242 if __name__ == "__main__":
243 app()
244
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/server/text_generation_server/cli.py b/server/text_generation_server/cli.py
--- a/server/text_generation_server/cli.py
+++ b/server/text_generation_server/cli.py
@@ -150,6 +150,17 @@
if not extension == ".safetensors" or not auto_convert:
raise e
+ else:
+ # Try to load as a local PEFT model
+ try:
+ utils.download_and_unload_peft(
+ model_id, revision, trust_remote_code=trust_remote_code
+ )
+ utils.weight_files(model_id, revision, extension)
+ return
+ except (utils.LocalEntryNotFoundError, utils.EntryNotFoundError):
+ pass
+
# Try to see if there are local pytorch weights
try:
# Get weights for a local model, a hub cached model and inside the WEIGHTS_CACHE_OVERRIDE
| {"golden_diff": "diff --git a/server/text_generation_server/cli.py b/server/text_generation_server/cli.py\n--- a/server/text_generation_server/cli.py\n+++ b/server/text_generation_server/cli.py\n@@ -150,6 +150,17 @@\n if not extension == \".safetensors\" or not auto_convert:\n raise e\n \n+ else:\n+ # Try to load as a local PEFT model\n+ try:\n+ utils.download_and_unload_peft(\n+ model_id, revision, trust_remote_code=trust_remote_code\n+ )\n+ utils.weight_files(model_id, revision, extension)\n+ return\n+ except (utils.LocalEntryNotFoundError, utils.EntryNotFoundError):\n+ pass\n+\n # Try to see if there are local pytorch weights\n try:\n # Get weights for a local model, a hub cached model and inside the WEIGHTS_CACHE_OVERRIDE\n", "issue": "PEFT support does not work with local directories\n### System Info\n\ntext-generation-inference version: main branch\r\n\r\nThe rest of the system info is probably not relevant.\n\n### Information\n\n- [X] Docker\n- [X] The CLI directly\n\n### Tasks\n\n- [X] An officially supported command\n- [ ] My own modifications\n\n### Reproduction\n\nSteps to reproduce:\r\n\r\n1. Create a directory that contains PEFT weights, for example: `mypeft` which contains two files: `adapter_config.json` and `adapter_model.bin`\r\n2. Try to download the base model using the following command: `text-generation-server download-weights mypeft`\n\n### Expected behavior\n\nIt should be able to download the base model weights and merge the PEFT weights into the base model.\r\n\r\nHowever, as mentioned in [this comment](https://github.com/huggingface/text-generation-inference/pull/762#issuecomment-1728042436):\r\n\r\n> it seems the checks for PEFT model is only in the block checking if the model is not a local model\r\n\r\nThere is a workaround as documented in [this comment](https://github.com/huggingface/text-generation-inference/pull/762#issuecomment-1729689087)\r\n\r\n\n", "before_files": [{"content": "import os\nimport sys\nimport typer\n\nfrom pathlib import Path\nfrom loguru import logger\nfrom typing import Optional\nfrom enum import Enum\nfrom huggingface_hub import hf_hub_download\n\n\napp = typer.Typer()\n\n\nclass Quantization(str, Enum):\n bitsandbytes = \"bitsandbytes\"\n bitsandbytes_nf4 = \"bitsandbytes-nf4\"\n bitsandbytes_fp4 = \"bitsandbytes-fp4\"\n gptq = \"gptq\"\n awq = \"awq\"\n eetq = \"eetq\"\n\n\nclass Dtype(str, Enum):\n float16 = \"float16\"\n bloat16 = \"bfloat16\"\n\n\[email protected]()\ndef serve(\n model_id: str,\n revision: Optional[str] = None,\n sharded: bool = False,\n quantize: Optional[Quantization] = None,\n dtype: Optional[Dtype] = None,\n trust_remote_code: bool = False,\n uds_path: Path = \"/tmp/text-generation-server\",\n logger_level: str = \"INFO\",\n json_output: bool = False,\n otlp_endpoint: Optional[str] = None,\n):\n if sharded:\n assert (\n os.getenv(\"RANK\", None) is not None\n ), \"RANK must be set when sharded is True\"\n assert (\n os.getenv(\"WORLD_SIZE\", None) is not None\n ), \"WORLD_SIZE must be set when sharded is True\"\n assert (\n os.getenv(\"MASTER_ADDR\", None) is not None\n ), \"MASTER_ADDR must be set when sharded is True\"\n assert (\n os.getenv(\"MASTER_PORT\", None) is not None\n ), \"MASTER_PORT must be set when sharded is True\"\n\n # Remove default handler\n logger.remove()\n logger.add(\n sys.stdout,\n format=\"{message}\",\n filter=\"text_generation_server\",\n level=logger_level,\n serialize=json_output,\n backtrace=True,\n diagnose=False,\n )\n\n # Import here after the logger is added to log 
potential import exceptions\n from text_generation_server import server\n from text_generation_server.tracing import setup_tracing\n\n # Setup OpenTelemetry distributed tracing\n if otlp_endpoint is not None:\n setup_tracing(shard=os.getenv(\"RANK\", 0), otlp_endpoint=otlp_endpoint)\n\n # Downgrade enum into str for easier management later on\n quantize = None if quantize is None else quantize.value\n dtype = None if dtype is None else dtype.value\n if dtype is not None and quantize is not None:\n raise RuntimeError(\n \"Only 1 can be set between `dtype` and `quantize`, as they both decide how goes the final model.\"\n )\n server.serve(\n model_id, revision, sharded, quantize, dtype, trust_remote_code, uds_path\n )\n\n\[email protected]()\ndef download_weights(\n model_id: str,\n revision: Optional[str] = None,\n extension: str = \".safetensors\",\n auto_convert: bool = True,\n logger_level: str = \"INFO\",\n json_output: bool = False,\n trust_remote_code: bool = False,\n):\n # Remove default handler\n logger.remove()\n logger.add(\n sys.stdout,\n format=\"{message}\",\n filter=\"text_generation_server\",\n level=logger_level,\n serialize=json_output,\n backtrace=True,\n diagnose=False,\n )\n\n # Import here after the logger is added to log potential import exceptions\n from text_generation_server import utils\n\n # Test if files were already download\n try:\n utils.weight_files(model_id, revision, extension)\n logger.info(\"Files are already present on the host. \" \"Skipping download.\")\n return\n # Local files not found\n except (utils.LocalEntryNotFoundError, FileNotFoundError):\n pass\n\n is_local_model = (Path(model_id).exists() and Path(model_id).is_dir()) or os.getenv(\n \"WEIGHTS_CACHE_OVERRIDE\", None\n ) is not None\n\n if not is_local_model:\n try:\n adapter_config_filename = hf_hub_download(\n model_id, revision=revision, filename=\"adapter_config.json\"\n )\n utils.download_and_unload_peft(\n model_id, revision, trust_remote_code=trust_remote_code\n )\n is_local_model = True\n utils.weight_files(model_id, revision, extension)\n return\n except (utils.LocalEntryNotFoundError, utils.EntryNotFoundError):\n pass\n\n # Try to download weights from the hub\n try:\n filenames = utils.weight_hub_files(model_id, revision, extension)\n utils.download_weights(filenames, model_id, revision)\n # Successfully downloaded weights\n return\n\n # No weights found on the hub with this extension\n except utils.EntryNotFoundError as e:\n # Check if we want to automatically convert to safetensors or if we can use .bin weights instead\n if not extension == \".safetensors\" or not auto_convert:\n raise e\n\n # Try to see if there are local pytorch weights\n try:\n # Get weights for a local model, a hub cached model and inside the WEIGHTS_CACHE_OVERRIDE\n local_pt_files = utils.weight_files(model_id, revision, \".bin\")\n\n # No local pytorch weights\n except utils.LocalEntryNotFoundError:\n if extension == \".safetensors\":\n logger.warning(\n f\"No safetensors weights found for model {model_id} at revision {revision}. \"\n f\"Downloading PyTorch weights.\"\n )\n\n # Try to see if there are pytorch weights on the hub\n pt_filenames = utils.weight_hub_files(model_id, revision, \".bin\")\n # Download pytorch weights\n local_pt_files = utils.download_weights(pt_filenames, model_id, revision)\n\n if auto_convert:\n logger.warning(\n f\"No safetensors weights found for model {model_id} at revision {revision}. 
\"\n f\"Converting PyTorch weights to safetensors.\"\n )\n\n # Safetensors final filenames\n local_st_files = [\n p.parent / f\"{p.stem.lstrip('pytorch_')}.safetensors\"\n for p in local_pt_files\n ]\n try:\n import transformers\n import json\n\n if is_local_model:\n config_filename = os.path.join(model_id, \"config.json\")\n else:\n config_filename = hf_hub_download(\n model_id, revision=revision, filename=\"config.json\"\n )\n with open(config_filename, \"r\") as f:\n config = json.load(f)\n architecture = config[\"architectures\"][0]\n\n class_ = getattr(transformers, architecture)\n\n # Name for this varible depends on transformers version.\n discard_names = getattr(class_, \"_tied_weights_keys\", [])\n\n except Exception as e:\n discard_names = []\n # Convert pytorch weights to safetensors\n utils.convert_files(local_pt_files, local_st_files, discard_names)\n\n\[email protected]()\ndef quantize(\n model_id: str,\n output_dir: str,\n revision: Optional[str] = None,\n logger_level: str = \"INFO\",\n json_output: bool = False,\n trust_remote_code: bool = False,\n upload_to_model_id: Optional[str] = None,\n percdamp: float = 0.01,\n act_order: bool = False,\n):\n if revision is None:\n revision = \"main\"\n download_weights(\n model_id=model_id,\n revision=revision,\n logger_level=logger_level,\n json_output=json_output,\n )\n from text_generation_server.utils.gptq.quantize import quantize\n\n quantize(\n model_id=model_id,\n bits=4,\n groupsize=128,\n output_dir=output_dir,\n revision=revision,\n trust_remote_code=trust_remote_code,\n upload_to_model_id=upload_to_model_id,\n percdamp=percdamp,\n act_order=act_order,\n )\n\n\nif __name__ == \"__main__\":\n app()\n", "path": "server/text_generation_server/cli.py"}], "after_files": [{"content": "import os\nimport sys\nimport typer\n\nfrom pathlib import Path\nfrom loguru import logger\nfrom typing import Optional\nfrom enum import Enum\nfrom huggingface_hub import hf_hub_download\n\n\napp = typer.Typer()\n\n\nclass Quantization(str, Enum):\n bitsandbytes = \"bitsandbytes\"\n bitsandbytes_nf4 = \"bitsandbytes-nf4\"\n bitsandbytes_fp4 = \"bitsandbytes-fp4\"\n gptq = \"gptq\"\n awq = \"awq\"\n eetq = \"eetq\"\n\n\nclass Dtype(str, Enum):\n float16 = \"float16\"\n bloat16 = \"bfloat16\"\n\n\[email protected]()\ndef serve(\n model_id: str,\n revision: Optional[str] = None,\n sharded: bool = False,\n quantize: Optional[Quantization] = None,\n dtype: Optional[Dtype] = None,\n trust_remote_code: bool = False,\n uds_path: Path = \"/tmp/text-generation-server\",\n logger_level: str = \"INFO\",\n json_output: bool = False,\n otlp_endpoint: Optional[str] = None,\n):\n if sharded:\n assert (\n os.getenv(\"RANK\", None) is not None\n ), \"RANK must be set when sharded is True\"\n assert (\n os.getenv(\"WORLD_SIZE\", None) is not None\n ), \"WORLD_SIZE must be set when sharded is True\"\n assert (\n os.getenv(\"MASTER_ADDR\", None) is not None\n ), \"MASTER_ADDR must be set when sharded is True\"\n assert (\n os.getenv(\"MASTER_PORT\", None) is not None\n ), \"MASTER_PORT must be set when sharded is True\"\n\n # Remove default handler\n logger.remove()\n logger.add(\n sys.stdout,\n format=\"{message}\",\n filter=\"text_generation_server\",\n level=logger_level,\n serialize=json_output,\n backtrace=True,\n diagnose=False,\n )\n\n # Import here after the logger is added to log potential import exceptions\n from text_generation_server import server\n from text_generation_server.tracing import setup_tracing\n\n # Setup OpenTelemetry distributed tracing\n if 
otlp_endpoint is not None:\n setup_tracing(shard=os.getenv(\"RANK\", 0), otlp_endpoint=otlp_endpoint)\n\n # Downgrade enum into str for easier management later on\n quantize = None if quantize is None else quantize.value\n dtype = None if dtype is None else dtype.value\n if dtype is not None and quantize is not None:\n raise RuntimeError(\n \"Only 1 can be set between `dtype` and `quantize`, as they both decide how goes the final model.\"\n )\n server.serve(\n model_id, revision, sharded, quantize, dtype, trust_remote_code, uds_path\n )\n\n\[email protected]()\ndef download_weights(\n model_id: str,\n revision: Optional[str] = None,\n extension: str = \".safetensors\",\n auto_convert: bool = True,\n logger_level: str = \"INFO\",\n json_output: bool = False,\n trust_remote_code: bool = False,\n):\n # Remove default handler\n logger.remove()\n logger.add(\n sys.stdout,\n format=\"{message}\",\n filter=\"text_generation_server\",\n level=logger_level,\n serialize=json_output,\n backtrace=True,\n diagnose=False,\n )\n\n # Import here after the logger is added to log potential import exceptions\n from text_generation_server import utils\n\n # Test if files were already download\n try:\n utils.weight_files(model_id, revision, extension)\n logger.info(\"Files are already present on the host. \" \"Skipping download.\")\n return\n # Local files not found\n except (utils.LocalEntryNotFoundError, FileNotFoundError):\n pass\n\n is_local_model = (Path(model_id).exists() and Path(model_id).is_dir()) or os.getenv(\n \"WEIGHTS_CACHE_OVERRIDE\", None\n ) is not None\n\n if not is_local_model:\n try:\n adapter_config_filename = hf_hub_download(\n model_id, revision=revision, filename=\"adapter_config.json\"\n )\n utils.download_and_unload_peft(\n model_id, revision, trust_remote_code=trust_remote_code\n )\n is_local_model = True\n utils.weight_files(model_id, revision, extension)\n return\n except (utils.LocalEntryNotFoundError, utils.EntryNotFoundError):\n pass\n\n # Try to download weights from the hub\n try:\n filenames = utils.weight_hub_files(model_id, revision, extension)\n utils.download_weights(filenames, model_id, revision)\n # Successfully downloaded weights\n return\n\n # No weights found on the hub with this extension\n except utils.EntryNotFoundError as e:\n # Check if we want to automatically convert to safetensors or if we can use .bin weights instead\n if not extension == \".safetensors\" or not auto_convert:\n raise e\n\n else:\n # Try to load as a local PEFT model\n try:\n utils.download_and_unload_peft(\n model_id, revision, trust_remote_code=trust_remote_code\n )\n utils.weight_files(model_id, revision, extension)\n return\n except (utils.LocalEntryNotFoundError, utils.EntryNotFoundError):\n pass\n\n # Try to see if there are local pytorch weights\n try:\n # Get weights for a local model, a hub cached model and inside the WEIGHTS_CACHE_OVERRIDE\n local_pt_files = utils.weight_files(model_id, revision, \".bin\")\n\n # No local pytorch weights\n except utils.LocalEntryNotFoundError:\n if extension == \".safetensors\":\n logger.warning(\n f\"No safetensors weights found for model {model_id} at revision {revision}. 
\"\n f\"Downloading PyTorch weights.\"\n )\n\n # Try to see if there are pytorch weights on the hub\n pt_filenames = utils.weight_hub_files(model_id, revision, \".bin\")\n # Download pytorch weights\n local_pt_files = utils.download_weights(pt_filenames, model_id, revision)\n\n if auto_convert:\n logger.warning(\n f\"No safetensors weights found for model {model_id} at revision {revision}. \"\n f\"Converting PyTorch weights to safetensors.\"\n )\n\n # Safetensors final filenames\n local_st_files = [\n p.parent / f\"{p.stem.lstrip('pytorch_')}.safetensors\"\n for p in local_pt_files\n ]\n try:\n import transformers\n import json\n\n if is_local_model:\n config_filename = os.path.join(model_id, \"config.json\")\n else:\n config_filename = hf_hub_download(\n model_id, revision=revision, filename=\"config.json\"\n )\n with open(config_filename, \"r\") as f:\n config = json.load(f)\n architecture = config[\"architectures\"][0]\n\n class_ = getattr(transformers, architecture)\n\n # Name for this varible depends on transformers version.\n discard_names = getattr(class_, \"_tied_weights_keys\", [])\n\n except Exception as e:\n discard_names = []\n # Convert pytorch weights to safetensors\n utils.convert_files(local_pt_files, local_st_files, discard_names)\n\n\[email protected]()\ndef quantize(\n model_id: str,\n output_dir: str,\n revision: Optional[str] = None,\n logger_level: str = \"INFO\",\n json_output: bool = False,\n trust_remote_code: bool = False,\n upload_to_model_id: Optional[str] = None,\n percdamp: float = 0.01,\n act_order: bool = False,\n):\n if revision is None:\n revision = \"main\"\n download_weights(\n model_id=model_id,\n revision=revision,\n logger_level=logger_level,\n json_output=json_output,\n )\n from text_generation_server.utils.gptq.quantize import quantize\n\n quantize(\n model_id=model_id,\n bits=4,\n groupsize=128,\n output_dir=output_dir,\n revision=revision,\n trust_remote_code=trust_remote_code,\n upload_to_model_id=upload_to_model_id,\n percdamp=percdamp,\n act_order=act_order,\n )\n\n\nif __name__ == \"__main__\":\n app()\n", "path": "server/text_generation_server/cli.py"}]} | 2,918 | 191 |
gh_patches_debug_39167 | rasdani/github-patches | git_diff | wemake-services__wemake-python-styleguide-621 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Incorrect line number for Z331
# Bug report
## What's wrong
```python
def foreign_jwt_authentication(token: bytes): # noqa: Z331
    """
    Goes to the remote provider to validate the token.

    Returns the payload if everything is fine.
    Otherwise raises an exception.
    """
    unverified_payload = jwt.decode( # type: ignore
        token, key=None, verify=False,
    )
    _make_authentication_request(settings.TOKEN_VERIFICATION_URL, token)
    return unverified_payload
```
Error:
```
./server/rest_app/logics/auth.py:41:1: Z331 Found local variable that are only used in `return` statements
```
## How it should be
Should be reported on the ` return unverified_payload` line.
--- END ISSUE ---
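The gist of a fix is to remember which `ast.Return` node each returned name came from and attach the violation to that node rather than to the enclosing function. A small self-contained illustration with plain `ast` (illustrative names, not the plugin's actual visitor code):
```python
import ast


def returned_name_nodes(func: ast.AST) -> dict:
    """Map each variable name returned by `func` to the ast.Return node returning it."""
    returns = {}
    for sub_node in ast.walk(func):
        if isinstance(sub_node, ast.Return) and isinstance(sub_node.value, ast.Name):
            returns[sub_node.value.id] = sub_node
    return returns


tree = ast.parse("def f(x):\n    y = x + 1\n    return y\n")
for name, ret in returned_name_nodes(tree.body[0]).items():
    # Report on ret.lineno (the return statement), not on the line of the def.
    print(name, "is returned on line", ret.lineno)  # -> y is returned on line 3
```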
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `wemake_python_styleguide/visitors/ast/keywords.py`
Content:
```
1 # -*- coding: utf-8 -*-
2
3 import ast
4 from collections import defaultdict
5 from typing import ClassVar, Dict, List, Type, Union
6
7 from typing_extensions import final
8
9 from wemake_python_styleguide.logics.nodes import get_parent
10 from wemake_python_styleguide.logics.variables import (
11 is_valid_block_variable_definition,
12 )
13 from wemake_python_styleguide.types import AnyFunctionDef, AnyNodes
14 from wemake_python_styleguide.violations.best_practices import (
15 ContextManagerVariableDefinitionViolation,
16 RaiseNotImplementedViolation,
17 WrongKeywordViolation,
18 )
19 from wemake_python_styleguide.violations.consistency import (
20 InconsistentReturnVariableViolation,
21 InconsistentReturnViolation,
22 InconsistentYieldViolation,
23 MultipleContextManagerAssignmentsViolation,
24 )
25 from wemake_python_styleguide.visitors.base import BaseNodeVisitor
26 from wemake_python_styleguide.visitors.decorators import alias
27
28 AnyWith = Union[ast.With, ast.AsyncWith]
29 ReturningViolations = Union[
30 Type[InconsistentReturnViolation],
31 Type[InconsistentYieldViolation],
32 ]
33
34
35 @final
36 class WrongRaiseVisitor(BaseNodeVisitor):
37 """Finds wrong ``raise`` keywords."""
38
39 def _check_exception_type(self, node: ast.Raise) -> None:
40 exception = getattr(node, 'exc', None)
41 if exception is None:
42 return
43
44 exception_func = getattr(exception, 'func', None)
45 if exception_func:
46 exception = exception_func
47
48 exception_name = getattr(exception, 'id', None)
49 if exception_name == 'NotImplemented':
50 self.add_violation(RaiseNotImplementedViolation(node))
51
52 def visit_Raise(self, node: ast.Raise) -> None:
53 """
54 Checks how ``raise`` keyword is used.
55
56 Raises:
57 RaiseNotImplementedViolation
58
59 """
60 self._check_exception_type(node)
61 self.generic_visit(node)
62
63
64 @final
65 @alias('visit_any_function', (
66 'visit_FunctionDef',
67 'visit_AsyncFunctionDef',
68 ))
69 class ConsistentReturningVisitor(BaseNodeVisitor):
70 """Finds incorrect and inconsistent ``return`` and ``yield`` nodes."""
71
72 def _check_last_return_in_function(self, node: ast.Return) -> None:
73 parent = get_parent(node)
74 if not isinstance(parent, (ast.FunctionDef, ast.AsyncFunctionDef)):
75 return
76
77 if node is parent.body[-1] and node.value is None:
78 self.add_violation(InconsistentReturnViolation(node))
79
80 def _iterate_returning_values(
81 self,
82 node: AnyFunctionDef,
83 returning_type, # mypy is not ok with this type declaration
84 violation: ReturningViolations,
85 ):
86 returns: List[ast.Return] = []
87 has_values = False
88 for sub_node in ast.walk(node):
89 if isinstance(sub_node, returning_type):
90 if sub_node.value:
91 has_values = True
92 returns.append(sub_node)
93
94 for sub_node in returns:
95 if not sub_node.value and has_values:
96 self.add_violation(violation(sub_node))
97
98 def _check_return_values(self, node: AnyFunctionDef) -> None:
99 self._iterate_returning_values(
100 node, ast.Return, InconsistentReturnViolation,
101 )
102
103 def _check_yield_values(self, node: AnyFunctionDef) -> None:
104 self._iterate_returning_values(
105 node, ast.Yield, InconsistentYieldViolation,
106 )
107
108 def visit_Return(self, node: ast.Return) -> None:
109 """
110 Checks ``return`` statements for consistency.
111
112 Raises:
113 InconsistentReturnViolation
114
115 """
116 self._check_last_return_in_function(node)
117 self.generic_visit(node)
118
119 def visit_any_function(self, node: AnyFunctionDef) -> None:
120 """
121 Helper to get all ``return`` and ``yield`` nodes in a function at once.
122
123 Raises:
124 InconsistentReturnViolation
125 InconsistentYieldViolation
126
127 """
128 self._check_return_values(node)
129 self._check_yield_values(node)
130 self.generic_visit(node)
131
132
133 @final
134 class WrongKeywordVisitor(BaseNodeVisitor):
135 """Finds wrong keywords."""
136
137 _forbidden_keywords: ClassVar[AnyNodes] = (
138 ast.Pass,
139 ast.Delete,
140 ast.Global,
141 ast.Nonlocal,
142 )
143
144 def _check_keyword(self, node: ast.AST) -> None:
145 if isinstance(node, self._forbidden_keywords):
146 if isinstance(node, ast.Delete):
147 message = 'del'
148 else:
149 message = node.__class__.__qualname__.lower()
150 self.add_violation(WrongKeywordViolation(node, text=message))
151
152 def visit(self, node: ast.AST) -> None:
153 """
154 Used to find wrong keywords.
155
156 Raises:
157 WrongKeywordViolation
158
159 """
160 self._check_keyword(node)
161 self.generic_visit(node)
162
163
164 @final
165 @alias('visit_any_with', (
166 'visit_With',
167 'visit_AsyncWith',
168 ))
169 class WrongContextManagerVisitor(BaseNodeVisitor):
170 """Checks context managers."""
171
172 def _check_target_assignment(self, node: AnyWith):
173 if len(node.items) > 1:
174 self.add_violation(
175 MultipleContextManagerAssignmentsViolation(node),
176 )
177
178 def _check_variable_definitions(self, node: ast.withitem) -> None:
179 if node.optional_vars is None:
180 return
181
182 if not is_valid_block_variable_definition(node.optional_vars):
183 self.add_violation(
184 ContextManagerVariableDefinitionViolation(get_parent(node)),
185 )
186
187 def visit_withitem(self, node: ast.withitem) -> None:
188 """
189 Checks that all variables inside context managers defined correctly.
190
191 Raises:
192 ContextManagerVariableDefinitionViolation
193
194 """
195 self._check_variable_definitions(node)
196 self.generic_visit(node)
197
198 def visit_any_with(self, node: AnyWith) -> None:
199 """
200 Checks the number of assignments for context managers.
201
202 Raises:
203 MultipleContextManagerAssignmentsViolation
204
205 """
206 self._check_target_assignment(node)
207 self.generic_visit(node)
208
209
210 @final
211 @alias('visit_return_variable', (
212 'visit_AsyncFunctionDef',
213 'visit_FunctionDef',
214 ))
215 class ConsistentReturningVariableVisitor(BaseNodeVisitor):
216 """Finds variables that are only used in `return` statements."""
217
218 _checking_nodes: ClassVar[AnyNodes] = (
219 ast.Assign,
220 ast.AnnAssign,
221 ast.AugAssign,
222 ast.Return,
223 ast.Name,
224 )
225
226 def _get_assign_node_variables(self, node: List[ast.AST]) -> List[str]:
227 assign = []
228 for sub_node in node:
229 if isinstance(sub_node, ast.Assign):
230 if isinstance(sub_node.targets[0], ast.Name):
231 assign.append(sub_node.targets[0].id)
232 if isinstance(sub_node, ast.AnnAssign):
233 if isinstance(sub_node.target, ast.Name):
234 assign.append(sub_node.target.id)
235 return assign
236
237 def _get_name_nodes_variable(
238 self,
239 node: List[ast.AST],
240 ) -> Dict[str, List[ast.Name]]:
241 names: Dict[str, List[ast.Name]] = defaultdict(list)
242 for sub_node in node:
243 if isinstance(sub_node, ast.Name):
244 if isinstance(sub_node.ctx, ast.Load):
245 names[sub_node.id].append(sub_node)
246 if isinstance(sub_node, ast.AugAssign):
247 if isinstance(sub_node.target, ast.Name):
248 variable_name = sub_node.target.id
249 names[variable_name].append(sub_node.target)
250 return names
251
252 def _get_return_node_variables(
253 self,
254 node: List[ast.AST],
255 ) -> Dict[str, List[ast.Name]]:
256 returns: Dict[str, List[ast.Name]] = defaultdict(list)
257 for sub_node in node:
258 if isinstance(sub_node, ast.Return):
259 if isinstance(sub_node.value, ast.Name):
260 variable_name = sub_node.value.id
261 returns[variable_name].append(sub_node.value)
262 return returns
263
264 def _check_variables_for_return(self, node: AnyFunctionDef) -> None:
265 nodes = list(
266 filter(
267 lambda sub_node: isinstance(sub_node, self._checking_nodes),
268 ast.walk(node),
269 ),
270 )
271 assign = self._get_assign_node_variables(nodes)
272 names = self._get_name_nodes_variable(nodes)
273 returns = self._get_return_node_variables(nodes)
274
275 returns = {name: returns[name] for name in returns if name in assign}
276
277 for variable_name in returns:
278 if not set(names[variable_name]) - set(returns[variable_name]):
279 self.add_violation(InconsistentReturnVariableViolation(node))
280
281 def visit_return_variable(self, node: AnyFunctionDef) -> None:
282 """
283 Helper to get all ``return`` variables in a function at once.
284
285 Raises:
286 InconsistentReturnVariableViolation
287
288 """
289 self._check_variables_for_return(node)
290 self.generic_visit(node)
291
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/wemake_python_styleguide/visitors/ast/keywords.py b/wemake_python_styleguide/visitors/ast/keywords.py
--- a/wemake_python_styleguide/visitors/ast/keywords.py
+++ b/wemake_python_styleguide/visitors/ast/keywords.py
@@ -2,7 +2,7 @@
import ast
from collections import defaultdict
-from typing import ClassVar, Dict, List, Type, Union
+from typing import ClassVar, Dict, List, Tuple, Type, Union
from typing_extensions import final
@@ -26,6 +26,10 @@
from wemake_python_styleguide.visitors.decorators import alias
AnyWith = Union[ast.With, ast.AsyncWith]
+NamesAndReturns = Tuple[
+ Dict[str, List[ast.Name]],
+ Dict[str, ast.Return],
+]
ReturningViolations = Union[
Type[InconsistentReturnViolation],
Type[InconsistentYieldViolation],
@@ -252,14 +256,16 @@
def _get_return_node_variables(
self,
node: List[ast.AST],
- ) -> Dict[str, List[ast.Name]]:
+ ) -> NamesAndReturns:
returns: Dict[str, List[ast.Name]] = defaultdict(list)
+ return_sub_nodes: Dict[str, ast.Return] = defaultdict()
for sub_node in node:
if isinstance(sub_node, ast.Return):
if isinstance(sub_node.value, ast.Name):
variable_name = sub_node.value.id
returns[variable_name].append(sub_node.value)
- return returns
+ return_sub_nodes[variable_name] = sub_node
+ return returns, return_sub_nodes
def _check_variables_for_return(self, node: AnyFunctionDef) -> None:
nodes = list(
@@ -270,13 +276,20 @@
)
assign = self._get_assign_node_variables(nodes)
names = self._get_name_nodes_variable(nodes)
- returns = self._get_return_node_variables(nodes)
+ returns, return_sub_nodes = self._get_return_node_variables(nodes)
returns = {name: returns[name] for name in returns if name in assign}
+ self._check_for_violations(names, return_sub_nodes, returns)
+
+ def _check_for_violations(self, names, return_sub_nodes, returns) -> None:
for variable_name in returns:
if not set(names[variable_name]) - set(returns[variable_name]):
- self.add_violation(InconsistentReturnVariableViolation(node))
+ self.add_violation(
+ InconsistentReturnVariableViolation(
+ return_sub_nodes[variable_name],
+ ),
+ )
def visit_return_variable(self, node: AnyFunctionDef) -> None:
"""
| {"golden_diff": "diff --git a/wemake_python_styleguide/visitors/ast/keywords.py b/wemake_python_styleguide/visitors/ast/keywords.py\n--- a/wemake_python_styleguide/visitors/ast/keywords.py\n+++ b/wemake_python_styleguide/visitors/ast/keywords.py\n@@ -2,7 +2,7 @@\n \n import ast\n from collections import defaultdict\n-from typing import ClassVar, Dict, List, Type, Union\n+from typing import ClassVar, Dict, List, Tuple, Type, Union\n \n from typing_extensions import final\n \n@@ -26,6 +26,10 @@\n from wemake_python_styleguide.visitors.decorators import alias\n \n AnyWith = Union[ast.With, ast.AsyncWith]\n+NamesAndReturns = Tuple[\n+ Dict[str, List[ast.Name]],\n+ Dict[str, ast.Return],\n+]\n ReturningViolations = Union[\n Type[InconsistentReturnViolation],\n Type[InconsistentYieldViolation],\n@@ -252,14 +256,16 @@\n def _get_return_node_variables(\n self,\n node: List[ast.AST],\n- ) -> Dict[str, List[ast.Name]]:\n+ ) -> NamesAndReturns:\n returns: Dict[str, List[ast.Name]] = defaultdict(list)\n+ return_sub_nodes: Dict[str, ast.Return] = defaultdict()\n for sub_node in node:\n if isinstance(sub_node, ast.Return):\n if isinstance(sub_node.value, ast.Name):\n variable_name = sub_node.value.id\n returns[variable_name].append(sub_node.value)\n- return returns\n+ return_sub_nodes[variable_name] = sub_node\n+ return returns, return_sub_nodes\n \n def _check_variables_for_return(self, node: AnyFunctionDef) -> None:\n nodes = list(\n@@ -270,13 +276,20 @@\n )\n assign = self._get_assign_node_variables(nodes)\n names = self._get_name_nodes_variable(nodes)\n- returns = self._get_return_node_variables(nodes)\n+ returns, return_sub_nodes = self._get_return_node_variables(nodes)\n \n returns = {name: returns[name] for name in returns if name in assign}\n \n+ self._check_for_violations(names, return_sub_nodes, returns)\n+\n+ def _check_for_violations(self, names, return_sub_nodes, returns) -> None:\n for variable_name in returns:\n if not set(names[variable_name]) - set(returns[variable_name]):\n- self.add_violation(InconsistentReturnVariableViolation(node))\n+ self.add_violation(\n+ InconsistentReturnVariableViolation(\n+ return_sub_nodes[variable_name],\n+ ),\n+ )\n \n def visit_return_variable(self, node: AnyFunctionDef) -> None:\n \"\"\"\n", "issue": "Incorrect line number for Z331\n# Bug report\r\n\r\n## What's wrong\r\n\r\n```python\r\ndef foreign_jwt_authentication(token: bytes): # noqa: Z331\r\n \"\"\"\r\n Goes to the remote provider to validate the token.\r\n\r\n Returns the payload if everything is fine.\r\n Otherwise raises an exception.\r\n \"\"\"\r\n unverified_payload = jwt.decode( # type: ignore\r\n token, key=None, verify=False,\r\n )\r\n _make_authentication_request(settings.TOKEN_VERIFICATION_URL, token)\r\n return unverified_payload\r\n```\r\n\r\nError:\r\n\r\n```\r\n./server/rest_app/logics/auth.py:41:1: Z331 Found local variable that are only used in `return` statements\r\n```\r\n\r\n## How is that should be\r\n\r\nShould be reported on the ` return unverified_payload` line.\nIncorrect line number for Z331\n# Bug report\r\n\r\n## What's wrong\r\n\r\n```python\r\ndef foreign_jwt_authentication(token: bytes): # noqa: Z331\r\n \"\"\"\r\n Goes to the remote provider to validate the token.\r\n\r\n Returns the payload if everything is fine.\r\n Otherwise raises an exception.\r\n \"\"\"\r\n unverified_payload = jwt.decode( # type: ignore\r\n token, key=None, verify=False,\r\n )\r\n _make_authentication_request(settings.TOKEN_VERIFICATION_URL, token)\r\n return 
unverified_payload\r\n```\r\n\r\nError:\r\n\r\n```\r\n./server/rest_app/logics/auth.py:41:1: Z331 Found local variable that are only used in `return` statements\r\n```\r\n\r\n## How is that should be\r\n\r\nShould be reported on the ` return unverified_payload` line.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\nimport ast\nfrom collections import defaultdict\nfrom typing import ClassVar, Dict, List, Type, Union\n\nfrom typing_extensions import final\n\nfrom wemake_python_styleguide.logics.nodes import get_parent\nfrom wemake_python_styleguide.logics.variables import (\n is_valid_block_variable_definition,\n)\nfrom wemake_python_styleguide.types import AnyFunctionDef, AnyNodes\nfrom wemake_python_styleguide.violations.best_practices import (\n ContextManagerVariableDefinitionViolation,\n RaiseNotImplementedViolation,\n WrongKeywordViolation,\n)\nfrom wemake_python_styleguide.violations.consistency import (\n InconsistentReturnVariableViolation,\n InconsistentReturnViolation,\n InconsistentYieldViolation,\n MultipleContextManagerAssignmentsViolation,\n)\nfrom wemake_python_styleguide.visitors.base import BaseNodeVisitor\nfrom wemake_python_styleguide.visitors.decorators import alias\n\nAnyWith = Union[ast.With, ast.AsyncWith]\nReturningViolations = Union[\n Type[InconsistentReturnViolation],\n Type[InconsistentYieldViolation],\n]\n\n\n@final\nclass WrongRaiseVisitor(BaseNodeVisitor):\n \"\"\"Finds wrong ``raise`` keywords.\"\"\"\n\n def _check_exception_type(self, node: ast.Raise) -> None:\n exception = getattr(node, 'exc', None)\n if exception is None:\n return\n\n exception_func = getattr(exception, 'func', None)\n if exception_func:\n exception = exception_func\n\n exception_name = getattr(exception, 'id', None)\n if exception_name == 'NotImplemented':\n self.add_violation(RaiseNotImplementedViolation(node))\n\n def visit_Raise(self, node: ast.Raise) -> None:\n \"\"\"\n Checks how ``raise`` keyword is used.\n\n Raises:\n RaiseNotImplementedViolation\n\n \"\"\"\n self._check_exception_type(node)\n self.generic_visit(node)\n\n\n@final\n@alias('visit_any_function', (\n 'visit_FunctionDef',\n 'visit_AsyncFunctionDef',\n))\nclass ConsistentReturningVisitor(BaseNodeVisitor):\n \"\"\"Finds incorrect and inconsistent ``return`` and ``yield`` nodes.\"\"\"\n\n def _check_last_return_in_function(self, node: ast.Return) -> None:\n parent = get_parent(node)\n if not isinstance(parent, (ast.FunctionDef, ast.AsyncFunctionDef)):\n return\n\n if node is parent.body[-1] and node.value is None:\n self.add_violation(InconsistentReturnViolation(node))\n\n def _iterate_returning_values(\n self,\n node: AnyFunctionDef,\n returning_type, # mypy is not ok with this type declaration\n violation: ReturningViolations,\n ):\n returns: List[ast.Return] = []\n has_values = False\n for sub_node in ast.walk(node):\n if isinstance(sub_node, returning_type):\n if sub_node.value:\n has_values = True\n returns.append(sub_node)\n\n for sub_node in returns:\n if not sub_node.value and has_values:\n self.add_violation(violation(sub_node))\n\n def _check_return_values(self, node: AnyFunctionDef) -> None:\n self._iterate_returning_values(\n node, ast.Return, InconsistentReturnViolation,\n )\n\n def _check_yield_values(self, node: AnyFunctionDef) -> None:\n self._iterate_returning_values(\n node, ast.Yield, InconsistentYieldViolation,\n )\n\n def visit_Return(self, node: ast.Return) -> None:\n \"\"\"\n Checks ``return`` statements for consistency.\n\n Raises:\n InconsistentReturnViolation\n\n \"\"\"\n 
self._check_last_return_in_function(node)\n self.generic_visit(node)\n\n def visit_any_function(self, node: AnyFunctionDef) -> None:\n \"\"\"\n Helper to get all ``return`` and ``yield`` nodes in a function at once.\n\n Raises:\n InconsistentReturnViolation\n InconsistentYieldViolation\n\n \"\"\"\n self._check_return_values(node)\n self._check_yield_values(node)\n self.generic_visit(node)\n\n\n@final\nclass WrongKeywordVisitor(BaseNodeVisitor):\n \"\"\"Finds wrong keywords.\"\"\"\n\n _forbidden_keywords: ClassVar[AnyNodes] = (\n ast.Pass,\n ast.Delete,\n ast.Global,\n ast.Nonlocal,\n )\n\n def _check_keyword(self, node: ast.AST) -> None:\n if isinstance(node, self._forbidden_keywords):\n if isinstance(node, ast.Delete):\n message = 'del'\n else:\n message = node.__class__.__qualname__.lower()\n self.add_violation(WrongKeywordViolation(node, text=message))\n\n def visit(self, node: ast.AST) -> None:\n \"\"\"\n Used to find wrong keywords.\n\n Raises:\n WrongKeywordViolation\n\n \"\"\"\n self._check_keyword(node)\n self.generic_visit(node)\n\n\n@final\n@alias('visit_any_with', (\n 'visit_With',\n 'visit_AsyncWith',\n))\nclass WrongContextManagerVisitor(BaseNodeVisitor):\n \"\"\"Checks context managers.\"\"\"\n\n def _check_target_assignment(self, node: AnyWith):\n if len(node.items) > 1:\n self.add_violation(\n MultipleContextManagerAssignmentsViolation(node),\n )\n\n def _check_variable_definitions(self, node: ast.withitem) -> None:\n if node.optional_vars is None:\n return\n\n if not is_valid_block_variable_definition(node.optional_vars):\n self.add_violation(\n ContextManagerVariableDefinitionViolation(get_parent(node)),\n )\n\n def visit_withitem(self, node: ast.withitem) -> None:\n \"\"\"\n Checks that all variables inside context managers defined correctly.\n\n Raises:\n ContextManagerVariableDefinitionViolation\n\n \"\"\"\n self._check_variable_definitions(node)\n self.generic_visit(node)\n\n def visit_any_with(self, node: AnyWith) -> None:\n \"\"\"\n Checks the number of assignments for context managers.\n\n Raises:\n MultipleContextManagerAssignmentsViolation\n\n \"\"\"\n self._check_target_assignment(node)\n self.generic_visit(node)\n\n\n@final\n@alias('visit_return_variable', (\n 'visit_AsyncFunctionDef',\n 'visit_FunctionDef',\n))\nclass ConsistentReturningVariableVisitor(BaseNodeVisitor):\n \"\"\"Finds variables that are only used in `return` statements.\"\"\"\n\n _checking_nodes: ClassVar[AnyNodes] = (\n ast.Assign,\n ast.AnnAssign,\n ast.AugAssign,\n ast.Return,\n ast.Name,\n )\n\n def _get_assign_node_variables(self, node: List[ast.AST]) -> List[str]:\n assign = []\n for sub_node in node:\n if isinstance(sub_node, ast.Assign):\n if isinstance(sub_node.targets[0], ast.Name):\n assign.append(sub_node.targets[0].id)\n if isinstance(sub_node, ast.AnnAssign):\n if isinstance(sub_node.target, ast.Name):\n assign.append(sub_node.target.id)\n return assign\n\n def _get_name_nodes_variable(\n self,\n node: List[ast.AST],\n ) -> Dict[str, List[ast.Name]]:\n names: Dict[str, List[ast.Name]] = defaultdict(list)\n for sub_node in node:\n if isinstance(sub_node, ast.Name):\n if isinstance(sub_node.ctx, ast.Load):\n names[sub_node.id].append(sub_node)\n if isinstance(sub_node, ast.AugAssign):\n if isinstance(sub_node.target, ast.Name):\n variable_name = sub_node.target.id\n names[variable_name].append(sub_node.target)\n return names\n\n def _get_return_node_variables(\n self,\n node: List[ast.AST],\n ) -> Dict[str, List[ast.Name]]:\n returns: Dict[str, List[ast.Name]] = defaultdict(list)\n 
for sub_node in node:\n if isinstance(sub_node, ast.Return):\n if isinstance(sub_node.value, ast.Name):\n variable_name = sub_node.value.id\n returns[variable_name].append(sub_node.value)\n return returns\n\n def _check_variables_for_return(self, node: AnyFunctionDef) -> None:\n nodes = list(\n filter(\n lambda sub_node: isinstance(sub_node, self._checking_nodes),\n ast.walk(node),\n ),\n )\n assign = self._get_assign_node_variables(nodes)\n names = self._get_name_nodes_variable(nodes)\n returns = self._get_return_node_variables(nodes)\n\n returns = {name: returns[name] for name in returns if name in assign}\n\n for variable_name in returns:\n if not set(names[variable_name]) - set(returns[variable_name]):\n self.add_violation(InconsistentReturnVariableViolation(node))\n\n def visit_return_variable(self, node: AnyFunctionDef) -> None:\n \"\"\"\n Helper to get all ``return`` variables in a function at once.\n\n Raises:\n InconsistentReturnVariableViolation\n\n \"\"\"\n self._check_variables_for_return(node)\n self.generic_visit(node)\n", "path": "wemake_python_styleguide/visitors/ast/keywords.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\nimport ast\nfrom collections import defaultdict\nfrom typing import ClassVar, Dict, List, Tuple, Type, Union\n\nfrom typing_extensions import final\n\nfrom wemake_python_styleguide.logics.nodes import get_parent\nfrom wemake_python_styleguide.logics.variables import (\n is_valid_block_variable_definition,\n)\nfrom wemake_python_styleguide.types import AnyFunctionDef, AnyNodes\nfrom wemake_python_styleguide.violations.best_practices import (\n ContextManagerVariableDefinitionViolation,\n RaiseNotImplementedViolation,\n WrongKeywordViolation,\n)\nfrom wemake_python_styleguide.violations.consistency import (\n InconsistentReturnVariableViolation,\n InconsistentReturnViolation,\n InconsistentYieldViolation,\n MultipleContextManagerAssignmentsViolation,\n)\nfrom wemake_python_styleguide.visitors.base import BaseNodeVisitor\nfrom wemake_python_styleguide.visitors.decorators import alias\n\nAnyWith = Union[ast.With, ast.AsyncWith]\nNamesAndReturns = Tuple[\n Dict[str, List[ast.Name]],\n Dict[str, ast.Return],\n]\nReturningViolations = Union[\n Type[InconsistentReturnViolation],\n Type[InconsistentYieldViolation],\n]\n\n\n@final\nclass WrongRaiseVisitor(BaseNodeVisitor):\n \"\"\"Finds wrong ``raise`` keywords.\"\"\"\n\n def _check_exception_type(self, node: ast.Raise) -> None:\n exception = getattr(node, 'exc', None)\n if exception is None:\n return\n\n exception_func = getattr(exception, 'func', None)\n if exception_func:\n exception = exception_func\n\n exception_name = getattr(exception, 'id', None)\n if exception_name == 'NotImplemented':\n self.add_violation(RaiseNotImplementedViolation(node))\n\n def visit_Raise(self, node: ast.Raise) -> None:\n \"\"\"\n Checks how ``raise`` keyword is used.\n\n Raises:\n RaiseNotImplementedViolation\n\n \"\"\"\n self._check_exception_type(node)\n self.generic_visit(node)\n\n\n@final\n@alias('visit_any_function', (\n 'visit_FunctionDef',\n 'visit_AsyncFunctionDef',\n))\nclass ConsistentReturningVisitor(BaseNodeVisitor):\n \"\"\"Finds incorrect and inconsistent ``return`` and ``yield`` nodes.\"\"\"\n\n def _check_last_return_in_function(self, node: ast.Return) -> None:\n parent = get_parent(node)\n if not isinstance(parent, (ast.FunctionDef, ast.AsyncFunctionDef)):\n return\n\n if node is parent.body[-1] and node.value is None:\n self.add_violation(InconsistentReturnViolation(node))\n\n def 
_iterate_returning_values(\n self,\n node: AnyFunctionDef,\n returning_type, # mypy is not ok with this type declaration\n violation: ReturningViolations,\n ):\n returns: List[ast.Return] = []\n has_values = False\n for sub_node in ast.walk(node):\n if isinstance(sub_node, returning_type):\n if sub_node.value:\n has_values = True\n returns.append(sub_node)\n\n for sub_node in returns:\n if not sub_node.value and has_values:\n self.add_violation(violation(sub_node))\n\n def _check_return_values(self, node: AnyFunctionDef) -> None:\n self._iterate_returning_values(\n node, ast.Return, InconsistentReturnViolation,\n )\n\n def _check_yield_values(self, node: AnyFunctionDef) -> None:\n self._iterate_returning_values(\n node, ast.Yield, InconsistentYieldViolation,\n )\n\n def visit_Return(self, node: ast.Return) -> None:\n \"\"\"\n Checks ``return`` statements for consistency.\n\n Raises:\n InconsistentReturnViolation\n\n \"\"\"\n self._check_last_return_in_function(node)\n self.generic_visit(node)\n\n def visit_any_function(self, node: AnyFunctionDef) -> None:\n \"\"\"\n Helper to get all ``return`` and ``yield`` nodes in a function at once.\n\n Raises:\n InconsistentReturnViolation\n InconsistentYieldViolation\n\n \"\"\"\n self._check_return_values(node)\n self._check_yield_values(node)\n self.generic_visit(node)\n\n\n@final\nclass WrongKeywordVisitor(BaseNodeVisitor):\n \"\"\"Finds wrong keywords.\"\"\"\n\n _forbidden_keywords: ClassVar[AnyNodes] = (\n ast.Pass,\n ast.Delete,\n ast.Global,\n ast.Nonlocal,\n )\n\n def _check_keyword(self, node: ast.AST) -> None:\n if isinstance(node, self._forbidden_keywords):\n if isinstance(node, ast.Delete):\n message = 'del'\n else:\n message = node.__class__.__qualname__.lower()\n self.add_violation(WrongKeywordViolation(node, text=message))\n\n def visit(self, node: ast.AST) -> None:\n \"\"\"\n Used to find wrong keywords.\n\n Raises:\n WrongKeywordViolation\n\n \"\"\"\n self._check_keyword(node)\n self.generic_visit(node)\n\n\n@final\n@alias('visit_any_with', (\n 'visit_With',\n 'visit_AsyncWith',\n))\nclass WrongContextManagerVisitor(BaseNodeVisitor):\n \"\"\"Checks context managers.\"\"\"\n\n def _check_target_assignment(self, node: AnyWith):\n if len(node.items) > 1:\n self.add_violation(\n MultipleContextManagerAssignmentsViolation(node),\n )\n\n def _check_variable_definitions(self, node: ast.withitem) -> None:\n if node.optional_vars is None:\n return\n\n if not is_valid_block_variable_definition(node.optional_vars):\n self.add_violation(\n ContextManagerVariableDefinitionViolation(get_parent(node)),\n )\n\n def visit_withitem(self, node: ast.withitem) -> None:\n \"\"\"\n Checks that all variables inside context managers defined correctly.\n\n Raises:\n ContextManagerVariableDefinitionViolation\n\n \"\"\"\n self._check_variable_definitions(node)\n self.generic_visit(node)\n\n def visit_any_with(self, node: AnyWith) -> None:\n \"\"\"\n Checks the number of assignments for context managers.\n\n Raises:\n MultipleContextManagerAssignmentsViolation\n\n \"\"\"\n self._check_target_assignment(node)\n self.generic_visit(node)\n\n\n@final\n@alias('visit_return_variable', (\n 'visit_AsyncFunctionDef',\n 'visit_FunctionDef',\n))\nclass ConsistentReturningVariableVisitor(BaseNodeVisitor):\n \"\"\"Finds variables that are only used in `return` statements.\"\"\"\n\n _checking_nodes: ClassVar[AnyNodes] = (\n ast.Assign,\n ast.AnnAssign,\n ast.AugAssign,\n ast.Return,\n ast.Name,\n )\n\n def _get_assign_node_variables(self, node: List[ast.AST]) -> List[str]:\n 
assign = []\n for sub_node in node:\n if isinstance(sub_node, ast.Assign):\n if isinstance(sub_node.targets[0], ast.Name):\n assign.append(sub_node.targets[0].id)\n if isinstance(sub_node, ast.AnnAssign):\n if isinstance(sub_node.target, ast.Name):\n assign.append(sub_node.target.id)\n return assign\n\n def _get_name_nodes_variable(\n self,\n node: List[ast.AST],\n ) -> Dict[str, List[ast.Name]]:\n names: Dict[str, List[ast.Name]] = defaultdict(list)\n for sub_node in node:\n if isinstance(sub_node, ast.Name):\n if isinstance(sub_node.ctx, ast.Load):\n names[sub_node.id].append(sub_node)\n if isinstance(sub_node, ast.AugAssign):\n if isinstance(sub_node.target, ast.Name):\n variable_name = sub_node.target.id\n names[variable_name].append(sub_node.target)\n return names\n\n def _get_return_node_variables(\n self,\n node: List[ast.AST],\n ) -> NamesAndReturns:\n returns: Dict[str, List[ast.Name]] = defaultdict(list)\n return_sub_nodes: Dict[str, ast.Return] = defaultdict()\n for sub_node in node:\n if isinstance(sub_node, ast.Return):\n if isinstance(sub_node.value, ast.Name):\n variable_name = sub_node.value.id\n returns[variable_name].append(sub_node.value)\n return_sub_nodes[variable_name] = sub_node\n return returns, return_sub_nodes\n\n def _check_variables_for_return(self, node: AnyFunctionDef) -> None:\n nodes = list(\n filter(\n lambda sub_node: isinstance(sub_node, self._checking_nodes),\n ast.walk(node),\n ),\n )\n assign = self._get_assign_node_variables(nodes)\n names = self._get_name_nodes_variable(nodes)\n returns, return_sub_nodes = self._get_return_node_variables(nodes)\n\n returns = {name: returns[name] for name in returns if name in assign}\n\n self._check_for_violations(names, return_sub_nodes, returns)\n\n def _check_for_violations(self, names, return_sub_nodes, returns) -> None:\n for variable_name in returns:\n if not set(names[variable_name]) - set(returns[variable_name]):\n self.add_violation(\n InconsistentReturnVariableViolation(\n return_sub_nodes[variable_name],\n ),\n )\n\n def visit_return_variable(self, node: AnyFunctionDef) -> None:\n \"\"\"\n Helper to get all ``return`` variables in a function at once.\n\n Raises:\n InconsistentReturnVariableViolation\n\n \"\"\"\n self._check_variables_for_return(node)\n self.generic_visit(node)\n", "path": "wemake_python_styleguide/visitors/ast/keywords.py"}]} | 3,301 | 610 |
gh_patches_debug_34341 | rasdani/github-patches | git_diff | PrefectHQ__prefect-1729 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
When a flow has no storage attached, the agent should mark the run as failed
It appears that if the Agent picks up a run with no storage, it will mark it as submitted. This run will then exist in limbo forever, as it can never be run. The agent should put it in a failed state if it can recognize it'll never run.
--- END ISSUE ---
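One way to read the requested behavior: when the agent already knows a run can never execute (missing or incompatible storage), it should push a `Failed` state back to Cloud instead of silently skipping the run. A rough sketch, assuming the base `Agent` exposes a Prefect `Client` and that `set_flow_run_state` accepts these keyword arguments (both are assumptions to verify against the installed Prefect version):
```python
from prefect.engine.state import Failed


def fail_unrunnable_flow_run(client, flow_run, reason: str) -> None:
    """Mark a flow run as Failed when the agent can tell it will never run (sketch)."""
    client.set_flow_run_state(
        flow_run_id=flow_run.id,    # GraphQLResult field the agent already uses
        version=flow_run.version,   # assumption: version is part of the queried run
        state=Failed(message=reason),
    )
```
In `LocalAgent.deploy_flows` below, the branch that logs "Storage ... is not of type Docker." could call such a helper before `continue`, so the run does not sit in a Submitted state forever.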
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/prefect/agent/local/agent.py`
Content:
```
1 from sys import platform
2 from typing import Iterable
3
4 import docker
5
6 from prefect import config, context
7 from prefect.agent import Agent
8 from prefect.environments.storage import Docker
9 from prefect.serialization.storage import StorageSchema
10 from prefect.utilities.graphql import GraphQLResult
11
12
13 class LocalAgent(Agent):
14 """
15 Agent which deploys flow runs locally as Docker containers. Information on using the
16 Local Agent can be found at https://docs.prefect.io/cloud/agent/local.html
17
18 Args:
19 - name (str, optional): An optional name to give this agent. Can also be set through
20 the environment variable `PREFECT__CLOUD__AGENT__NAME`. Defaults to "agent"
21 - labels (List[str], optional): a list of labels, which are arbitrary string identifiers used by Prefect
22 Agents when polling for work
23 - base_url (str, optional): URL for a Docker daemon server. Defaults to
24 `unix:///var/run/docker.sock` however other hosts such as
25 `tcp://0.0.0.0:2375` can be provided
26 - no_pull (bool, optional): Flag on whether or not to pull flow images.
27 Defaults to `False` if not provided here or in context.
28 """
29
30 def __init__(
31 self,
32 name: str = None,
33 labels: Iterable[str] = None,
34 base_url: str = None,
35 no_pull: bool = None,
36 ) -> None:
37 super().__init__(name=name, labels=labels)
38
39 if platform == "win32":
40 default_url = "npipe:////./pipe/docker_engine"
41 else:
42 default_url = "unix://var/run/docker.sock"
43 self.logger.debug(
44 "Platform {} and default docker daemon {}".format(platform, default_url)
45 )
46
47 # Determine Daemon URL
48 self.base_url = base_url or context.get("base_url", default_url)
49 self.logger.debug("Base docker daemon url {}".format(self.base_url))
50
51 # Determine pull specification
52 self.no_pull = no_pull or context.get("no_pull", False)
53 self.logger.debug("no_pull set to {}".format(self.no_pull))
54
55 self.docker_client = docker.APIClient(base_url=self.base_url, version="auto")
56
57 # Ping Docker daemon for connection issues
58 try:
59 self.logger.debug("Pinging docker daemon")
60 self.docker_client.ping()
61 except Exception as exc:
62 self.logger.exception(
63 "Issue connecting to the Docker daemon. Make sure it is running."
64 )
65 raise exc
66
67 def deploy_flows(self, flow_runs: list) -> None:
68 """
69 Deploy flow runs on your local machine as Docker containers
70
71 Args:
72 - flow_runs (list): A list of GraphQLResult flow run objects
73 """
74 for flow_run in flow_runs:
75 self.logger.info(
76 "Deploying flow run {}".format(flow_run.id) # type: ignore
77 )
78
79 storage = StorageSchema().load(flow_run.flow.storage)
80 if not isinstance(StorageSchema().load(flow_run.flow.storage), Docker):
81 self.logger.error(
82 "Storage for flow run {} is not of type Docker.".format(flow_run.id)
83 )
84 continue
85
86 env_vars = self.populate_env_vars(flow_run=flow_run)
87
88 if not self.no_pull and storage.registry_url:
89 self.logger.info("Pulling image {}...".format(storage.name))
90 try:
91 pull_output = self.docker_client.pull(
92 storage.name, stream=True, decode=True
93 )
94 for line in pull_output:
95 self.logger.debug(line)
96 self.logger.info(
97 "Successfully pulled image {}...".format(storage.name)
98 )
99 except docker.errors.APIError as exc:
100 self.logger.error("Issue pulling image {}".format(storage.name))
101
102 # Create a container
103 self.logger.debug("Creating Docker container {}".format(storage.name))
104 container = self.docker_client.create_container(
105 storage.name, command="prefect execute cloud-flow", environment=env_vars
106 )
107
108 # Start the container
109 self.logger.debug(
110 "Starting Docker container with ID {}".format(container.get("Id"))
111 )
112 self.docker_client.start(container=container.get("Id"))
113
114 def populate_env_vars(self, flow_run: GraphQLResult) -> dict:
115 """
116 Populate metadata and variables in the environment variables for a flow run
117
118 Args:
119 - flow_run (GraphQLResult): A flow run object
120
121 Returns:
122 - dict: a dictionary representing the populated environment variables
123 """
124 return {
125 "PREFECT__CLOUD__API": config.cloud.api,
126 "PREFECT__CLOUD__AUTH_TOKEN": config.cloud.agent.auth_token,
127 "PREFECT__CLOUD__AGENT__LABELS": str(self.labels),
128 "PREFECT__CONTEXT__FLOW_RUN_ID": flow_run.id, # type: ignore
129 "PREFECT__CLOUD__USE_LOCAL_SECRETS": "false",
130 "PREFECT__LOGGING__LOG_TO_CLOUD": "true",
131 "PREFECT__LOGGING__LEVEL": "DEBUG",
132 "PREFECT__ENGINE__FLOW_RUNNER__DEFAULT_CLASS": "prefect.engine.cloud.CloudFlowRunner",
133 "PREFECT__ENGINE__TASK_RUNNER__DEFAULT_CLASS": "prefect.engine.cloud.CloudTaskRunner",
134 }
135
136
137 if __name__ == "__main__":
138 LocalAgent().start()
139
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/prefect/agent/local/agent.py b/src/prefect/agent/local/agent.py
--- a/src/prefect/agent/local/agent.py
+++ b/src/prefect/agent/local/agent.py
@@ -5,6 +5,7 @@
from prefect import config, context
from prefect.agent import Agent
+from prefect.engine.state import Failed
from prefect.environments.storage import Docker
from prefect.serialization.storage import StorageSchema
from prefect.utilities.graphql import GraphQLResult
@@ -78,9 +79,14 @@
storage = StorageSchema().load(flow_run.flow.storage)
if not isinstance(StorageSchema().load(flow_run.flow.storage), Docker):
- self.logger.error(
- "Storage for flow run {} is not of type Docker.".format(flow_run.id)
+ msg = "Storage for flow run {} is not of type Docker.".format(
+ flow_run.id
)
+ state_msg = "Agent {} failed to run flow: ".format(self.name) + msg
+ self.client.set_flow_run_state(
+ flow_run.id, version=flow_run.version, state=Failed(state_msg)
+ )
+ self.logger.error(msg)
continue
env_vars = self.populate_env_vars(flow_run=flow_run)
@@ -97,7 +103,15 @@
"Successfully pulled image {}...".format(storage.name)
)
except docker.errors.APIError as exc:
- self.logger.error("Issue pulling image {}".format(storage.name))
+ msg = "Issue pulling image {}".format(storage.name)
+ state_msg = (
+ "Agent {} failed to pull image for flow: ".format(self.name)
+ + msg
+ )
+ self.client.set_flow_run_state(
+ flow_run.id, version=flow_run.version, state=Failed(msg)
+ )
+ self.logger.error(msg)
# Create a container
self.logger.debug("Creating Docker container {}".format(storage.name))
| {"golden_diff": "diff --git a/src/prefect/agent/local/agent.py b/src/prefect/agent/local/agent.py\n--- a/src/prefect/agent/local/agent.py\n+++ b/src/prefect/agent/local/agent.py\n@@ -5,6 +5,7 @@\n \n from prefect import config, context\n from prefect.agent import Agent\n+from prefect.engine.state import Failed\n from prefect.environments.storage import Docker\n from prefect.serialization.storage import StorageSchema\n from prefect.utilities.graphql import GraphQLResult\n@@ -78,9 +79,14 @@\n \n storage = StorageSchema().load(flow_run.flow.storage)\n if not isinstance(StorageSchema().load(flow_run.flow.storage), Docker):\n- self.logger.error(\n- \"Storage for flow run {} is not of type Docker.\".format(flow_run.id)\n+ msg = \"Storage for flow run {} is not of type Docker.\".format(\n+ flow_run.id\n )\n+ state_msg = \"Agent {} failed to run flow: \".format(self.name) + msg\n+ self.client.set_flow_run_state(\n+ flow_run.id, version=flow_run.version, state=Failed(state_msg)\n+ )\n+ self.logger.error(msg)\n continue\n \n env_vars = self.populate_env_vars(flow_run=flow_run)\n@@ -97,7 +103,15 @@\n \"Successfully pulled image {}...\".format(storage.name)\n )\n except docker.errors.APIError as exc:\n- self.logger.error(\"Issue pulling image {}\".format(storage.name))\n+ msg = \"Issue pulling image {}\".format(storage.name)\n+ state_msg = (\n+ \"Agent {} failed to pull image for flow: \".format(self.name)\n+ + msg\n+ )\n+ self.client.set_flow_run_state(\n+ flow_run.id, version=flow_run.version, state=Failed(msg)\n+ )\n+ self.logger.error(msg)\n \n # Create a container\n self.logger.debug(\"Creating Docker container {}\".format(storage.name))\n", "issue": "When a flow has no storage attached, the agent should mark the run as failed\n\r\nIt appears that if the Agent picks up a run with no storage, it will mark it as submitted. This run will then exist in limbo forever, as it can never be run. The agent should put it in a failed state if it can recognize it'll never run.\n", "before_files": [{"content": "from sys import platform\nfrom typing import Iterable\n\nimport docker\n\nfrom prefect import config, context\nfrom prefect.agent import Agent\nfrom prefect.environments.storage import Docker\nfrom prefect.serialization.storage import StorageSchema\nfrom prefect.utilities.graphql import GraphQLResult\n\n\nclass LocalAgent(Agent):\n \"\"\"\n Agent which deploys flow runs locally as Docker containers. Information on using the\n Local Agent can be found at https://docs.prefect.io/cloud/agent/local.html\n\n Args:\n - name (str, optional): An optional name to give this agent. Can also be set through\n the environment variable `PREFECT__CLOUD__AGENT__NAME`. Defaults to \"agent\"\n - labels (List[str], optional): a list of labels, which are arbitrary string identifiers used by Prefect\n Agents when polling for work\n - base_url (str, optional): URL for a Docker daemon server. 
Defaults to\n `unix:///var/run/docker.sock` however other hosts such as\n `tcp://0.0.0.0:2375` can be provided\n - no_pull (bool, optional): Flag on whether or not to pull flow images.\n Defaults to `False` if not provided here or in context.\n \"\"\"\n\n def __init__(\n self,\n name: str = None,\n labels: Iterable[str] = None,\n base_url: str = None,\n no_pull: bool = None,\n ) -> None:\n super().__init__(name=name, labels=labels)\n\n if platform == \"win32\":\n default_url = \"npipe:////./pipe/docker_engine\"\n else:\n default_url = \"unix://var/run/docker.sock\"\n self.logger.debug(\n \"Platform {} and default docker daemon {}\".format(platform, default_url)\n )\n\n # Determine Daemon URL\n self.base_url = base_url or context.get(\"base_url\", default_url)\n self.logger.debug(\"Base docker daemon url {}\".format(self.base_url))\n\n # Determine pull specification\n self.no_pull = no_pull or context.get(\"no_pull\", False)\n self.logger.debug(\"no_pull set to {}\".format(self.no_pull))\n\n self.docker_client = docker.APIClient(base_url=self.base_url, version=\"auto\")\n\n # Ping Docker daemon for connection issues\n try:\n self.logger.debug(\"Pinging docker daemon\")\n self.docker_client.ping()\n except Exception as exc:\n self.logger.exception(\n \"Issue connecting to the Docker daemon. Make sure it is running.\"\n )\n raise exc\n\n def deploy_flows(self, flow_runs: list) -> None:\n \"\"\"\n Deploy flow runs on your local machine as Docker containers\n\n Args:\n - flow_runs (list): A list of GraphQLResult flow run objects\n \"\"\"\n for flow_run in flow_runs:\n self.logger.info(\n \"Deploying flow run {}\".format(flow_run.id) # type: ignore\n )\n\n storage = StorageSchema().load(flow_run.flow.storage)\n if not isinstance(StorageSchema().load(flow_run.flow.storage), Docker):\n self.logger.error(\n \"Storage for flow run {} is not of type Docker.\".format(flow_run.id)\n )\n continue\n\n env_vars = self.populate_env_vars(flow_run=flow_run)\n\n if not self.no_pull and storage.registry_url:\n self.logger.info(\"Pulling image {}...\".format(storage.name))\n try:\n pull_output = self.docker_client.pull(\n storage.name, stream=True, decode=True\n )\n for line in pull_output:\n self.logger.debug(line)\n self.logger.info(\n \"Successfully pulled image {}...\".format(storage.name)\n )\n except docker.errors.APIError as exc:\n self.logger.error(\"Issue pulling image {}\".format(storage.name))\n\n # Create a container\n self.logger.debug(\"Creating Docker container {}\".format(storage.name))\n container = self.docker_client.create_container(\n storage.name, command=\"prefect execute cloud-flow\", environment=env_vars\n )\n\n # Start the container\n self.logger.debug(\n \"Starting Docker container with ID {}\".format(container.get(\"Id\"))\n )\n self.docker_client.start(container=container.get(\"Id\"))\n\n def populate_env_vars(self, flow_run: GraphQLResult) -> dict:\n \"\"\"\n Populate metadata and variables in the environment variables for a flow run\n\n Args:\n - flow_run (GraphQLResult): A flow run object\n\n Returns:\n - dict: a dictionary representing the populated environment variables\n \"\"\"\n return {\n \"PREFECT__CLOUD__API\": config.cloud.api,\n \"PREFECT__CLOUD__AUTH_TOKEN\": config.cloud.agent.auth_token,\n \"PREFECT__CLOUD__AGENT__LABELS\": str(self.labels),\n \"PREFECT__CONTEXT__FLOW_RUN_ID\": flow_run.id, # type: ignore\n \"PREFECT__CLOUD__USE_LOCAL_SECRETS\": \"false\",\n \"PREFECT__LOGGING__LOG_TO_CLOUD\": \"true\",\n \"PREFECT__LOGGING__LEVEL\": \"DEBUG\",\n 
\"PREFECT__ENGINE__FLOW_RUNNER__DEFAULT_CLASS\": \"prefect.engine.cloud.CloudFlowRunner\",\n \"PREFECT__ENGINE__TASK_RUNNER__DEFAULT_CLASS\": \"prefect.engine.cloud.CloudTaskRunner\",\n }\n\n\nif __name__ == \"__main__\":\n LocalAgent().start()\n", "path": "src/prefect/agent/local/agent.py"}], "after_files": [{"content": "from sys import platform\nfrom typing import Iterable\n\nimport docker\n\nfrom prefect import config, context\nfrom prefect.agent import Agent\nfrom prefect.engine.state import Failed\nfrom prefect.environments.storage import Docker\nfrom prefect.serialization.storage import StorageSchema\nfrom prefect.utilities.graphql import GraphQLResult\n\n\nclass LocalAgent(Agent):\n \"\"\"\n Agent which deploys flow runs locally as Docker containers. Information on using the\n Local Agent can be found at https://docs.prefect.io/cloud/agent/local.html\n\n Args:\n - name (str, optional): An optional name to give this agent. Can also be set through\n the environment variable `PREFECT__CLOUD__AGENT__NAME`. Defaults to \"agent\"\n - labels (List[str], optional): a list of labels, which are arbitrary string identifiers used by Prefect\n Agents when polling for work\n - base_url (str, optional): URL for a Docker daemon server. Defaults to\n `unix:///var/run/docker.sock` however other hosts such as\n `tcp://0.0.0.0:2375` can be provided\n - no_pull (bool, optional): Flag on whether or not to pull flow images.\n Defaults to `False` if not provided here or in context.\n \"\"\"\n\n def __init__(\n self,\n name: str = None,\n labels: Iterable[str] = None,\n base_url: str = None,\n no_pull: bool = None,\n ) -> None:\n super().__init__(name=name, labels=labels)\n\n if platform == \"win32\":\n default_url = \"npipe:////./pipe/docker_engine\"\n else:\n default_url = \"unix://var/run/docker.sock\"\n self.logger.debug(\n \"Platform {} and default docker daemon {}\".format(platform, default_url)\n )\n\n # Determine Daemon URL\n self.base_url = base_url or context.get(\"base_url\", default_url)\n self.logger.debug(\"Base docker daemon url {}\".format(self.base_url))\n\n # Determine pull specification\n self.no_pull = no_pull or context.get(\"no_pull\", False)\n self.logger.debug(\"no_pull set to {}\".format(self.no_pull))\n\n self.docker_client = docker.APIClient(base_url=self.base_url, version=\"auto\")\n\n # Ping Docker daemon for connection issues\n try:\n self.logger.debug(\"Pinging docker daemon\")\n self.docker_client.ping()\n except Exception as exc:\n self.logger.exception(\n \"Issue connecting to the Docker daemon. 
Make sure it is running.\"\n )\n raise exc\n\n def deploy_flows(self, flow_runs: list) -> None:\n \"\"\"\n Deploy flow runs on your local machine as Docker containers\n\n Args:\n - flow_runs (list): A list of GraphQLResult flow run objects\n \"\"\"\n for flow_run in flow_runs:\n self.logger.info(\n \"Deploying flow run {}\".format(flow_run.id) # type: ignore\n )\n\n storage = StorageSchema().load(flow_run.flow.storage)\n if not isinstance(StorageSchema().load(flow_run.flow.storage), Docker):\n msg = \"Storage for flow run {} is not of type Docker.\".format(\n flow_run.id\n )\n state_msg = \"Agent {} failed to run flow: \".format(self.name) + msg\n self.client.set_flow_run_state(\n flow_run.id, version=flow_run.version, state=Failed(state_msg)\n )\n self.logger.error(msg)\n continue\n\n env_vars = self.populate_env_vars(flow_run=flow_run)\n\n if not self.no_pull and storage.registry_url:\n self.logger.info(\"Pulling image {}...\".format(storage.name))\n try:\n pull_output = self.docker_client.pull(\n storage.name, stream=True, decode=True\n )\n for line in pull_output:\n self.logger.debug(line)\n self.logger.info(\n \"Successfully pulled image {}...\".format(storage.name)\n )\n except docker.errors.APIError as exc:\n msg = \"Issue pulling image {}\".format(storage.name)\n state_msg = (\n \"Agent {} failed to pull image for flow: \".format(self.name)\n + msg\n )\n self.client.set_flow_run_state(\n flow_run.id, version=flow_run.version, state=Failed(msg)\n )\n self.logger.error(msg)\n\n # Create a container\n self.logger.debug(\"Creating Docker container {}\".format(storage.name))\n container = self.docker_client.create_container(\n storage.name, command=\"prefect execute cloud-flow\", environment=env_vars\n )\n\n # Start the container\n self.logger.debug(\n \"Starting Docker container with ID {}\".format(container.get(\"Id\"))\n )\n self.docker_client.start(container=container.get(\"Id\"))\n\n def populate_env_vars(self, flow_run: GraphQLResult) -> dict:\n \"\"\"\n Populate metadata and variables in the environment variables for a flow run\n\n Args:\n - flow_run (GraphQLResult): A flow run object\n\n Returns:\n - dict: a dictionary representing the populated environment variables\n \"\"\"\n return {\n \"PREFECT__CLOUD__API\": config.cloud.api,\n \"PREFECT__CLOUD__AUTH_TOKEN\": config.cloud.agent.auth_token,\n \"PREFECT__CLOUD__AGENT__LABELS\": str(self.labels),\n \"PREFECT__CONTEXT__FLOW_RUN_ID\": flow_run.id, # type: ignore\n \"PREFECT__CLOUD__USE_LOCAL_SECRETS\": \"false\",\n \"PREFECT__LOGGING__LOG_TO_CLOUD\": \"true\",\n \"PREFECT__LOGGING__LEVEL\": \"DEBUG\",\n \"PREFECT__ENGINE__FLOW_RUNNER__DEFAULT_CLASS\": \"prefect.engine.cloud.CloudFlowRunner\",\n \"PREFECT__ENGINE__TASK_RUNNER__DEFAULT_CLASS\": \"prefect.engine.cloud.CloudTaskRunner\",\n }\n\n\nif __name__ == \"__main__\":\n LocalAgent().start()\n", "path": "src/prefect/agent/local/agent.py"}]} | 1,799 | 428 |
gh_patches_debug_4680 | rasdani/github-patches | git_diff | zestedesavoir__zds-site-5936 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Non-breaking spaces in the title of featured content ("mise en Une")
**Bug description**
The space in the featured-content preview is not a non-breaking space. This problem could not be fixed by #5292 because the preview is generated by JavaScript.
**How to reproduce?**
Steps to reproduce the bug:
1. Log in as an administrator;
2. Go to a content page;
3. Feature this content ("mettre en Une");
4. Set the following title: « `Comment alerter les secours ?` ».
**Expected behavior**
The space should automatically be replaced with a non-breaking space, before or after the characters concerned.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `zds/utils/templatetags/french_typography.py`
Content:
```
1 from django import template
2 from django.template.defaultfilters import stringfilter
3 from django.utils.safestring import mark_safe
4
5 register = template.Library()
6
7
8 @register.filter()
9 @stringfilter
10 def french_typography(str):
11 """
12 Replaces spaces with non-breaking-spaces or narrow non-breaking-spaces
13 before or after some symbols, according to French typography.
14
15 This filter is naive and should not be used on Markdown content.
16 """
17 return mark_safe(
18 # Narrow non-breaking space:  
19 str.replace(' ;', ' ;')
20 .replace(' ?', ' ?')
21 .replace(' !', ' !')
22 .replace(' %', ' %')
23 # Non-breaking space:
24 .replace('« ', '« ')
25 .replace(' »', ' »')
26 .replace(' :', ' :')
27 )
28
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/zds/utils/templatetags/french_typography.py b/zds/utils/templatetags/french_typography.py
--- a/zds/utils/templatetags/french_typography.py
+++ b/zds/utils/templatetags/french_typography.py
@@ -13,6 +13,9 @@
before or after some symbols, according to French typography.
This filter is naive and should not be used on Markdown content.
+
+
+ Any change here should also be made in assets/js/featured-resource-preview.js
"""
return mark_safe(
# Narrow non-breaking space:  
| {"golden_diff": "diff --git a/zds/utils/templatetags/french_typography.py b/zds/utils/templatetags/french_typography.py\n--- a/zds/utils/templatetags/french_typography.py\n+++ b/zds/utils/templatetags/french_typography.py\n@@ -13,6 +13,9 @@\n before or after some symbols, according to French typography.\n \n This filter is naive and should not be used on Markdown content.\n+\n+\n+ Any change here should also be made in assets/js/featured-resource-preview.js\n \"\"\"\n return mark_safe(\n # Narrow non-breaking space:  \n", "issue": "Espaces ins\u00e9cables dans le titre de mise en Une\n**Description du bug**\r\n\r\nEspace non ins\u00e9cable dans l'aper\u00e7u de mis en Une. Ce probl\u00e8me n'a pas pu \u00eatre par #5292 car l'aper\u00e7u est g\u00e9n\u00e9r\u00e9 par du JavaScript.\r\n\r\n**Comment reproduire ?**\r\n\r\nLa liste des \u00e9tapes qui permet de reproduire le bug :\r\n\r\n1. Se connecter en tant qu'administrateur\u00a0;\r\n2. Acc\u00e9der \u00e0 la page d'un contenu\u00a0;\r\n3. Mettre ce contenu en Une\u00a0;\r\n4. Mettre le titre suivant\u00a0: \u00ab\u00a0`Comment alerter les secours ?` \u00bb.\r\n\r\n**Comportement attendu**\r\n\r\nL'espace doit automatiquement \u00eatre remplac\u00e9 par un espace ins\u00e9cable, avant ou apr\u00e8s les signes concern\u00e9s.\n", "before_files": [{"content": "from django import template\nfrom django.template.defaultfilters import stringfilter\nfrom django.utils.safestring import mark_safe\n\nregister = template.Library()\n\n\[email protected]()\n@stringfilter\ndef french_typography(str):\n \"\"\"\n Replaces spaces with non-breaking-spaces or narrow non-breaking-spaces\n before or after some symbols, according to French typography.\n\n This filter is naive and should not be used on Markdown content.\n \"\"\"\n return mark_safe(\n # Narrow non-breaking space:  \n str.replace(' ;', ' ;')\n .replace(' ?', ' ?')\n .replace(' !', ' !')\n .replace(' %', ' %')\n # Non-breaking space: \n .replace('\u00ab ', '\u00ab ')\n .replace(' \u00bb', ' \u00bb')\n .replace(' :', ' :')\n )\n", "path": "zds/utils/templatetags/french_typography.py"}], "after_files": [{"content": "from django import template\nfrom django.template.defaultfilters import stringfilter\nfrom django.utils.safestring import mark_safe\n\nregister = template.Library()\n\n\[email protected]()\n@stringfilter\ndef french_typography(str):\n \"\"\"\n Replaces spaces with non-breaking-spaces or narrow non-breaking-spaces\n before or after some symbols, according to French typography.\n\n This filter is naive and should not be used on Markdown content.\n\n\n Any change here should also be made in assets/js/featured-resource-preview.js\n \"\"\"\n return mark_safe(\n # Narrow non-breaking space:  \n str.replace(' ;', ' ;')\n .replace(' ?', ' ?')\n .replace(' !', ' !')\n .replace(' %', ' %')\n # Non-breaking space: \n .replace('\u00ab ', '\u00ab ')\n .replace(' \u00bb', ' \u00bb')\n .replace(' :', ' :')\n )\n", "path": "zds/utils/templatetags/french_typography.py"}]} | 697 | 143 |
gh_patches_debug_22737 | rasdani/github-patches | git_diff | aio-libs__aiohttp-6144 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Switch http_parser to llhttp
Node.js decided to move from their hard-coded C parser to this one.
https://github.com/nodejs/http-parser/pull/285#issuecomment-456025694
*Upd:*
proposed parser is https://llhttp.org
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 import os
2 import pathlib
3 import re
4 import sys
5
6 from setuptools import Extension, setup
7
8 if sys.version_info < (3, 7):
9 raise RuntimeError("aiohttp 4.x requires Python 3.7+")
10
11
12 NO_EXTENSIONS = bool(os.environ.get("AIOHTTP_NO_EXTENSIONS")) # type: bool
13 HERE = pathlib.Path(__file__).parent
14 IS_GIT_REPO = (HERE / ".git").exists()
15
16
17 if sys.implementation.name != "cpython":
18 NO_EXTENSIONS = True
19
20
21 if IS_GIT_REPO and not (HERE / "vendor/http-parser/README.md").exists():
22 print("Install submodules when building from git clone", file=sys.stderr)
23 print("Hint:", file=sys.stderr)
24 print(" git submodule update --init", file=sys.stderr)
25 sys.exit(2)
26
27
28 # NOTE: makefile cythonizes all Cython modules
29
30 extensions = [
31 Extension("aiohttp._websocket", ["aiohttp/_websocket.c"]),
32 Extension(
33 "aiohttp._http_parser",
34 [
35 "aiohttp/_http_parser.c",
36 "vendor/http-parser/http_parser.c",
37 "aiohttp/_find_header.c",
38 ],
39 define_macros=[("HTTP_PARSER_STRICT", 0)],
40 ),
41 Extension("aiohttp._helpers", ["aiohttp/_helpers.c"]),
42 Extension("aiohttp._http_writer", ["aiohttp/_http_writer.c"]),
43 ]
44
45
46 txt = (HERE / "aiohttp" / "__init__.py").read_text("utf-8")
47 try:
48 version = re.findall(r'^__version__ = "([^"]+)"\r?$', txt, re.M)[0]
49 except IndexError:
50 raise RuntimeError("Unable to determine version.")
51
52 install_requires = [
53 "charset-normalizer>=2.0,<3.0",
54 "multidict>=4.5,<7.0",
55 "async_timeout>=4.0a2,<5.0",
56 'asynctest==0.13.0; python_version<"3.8"',
57 "yarl>=1.0,<2.0",
58 "typing_extensions>=3.7.4",
59 "frozenlist>=1.1.1",
60 "aiosignal>=1.1.2",
61 ]
62
63
64 def read(f):
65 return (HERE / f).read_text("utf-8").strip()
66
67
68 args = dict(
69 name="aiohttp",
70 version=version,
71 description="Async http client/server framework (asyncio)",
72 long_description="\n\n".join((read("README.rst"), read("CHANGES.rst"))),
73 long_description_content_type="text/x-rst",
74 classifiers=[
75 "License :: OSI Approved :: Apache Software License",
76 "Intended Audience :: Developers",
77 "Programming Language :: Python",
78 "Programming Language :: Python :: 3",
79 "Programming Language :: Python :: 3.7",
80 "Programming Language :: Python :: 3.8",
81 "Programming Language :: Python :: 3.9",
82 "Programming Language :: Python :: 3.10",
83 "Development Status :: 5 - Production/Stable",
84 "Operating System :: POSIX",
85 "Operating System :: MacOS :: MacOS X",
86 "Operating System :: Microsoft :: Windows",
87 "Topic :: Internet :: WWW/HTTP",
88 "Framework :: AsyncIO",
89 ],
90 author="Nikolay Kim",
91 author_email="[email protected]",
92 maintainer=", ".join(
93 (
94 "Nikolay Kim <[email protected]>",
95 "Andrew Svetlov <[email protected]>",
96 )
97 ),
98 maintainer_email="[email protected]",
99 url="https://github.com/aio-libs/aiohttp",
100 project_urls={
101 "Chat: Gitter": "https://gitter.im/aio-libs/Lobby",
102 "CI: GitHub Actions": "https://github.com/aio-libs/aiohttp/actions?query=workflow%3ACI", # noqa
103 "Coverage: codecov": "https://codecov.io/github/aio-libs/aiohttp",
104 "Docs: Changelog": "https://docs.aiohttp.org/en/stable/changes.html",
105 "Docs: RTD": "https://docs.aiohttp.org",
106 "GitHub: issues": "https://github.com/aio-libs/aiohttp/issues",
107 "GitHub: repo": "https://github.com/aio-libs/aiohttp",
108 },
109 license="Apache 2",
110 packages=["aiohttp"],
111 python_requires=">=3.7",
112 install_requires=install_requires,
113 extras_require={
114 "speedups": [
115 "aiodns>=1.1",
116 "Brotli",
117 "cchardet",
118 ],
119 },
120 include_package_data=True,
121 )
122
123 if not NO_EXTENSIONS:
124 print("*********************")
125 print("* Accelerated build *")
126 print("*********************")
127 setup(ext_modules=extensions, **args)
128 else:
129 print("*********************")
130 print("* Pure Python build *")
131 print("*********************")
132 setup(**args)
133
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -18,7 +18,7 @@
NO_EXTENSIONS = True
-if IS_GIT_REPO and not (HERE / "vendor/http-parser/README.md").exists():
+if IS_GIT_REPO and not (HERE / "vendor/llhttp/README.md").exists():
print("Install submodules when building from git clone", file=sys.stderr)
print("Hint:", file=sys.stderr)
print(" git submodule update --init", file=sys.stderr)
@@ -33,10 +33,13 @@
"aiohttp._http_parser",
[
"aiohttp/_http_parser.c",
- "vendor/http-parser/http_parser.c",
"aiohttp/_find_header.c",
+ "vendor/llhttp/build/c/llhttp.c",
+ "vendor/llhttp/src/native/api.c",
+ "vendor/llhttp/src/native/http.c",
],
- define_macros=[("HTTP_PARSER_STRICT", 0)],
+ define_macros=[("LLHTTP_STRICT_MODE", 0)],
+ include_dirs=["vendor/llhttp/build"],
),
Extension("aiohttp._helpers", ["aiohttp/_helpers.c"]),
Extension("aiohttp._http_writer", ["aiohttp/_http_writer.c"]),
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -18,7 +18,7 @@\n NO_EXTENSIONS = True\n \n \n-if IS_GIT_REPO and not (HERE / \"vendor/http-parser/README.md\").exists():\n+if IS_GIT_REPO and not (HERE / \"vendor/llhttp/README.md\").exists():\n print(\"Install submodules when building from git clone\", file=sys.stderr)\n print(\"Hint:\", file=sys.stderr)\n print(\" git submodule update --init\", file=sys.stderr)\n@@ -33,10 +33,13 @@\n \"aiohttp._http_parser\",\n [\n \"aiohttp/_http_parser.c\",\n- \"vendor/http-parser/http_parser.c\",\n \"aiohttp/_find_header.c\",\n+ \"vendor/llhttp/build/c/llhttp.c\",\n+ \"vendor/llhttp/src/native/api.c\",\n+ \"vendor/llhttp/src/native/http.c\",\n ],\n- define_macros=[(\"HTTP_PARSER_STRICT\", 0)],\n+ define_macros=[(\"LLHTTP_STRICT_MODE\", 0)],\n+ include_dirs=[\"vendor/llhttp/build\"],\n ),\n Extension(\"aiohttp._helpers\", [\"aiohttp/_helpers.c\"]),\n Extension(\"aiohttp._http_writer\", [\"aiohttp/_http_writer.c\"]),\n", "issue": "Switch http_parser to llhttp\nNodejs decide to move from their hard-coded C parser to this one.\r\n\r\nhttps://github.com/nodejs/http-parser/pull/285#issuecomment-456025694\r\n\r\n*Upd:* \r\nproposed parser is https://llhttp.org\r\n\n", "before_files": [{"content": "import os\nimport pathlib\nimport re\nimport sys\n\nfrom setuptools import Extension, setup\n\nif sys.version_info < (3, 7):\n raise RuntimeError(\"aiohttp 4.x requires Python 3.7+\")\n\n\nNO_EXTENSIONS = bool(os.environ.get(\"AIOHTTP_NO_EXTENSIONS\")) # type: bool\nHERE = pathlib.Path(__file__).parent\nIS_GIT_REPO = (HERE / \".git\").exists()\n\n\nif sys.implementation.name != \"cpython\":\n NO_EXTENSIONS = True\n\n\nif IS_GIT_REPO and not (HERE / \"vendor/http-parser/README.md\").exists():\n print(\"Install submodules when building from git clone\", file=sys.stderr)\n print(\"Hint:\", file=sys.stderr)\n print(\" git submodule update --init\", file=sys.stderr)\n sys.exit(2)\n\n\n# NOTE: makefile cythonizes all Cython modules\n\nextensions = [\n Extension(\"aiohttp._websocket\", [\"aiohttp/_websocket.c\"]),\n Extension(\n \"aiohttp._http_parser\",\n [\n \"aiohttp/_http_parser.c\",\n \"vendor/http-parser/http_parser.c\",\n \"aiohttp/_find_header.c\",\n ],\n define_macros=[(\"HTTP_PARSER_STRICT\", 0)],\n ),\n Extension(\"aiohttp._helpers\", [\"aiohttp/_helpers.c\"]),\n Extension(\"aiohttp._http_writer\", [\"aiohttp/_http_writer.c\"]),\n]\n\n\ntxt = (HERE / \"aiohttp\" / \"__init__.py\").read_text(\"utf-8\")\ntry:\n version = re.findall(r'^__version__ = \"([^\"]+)\"\\r?$', txt, re.M)[0]\nexcept IndexError:\n raise RuntimeError(\"Unable to determine version.\")\n\ninstall_requires = [\n \"charset-normalizer>=2.0,<3.0\",\n \"multidict>=4.5,<7.0\",\n \"async_timeout>=4.0a2,<5.0\",\n 'asynctest==0.13.0; python_version<\"3.8\"',\n \"yarl>=1.0,<2.0\",\n \"typing_extensions>=3.7.4\",\n \"frozenlist>=1.1.1\",\n \"aiosignal>=1.1.2\",\n]\n\n\ndef read(f):\n return (HERE / f).read_text(\"utf-8\").strip()\n\n\nargs = dict(\n name=\"aiohttp\",\n version=version,\n description=\"Async http client/server framework (asyncio)\",\n long_description=\"\\n\\n\".join((read(\"README.rst\"), read(\"CHANGES.rst\"))),\n long_description_content_type=\"text/x-rst\",\n classifiers=[\n \"License :: OSI Approved :: Apache Software License\",\n \"Intended Audience :: Developers\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n 
\"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Development Status :: 5 - Production/Stable\",\n \"Operating System :: POSIX\",\n \"Operating System :: MacOS :: MacOS X\",\n \"Operating System :: Microsoft :: Windows\",\n \"Topic :: Internet :: WWW/HTTP\",\n \"Framework :: AsyncIO\",\n ],\n author=\"Nikolay Kim\",\n author_email=\"[email protected]\",\n maintainer=\", \".join(\n (\n \"Nikolay Kim <[email protected]>\",\n \"Andrew Svetlov <[email protected]>\",\n )\n ),\n maintainer_email=\"[email protected]\",\n url=\"https://github.com/aio-libs/aiohttp\",\n project_urls={\n \"Chat: Gitter\": \"https://gitter.im/aio-libs/Lobby\",\n \"CI: GitHub Actions\": \"https://github.com/aio-libs/aiohttp/actions?query=workflow%3ACI\", # noqa\n \"Coverage: codecov\": \"https://codecov.io/github/aio-libs/aiohttp\",\n \"Docs: Changelog\": \"https://docs.aiohttp.org/en/stable/changes.html\",\n \"Docs: RTD\": \"https://docs.aiohttp.org\",\n \"GitHub: issues\": \"https://github.com/aio-libs/aiohttp/issues\",\n \"GitHub: repo\": \"https://github.com/aio-libs/aiohttp\",\n },\n license=\"Apache 2\",\n packages=[\"aiohttp\"],\n python_requires=\">=3.7\",\n install_requires=install_requires,\n extras_require={\n \"speedups\": [\n \"aiodns>=1.1\",\n \"Brotli\",\n \"cchardet\",\n ],\n },\n include_package_data=True,\n)\n\nif not NO_EXTENSIONS:\n print(\"*********************\")\n print(\"* Accelerated build *\")\n print(\"*********************\")\n setup(ext_modules=extensions, **args)\nelse:\n print(\"*********************\")\n print(\"* Pure Python build *\")\n print(\"*********************\")\n setup(**args)\n", "path": "setup.py"}], "after_files": [{"content": "import os\nimport pathlib\nimport re\nimport sys\n\nfrom setuptools import Extension, setup\n\nif sys.version_info < (3, 7):\n raise RuntimeError(\"aiohttp 4.x requires Python 3.7+\")\n\n\nNO_EXTENSIONS = bool(os.environ.get(\"AIOHTTP_NO_EXTENSIONS\")) # type: bool\nHERE = pathlib.Path(__file__).parent\nIS_GIT_REPO = (HERE / \".git\").exists()\n\n\nif sys.implementation.name != \"cpython\":\n NO_EXTENSIONS = True\n\n\nif IS_GIT_REPO and not (HERE / \"vendor/llhttp/README.md\").exists():\n print(\"Install submodules when building from git clone\", file=sys.stderr)\n print(\"Hint:\", file=sys.stderr)\n print(\" git submodule update --init\", file=sys.stderr)\n sys.exit(2)\n\n\n# NOTE: makefile cythonizes all Cython modules\n\nextensions = [\n Extension(\"aiohttp._websocket\", [\"aiohttp/_websocket.c\"]),\n Extension(\n \"aiohttp._http_parser\",\n [\n \"aiohttp/_http_parser.c\",\n \"aiohttp/_find_header.c\",\n \"vendor/llhttp/build/c/llhttp.c\",\n \"vendor/llhttp/src/native/api.c\",\n \"vendor/llhttp/src/native/http.c\",\n ],\n define_macros=[(\"LLHTTP_STRICT_MODE\", 0)],\n include_dirs=[\"vendor/llhttp/build\"],\n ),\n Extension(\"aiohttp._helpers\", [\"aiohttp/_helpers.c\"]),\n Extension(\"aiohttp._http_writer\", [\"aiohttp/_http_writer.c\"]),\n]\n\n\ntxt = (HERE / \"aiohttp\" / \"__init__.py\").read_text(\"utf-8\")\ntry:\n version = re.findall(r'^__version__ = \"([^\"]+)\"\\r?$', txt, re.M)[0]\nexcept IndexError:\n raise RuntimeError(\"Unable to determine version.\")\n\ninstall_requires = [\n \"charset-normalizer>=2.0,<3.0\",\n \"multidict>=4.5,<7.0\",\n \"async_timeout>=4.0a2,<5.0\",\n 'asynctest==0.13.0; python_version<\"3.8\"',\n \"yarl>=1.0,<2.0\",\n \"typing_extensions>=3.7.4\",\n \"frozenlist>=1.1.1\",\n \"aiosignal>=1.1.2\",\n]\n\n\ndef read(f):\n return (HERE / 
f).read_text(\"utf-8\").strip()\n\n\nargs = dict(\n name=\"aiohttp\",\n version=version,\n description=\"Async http client/server framework (asyncio)\",\n long_description=\"\\n\\n\".join((read(\"README.rst\"), read(\"CHANGES.rst\"))),\n long_description_content_type=\"text/x-rst\",\n classifiers=[\n \"License :: OSI Approved :: Apache Software License\",\n \"Intended Audience :: Developers\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Development Status :: 5 - Production/Stable\",\n \"Operating System :: POSIX\",\n \"Operating System :: MacOS :: MacOS X\",\n \"Operating System :: Microsoft :: Windows\",\n \"Topic :: Internet :: WWW/HTTP\",\n \"Framework :: AsyncIO\",\n ],\n author=\"Nikolay Kim\",\n author_email=\"[email protected]\",\n maintainer=\", \".join(\n (\n \"Nikolay Kim <[email protected]>\",\n \"Andrew Svetlov <[email protected]>\",\n )\n ),\n maintainer_email=\"[email protected]\",\n url=\"https://github.com/aio-libs/aiohttp\",\n project_urls={\n \"Chat: Gitter\": \"https://gitter.im/aio-libs/Lobby\",\n \"CI: GitHub Actions\": \"https://github.com/aio-libs/aiohttp/actions?query=workflow%3ACI\", # noqa\n \"Coverage: codecov\": \"https://codecov.io/github/aio-libs/aiohttp\",\n \"Docs: Changelog\": \"https://docs.aiohttp.org/en/stable/changes.html\",\n \"Docs: RTD\": \"https://docs.aiohttp.org\",\n \"GitHub: issues\": \"https://github.com/aio-libs/aiohttp/issues\",\n \"GitHub: repo\": \"https://github.com/aio-libs/aiohttp\",\n },\n license=\"Apache 2\",\n packages=[\"aiohttp\"],\n python_requires=\">=3.7\",\n install_requires=install_requires,\n extras_require={\n \"speedups\": [\n \"aiodns>=1.1\",\n \"Brotli\",\n \"cchardet\",\n ],\n },\n include_package_data=True,\n)\n\nif not NO_EXTENSIONS:\n print(\"*********************\")\n print(\"* Accelerated build *\")\n print(\"*********************\")\n setup(ext_modules=extensions, **args)\nelse:\n print(\"*********************\")\n print(\"* Pure Python build *\")\n print(\"*********************\")\n setup(**args)\n", "path": "setup.py"}]} | 1,729 | 293 |
gh_patches_debug_36882 | rasdani/github-patches | git_diff | ipython__ipython-5963 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Problems when displaying multiple views of ContainerWidget.
`ContainerView.render` registers anonymous functions to `this.model.on('displayed')` . This is a problem because when rendering multiple views of the same widget container, multiple instances of this anonymous function get registered to this event.
PS: This is also related to #5958
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `IPython/html/widgets/widget_container.py`
Content:
```
1 """ContainerWidget class.
2
3 Represents a container that can be used to group other widgets.
4 """
5 #-----------------------------------------------------------------------------
6 # Copyright (c) 2013, the IPython Development Team.
7 #
8 # Distributed under the terms of the Modified BSD License.
9 #
10 # The full license is in the file COPYING.txt, distributed with this software.
11 #-----------------------------------------------------------------------------
12
13 #-----------------------------------------------------------------------------
14 # Imports
15 #-----------------------------------------------------------------------------
16 from .widget import DOMWidget
17 from IPython.utils.traitlets import Unicode, Tuple, TraitError
18
19 #-----------------------------------------------------------------------------
20 # Classes
21 #-----------------------------------------------------------------------------
22
23 class ContainerWidget(DOMWidget):
24 _view_name = Unicode('ContainerView', sync=True)
25
26 # Child widgets in the container.
27 # Using a tuple here to force reassignment to update the list.
28 # When a proper notifying-list trait exists, that is what should be used here.
29 children = Tuple()
30 _children = Tuple(sync=True)
31
32
33 def __init__(self, **kwargs):
34 super(ContainerWidget, self).__init__(**kwargs)
35 self.on_displayed(ContainerWidget._fire_children_displayed)
36
37 def _fire_children_displayed(self):
38 for child in self._children:
39 child._handle_displayed()
40
41 def _children_changed(self, name, old, new):
42 """Validate children list.
43
44 Makes sure only one instance of any given model can exist in the
45 children list.
46 An excellent post on uniqifiers is available at
47 http://www.peterbe.com/plog/uniqifiers-benchmark
48 which provides the inspiration for using this implementation. Below
49 I've implemented the `f5` algorithm using Python comprehensions."""
50 if new is not None:
51 seen = {}
52 def add_item(i):
53 seen[i.model_id] = True
54 return i
55 self._children = [add_item(i) for i in new if not i.model_id in seen]
56
57
58 class PopupWidget(ContainerWidget):
59 _view_name = Unicode('PopupView', sync=True)
60
61 description = Unicode(sync=True)
62 button_text = Unicode(sync=True)
63
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/IPython/html/widgets/widget_container.py b/IPython/html/widgets/widget_container.py
--- a/IPython/html/widgets/widget_container.py
+++ b/IPython/html/widgets/widget_container.py
@@ -2,58 +2,29 @@
Represents a container that can be used to group other widgets.
"""
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, the IPython Development Team.
-#
+
+# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-#-----------------------------------------------------------------------------
-# Imports
-#-----------------------------------------------------------------------------
from .widget import DOMWidget
from IPython.utils.traitlets import Unicode, Tuple, TraitError
-#-----------------------------------------------------------------------------
-# Classes
-#-----------------------------------------------------------------------------
-
class ContainerWidget(DOMWidget):
_view_name = Unicode('ContainerView', sync=True)
# Child widgets in the container.
# Using a tuple here to force reassignment to update the list.
# When a proper notifying-list trait exists, that is what should be used here.
- children = Tuple()
- _children = Tuple(sync=True)
-
+ children = Tuple(sync=True)
def __init__(self, **kwargs):
super(ContainerWidget, self).__init__(**kwargs)
self.on_displayed(ContainerWidget._fire_children_displayed)
def _fire_children_displayed(self):
- for child in self._children:
+ for child in self.children:
child._handle_displayed()
- def _children_changed(self, name, old, new):
- """Validate children list.
-
- Makes sure only one instance of any given model can exist in the
- children list.
- An excellent post on uniqifiers is available at
- http://www.peterbe.com/plog/uniqifiers-benchmark
- which provides the inspiration for using this implementation. Below
- I've implemented the `f5` algorithm using Python comprehensions."""
- if new is not None:
- seen = {}
- def add_item(i):
- seen[i.model_id] = True
- return i
- self._children = [add_item(i) for i in new if not i.model_id in seen]
-
class PopupWidget(ContainerWidget):
_view_name = Unicode('PopupView', sync=True)
| {"golden_diff": "diff --git a/IPython/html/widgets/widget_container.py b/IPython/html/widgets/widget_container.py\n--- a/IPython/html/widgets/widget_container.py\n+++ b/IPython/html/widgets/widget_container.py\n@@ -2,58 +2,29 @@\n \n Represents a container that can be used to group other widgets.\n \"\"\"\n-#-----------------------------------------------------------------------------\n-# Copyright (c) 2013, the IPython Development Team.\n-#\n+\n+# Copyright (c) IPython Development Team.\n # Distributed under the terms of the Modified BSD License.\n-#\n-# The full license is in the file COPYING.txt, distributed with this software.\n-#-----------------------------------------------------------------------------\n \n-#-----------------------------------------------------------------------------\n-# Imports\n-#-----------------------------------------------------------------------------\n from .widget import DOMWidget\n from IPython.utils.traitlets import Unicode, Tuple, TraitError\n \n-#-----------------------------------------------------------------------------\n-# Classes\n-#-----------------------------------------------------------------------------\n-\n class ContainerWidget(DOMWidget):\n _view_name = Unicode('ContainerView', sync=True)\n \n # Child widgets in the container.\n # Using a tuple here to force reassignment to update the list.\n # When a proper notifying-list trait exists, that is what should be used here.\n- children = Tuple()\n- _children = Tuple(sync=True)\n-\n+ children = Tuple(sync=True)\n \n def __init__(self, **kwargs):\n super(ContainerWidget, self).__init__(**kwargs)\n self.on_displayed(ContainerWidget._fire_children_displayed)\n \n def _fire_children_displayed(self):\n- for child in self._children:\n+ for child in self.children:\n child._handle_displayed()\n \n- def _children_changed(self, name, old, new):\n- \"\"\"Validate children list.\n-\n- Makes sure only one instance of any given model can exist in the \n- children list.\n- An excellent post on uniqifiers is available at \n- http://www.peterbe.com/plog/uniqifiers-benchmark\n- which provides the inspiration for using this implementation. Below\n- I've implemented the `f5` algorithm using Python comprehensions.\"\"\"\n- if new is not None:\n- seen = {}\n- def add_item(i):\n- seen[i.model_id] = True\n- return i\n- self._children = [add_item(i) for i in new if not i.model_id in seen]\n-\n \n class PopupWidget(ContainerWidget):\n _view_name = Unicode('PopupView', sync=True)\n", "issue": "Problems when displaying multiple views of ContainerWidget. \n`ContainerView.render` registers anonymous functions to `this.model.on('displayed')` . This is a problem because when rendering multiple views of the same widget container, multiple instances of this anonymous function get registered to this event. \n\nPS: This is also related to #5958\n\n", "before_files": [{"content": "\"\"\"ContainerWidget class. 
\n\nRepresents a container that can be used to group other widgets.\n\"\"\"\n#-----------------------------------------------------------------------------\n# Copyright (c) 2013, the IPython Development Team.\n#\n# Distributed under the terms of the Modified BSD License.\n#\n# The full license is in the file COPYING.txt, distributed with this software.\n#-----------------------------------------------------------------------------\n\n#-----------------------------------------------------------------------------\n# Imports\n#-----------------------------------------------------------------------------\nfrom .widget import DOMWidget\nfrom IPython.utils.traitlets import Unicode, Tuple, TraitError\n\n#-----------------------------------------------------------------------------\n# Classes\n#-----------------------------------------------------------------------------\n\nclass ContainerWidget(DOMWidget):\n _view_name = Unicode('ContainerView', sync=True)\n\n # Child widgets in the container.\n # Using a tuple here to force reassignment to update the list.\n # When a proper notifying-list trait exists, that is what should be used here.\n children = Tuple()\n _children = Tuple(sync=True)\n\n\n def __init__(self, **kwargs):\n super(ContainerWidget, self).__init__(**kwargs)\n self.on_displayed(ContainerWidget._fire_children_displayed)\n\n def _fire_children_displayed(self):\n for child in self._children:\n child._handle_displayed()\n\n def _children_changed(self, name, old, new):\n \"\"\"Validate children list.\n\n Makes sure only one instance of any given model can exist in the \n children list.\n An excellent post on uniqifiers is available at \n http://www.peterbe.com/plog/uniqifiers-benchmark\n which provides the inspiration for using this implementation. Below\n I've implemented the `f5` algorithm using Python comprehensions.\"\"\"\n if new is not None:\n seen = {}\n def add_item(i):\n seen[i.model_id] = True\n return i\n self._children = [add_item(i) for i in new if not i.model_id in seen]\n\n\nclass PopupWidget(ContainerWidget):\n _view_name = Unicode('PopupView', sync=True)\n \n description = Unicode(sync=True)\n button_text = Unicode(sync=True)\n", "path": "IPython/html/widgets/widget_container.py"}], "after_files": [{"content": "\"\"\"ContainerWidget class. \n\nRepresents a container that can be used to group other widgets.\n\"\"\"\n\n# Copyright (c) IPython Development Team.\n# Distributed under the terms of the Modified BSD License.\n\nfrom .widget import DOMWidget\nfrom IPython.utils.traitlets import Unicode, Tuple, TraitError\n\nclass ContainerWidget(DOMWidget):\n _view_name = Unicode('ContainerView', sync=True)\n\n # Child widgets in the container.\n # Using a tuple here to force reassignment to update the list.\n # When a proper notifying-list trait exists, that is what should be used here.\n children = Tuple(sync=True)\n\n def __init__(self, **kwargs):\n super(ContainerWidget, self).__init__(**kwargs)\n self.on_displayed(ContainerWidget._fire_children_displayed)\n\n def _fire_children_displayed(self):\n for child in self.children:\n child._handle_displayed()\n\n\nclass PopupWidget(ContainerWidget):\n _view_name = Unicode('PopupView', sync=True)\n \n description = Unicode(sync=True)\n button_text = Unicode(sync=True)\n", "path": "IPython/html/widgets/widget_container.py"}]} | 886 | 516 |
gh_patches_debug_2794 | rasdani/github-patches | git_diff | bridgecrewio__checkov-5468 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[CKV_DOCKER_7] Erroneous failed check when --platform flag is used in multistage Dockerfile
**Describe the issue**
In the `CKV_DOCKER_7` check, when the `--platform` flag is used in a multistage Dockerfile, the check fails erroneously for images that have been created within the current Dockerfile and thus no version pegging should be required.
**Examples**
The Dockerfile below is green
```
FROM golang:1.20-bullseye AS base
FROM base AS build
```
but as soon as a `--platform` flag is added, the check fails:
```
FROM --platform=$BUILDPLATFORM golang:1.20-bullseye AS base
FROM base AS build
```
**Version (please complete the following information):**
- Checkov Version 2.4.4
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `checkov/dockerfile/checks/ReferenceLatestTag.py`
Content:
```
1 from __future__ import annotations
2
3 import re
4 from typing import TYPE_CHECKING
5
6 from checkov.common.models.enums import CheckCategories, CheckResult
7 from checkov.dockerfile.base_dockerfile_check import BaseDockerfileCheck
8
9 if TYPE_CHECKING:
10 from dockerfile_parse.parser import _Instruction
11
12 MULTI_STAGE_PATTERN = re.compile(r"(\S+)\s+as\s+(\S+)", re.IGNORECASE)
13
14
15 class ReferenceLatestTag(BaseDockerfileCheck):
16 def __init__(self) -> None:
17 name = "Ensure the base image uses a non latest version tag"
18 id = "CKV_DOCKER_7"
19 supported_instructions = ("FROM",)
20 categories = (CheckCategories.CONVENTION,)
21 super().__init__(name=name, id=id, categories=categories, supported_instructions=supported_instructions)
22
23 def scan_resource_conf(self, conf: list[_Instruction]) -> tuple[CheckResult, list[_Instruction] | None]:
24 stages = []
25
26 for content in conf:
27 base_image = content["value"]
28 if " as " in base_image.lower():
29 # do an initial lookup before using the regex
30 multi_stage = re.match(MULTI_STAGE_PATTERN, base_image)
31 if multi_stage:
32 base_image = multi_stage[1]
33 stages.append(multi_stage[2])
34
35 if ":" not in base_image and base_image not in stages and base_image != "scratch":
36 return CheckResult.FAILED, [content]
37 elif base_image.endswith(":latest"):
38 return CheckResult.FAILED, [content]
39 return CheckResult.PASSED, [content]
40
41
42 check = ReferenceLatestTag()
43
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/checkov/dockerfile/checks/ReferenceLatestTag.py b/checkov/dockerfile/checks/ReferenceLatestTag.py
--- a/checkov/dockerfile/checks/ReferenceLatestTag.py
+++ b/checkov/dockerfile/checks/ReferenceLatestTag.py
@@ -9,7 +9,7 @@
if TYPE_CHECKING:
from dockerfile_parse.parser import _Instruction
-MULTI_STAGE_PATTERN = re.compile(r"(\S+)\s+as\s+(\S+)", re.IGNORECASE)
+MULTI_STAGE_PATTERN = re.compile(r"(?:--platform=\S+\s+)?(\S+)\s+as\s+(\S+)", re.IGNORECASE)
class ReferenceLatestTag(BaseDockerfileCheck):
| {"golden_diff": "diff --git a/checkov/dockerfile/checks/ReferenceLatestTag.py b/checkov/dockerfile/checks/ReferenceLatestTag.py\n--- a/checkov/dockerfile/checks/ReferenceLatestTag.py\n+++ b/checkov/dockerfile/checks/ReferenceLatestTag.py\n@@ -9,7 +9,7 @@\n if TYPE_CHECKING:\n from dockerfile_parse.parser import _Instruction\n \n-MULTI_STAGE_PATTERN = re.compile(r\"(\\S+)\\s+as\\s+(\\S+)\", re.IGNORECASE)\n+MULTI_STAGE_PATTERN = re.compile(r\"(?:--platform=\\S+\\s+)?(\\S+)\\s+as\\s+(\\S+)\", re.IGNORECASE)\n \n \n class ReferenceLatestTag(BaseDockerfileCheck):\n", "issue": "[CKV_DOCKER_7] Erroneous failed check when --platform flag is used in multistage Dockerfile\n**Describe the issue**\r\nIn the `CKV_DOCKER_7` check, when the `--platform` flag is used in a multistage Dockerfile, the check fails erroneously for images that have been created within the current Dockerfile and thus no version pegging should be required.\r\n\r\n**Examples**\r\nThe Dockerfile below is green\r\n\r\n```\r\nFROM golang:1.20-bullseye AS base\r\n\r\nFROM base AS build\r\n```\r\n\r\nbut as soon as a `--platform` flag is added, the check fails:\r\n\r\n```\r\nFROM --platform=$BUILDPLATFORM golang:1.20-bullseye AS base\r\n\r\nFROM base AS build\r\n```\r\n\r\n**Version (please complete the following information):**\r\n - Checkov Version 2.4.4\n", "before_files": [{"content": "from __future__ import annotations\n\nimport re\nfrom typing import TYPE_CHECKING\n\nfrom checkov.common.models.enums import CheckCategories, CheckResult\nfrom checkov.dockerfile.base_dockerfile_check import BaseDockerfileCheck\n\nif TYPE_CHECKING:\n from dockerfile_parse.parser import _Instruction\n\nMULTI_STAGE_PATTERN = re.compile(r\"(\\S+)\\s+as\\s+(\\S+)\", re.IGNORECASE)\n\n\nclass ReferenceLatestTag(BaseDockerfileCheck):\n def __init__(self) -> None:\n name = \"Ensure the base image uses a non latest version tag\"\n id = \"CKV_DOCKER_7\"\n supported_instructions = (\"FROM\",)\n categories = (CheckCategories.CONVENTION,)\n super().__init__(name=name, id=id, categories=categories, supported_instructions=supported_instructions)\n\n def scan_resource_conf(self, conf: list[_Instruction]) -> tuple[CheckResult, list[_Instruction] | None]:\n stages = []\n\n for content in conf:\n base_image = content[\"value\"]\n if \" as \" in base_image.lower():\n # do an initial lookup before using the regex\n multi_stage = re.match(MULTI_STAGE_PATTERN, base_image)\n if multi_stage:\n base_image = multi_stage[1]\n stages.append(multi_stage[2])\n\n if \":\" not in base_image and base_image not in stages and base_image != \"scratch\":\n return CheckResult.FAILED, [content]\n elif base_image.endswith(\":latest\"):\n return CheckResult.FAILED, [content]\n return CheckResult.PASSED, [content]\n\n\ncheck = ReferenceLatestTag()\n", "path": "checkov/dockerfile/checks/ReferenceLatestTag.py"}], "after_files": [{"content": "from __future__ import annotations\n\nimport re\nfrom typing import TYPE_CHECKING\n\nfrom checkov.common.models.enums import CheckCategories, CheckResult\nfrom checkov.dockerfile.base_dockerfile_check import BaseDockerfileCheck\n\nif TYPE_CHECKING:\n from dockerfile_parse.parser import _Instruction\n\nMULTI_STAGE_PATTERN = re.compile(r\"(?:--platform=\\S+\\s+)?(\\S+)\\s+as\\s+(\\S+)\", re.IGNORECASE)\n\n\nclass ReferenceLatestTag(BaseDockerfileCheck):\n def __init__(self) -> None:\n name = \"Ensure the base image uses a non latest version tag\"\n id = \"CKV_DOCKER_7\"\n supported_instructions = (\"FROM\",)\n categories = (CheckCategories.CONVENTION,)\n 
super().__init__(name=name, id=id, categories=categories, supported_instructions=supported_instructions)\n\n def scan_resource_conf(self, conf: list[_Instruction]) -> tuple[CheckResult, list[_Instruction] | None]:\n stages = []\n\n for content in conf:\n base_image = content[\"value\"]\n if \" as \" in base_image.lower():\n # do an initial lookup before using the regex\n multi_stage = re.match(MULTI_STAGE_PATTERN, base_image)\n if multi_stage:\n base_image = multi_stage[1]\n stages.append(multi_stage[2])\n\n if \":\" not in base_image and base_image not in stages and base_image != \"scratch\":\n return CheckResult.FAILED, [content]\n elif base_image.endswith(\":latest\"):\n return CheckResult.FAILED, [content]\n return CheckResult.PASSED, [content]\n\n\ncheck = ReferenceLatestTag()\n", "path": "checkov/dockerfile/checks/ReferenceLatestTag.py"}]} | 883 | 151 |
gh_patches_debug_31303 | rasdani/github-patches | git_diff | alltheplaces__alltheplaces-3410 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Spider anytime_fitness is broken
During the global build at 2021-09-29-14-42-48, spider **anytime_fitness** failed with **0 features** and **0 errors**.
Here's [the log](https://data.alltheplaces.xyz/runs/2021-09-29-14-42-48/logs/anytime_fitness.txt) and [the output](https://data.alltheplaces.xyz/runs/2021-09-29-14-42-48/output/anytime_fitness.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-09-29-14-42-48/output/anytime_fitness.geojson))
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `locations/spiders/anytime_fitness.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 import scrapy
3 from locations.items import GeojsonPointItem
4 import json
5 import html
6
7
8 class AnytimeFitnessSpider(scrapy.Spider):
9 name = 'anytime_fitness'
10 item_attributes = { 'brand': "Anytime Fitness" }
11 allowed_domains = ['www.anytimefitness.com']
12
13 def start_requests(self):
14 url = 'https://www.anytimefitness.com/wp-content/uploads/gyms.json'
15 yield scrapy.Request(url, callback=self.parse)
16
17 def parse(self, response):
18 gyms = json.loads(response.body_as_unicode())
19
20 for gym in gyms:
21 yield GeojsonPointItem(
22 lat = gym['latitude'],
23 lon = gym['longitude'],
24 addr_full = ", ".join(filter(None, [gym['content']['address'], gym['content']['address2']])),
25 city = gym['content']['city'],
26 phone = gym['content']['phone'],
27 state = gym['content']['state_abbr'],
28 postcode = gym['content']['zip'],
29 ref = gym['content']['url'],
30 country = gym['content']['country'],
31 name = html.unescape(gym['content']['title']),
32 extras = {"number": gym['content']['number']}
33 )
34
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/locations/spiders/anytime_fitness.py b/locations/spiders/anytime_fitness.py
--- a/locations/spiders/anytime_fitness.py
+++ b/locations/spiders/anytime_fitness.py
@@ -7,27 +7,27 @@
class AnytimeFitnessSpider(scrapy.Spider):
name = 'anytime_fitness'
- item_attributes = { 'brand': "Anytime Fitness" }
+ item_attributes = {'brand': "Anytime Fitness", 'brand_wikidata': "Q4778364"}
allowed_domains = ['www.anytimefitness.com']
def start_requests(self):
- url = 'https://www.anytimefitness.com/wp-content/uploads/gyms.json'
+ url = 'https://www.anytimefitness.com/wp-content/uploads/locations.json'
yield scrapy.Request(url, callback=self.parse)
def parse(self, response):
- gyms = json.loads(response.body_as_unicode())
+ gyms = json.loads(response.text)
for gym in gyms:
yield GeojsonPointItem(
- lat = gym['latitude'],
- lon = gym['longitude'],
- addr_full = ", ".join(filter(None, [gym['content']['address'], gym['content']['address2']])),
- city = gym['content']['city'],
- phone = gym['content']['phone'],
- state = gym['content']['state_abbr'],
- postcode = gym['content']['zip'],
- ref = gym['content']['url'],
- country = gym['content']['country'],
- name = html.unescape(gym['content']['title']),
- extras = {"number": gym['content']['number']}
- )
+ lat=gym['latitude'],
+ lon=gym['longitude'],
+ addr_full=", ".join(filter(None, [gym['content']['address'], gym['content']['address2']])),
+ city=gym['content']['city'],
+ phone=gym['content']['phone'],
+ state=gym['content']['state_abbr'],
+ postcode=gym['content']['zip'],
+ ref=gym['content']['url'],
+ country=gym['content']['country'],
+ name=html.unescape(gym['content']['title']),
+ extras={"number": gym['content']['number']}
+ )
| {"golden_diff": "diff --git a/locations/spiders/anytime_fitness.py b/locations/spiders/anytime_fitness.py\n--- a/locations/spiders/anytime_fitness.py\n+++ b/locations/spiders/anytime_fitness.py\n@@ -7,27 +7,27 @@\n \n class AnytimeFitnessSpider(scrapy.Spider):\n name = 'anytime_fitness'\n- item_attributes = { 'brand': \"Anytime Fitness\" }\n+ item_attributes = {'brand': \"Anytime Fitness\", 'brand_wikidata': \"Q4778364\"}\n allowed_domains = ['www.anytimefitness.com']\n \n def start_requests(self):\n- url = 'https://www.anytimefitness.com/wp-content/uploads/gyms.json'\n+ url = 'https://www.anytimefitness.com/wp-content/uploads/locations.json'\n yield scrapy.Request(url, callback=self.parse)\n \n def parse(self, response):\n- gyms = json.loads(response.body_as_unicode())\n+ gyms = json.loads(response.text)\n \n for gym in gyms:\n yield GeojsonPointItem(\n- lat = gym['latitude'],\n- lon = gym['longitude'],\n- addr_full = \", \".join(filter(None, [gym['content']['address'], gym['content']['address2']])),\n- city = gym['content']['city'],\n- phone = gym['content']['phone'],\n- state = gym['content']['state_abbr'],\n- postcode = gym['content']['zip'],\n- ref = gym['content']['url'],\n- country = gym['content']['country'],\n- name = html.unescape(gym['content']['title']),\n- extras = {\"number\": gym['content']['number']}\n- )\n+ lat=gym['latitude'],\n+ lon=gym['longitude'],\n+ addr_full=\", \".join(filter(None, [gym['content']['address'], gym['content']['address2']])),\n+ city=gym['content']['city'],\n+ phone=gym['content']['phone'],\n+ state=gym['content']['state_abbr'],\n+ postcode=gym['content']['zip'],\n+ ref=gym['content']['url'],\n+ country=gym['content']['country'],\n+ name=html.unescape(gym['content']['title']),\n+ extras={\"number\": gym['content']['number']}\n+ )\n", "issue": "Spider anytime_fitness is broken\nDuring the global build at 2021-09-29-14-42-48, spider **anytime_fitness** failed with **0 features** and **0 errors**.\n\nHere's [the log](https://data.alltheplaces.xyz/runs/2021-09-29-14-42-48/logs/anytime_fitness.txt) and [the output](https://data.alltheplaces.xyz/runs/2021-09-29-14-42-48/output/anytime_fitness.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-09-29-14-42-48/output/anytime_fitness.geojson))\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nimport scrapy\nfrom locations.items import GeojsonPointItem\nimport json\nimport html\n\n\nclass AnytimeFitnessSpider(scrapy.Spider):\n name = 'anytime_fitness'\n item_attributes = { 'brand': \"Anytime Fitness\" }\n allowed_domains = ['www.anytimefitness.com']\n\n def start_requests(self):\n url = 'https://www.anytimefitness.com/wp-content/uploads/gyms.json'\n yield scrapy.Request(url, callback=self.parse)\n\n def parse(self, response):\n gyms = json.loads(response.body_as_unicode())\n\n for gym in gyms:\n yield GeojsonPointItem(\n lat = gym['latitude'],\n lon = gym['longitude'],\n addr_full = \", \".join(filter(None, [gym['content']['address'], gym['content']['address2']])),\n city = gym['content']['city'],\n phone = gym['content']['phone'],\n state = gym['content']['state_abbr'],\n postcode = gym['content']['zip'],\n ref = gym['content']['url'],\n country = gym['content']['country'],\n name = html.unescape(gym['content']['title']),\n extras = {\"number\": gym['content']['number']}\n )\n", "path": "locations/spiders/anytime_fitness.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\nimport scrapy\nfrom locations.items import GeojsonPointItem\nimport 
json\nimport html\n\n\nclass AnytimeFitnessSpider(scrapy.Spider):\n name = 'anytime_fitness'\n item_attributes = {'brand': \"Anytime Fitness\", 'brand_wikidata': \"Q4778364\"}\n allowed_domains = ['www.anytimefitness.com']\n\n def start_requests(self):\n url = 'https://www.anytimefitness.com/wp-content/uploads/locations.json'\n yield scrapy.Request(url, callback=self.parse)\n\n def parse(self, response):\n gyms = json.loads(response.text)\n\n for gym in gyms:\n yield GeojsonPointItem(\n lat=gym['latitude'],\n lon=gym['longitude'],\n addr_full=\", \".join(filter(None, [gym['content']['address'], gym['content']['address2']])),\n city=gym['content']['city'],\n phone=gym['content']['phone'],\n state=gym['content']['state_abbr'],\n postcode=gym['content']['zip'],\n ref=gym['content']['url'],\n country=gym['content']['country'],\n name=html.unescape(gym['content']['title']),\n extras={\"number\": gym['content']['number']}\n )\n", "path": "locations/spiders/anytime_fitness.py"}]} | 772 | 503 |
gh_patches_debug_17957 | rasdani/github-patches | git_diff | voxel51__fiftyone-185 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Can't load dataset on latest develop
On latest `develop`, after rerunning the install script, I see:
```py
import fiftyone as fo
import fiftyone.zoo as foz
dataset = foz.load_zoo_dataset("cifar10", split="test")
# Output: Split 'test' already downloaded
# Output: Loading existing dataset 'cifar10-test'. To reload from disk, first delete the existing dataset
fo.launch_dashboard(dataset)
```
<img width="1662" alt="Screen Shot 2020-06-10 at 5 50 09 PM" src="https://user-images.githubusercontent.com/25985824/84322368-dd775d00-ab42-11ea-8d96-31dcdb3ea85b.png">
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `fiftyone/server/main.py`
Content:
```
1 """
2 FiftyOne Flask server.
3
4 | Copyright 2017-2020, Voxel51, Inc.
5 | `voxel51.com <https://voxel51.com/>`_
6 |
7 """
8 # pragma pylint: disable=redefined-builtin
9 # pragma pylint: disable=unused-wildcard-import
10 # pragma pylint: disable=wildcard-import
11 from __future__ import absolute_import
12 from __future__ import division
13 from __future__ import print_function
14 from __future__ import unicode_literals
15 from builtins import *
16
17 # pragma pylint: enable=redefined-builtin
18 # pragma pylint: enable=unused-wildcard-import
19 # pragma pylint: enable=wildcard-import
20
21 import logging
22 import os
23
24 from flask import Flask, request, send_file
25 from flask_socketio import emit, Namespace, SocketIO
26
27 os.environ["FIFTYONE_SERVER"] = "1"
28 import fiftyone.core.fields as fof
29 import fiftyone.core.state as fos
30
31 from util import get_image_size
32 from pipelines import DISTRIBUTION_PIPELINES, LABELS, SCALARS
33
34 logger = logging.getLogger(__name__)
35
36 app = Flask(__name__)
37 app.config["SECRET_KEY"] = "fiftyone"
38
39 socketio = SocketIO(app, async_mode="eventlet", cors_allowed_origin="*")
40
41
42 @app.route("/")
43 def get_sample_media():
44 """Gets the sample media.
45
46 Returns:
47 bytes
48 """
49 path = request.args.get("path")
50 return send_file(path)
51
52
53 def _load_state(func):
54 def wrapper(self, *args, **kwargs):
55 state = fos.StateDescription.from_dict(self.state)
56 state = func(self, state, *args, **kwargs)
57 self.state = state.serialize()
58 emit("update", self.state, broadcast=True, include_self=False)
59 return self.state
60
61 return wrapper
62
63
64 class StateController(Namespace):
65 """State controller.
66
67 Attributes:
68 state: a :class:`fiftyone.core.state.StateDescription` instance
69
70 Args:
71         **args: positional arguments for ``flask_socketio.Namespace``
72 **kwargs: keyword arguments for ``flask_socketio.Namespace``
73 """
74
75 def __init__(self, *args, **kwargs):
76 self.state = fos.StateDescription().serialize()
77 super(StateController, self).__init__(*args, **kwargs)
78
79 def on_connect(self):
80 """Handles connection to the server."""
81 pass
82
83 def on_disconnect(self):
84 """Handles disconnection from the server."""
85 pass
86
87 def on_update(self, state):
88 """Updates the state.
89
90 Args:
91 state: a serialized :class:`fiftyone.core.state.StateDescription`
92 """
93 self.state = state
94 emit("update", state, broadcast=True, include_self=False)
95
96 def on_get_current_state(self, _):
97 """Gets the current state.
98
99 Returns:
100 a :class:`fiftyone.core.state.StateDescription`
101 """
102 return self.state
103
104 @_load_state
105 def on_add_selection(self, state, _id):
106 """Adds a sample to the selected samples list.
107
108 Args:
109 state: the current :class:`fiftyone.core.state.StateDescription`
110 _id: the sample ID
111
112 Returns:
113 the updated :class:`fiftyone.core.state.StateDescription`
114 """
115 selected = set(state.selected)
116 selected.add(_id)
117 state.selected = list(selected)
118 return state
119
120 @_load_state
121 def on_remove_selection(self, state, _id):
122 """Remove a sample from the selected samples list
123
124 Args:
125 state: the current :class:`fiftyone.core.state.StateDescription`
126 _id: the sample ID
127
128 Returns:
129 the updated :class:`fiftyone.core.state.StateDescription`
130 """
131 selected = set(state.selected)
132 selected.remove(_id)
133 state.selected = list(selected)
134 return state
135
136 def on_page(self, page, page_length=20):
137 """Gets the requested page of samples.
138
139 Args:
140 page: the page number
141 page_length: the page length
142
143 Returns:
144 the list of sample dicts for the page
145 """
146 state = fos.StateDescription.from_dict(self.state)
147 if state.view is not None:
148 view = state.view
149 elif state.dataset is not None:
150 view = state.dataset.view()
151 else:
152 return []
153
154 view = view.skip((page - 1) * page_length).limit(page_length + 1)
155 samples = [s.to_dict(extended=True) for s in view]
156 more = False
157 if len(samples) > page_length:
158 samples = samples[:page_length]
159 more = page + 1
160
161 results = [{"sample": s} for s in samples]
162 for r in results:
163 w, h = get_image_size(r["sample"]["filepath"])
164 r["width"] = w
165 r["height"] = h
166
167 return {"results": results, "more": more}
168
169 def on_lengths(self, _):
170 state = fos.StateDescription.from_dict(self.state)
171 if state.view is not None:
172 view = state.view
173 elif state.dataset is not None:
174 view = state.dataset.view()
175 else:
176 return []
177 return {"labels": view.get_label_fields(), "tags": view.get_tags()}
178
179 def on_get_distributions(self, group):
180 """Gets the distributions for the current state with respect to a group,
181
182 Args:
183 group: one of "labels", "tags", or "scalars"
184
185 Returns:
186 a list of distributions
187 """
188 state = fos.StateDescription.from_dict(self.state)
189 if state.view is not None:
190 view = state.view
191 elif state.dataset is not None:
192 view = state.dataset.view()
193 else:
194 return []
195
196 return _get_distributions(view, group)
197
198 def on_get_facets(self, _):
199 """Gets the facets for the current state.
200
201 Args:
202 _: the message, which is not used
203
204 Returns:
205 the list of facets
206 """
207 state = fos.StateDescription.from_dict(self.state)
208 if state.view is not None:
209 view = state.view
210 elif state.dataset is not None:
211 view = state.dataset.view()
212 else:
213 return []
214
215 return view._get_facets()
216
217 def on_set_facets(self, facets):
218 """Sets the facets for the current state.
219
220 Args:
221 facets: the facets string
222 """
223 _, value = facets.split(".")
224 state = fos.StateDescription.from_dict(self.state)
225 state.view = state.dataset.view().match_tag(value)
226 self.state = state.serialize()
227 emit("update", self.state, broadcast=True, include_self=True)
228
229
230 def _get_distributions(view, group):
231 pipeline = DISTRIBUTION_PIPELINES[group]
232
233 # we add a sub-pipeline for each numeric as it looks like multiple
234 # buckets in a single pipeline is not supported
235 if group == SCALARS:
236 _numeric_distribution_pipelines(view, pipeline)
237
238 result = list(view.aggregate(pipeline))
239
240 if group in {LABELS, SCALARS}:
241 new_result = []
242 for f in result[0].values():
243 new_result += f
244 result = new_result
245
246 if group != SCALARS:
247 for idx, dist in enumerate(result):
248 result[idx]["data"] = sorted(
249 result[idx]["data"], key=lambda c: c["count"], reverse=True
250 )
251
252 return sorted(result, key=lambda d: d["name"])
253
254
255 def _numeric_bounds(view, numerics):
256 bounds_pipeline = [{"$facet": {}}]
257 for idx, (k, v) in enumerate(numerics.items()):
258 bounds_pipeline[0]["$facet"]["numeric-%d" % idx] = [
259 {
260 "$group": {
261 "_id": k,
262 "min": {"$min": "$%s" % k},
263 "max": {"$max": "$%s" % k},
264 },
265 }
266 ]
267
268 return list(view.aggregate(bounds_pipeline))[0] if len(numerics) else {}
269
270
271 def _numeric_distribution_pipelines(view, pipeline, buckets=50):
272 numerics = view._dataset.get_field_schema(ftype=fof.IntField)
273 numerics.update(view._dataset.get_field_schema(ftype=fof.FloatField))
274
275 # here we query the min and max for each numeric field
276 # unfortunately, it looks like this has to be a separate query
277 bounds = _numeric_bounds(view, numerics)
278
279 # for each numeric field, build the boundaries array with the
280 # min/max results when adding the field's sub-pipeline
281 for idx, (k, v) in enumerate(numerics.items()):
282 sub_pipeline = "numeric-%d" % idx
283 field_bounds = bounds[sub_pipeline][0]
284 mn = field_bounds["min"]
285 mx = field_bounds["max"]
286 step = (mx - mn) / buckets
287 boundaries = [mn + step * s for s in range(0, buckets)]
288
289 pipeline[0]["$facet"][sub_pipeline] = [
290 {
291 "$bucket": {
292 "groupBy": "$%s" % k,
293 "boundaries": boundaries,
294 "default": "null",
295 "output": {"count": {"$sum": 1}},
296 }
297 },
298 {
299 "$group": {
300 "_id": k,
301 "data": {
302 "$push": {
303 "key": {
304 "$cond": [
305 {"$ne": ["$_id", "null"]},
306 {"$add": ["$_id", step / 2]},
307 "null",
308 ]
309 },
310 "count": "$count",
311 }
312 },
313 }
314 },
315 {
316 "$project": {
317 "name": k,
318 "type": v.__class__.__name__[
319 : -len("Field") # grab field type from the class
320 ].lower(),
321 "data": "$data",
322 }
323 },
324 ]
325
326
327 socketio.on_namespace(StateController("/state"))
328
329
330 if __name__ == "__main__":
331 socketio.run(app, debug=True)
332
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/fiftyone/server/main.py b/fiftyone/server/main.py
--- a/fiftyone/server/main.py
+++ b/fiftyone/server/main.py
@@ -18,9 +18,11 @@
# pragma pylint: enable=unused-wildcard-import
# pragma pylint: enable=wildcard-import
+import json
import logging
import os
+from bson import json_util
from flask import Flask, request, send_file
from flask_socketio import emit, Namespace, SocketIO
@@ -152,7 +154,9 @@
return []
view = view.skip((page - 1) * page_length).limit(page_length + 1)
- samples = [s.to_dict(extended=True) for s in view]
+ samples = [
+ json.loads(json_util.dumps(s.to_mongo_dict())) for s in view
+ ]
more = False
if len(samples) > page_length:
samples = samples[:page_length]
| {"golden_diff": "diff --git a/fiftyone/server/main.py b/fiftyone/server/main.py\n--- a/fiftyone/server/main.py\n+++ b/fiftyone/server/main.py\n@@ -18,9 +18,11 @@\n # pragma pylint: enable=unused-wildcard-import\n # pragma pylint: enable=wildcard-import\n \n+import json\n import logging\n import os\n \n+from bson import json_util\n from flask import Flask, request, send_file\n from flask_socketio import emit, Namespace, SocketIO\n \n@@ -152,7 +154,9 @@\n return []\n \n view = view.skip((page - 1) * page_length).limit(page_length + 1)\n- samples = [s.to_dict(extended=True) for s in view]\n+ samples = [\n+ json.loads(json_util.dumps(s.to_mongo_dict())) for s in view\n+ ]\n more = False\n if len(samples) > page_length:\n samples = samples[:page_length]\n", "issue": "Can't load dataset on latest develop\nOn latest `develop`, after rerunning the install script, I see:\r\n```py\r\nimport fiftyone as fo \r\nimport fiftyone.zoo as foz \r\n\r\ndataset = foz.load_zoo_dataset(\"cifar10\", split=\"test\") \r\nSplit 'test' already downloaded\r\nLoading existing dataset 'cifar10-test'. To reload from disk, first delete the existing dataset\r\n\r\nfo.launch_dashboard(dataset) \r\n```\r\n\r\n<img width=\"1662\" alt=\"Screen Shot 2020-06-10 at 5 50 09 PM\" src=\"https://user-images.githubusercontent.com/25985824/84322368-dd775d00-ab42-11ea-8d96-31dcdb3ea85b.png\">\r\n\n", "before_files": [{"content": "\"\"\"\nFiftyOne Flask server.\n\n| Copyright 2017-2020, Voxel51, Inc.\n| `voxel51.com <https://voxel51.com/>`_\n|\n\"\"\"\n# pragma pylint: disable=redefined-builtin\n# pragma pylint: disable=unused-wildcard-import\n# pragma pylint: disable=wildcard-import\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\nfrom builtins import *\n\n# pragma pylint: enable=redefined-builtin\n# pragma pylint: enable=unused-wildcard-import\n# pragma pylint: enable=wildcard-import\n\nimport logging\nimport os\n\nfrom flask import Flask, request, send_file\nfrom flask_socketio import emit, Namespace, SocketIO\n\nos.environ[\"FIFTYONE_SERVER\"] = \"1\"\nimport fiftyone.core.fields as fof\nimport fiftyone.core.state as fos\n\nfrom util import get_image_size\nfrom pipelines import DISTRIBUTION_PIPELINES, LABELS, SCALARS\n\nlogger = logging.getLogger(__name__)\n\napp = Flask(__name__)\napp.config[\"SECRET_KEY\"] = \"fiftyone\"\n\nsocketio = SocketIO(app, async_mode=\"eventlet\", cors_allowed_origin=\"*\")\n\n\[email protected](\"/\")\ndef get_sample_media():\n \"\"\"Gets the sample media.\n\n Returns:\n bytes\n \"\"\"\n path = request.args.get(\"path\")\n return send_file(path)\n\n\ndef _load_state(func):\n def wrapper(self, *args, **kwargs):\n state = fos.StateDescription.from_dict(self.state)\n state = func(self, state, *args, **kwargs)\n self.state = state.serialize()\n emit(\"update\", self.state, broadcast=True, include_self=False)\n return self.state\n\n return wrapper\n\n\nclass StateController(Namespace):\n \"\"\"State controller.\n\n Attributes:\n state: a :class:`fiftyone.core.state.StateDescription` instance\n\n Args:\n **args: postional arguments for ``flask_socketio.Namespace``\n **kwargs: keyword arguments for ``flask_socketio.Namespace``\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n self.state = fos.StateDescription().serialize()\n super(StateController, self).__init__(*args, **kwargs)\n\n def on_connect(self):\n \"\"\"Handles connection to the server.\"\"\"\n pass\n\n def on_disconnect(self):\n \"\"\"Handles 
disconnection from the server.\"\"\"\n pass\n\n def on_update(self, state):\n \"\"\"Updates the state.\n\n Args:\n state: a serialized :class:`fiftyone.core.state.StateDescription`\n \"\"\"\n self.state = state\n emit(\"update\", state, broadcast=True, include_self=False)\n\n def on_get_current_state(self, _):\n \"\"\"Gets the current state.\n\n Returns:\n a :class:`fiftyone.core.state.StateDescription`\n \"\"\"\n return self.state\n\n @_load_state\n def on_add_selection(self, state, _id):\n \"\"\"Adds a sample to the selected samples list.\n\n Args:\n state: the current :class:`fiftyone.core.state.StateDescription`\n _id: the sample ID\n\n Returns:\n the updated :class:`fiftyone.core.state.StateDescription`\n \"\"\"\n selected = set(state.selected)\n selected.add(_id)\n state.selected = list(selected)\n return state\n\n @_load_state\n def on_remove_selection(self, state, _id):\n \"\"\"Remove a sample from the selected samples list\n\n Args:\n state: the current :class:`fiftyone.core.state.StateDescription`\n _id: the sample ID\n\n Returns:\n the updated :class:`fiftyone.core.state.StateDescription`\n \"\"\"\n selected = set(state.selected)\n selected.remove(_id)\n state.selected = list(selected)\n return state\n\n def on_page(self, page, page_length=20):\n \"\"\"Gets the requested page of samples.\n\n Args:\n page: the page number\n page_length: the page length\n\n Returns:\n the list of sample dicts for the page\n \"\"\"\n state = fos.StateDescription.from_dict(self.state)\n if state.view is not None:\n view = state.view\n elif state.dataset is not None:\n view = state.dataset.view()\n else:\n return []\n\n view = view.skip((page - 1) * page_length).limit(page_length + 1)\n samples = [s.to_dict(extended=True) for s in view]\n more = False\n if len(samples) > page_length:\n samples = samples[:page_length]\n more = page + 1\n\n results = [{\"sample\": s} for s in samples]\n for r in results:\n w, h = get_image_size(r[\"sample\"][\"filepath\"])\n r[\"width\"] = w\n r[\"height\"] = h\n\n return {\"results\": results, \"more\": more}\n\n def on_lengths(self, _):\n state = fos.StateDescription.from_dict(self.state)\n if state.view is not None:\n view = state.view\n elif state.dataset is not None:\n view = state.dataset.view()\n else:\n return []\n return {\"labels\": view.get_label_fields(), \"tags\": view.get_tags()}\n\n def on_get_distributions(self, group):\n \"\"\"Gets the distributions for the current state with respect to a group,\n\n Args:\n group: one of \"labels\", \"tags\", or \"scalars\"\n\n Returns:\n a list of distributions\n \"\"\"\n state = fos.StateDescription.from_dict(self.state)\n if state.view is not None:\n view = state.view\n elif state.dataset is not None:\n view = state.dataset.view()\n else:\n return []\n\n return _get_distributions(view, group)\n\n def on_get_facets(self, _):\n \"\"\"Gets the facets for the current state.\n\n Args:\n _: the message, which is not used\n\n Returns:\n the list of facets\n \"\"\"\n state = fos.StateDescription.from_dict(self.state)\n if state.view is not None:\n view = state.view\n elif state.dataset is not None:\n view = state.dataset.view()\n else:\n return []\n\n return view._get_facets()\n\n def on_set_facets(self, facets):\n \"\"\"Sets the facets for the current state.\n\n Args:\n facets: the facets string\n \"\"\"\n _, value = facets.split(\".\")\n state = fos.StateDescription.from_dict(self.state)\n state.view = state.dataset.view().match_tag(value)\n self.state = state.serialize()\n emit(\"update\", self.state, broadcast=True, 
include_self=True)\n\n\ndef _get_distributions(view, group):\n pipeline = DISTRIBUTION_PIPELINES[group]\n\n # we add a sub-pipeline for each numeric as it looks like multiple\n # buckets in a single pipeline is not supported\n if group == SCALARS:\n _numeric_distribution_pipelines(view, pipeline)\n\n result = list(view.aggregate(pipeline))\n\n if group in {LABELS, SCALARS}:\n new_result = []\n for f in result[0].values():\n new_result += f\n result = new_result\n\n if group != SCALARS:\n for idx, dist in enumerate(result):\n result[idx][\"data\"] = sorted(\n result[idx][\"data\"], key=lambda c: c[\"count\"], reverse=True\n )\n\n return sorted(result, key=lambda d: d[\"name\"])\n\n\ndef _numeric_bounds(view, numerics):\n bounds_pipeline = [{\"$facet\": {}}]\n for idx, (k, v) in enumerate(numerics.items()):\n bounds_pipeline[0][\"$facet\"][\"numeric-%d\" % idx] = [\n {\n \"$group\": {\n \"_id\": k,\n \"min\": {\"$min\": \"$%s\" % k},\n \"max\": {\"$max\": \"$%s\" % k},\n },\n }\n ]\n\n return list(view.aggregate(bounds_pipeline))[0] if len(numerics) else {}\n\n\ndef _numeric_distribution_pipelines(view, pipeline, buckets=50):\n numerics = view._dataset.get_field_schema(ftype=fof.IntField)\n numerics.update(view._dataset.get_field_schema(ftype=fof.FloatField))\n\n # here we query the min and max for each numeric field\n # unfortunately, it looks like this has to be a separate query\n bounds = _numeric_bounds(view, numerics)\n\n # for each numeric field, build the boundaries array with the\n # min/max results when adding the field's sub-pipeline\n for idx, (k, v) in enumerate(numerics.items()):\n sub_pipeline = \"numeric-%d\" % idx\n field_bounds = bounds[sub_pipeline][0]\n mn = field_bounds[\"min\"]\n mx = field_bounds[\"max\"]\n step = (mx - mn) / buckets\n boundaries = [mn + step * s for s in range(0, buckets)]\n\n pipeline[0][\"$facet\"][sub_pipeline] = [\n {\n \"$bucket\": {\n \"groupBy\": \"$%s\" % k,\n \"boundaries\": boundaries,\n \"default\": \"null\",\n \"output\": {\"count\": {\"$sum\": 1}},\n }\n },\n {\n \"$group\": {\n \"_id\": k,\n \"data\": {\n \"$push\": {\n \"key\": {\n \"$cond\": [\n {\"$ne\": [\"$_id\", \"null\"]},\n {\"$add\": [\"$_id\", step / 2]},\n \"null\",\n ]\n },\n \"count\": \"$count\",\n }\n },\n }\n },\n {\n \"$project\": {\n \"name\": k,\n \"type\": v.__class__.__name__[\n : -len(\"Field\") # grab field type from the class\n ].lower(),\n \"data\": \"$data\",\n }\n },\n ]\n\n\nsocketio.on_namespace(StateController(\"/state\"))\n\n\nif __name__ == \"__main__\":\n socketio.run(app, debug=True)\n", "path": "fiftyone/server/main.py"}], "after_files": [{"content": "\"\"\"\nFiftyOne Flask server.\n\n| Copyright 2017-2020, Voxel51, Inc.\n| `voxel51.com <https://voxel51.com/>`_\n|\n\"\"\"\n# pragma pylint: disable=redefined-builtin\n# pragma pylint: disable=unused-wildcard-import\n# pragma pylint: disable=wildcard-import\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\nfrom builtins import *\n\n# pragma pylint: enable=redefined-builtin\n# pragma pylint: enable=unused-wildcard-import\n# pragma pylint: enable=wildcard-import\n\nimport json\nimport logging\nimport os\n\nfrom bson import json_util\nfrom flask import Flask, request, send_file\nfrom flask_socketio import emit, Namespace, SocketIO\n\nos.environ[\"FIFTYONE_SERVER\"] = \"1\"\nimport fiftyone.core.fields as fof\nimport fiftyone.core.state as fos\n\nfrom util import get_image_size\nfrom pipelines import 
DISTRIBUTION_PIPELINES, LABELS, SCALARS\n\nlogger = logging.getLogger(__name__)\n\napp = Flask(__name__)\napp.config[\"SECRET_KEY\"] = \"fiftyone\"\n\nsocketio = SocketIO(app, async_mode=\"eventlet\", cors_allowed_origin=\"*\")\n\n\[email protected](\"/\")\ndef get_sample_media():\n \"\"\"Gets the sample media.\n\n Returns:\n bytes\n \"\"\"\n path = request.args.get(\"path\")\n return send_file(path)\n\n\ndef _load_state(func):\n def wrapper(self, *args, **kwargs):\n state = fos.StateDescription.from_dict(self.state)\n state = func(self, state, *args, **kwargs)\n self.state = state.serialize()\n emit(\"update\", self.state, broadcast=True, include_self=False)\n return self.state\n\n return wrapper\n\n\nclass StateController(Namespace):\n \"\"\"State controller.\n\n Attributes:\n state: a :class:`fiftyone.core.state.StateDescription` instance\n\n Args:\n **args: postional arguments for ``flask_socketio.Namespace``\n **kwargs: keyword arguments for ``flask_socketio.Namespace``\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n self.state = fos.StateDescription().serialize()\n super(StateController, self).__init__(*args, **kwargs)\n\n def on_connect(self):\n \"\"\"Handles connection to the server.\"\"\"\n pass\n\n def on_disconnect(self):\n \"\"\"Handles disconnection from the server.\"\"\"\n pass\n\n def on_update(self, state):\n \"\"\"Updates the state.\n\n Args:\n state: a serialized :class:`fiftyone.core.state.StateDescription`\n \"\"\"\n self.state = state\n emit(\"update\", state, broadcast=True, include_self=False)\n\n def on_get_current_state(self, _):\n \"\"\"Gets the current state.\n\n Returns:\n a :class:`fiftyone.core.state.StateDescription`\n \"\"\"\n return self.state\n\n @_load_state\n def on_add_selection(self, state, _id):\n \"\"\"Adds a sample to the selected samples list.\n\n Args:\n state: the current :class:`fiftyone.core.state.StateDescription`\n _id: the sample ID\n\n Returns:\n the updated :class:`fiftyone.core.state.StateDescription`\n \"\"\"\n selected = set(state.selected)\n selected.add(_id)\n state.selected = list(selected)\n return state\n\n @_load_state\n def on_remove_selection(self, state, _id):\n \"\"\"Remove a sample from the selected samples list\n\n Args:\n state: the current :class:`fiftyone.core.state.StateDescription`\n _id: the sample ID\n\n Returns:\n the updated :class:`fiftyone.core.state.StateDescription`\n \"\"\"\n selected = set(state.selected)\n selected.remove(_id)\n state.selected = list(selected)\n return state\n\n def on_page(self, page, page_length=20):\n \"\"\"Gets the requested page of samples.\n\n Args:\n page: the page number\n page_length: the page length\n\n Returns:\n the list of sample dicts for the page\n \"\"\"\n state = fos.StateDescription.from_dict(self.state)\n if state.view is not None:\n view = state.view\n elif state.dataset is not None:\n view = state.dataset.view()\n else:\n return []\n\n view = view.skip((page - 1) * page_length).limit(page_length + 1)\n samples = [\n json.loads(json_util.dumps(s.to_mongo_dict())) for s in view\n ]\n more = False\n if len(samples) > page_length:\n samples = samples[:page_length]\n more = page + 1\n\n results = [{\"sample\": s} for s in samples]\n for r in results:\n w, h = get_image_size(r[\"sample\"][\"filepath\"])\n r[\"width\"] = w\n r[\"height\"] = h\n\n return {\"results\": results, \"more\": more}\n\n def on_lengths(self, _):\n state = fos.StateDescription.from_dict(self.state)\n if state.view is not None:\n view = state.view\n elif state.dataset is not None:\n view = 
state.dataset.view()\n else:\n return []\n return {\"labels\": view.get_label_fields(), \"tags\": view.get_tags()}\n\n def on_get_distributions(self, group):\n \"\"\"Gets the distributions for the current state with respect to a group,\n\n Args:\n group: one of \"labels\", \"tags\", or \"scalars\"\n\n Returns:\n a list of distributions\n \"\"\"\n state = fos.StateDescription.from_dict(self.state)\n if state.view is not None:\n view = state.view\n elif state.dataset is not None:\n view = state.dataset.view()\n else:\n return []\n\n return _get_distributions(view, group)\n\n def on_get_facets(self, _):\n \"\"\"Gets the facets for the current state.\n\n Args:\n _: the message, which is not used\n\n Returns:\n the list of facets\n \"\"\"\n state = fos.StateDescription.from_dict(self.state)\n if state.view is not None:\n view = state.view\n elif state.dataset is not None:\n view = state.dataset.view()\n else:\n return []\n\n return view._get_facets()\n\n def on_set_facets(self, facets):\n \"\"\"Sets the facets for the current state.\n\n Args:\n facets: the facets string\n \"\"\"\n _, value = facets.split(\".\")\n state = fos.StateDescription.from_dict(self.state)\n state.view = state.dataset.view().match_tag(value)\n self.state = state.serialize()\n emit(\"update\", self.state, broadcast=True, include_self=True)\n\n\ndef _get_distributions(view, group):\n pipeline = DISTRIBUTION_PIPELINES[group]\n\n # we add a sub-pipeline for each numeric as it looks like multiple\n # buckets in a single pipeline is not supported\n if group == SCALARS:\n _numeric_distribution_pipelines(view, pipeline)\n\n result = list(view.aggregate(pipeline))\n\n if group in {LABELS, SCALARS}:\n new_result = []\n for f in result[0].values():\n new_result += f\n result = new_result\n\n if group != SCALARS:\n for idx, dist in enumerate(result):\n result[idx][\"data\"] = sorted(\n result[idx][\"data\"], key=lambda c: c[\"count\"], reverse=True\n )\n\n return sorted(result, key=lambda d: d[\"name\"])\n\n\ndef _numeric_bounds(view, numerics):\n bounds_pipeline = [{\"$facet\": {}}]\n for idx, (k, v) in enumerate(numerics.items()):\n bounds_pipeline[0][\"$facet\"][\"numeric-%d\" % idx] = [\n {\n \"$group\": {\n \"_id\": k,\n \"min\": {\"$min\": \"$%s\" % k},\n \"max\": {\"$max\": \"$%s\" % k},\n },\n }\n ]\n\n return list(view.aggregate(bounds_pipeline))[0] if len(numerics) else {}\n\n\ndef _numeric_distribution_pipelines(view, pipeline, buckets=50):\n numerics = view._dataset.get_field_schema(ftype=fof.IntField)\n numerics.update(view._dataset.get_field_schema(ftype=fof.FloatField))\n\n # here we query the min and max for each numeric field\n # unfortunately, it looks like this has to be a separate query\n bounds = _numeric_bounds(view, numerics)\n\n # for each numeric field, build the boundaries array with the\n # min/max results when adding the field's sub-pipeline\n for idx, (k, v) in enumerate(numerics.items()):\n sub_pipeline = \"numeric-%d\" % idx\n field_bounds = bounds[sub_pipeline][0]\n mn = field_bounds[\"min\"]\n mx = field_bounds[\"max\"]\n step = (mx - mn) / buckets\n boundaries = [mn + step * s for s in range(0, buckets)]\n\n pipeline[0][\"$facet\"][sub_pipeline] = [\n {\n \"$bucket\": {\n \"groupBy\": \"$%s\" % k,\n \"boundaries\": boundaries,\n \"default\": \"null\",\n \"output\": {\"count\": {\"$sum\": 1}},\n }\n },\n {\n \"$group\": {\n \"_id\": k,\n \"data\": {\n \"$push\": {\n \"key\": {\n \"$cond\": [\n {\"$ne\": [\"$_id\", \"null\"]},\n {\"$add\": [\"$_id\", step / 2]},\n \"null\",\n ]\n },\n \"count\": 
\"$count\",\n }\n },\n }\n },\n {\n \"$project\": {\n \"name\": k,\n \"type\": v.__class__.__name__[\n : -len(\"Field\") # grab field type from the class\n ].lower(),\n \"data\": \"$data\",\n }\n },\n ]\n\n\nsocketio.on_namespace(StateController(\"/state\"))\n\n\nif __name__ == \"__main__\":\n socketio.run(app, debug=True)\n", "path": "fiftyone/server/main.py"}]} | 3,540 | 215 |
gh_patches_debug_9780 | rasdani/github-patches | git_diff | sunpy__sunpy-4309 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
QueryResponse.show not flexible enough
It would be nice if one could change the columns that will be displayed.
Compare the function `display_entries` of my database package to get an idea of what I imagine (implementation https://github.com/derdon/sunpy/blob/database/sunpy/database/tables.py#L547, test at https://github.com/derdon/sunpy/blob/database/sunpy/tests/database/test_tables.py#L302).
So I'd like the signature of `QueryResponse.show` to be `self, columns=['Start time', 'End time', 'Source', 'Instrument', 'Type']`. This way, the default behaviour will not change from the current implementation.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `sunpy/net/base_client.py`
Content:
```
1 import importlib
2 from abc import ABC, abstractmethod
3 from textwrap import dedent
4 from collections.abc import Sequence
5
6 from astropy.table import Table
7
8 from sunpy.util.util import get_width
9
10 __all__ = ['BaseQueryResponse', 'BaseClient']
11
12
13 class BaseQueryResponse(Sequence):
14 """
15 An Abstract Base Class for results returned from BaseClient.
16
17 Notes
18 -----
19 * A QueryResponse object must be able to be instantiated with only one
20 iterable argument. (i.e. the ``__init__`` must only have one required
21 argument).
22 * The `client` property must be settable.
23 * The base class does not prescribe how you store the results from your
24 client, only that it must be possible to represent them as an astropy
25 table in the ``build_table`` method.
26 * `__getitem__` **must** return an instance of the type it was called on.
27 I.e. it must always return an object of ``type(self)``.
28
29 """
30
31 @abstractmethod
32 def build_table(self):
33 """
34 Return an `astropy.table.Table` representation of the query response.
35 """
36
37 @property
38 @abstractmethod
39 def client(self):
40 """
41 An instance of `BaseClient` used to generate the results.
42
43 Generally this is used to fetch the results later.
44
45 .. note::
46
47 In general, this doesn't have to be the same instance of
48 ``BaseClient``, this is left to the client developer. If there is a
49 significant connection overhead in creating an instance of a client
50 you might want it to be the same instance as used for the search.
51 """
52
53 @client.setter
54 @abstractmethod
55 def client(self, value):
56 pass
57
58 @property
59 @abstractmethod
60 def blocks(self):
61 """
62 A `collections.abc.Sequence` object which contains the records
63 contained within the Query Response.
64 """
65
66 @abstractmethod
67 def response_block_properties(self):
68 """
69 Returns a set of class attributes on all the response blocks.
70
71 Returns
72 -------
73 s : `set`
74 List of strings, containing attribute names in the response blocks.
75 """
76
77 def __str__(self):
78 """Print out human-readable summary of records retrieved"""
79 return '\n'.join(self.build_table().pformat(show_dtype=False))
80
81 def __repr__(self):
82 """Print out human-readable summary of records retrieved"""
83 return object.__repr__(self) + "\n" + str(self)
84
85 def _repr_html_(self):
86 return self.build_table()._repr_html_()
87
88
89 def _print_client(client, html=False):
90 """
91 Given a BaseClient instance will print out each registered attribute.
92
93 Parameters
94 ----------
95 client : `sunpy.net.base_client.BaseClient`
96 The instance class to print for.
97 html : bool
98 Will return a html table instead.
99
100 Returns
101 -------
102 `str`
103 String with the client.
104 """
105 width = -1 if html else get_width()
106 class_name = f"{client.__module__+'.' or ''}{client.__class__.__name__}"
107 attrs = client.register_values()
108 lines = []
109 t = Table(names=["Attr Type", "Name", "Description"],
110 dtype=["U80", "U80", "U80"])
111 for client_key in attrs.keys():
112 for name, desc in attrs[client_key]:
113 t.add_row((client_key.__name__, name, desc))
114 lines = [class_name, dedent(client.__doc__.partition("\n\n")[0])]
115 if html:
116 lines = [f"<p>{line}</p>" for line in lines]
117 lines.extend(t.pformat_all(show_dtype=False, max_width=width, align="<", html=html))
118 return '\n'.join(lines)
119
120
121 class BaseClient(ABC):
122 """
123 This defines the Abstract Base Class for each download client.
124
125 The BaseClient has several abstract methods that ensure that any subclass enforces the bare minimum API.
126 These are `search`, `fetch` and `_can_handle_query`.
127 The last one ensures that each download client can be registered with Fido.
128
129 Most download clients should subclass `~sunpy.net.dataretriever.GenericClient`.
130 If the structure of `~sunpy.net.dataretriever.GenericClient`
131 is not useful you should use `~sunpy.net.BaseClient`.
132 `~sunpy.net.vso.VSOClient` and `~sunpy.net.jsoc.JSOCClient`
133 are examples of download clients that subclass ``BaseClient``.
134 """
135
136 _registry = dict()
137
138 def __init_subclass__(cls, *args, **kwargs):
139 """
140 An __init_subclass__ hook initializes all of the subclasses of a given class.
141 So for each subclass, it will call this block of code on import.
142 This replicates some metaclass magic without the need to be aware of metaclasses.
143 Here we use this to register each subclass in a dict that has the `_can_handle_query` attribute.
144 This is then passed into the UnifiedDownloaderFactory so we can register them.
145 This means that Fido can use the clients internally.
146 """
147 super().__init_subclass__(**kwargs)
148
149         # We do not want to register GenericClient since it's a dummy client.
150 if cls.__name__ in ('GenericClient'):
151 return
152
153 cls._registry[cls] = cls._can_handle_query
154
155 if hasattr(cls, "_attrs_module"):
156 from sunpy.net import attrs
157
158 name, module = cls._attrs_module()
159 module_obj = importlib.import_module(module)
160
161 existing_mod = getattr(attrs, name, None)
162 if existing_mod and existing_mod is not module_obj:
163 raise NameError(f"{name} has already been registered as an attrs name.")
164
165 setattr(attrs, name, module_obj)
166
167 if name not in attrs.__all__:
168 attrs.__all__.append(name)
169
170         # Register client attrs after it has registered its own attrs
171 from sunpy.net import attr
172 values = cls.register_values()
173 # If the client has no support, we won't try to register attrs
174 if values:
175 attr.Attr.update_values({cls: values})
176
177 def __repr__(self):
178 """
179 Returns the normal repr plus the pretty client __str__.
180 """
181 return object.__repr__(self) + "\n" + str(self)
182
183 def __str__(self):
184 """
185 This enables the "pretty" printing of BaseClient.
186 """
187 return _print_client(self)
188
189 def _repr_html_(self):
190 """
191 This enables the "pretty" printing of the BaseClient with html.
192 """
193 return _print_client(self, html=True)
194
195 @abstractmethod
196 def search(self, *args, **kwargs):
197 """
198 This enables the user to search for data using the client.
199
200 Must return a subclass of `BaseQueryResponse`.
201 """
202
203 @abstractmethod
204 def fetch(self, *query_results, path=None, overwrite=False, progress=True,
205 max_conn=5, downloader=None, wait=True, **kwargs):
206 """
207 This enables the user to fetch the data using the client, after a search.
208
209 Parameters
210 ----------
211 query_results:
212 Results to download.
213 path : `str` or `pathlib.Path`, optional
214 Path to the download directory
215 overwrite : `bool`, optional
216 Replace files with the same name if True.
217 progress : `bool`, optional
218 Print progress info to terminal.
219 max_conns : `int`, optional
220 Maximum number of download connections.
221 downloader : `parfive.Downloader`, optional
222 The download manager to use.
223 wait : `bool`, optional
224 If `False` ``downloader.download()`` will not be called. Only has
225 any effect if `downloader` is not `None`.
226
227 Returns
228 -------
229 `parfive.Results`
230 The results object, can be `None` if ``wait`` is `False`.
231 """
232
233 @classmethod
234 @abstractmethod
235 def _can_handle_query(cls, *query):
236 """
237 This enables the client to register what kind of searches it can handle, to prevent Fido
238 using the incorrect client.
239 """
240
241 @staticmethod
242 def check_attr_types_in_query(query, required_attrs={}, optional_attrs={}):
243 """
244         Check a query against required and optional attributes.
245
246 Returns `True` if *query* contains all the attrs in *required_attrs*,
247 and if *query* contains only attrs in both *required_attrs* and *optional_attrs*.
248 """
249 query_attrs = {type(x) for x in query}
250 all_attrs = required_attrs.union(optional_attrs)
251
252 return required_attrs.issubset(query_attrs) and query_attrs.issubset(all_attrs)
253
254 @classmethod
255 def register_values(cls, *query):
256 """
257 This enables the client to register what kind of Attrs it can use directly.
258
259 Returns
260 -------
261 `dict`
262 A dictionary with key values of Attrs and the values are a tuple of
263 ("Attr Type", "Name", "Description").
264 """
265 return {}
266
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/sunpy/net/base_client.py b/sunpy/net/base_client.py
--- a/sunpy/net/base_client.py
+++ b/sunpy/net/base_client.py
@@ -85,6 +85,25 @@
def _repr_html_(self):
return self.build_table()._repr_html_()
+ def show(self, *cols):
+ """
+ Returns response tables with desired columns for the Query.
+
+ Parameters
+ ----------
+ \\*cols : `tuple`
+ Name of columns to be shown.
+
+ Returns
+ -------
+ `astropy.table.Table`
+ A table showing values for specified columns.
+ """
+ table = self.build_table()
+ if len(cols) == 0:
+ return table
+ return table[list(cols)]
+
def _print_client(client, html=False):
"""
| {"golden_diff": "diff --git a/sunpy/net/base_client.py b/sunpy/net/base_client.py\n--- a/sunpy/net/base_client.py\n+++ b/sunpy/net/base_client.py\n@@ -85,6 +85,25 @@\n def _repr_html_(self):\n return self.build_table()._repr_html_()\n \n+ def show(self, *cols):\n+ \"\"\"\n+ Returns response tables with desired columns for the Query.\n+\n+ Parameters\n+ ----------\n+ \\\\*cols : `tuple`\n+ Name of columns to be shown.\n+\n+ Returns\n+ -------\n+ `astropy.table.Table`\n+ A table showing values for specified columns.\n+ \"\"\"\n+ table = self.build_table()\n+ if len(cols) == 0:\n+ return table\n+ return table[list(cols)]\n+\n \n def _print_client(client, html=False):\n \"\"\"\n", "issue": "QueryResponse.show not flexible enough\nIt would be nice if one could change the columns that will be displayed. \r\n\r\nCompare the function `display_entries` of my database package to get an idea of what I imagine (implementation https://github.com/derdon/sunpy/blob/database/sunpy/database/tables.py#L547, test at https://github.com/derdon/sunpy/blob/database/sunpy/tests/database/test_tables.py#L302). \r\n\r\nSo I'd like the signature of `QueryResponse.show` to be `self, columns=['Start time', 'End time', 'Source','Instrument', 'Type']`. This way, the default behaviour will not change to the current implementation.\r\n\n", "before_files": [{"content": "import importlib\nfrom abc import ABC, abstractmethod\nfrom textwrap import dedent\nfrom collections.abc import Sequence\n\nfrom astropy.table import Table\n\nfrom sunpy.util.util import get_width\n\n__all__ = ['BaseQueryResponse', 'BaseClient']\n\n\nclass BaseQueryResponse(Sequence):\n \"\"\"\n An Abstract Base Class for results returned from BaseClient.\n\n Notes\n -----\n * A QueryResponse object must be able to be instantiated with only one\n iterable argument. (i.e. the ``__init__`` must only have one required\n argument).\n * The `client` property must be settable.\n * The base class does not prescribe how you store the results from your\n client, only that it must be possible to represent them as an astropy\n table in the ``build_table`` method.\n * `__getitem__` **must** return an instance of the type it was called on.\n I.e. it must always return an object of ``type(self)``.\n\n \"\"\"\n\n @abstractmethod\n def build_table(self):\n \"\"\"\n Return an `astropy.table.Table` representation of the query response.\n \"\"\"\n\n @property\n @abstractmethod\n def client(self):\n \"\"\"\n An instance of `BaseClient` used to generate the results.\n\n Generally this is used to fetch the results later.\n\n .. note::\n\n In general, this doesn't have to be the same instance of\n ``BaseClient``, this is left to the client developer. 
If there is a\n significant connection overhead in creating an instance of a client\n you might want it to be the same instance as used for the search.\n \"\"\"\n\n @client.setter\n @abstractmethod\n def client(self, value):\n pass\n\n @property\n @abstractmethod\n def blocks(self):\n \"\"\"\n A `collections.abc.Sequence` object which contains the records\n contained within the Query Response.\n \"\"\"\n\n @abstractmethod\n def response_block_properties(self):\n \"\"\"\n Returns a set of class attributes on all the response blocks.\n\n Returns\n -------\n s : `set`\n List of strings, containing attribute names in the response blocks.\n \"\"\"\n\n def __str__(self):\n \"\"\"Print out human-readable summary of records retrieved\"\"\"\n return '\\n'.join(self.build_table().pformat(show_dtype=False))\n\n def __repr__(self):\n \"\"\"Print out human-readable summary of records retrieved\"\"\"\n return object.__repr__(self) + \"\\n\" + str(self)\n\n def _repr_html_(self):\n return self.build_table()._repr_html_()\n\n\ndef _print_client(client, html=False):\n \"\"\"\n Given a BaseClient instance will print out each registered attribute.\n\n Parameters\n ----------\n client : `sunpy.net.base_client.BaseClient`\n The instance class to print for.\n html : bool\n Will return a html table instead.\n\n Returns\n -------\n `str`\n String with the client.\n \"\"\"\n width = -1 if html else get_width()\n class_name = f\"{client.__module__+'.' or ''}{client.__class__.__name__}\"\n attrs = client.register_values()\n lines = []\n t = Table(names=[\"Attr Type\", \"Name\", \"Description\"],\n dtype=[\"U80\", \"U80\", \"U80\"])\n for client_key in attrs.keys():\n for name, desc in attrs[client_key]:\n t.add_row((client_key.__name__, name, desc))\n lines = [class_name, dedent(client.__doc__.partition(\"\\n\\n\")[0])]\n if html:\n lines = [f\"<p>{line}</p>\" for line in lines]\n lines.extend(t.pformat_all(show_dtype=False, max_width=width, align=\"<\", html=html))\n return '\\n'.join(lines)\n\n\nclass BaseClient(ABC):\n \"\"\"\n This defines the Abstract Base Class for each download client.\n\n The BaseClient has several abstract methods that ensure that any subclass enforces the bare minimum API.\n These are `search`, `fetch` and `_can_handle_query`.\n The last one ensures that each download client can be registered with Fido.\n\n Most download clients should subclass `~sunpy.net.dataretriever.GenericClient`.\n If the structure of `~sunpy.net.dataretriever.GenericClient`\n is not useful you should use `~sunpy.net.BaseClient`.\n `~sunpy.net.vso.VSOClient` and `~sunpy.net.jsoc.JSOCClient`\n are examples of download clients that subclass ``BaseClient``.\n \"\"\"\n\n _registry = dict()\n\n def __init_subclass__(cls, *args, **kwargs):\n \"\"\"\n An __init_subclass__ hook initializes all of the subclasses of a given class.\n So for each subclass, it will call this block of code on import.\n This replicates some metaclass magic without the need to be aware of metaclasses.\n Here we use this to register each subclass in a dict that has the `_can_handle_query` attribute.\n This is then passed into the UnifiedDownloaderFactory so we can register them.\n This means that Fido can use the clients internally.\n \"\"\"\n super().__init_subclass__(**kwargs)\n\n # We do not want to register GenericClient since its a dummy client.\n if cls.__name__ in ('GenericClient'):\n return\n\n cls._registry[cls] = cls._can_handle_query\n\n if hasattr(cls, \"_attrs_module\"):\n from sunpy.net import attrs\n\n name, module = 
cls._attrs_module()\n module_obj = importlib.import_module(module)\n\n existing_mod = getattr(attrs, name, None)\n if existing_mod and existing_mod is not module_obj:\n raise NameError(f\"{name} has already been registered as an attrs name.\")\n\n setattr(attrs, name, module_obj)\n\n if name not in attrs.__all__:\n attrs.__all__.append(name)\n\n # Register client attrs after it has regsitered its own attrs\n from sunpy.net import attr\n values = cls.register_values()\n # If the client has no support, we won't try to register attrs\n if values:\n attr.Attr.update_values({cls: values})\n\n def __repr__(self):\n \"\"\"\n Returns the normal repr plus the pretty client __str__.\n \"\"\"\n return object.__repr__(self) + \"\\n\" + str(self)\n\n def __str__(self):\n \"\"\"\n This enables the \"pretty\" printing of BaseClient.\n \"\"\"\n return _print_client(self)\n\n def _repr_html_(self):\n \"\"\"\n This enables the \"pretty\" printing of the BaseClient with html.\n \"\"\"\n return _print_client(self, html=True)\n\n @abstractmethod\n def search(self, *args, **kwargs):\n \"\"\"\n This enables the user to search for data using the client.\n\n Must return a subclass of `BaseQueryResponse`.\n \"\"\"\n\n @abstractmethod\n def fetch(self, *query_results, path=None, overwrite=False, progress=True,\n max_conn=5, downloader=None, wait=True, **kwargs):\n \"\"\"\n This enables the user to fetch the data using the client, after a search.\n\n Parameters\n ----------\n query_results:\n Results to download.\n path : `str` or `pathlib.Path`, optional\n Path to the download directory\n overwrite : `bool`, optional\n Replace files with the same name if True.\n progress : `bool`, optional\n Print progress info to terminal.\n max_conns : `int`, optional\n Maximum number of download connections.\n downloader : `parfive.Downloader`, optional\n The download manager to use.\n wait : `bool`, optional\n If `False` ``downloader.download()`` will not be called. 
Only has\n any effect if `downloader` is not `None`.\n\n Returns\n -------\n `parfive.Results`\n The results object, can be `None` if ``wait`` is `False`.\n \"\"\"\n\n @classmethod\n @abstractmethod\n def _can_handle_query(cls, *query):\n \"\"\"\n This enables the client to register what kind of searches it can handle, to prevent Fido\n using the incorrect client.\n \"\"\"\n\n @staticmethod\n def check_attr_types_in_query(query, required_attrs={}, optional_attrs={}):\n \"\"\"\n Check a query againsted required and optional attributes.\n\n Returns `True` if *query* contains all the attrs in *required_attrs*,\n and if *query* contains only attrs in both *required_attrs* and *optional_attrs*.\n \"\"\"\n query_attrs = {type(x) for x in query}\n all_attrs = required_attrs.union(optional_attrs)\n\n return required_attrs.issubset(query_attrs) and query_attrs.issubset(all_attrs)\n\n @classmethod\n def register_values(cls, *query):\n \"\"\"\n This enables the client to register what kind of Attrs it can use directly.\n\n Returns\n -------\n `dict`\n A dictionary with key values of Attrs and the values are a tuple of\n (\"Attr Type\", \"Name\", \"Description\").\n \"\"\"\n return {}\n", "path": "sunpy/net/base_client.py"}], "after_files": [{"content": "import importlib\nfrom abc import ABC, abstractmethod\nfrom textwrap import dedent\nfrom collections.abc import Sequence\n\nfrom astropy.table import Table\n\nfrom sunpy.util.util import get_width\n\n__all__ = ['BaseQueryResponse', 'BaseClient']\n\n\nclass BaseQueryResponse(Sequence):\n \"\"\"\n An Abstract Base Class for results returned from BaseClient.\n\n Notes\n -----\n * A QueryResponse object must be able to be instantiated with only one\n iterable argument. (i.e. the ``__init__`` must only have one required\n argument).\n * The `client` property must be settable.\n * The base class does not prescribe how you store the results from your\n client, only that it must be possible to represent them as an astropy\n table in the ``build_table`` method.\n * `__getitem__` **must** return an instance of the type it was called on.\n I.e. it must always return an object of ``type(self)``.\n\n \"\"\"\n\n @abstractmethod\n def build_table(self):\n \"\"\"\n Return an `astropy.table.Table` representation of the query response.\n \"\"\"\n\n @property\n @abstractmethod\n def client(self):\n \"\"\"\n An instance of `BaseClient` used to generate the results.\n\n Generally this is used to fetch the results later.\n\n .. note::\n\n In general, this doesn't have to be the same instance of\n ``BaseClient``, this is left to the client developer. 
If there is a\n significant connection overhead in creating an instance of a client\n you might want it to be the same instance as used for the search.\n \"\"\"\n\n @client.setter\n @abstractmethod\n def client(self, value):\n pass\n\n @property\n @abstractmethod\n def blocks(self):\n \"\"\"\n A `collections.abc.Sequence` object which contains the records\n contained within the Query Response.\n \"\"\"\n\n @abstractmethod\n def response_block_properties(self):\n \"\"\"\n Returns a set of class attributes on all the response blocks.\n\n Returns\n -------\n s : `set`\n List of strings, containing attribute names in the response blocks.\n \"\"\"\n\n def __str__(self):\n \"\"\"Print out human-readable summary of records retrieved\"\"\"\n return '\\n'.join(self.build_table().pformat(show_dtype=False))\n\n def __repr__(self):\n \"\"\"Print out human-readable summary of records retrieved\"\"\"\n return object.__repr__(self) + \"\\n\" + str(self)\n\n def _repr_html_(self):\n return self.build_table()._repr_html_()\n\n def show(self, *cols):\n \"\"\"\n Returns response tables with desired columns for the Query.\n\n Parameters\n ----------\n \\\\*cols : `tuple`\n Name of columns to be shown.\n\n Returns\n -------\n `astropy.table.Table`\n A table showing values for specified columns.\n \"\"\"\n table = self.build_table()\n if len(cols) == 0:\n return table\n return table[list(cols)]\n\n\ndef _print_client(client, html=False):\n \"\"\"\n Given a BaseClient instance will print out each registered attribute.\n\n Parameters\n ----------\n client : `sunpy.net.base_client.BaseClient`\n The instance class to print for.\n html : bool\n Will return a html table instead.\n\n Returns\n -------\n `str`\n String with the client.\n \"\"\"\n width = -1 if html else get_width()\n class_name = f\"{client.__module__+'.' 
or ''}{client.__class__.__name__}\"\n attrs = client.register_values()\n lines = []\n t = Table(names=[\"Attr Type\", \"Name\", \"Description\"],\n dtype=[\"U80\", \"U80\", \"U80\"])\n for client_key in attrs.keys():\n for name, desc in attrs[client_key]:\n t.add_row((client_key.__name__, name, desc))\n lines = [class_name, dedent(client.__doc__.partition(\"\\n\\n\")[0])]\n if html:\n lines = [f\"<p>{line}</p>\" for line in lines]\n lines.extend(t.pformat_all(show_dtype=False, max_width=width, align=\"<\", html=html))\n return '\\n'.join(lines)\n\n\nclass BaseClient(ABC):\n \"\"\"\n This defines the Abstract Base Class for each download client.\n\n The BaseClient has several abstract methods that ensure that any subclass enforces the bare minimum API.\n These are `search`, `fetch` and `_can_handle_query`.\n The last one ensures that each download client can be registered with Fido.\n\n Most download clients should subclass `~sunpy.net.dataretriever.GenericClient`.\n If the structure of `~sunpy.net.dataretriever.GenericClient`\n is not useful you should use `~sunpy.net.BaseClient`.\n `~sunpy.net.vso.VSOClient` and `~sunpy.net.jsoc.JSOCClient`\n are examples of download clients that subclass ``BaseClient``.\n \"\"\"\n\n _registry = dict()\n\n def __init_subclass__(cls, *args, **kwargs):\n \"\"\"\n An __init_subclass__ hook initializes all of the subclasses of a given class.\n So for each subclass, it will call this block of code on import.\n This replicates some metaclass magic without the need to be aware of metaclasses.\n Here we use this to register each subclass in a dict that has the `_can_handle_query` attribute.\n This is then passed into the UnifiedDownloaderFactory so we can register them.\n This means that Fido can use the clients internally.\n \"\"\"\n super().__init_subclass__(**kwargs)\n\n # We do not want to register GenericClient since its a dummy client.\n if cls.__name__ in ('GenericClient'):\n return\n\n cls._registry[cls] = cls._can_handle_query\n\n if hasattr(cls, \"_attrs_module\"):\n from sunpy.net import attrs\n\n name, module = cls._attrs_module()\n module_obj = importlib.import_module(module)\n\n existing_mod = getattr(attrs, name, None)\n if existing_mod and existing_mod is not module_obj:\n raise NameError(f\"{name} has already been registered as an attrs name.\")\n\n setattr(attrs, name, module_obj)\n\n if name not in attrs.__all__:\n attrs.__all__.append(name)\n\n # Register client attrs after it has regsitered its own attrs\n from sunpy.net import attr\n values = cls.register_values()\n # If the client has no support, we won't try to register attrs\n if values:\n attr.Attr.update_values({cls: values})\n\n def __repr__(self):\n \"\"\"\n Returns the normal repr plus the pretty client __str__.\n \"\"\"\n return object.__repr__(self) + \"\\n\" + str(self)\n\n def __str__(self):\n \"\"\"\n This enables the \"pretty\" printing of BaseClient.\n \"\"\"\n return _print_client(self)\n\n def _repr_html_(self):\n \"\"\"\n This enables the \"pretty\" printing of the BaseClient with html.\n \"\"\"\n return _print_client(self, html=True)\n\n @abstractmethod\n def search(self, *args, **kwargs):\n \"\"\"\n This enables the user to search for data using the client.\n\n Must return a subclass of `BaseQueryResponse`.\n \"\"\"\n\n @abstractmethod\n def fetch(self, *query_results, path=None, overwrite=False, progress=True,\n max_conn=5, downloader=None, wait=True, **kwargs):\n \"\"\"\n This enables the user to fetch the data using the client, after a search.\n\n Parameters\n 
----------\n query_results:\n Results to download.\n path : `str` or `pathlib.Path`, optional\n Path to the download directory\n overwrite : `bool`, optional\n Replace files with the same name if True.\n progress : `bool`, optional\n Print progress info to terminal.\n max_conns : `int`, optional\n Maximum number of download connections.\n downloader : `parfive.Downloader`, optional\n The download manager to use.\n wait : `bool`, optional\n If `False` ``downloader.download()`` will not be called. Only has\n any effect if `downloader` is not `None`.\n\n Returns\n -------\n `parfive.Results`\n The results object, can be `None` if ``wait`` is `False`.\n \"\"\"\n\n @classmethod\n @abstractmethod\n def _can_handle_query(cls, *query):\n \"\"\"\n This enables the client to register what kind of searches it can handle, to prevent Fido\n using the incorrect client.\n \"\"\"\n\n @staticmethod\n def check_attr_types_in_query(query, required_attrs={}, optional_attrs={}):\n \"\"\"\n Check a query againsted required and optional attributes.\n\n Returns `True` if *query* contains all the attrs in *required_attrs*,\n and if *query* contains only attrs in both *required_attrs* and *optional_attrs*.\n \"\"\"\n query_attrs = {type(x) for x in query}\n all_attrs = required_attrs.union(optional_attrs)\n\n return required_attrs.issubset(query_attrs) and query_attrs.issubset(all_attrs)\n\n @classmethod\n def register_values(cls, *query):\n \"\"\"\n This enables the client to register what kind of Attrs it can use directly.\n\n Returns\n -------\n `dict`\n A dictionary with key values of Attrs and the values are a tuple of\n (\"Attr Type\", \"Name\", \"Description\").\n \"\"\"\n return {}\n", "path": "sunpy/net/base_client.py"}]} | 3,085 | 193 |
gh_patches_debug_749 | rasdani/github-patches | git_diff | scikit-hep__awkward-2213 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Example in merge_union_of_records is a no-op
### Which documentation?
Python docstrings
### What needs to be documented?
In the example given, the result type is the same as the input type:
<details>
<summary> Quote from the docstring </summary>
```python
Simplifies unions of records, e.g.
>>> array = ak.Array([{"a": 1}, {"b": 2}])
into records of options, i.e.
>>> ak.merge_union_of_records(array)
<Array [{a: 1, b: None}, {a: None, ...}] type='2 * {a: ?int64, b: ?int64}'>
```
</details>
```python
>>> import awkward as ak
>>> ak.Array([{"a": 1}, {"b": 2}]).type.show()
2 * {
a: ?int64,
b: ?int64
}
>>> ak.merge_union_of_records(ak.Array([{"a": 1}, {"b": 2}])).type.show()
2 * {
a: ?int64,
b: ?int64
}
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/awkward/operations/ak_merge_union_of_records.py`
Content:
```
1 # BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE
2
3
4 import awkward as ak
5 from awkward._nplikes.numpylike import NumpyMetadata
6
7 np = NumpyMetadata.instance()
8 cpu = ak._backends.NumpyBackend.instance()
9
10
11 def merge_union_of_records(array, axis=-1, *, highlevel=True, behavior=None):
12 """
13 Args:
14 array: Array-like data (anything #ak.to_layout recognizes).
15 axis (int): The dimension at which this operation is applied.
16 The outermost dimension is `0`, followed by `1`, etc., and negative
17 values count backward from the innermost: `-1` is the innermost
18 dimension, `-2` is the next level up, etc.
19 highlevel (bool): If True, return an #ak.Array; otherwise, return
20 a low-level #ak.contents.Content subclass.
21 behavior (None or dict): Custom #ak.behavior for the output array, if
22 high-level.
23
24 Simplifies unions of records, e.g.
25
26 >>> array = ak.Array([{"a": 1}, {"b": 2}])
27
28 into records of options, i.e.
29
30 >>> ak.merge_union_of_records(array)
31 <Array [{a: 1, b: None}, {a: None, ...}] type='2 * {a: ?int64, b: ?int64}'>
32 """
33 with ak._errors.OperationErrorContext(
34 "ak.merge_union_of_records",
35 {"array": array, "axis": axis, "highlevel": highlevel, "behavior": behavior},
36 ):
37 return _impl(array, axis, highlevel, behavior)
38
39
40 def _impl(array, axis, highlevel, behavior):
41 behavior = ak._util.behavior_of(array, behavior=behavior)
42 layout = ak.to_layout(array, allow_record=False)
43
44 def apply_displace_index(layout, backend, **kwargs):
45 if layout.is_record:
46 return layout
47 elif layout.is_option and layout.content.is_record:
48 raise ak._errors.wrap_error(
49 TypeError(
50 "optional records cannot be merged by this function. First call `ak.merge_option_of_records` "
51 "to convert these into records of options."
52 )
53 )
54 elif layout.is_indexed and layout.content.is_record:
55 record = layout.content
56 # Transpose index-of-record to record-of-index
57 return ak.contents.RecordArray(
58 [
59 ak.contents.IndexedArray.simplified(
60 layout.index, c, parameters=layout._parameters
61 )
62 for c in record.contents
63 ],
64 record.fields,
65 record.length,
66 backend=backend,
67 )
68 else:
69 raise ak._errors.wrap_error(TypeError(layout))
70
71 def apply(layout, depth, backend, **kwargs):
72 posaxis = ak._util.maybe_posaxis(layout, axis, depth)
73 if depth < posaxis + 1 and layout.is_leaf:
74 raise ak._errors.wrap_error(
75 np.AxisError(f"axis={axis} exceeds the depth of this array ({depth})")
76 )
77 elif depth == posaxis + 1 and layout.is_union:
78 if all(x.is_record for x in layout.contents):
79 # First, find all ordered fields, regularising any index-of-record
80 # such that we have record-of-index
81 seen_fields = set()
82 all_fields = []
83 regularised_contents = []
84 for content in layout.contents:
85 # Ensure that we have record-of-index
86 regularised_content = ak._do.recursively_apply(
87 content, apply_displace_index
88 )
89 regularised_contents.append(regularised_content)
90
91 # Find new fields
92 for field in regularised_content.fields:
93 if field not in seen_fields:
94 seen_fields.add(field)
95 all_fields.append(field)
96
97 # Build unions for each field
98 outer_field_contents = []
99 for field in all_fields:
100 field_tags = backend.index_nplike.asarray(layout.tags, copy=True)
101 field_index = backend.index_nplike.asarray(layout.index, copy=True)
102
103 # Build contents for union representing current field
104 field_contents = [
105 c.content(field)
106 for c in regularised_contents
107 if c.has_field(field)
108 ]
109
110 # Find the best location for option type.
111 # We will potentially have fewer contents in this per-field union
112 # than the original outer union-of-records, because some recordarrays
113 # may not have the given field.
114 tag_for_missing = 0
115 for i, content in enumerate(field_contents):
116 if content.is_option:
117 tag_for_missing = i
118 break
119
120 # If at least one recordarray doesn't have this field, we add
121 # a special option
122 if len(field_contents) < len(regularised_contents):
123 # Make the tagged content an option, growing by one to ensure we
124 # have a known `None` value to index into
125 tagged_content = field_contents[tag_for_missing]
126 indexedoption_index = backend.index_nplike.arange(
127 tagged_content.length + 1, dtype=np.int64
128 )
129 indexedoption_index[tagged_content.length] = -1
130 field_contents[
131 tag_for_missing
132 ] = ak.contents.IndexedOptionArray.simplified(
133 ak.index.Index64(indexedoption_index), tagged_content
134 )
135
136 # Now build contents for union, by looping over outermost index
137 # Overwrite tags to adjust for new contents length
138 # and use the tagged content for any missing values
139 k = 0
140 for j, content in enumerate(regularised_contents):
141 tag_is_j = field_tags == j
142
143 if content.has_field(field):
144 # Rewrite tags to account for missing fields
145 field_tags[tag_is_j] = k
146 k += 1
147
148 else:
149 # Rewrite tags to point to option content
150 field_tags[tag_is_j] = tag_for_missing
151 # Point each value to missing value
152 field_index[tag_is_j] = (
153 field_contents[tag_for_missing].length - 1
154 )
155
156 outer_field_contents.append(
157 ak.contents.UnionArray.simplified(
158 ak.index.Index8(field_tags),
159 ak.index.Index64(field_index),
160 field_contents,
161 )
162 )
163 return ak.contents.RecordArray(
164 outer_field_contents, all_fields, backend=backend
165 )
166
167 out = ak._do.recursively_apply(layout, apply)
168 return ak._util.wrap(out, highlevel=highlevel, behavior=behavior)
169
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/awkward/operations/ak_merge_union_of_records.py b/src/awkward/operations/ak_merge_union_of_records.py
--- a/src/awkward/operations/ak_merge_union_of_records.py
+++ b/src/awkward/operations/ak_merge_union_of_records.py
@@ -23,7 +23,7 @@
Simplifies unions of records, e.g.
- >>> array = ak.Array([{"a": 1}, {"b": 2}])
+ >>> array = ak.concatenate(([{"a": 1}], [{"b": 2}]))
into records of options, i.e.
| {"golden_diff": "diff --git a/src/awkward/operations/ak_merge_union_of_records.py b/src/awkward/operations/ak_merge_union_of_records.py\n--- a/src/awkward/operations/ak_merge_union_of_records.py\n+++ b/src/awkward/operations/ak_merge_union_of_records.py\n@@ -23,7 +23,7 @@\n \n Simplifies unions of records, e.g.\n \n- >>> array = ak.Array([{\"a\": 1}, {\"b\": 2}])\n+ >>> array = ak.concatenate(([{\"a\": 1}], [{\"b\": 2}]))\n \n into records of options, i.e.\n", "issue": "Example in merge_union_of_records is a no-op\n### Which documentation?\n\nPython docstrings\n\n### What needs to be documented?\n\nIn the example given, the result type is the same as the input type:\r\n\r\n<details>\r\n<summary> Quote from the docstring </summary>\r\n\r\n```python\r\nSimplifies unions of records, e.g.\r\n\r\n >>> array = ak.Array([{\"a\": 1}, {\"b\": 2}])\r\n\r\ninto records of options, i.e.\r\n\r\n >>> ak.merge_union_of_records(array)\r\n <Array [{a: 1, b: None}, {a: None, ...}] type='2 * {a: ?int64, b: ?int64}'>\r\n```\r\n\r\n</details>\r\n\r\n```python\r\n>>> import awkward as ak\r\n>>> ak.Array([{\"a\": 1}, {\"b\": 2}]).type.show()\r\n2 * {\r\n a: ?int64,\r\n b: ?int64\r\n}\r\n>>> ak.merge_union_of_records(ak.Array([{\"a\": 1}, {\"b\": 2}])).type.show()\r\n2 * {\r\n a: ?int64,\r\n b: ?int64\r\n}\r\n```\n", "before_files": [{"content": "# BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE\n\n\nimport awkward as ak\nfrom awkward._nplikes.numpylike import NumpyMetadata\n\nnp = NumpyMetadata.instance()\ncpu = ak._backends.NumpyBackend.instance()\n\n\ndef merge_union_of_records(array, axis=-1, *, highlevel=True, behavior=None):\n \"\"\"\n Args:\n array: Array-like data (anything #ak.to_layout recognizes).\n axis (int): The dimension at which this operation is applied.\n The outermost dimension is `0`, followed by `1`, etc., and negative\n values count backward from the innermost: `-1` is the innermost\n dimension, `-2` is the next level up, etc.\n highlevel (bool): If True, return an #ak.Array; otherwise, return\n a low-level #ak.contents.Content subclass.\n behavior (None or dict): Custom #ak.behavior for the output array, if\n high-level.\n\n Simplifies unions of records, e.g.\n\n >>> array = ak.Array([{\"a\": 1}, {\"b\": 2}])\n\n into records of options, i.e.\n\n >>> ak.merge_union_of_records(array)\n <Array [{a: 1, b: None}, {a: None, ...}] type='2 * {a: ?int64, b: ?int64}'>\n \"\"\"\n with ak._errors.OperationErrorContext(\n \"ak.merge_union_of_records\",\n {\"array\": array, \"axis\": axis, \"highlevel\": highlevel, \"behavior\": behavior},\n ):\n return _impl(array, axis, highlevel, behavior)\n\n\ndef _impl(array, axis, highlevel, behavior):\n behavior = ak._util.behavior_of(array, behavior=behavior)\n layout = ak.to_layout(array, allow_record=False)\n\n def apply_displace_index(layout, backend, **kwargs):\n if layout.is_record:\n return layout\n elif layout.is_option and layout.content.is_record:\n raise ak._errors.wrap_error(\n TypeError(\n \"optional records cannot be merged by this function. 
First call `ak.merge_option_of_records` \"\n \"to convert these into records of options.\"\n )\n )\n elif layout.is_indexed and layout.content.is_record:\n record = layout.content\n # Transpose index-of-record to record-of-index\n return ak.contents.RecordArray(\n [\n ak.contents.IndexedArray.simplified(\n layout.index, c, parameters=layout._parameters\n )\n for c in record.contents\n ],\n record.fields,\n record.length,\n backend=backend,\n )\n else:\n raise ak._errors.wrap_error(TypeError(layout))\n\n def apply(layout, depth, backend, **kwargs):\n posaxis = ak._util.maybe_posaxis(layout, axis, depth)\n if depth < posaxis + 1 and layout.is_leaf:\n raise ak._errors.wrap_error(\n np.AxisError(f\"axis={axis} exceeds the depth of this array ({depth})\")\n )\n elif depth == posaxis + 1 and layout.is_union:\n if all(x.is_record for x in layout.contents):\n # First, find all ordered fields, regularising any index-of-record\n # such that we have record-of-index\n seen_fields = set()\n all_fields = []\n regularised_contents = []\n for content in layout.contents:\n # Ensure that we have record-of-index\n regularised_content = ak._do.recursively_apply(\n content, apply_displace_index\n )\n regularised_contents.append(regularised_content)\n\n # Find new fields\n for field in regularised_content.fields:\n if field not in seen_fields:\n seen_fields.add(field)\n all_fields.append(field)\n\n # Build unions for each field\n outer_field_contents = []\n for field in all_fields:\n field_tags = backend.index_nplike.asarray(layout.tags, copy=True)\n field_index = backend.index_nplike.asarray(layout.index, copy=True)\n\n # Build contents for union representing current field\n field_contents = [\n c.content(field)\n for c in regularised_contents\n if c.has_field(field)\n ]\n\n # Find the best location for option type.\n # We will potentially have fewer contents in this per-field union\n # than the original outer union-of-records, because some recordarrays\n # may not have the given field.\n tag_for_missing = 0\n for i, content in enumerate(field_contents):\n if content.is_option:\n tag_for_missing = i\n break\n\n # If at least one recordarray doesn't have this field, we add\n # a special option\n if len(field_contents) < len(regularised_contents):\n # Make the tagged content an option, growing by one to ensure we\n # have a known `None` value to index into\n tagged_content = field_contents[tag_for_missing]\n indexedoption_index = backend.index_nplike.arange(\n tagged_content.length + 1, dtype=np.int64\n )\n indexedoption_index[tagged_content.length] = -1\n field_contents[\n tag_for_missing\n ] = ak.contents.IndexedOptionArray.simplified(\n ak.index.Index64(indexedoption_index), tagged_content\n )\n\n # Now build contents for union, by looping over outermost index\n # Overwrite tags to adjust for new contents length\n # and use the tagged content for any missing values\n k = 0\n for j, content in enumerate(regularised_contents):\n tag_is_j = field_tags == j\n\n if content.has_field(field):\n # Rewrite tags to account for missing fields\n field_tags[tag_is_j] = k\n k += 1\n\n else:\n # Rewrite tags to point to option content\n field_tags[tag_is_j] = tag_for_missing\n # Point each value to missing value\n field_index[tag_is_j] = (\n field_contents[tag_for_missing].length - 1\n )\n\n outer_field_contents.append(\n ak.contents.UnionArray.simplified(\n ak.index.Index8(field_tags),\n ak.index.Index64(field_index),\n field_contents,\n )\n )\n return ak.contents.RecordArray(\n outer_field_contents, all_fields, 
backend=backend\n )\n\n out = ak._do.recursively_apply(layout, apply)\n return ak._util.wrap(out, highlevel=highlevel, behavior=behavior)\n", "path": "src/awkward/operations/ak_merge_union_of_records.py"}], "after_files": [{"content": "# BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE\n\n\nimport awkward as ak\nfrom awkward._nplikes.numpylike import NumpyMetadata\n\nnp = NumpyMetadata.instance()\ncpu = ak._backends.NumpyBackend.instance()\n\n\ndef merge_union_of_records(array, axis=-1, *, highlevel=True, behavior=None):\n \"\"\"\n Args:\n array: Array-like data (anything #ak.to_layout recognizes).\n axis (int): The dimension at which this operation is applied.\n The outermost dimension is `0`, followed by `1`, etc., and negative\n values count backward from the innermost: `-1` is the innermost\n dimension, `-2` is the next level up, etc.\n highlevel (bool): If True, return an #ak.Array; otherwise, return\n a low-level #ak.contents.Content subclass.\n behavior (None or dict): Custom #ak.behavior for the output array, if\n high-level.\n\n Simplifies unions of records, e.g.\n\n >>> array = ak.concatenate(([{\"a\": 1}], [{\"b\": 2}]))\n\n into records of options, i.e.\n\n >>> ak.merge_union_of_records(array)\n <Array [{a: 1, b: None}, {a: None, ...}] type='2 * {a: ?int64, b: ?int64}'>\n \"\"\"\n with ak._errors.OperationErrorContext(\n \"ak.merge_union_of_records\",\n {\"array\": array, \"axis\": axis, \"highlevel\": highlevel, \"behavior\": behavior},\n ):\n return _impl(array, axis, highlevel, behavior)\n\n\ndef _impl(array, axis, highlevel, behavior):\n behavior = ak._util.behavior_of(array, behavior=behavior)\n layout = ak.to_layout(array, allow_record=False)\n\n def apply_displace_index(layout, backend, **kwargs):\n if layout.is_record:\n return layout\n elif layout.is_option and layout.content.is_record:\n raise ak._errors.wrap_error(\n TypeError(\n \"optional records cannot be merged by this function. 
First call `ak.merge_option_of_records` \"\n \"to convert these into records of options.\"\n )\n )\n elif layout.is_indexed and layout.content.is_record:\n record = layout.content\n # Transpose index-of-record to record-of-index\n return ak.contents.RecordArray(\n [\n ak.contents.IndexedArray.simplified(\n layout.index, c, parameters=layout._parameters\n )\n for c in record.contents\n ],\n record.fields,\n record.length,\n backend=backend,\n )\n else:\n raise ak._errors.wrap_error(TypeError(layout))\n\n def apply(layout, depth, backend, **kwargs):\n posaxis = ak._util.maybe_posaxis(layout, axis, depth)\n if depth < posaxis + 1 and layout.is_leaf:\n raise ak._errors.wrap_error(\n np.AxisError(f\"axis={axis} exceeds the depth of this array ({depth})\")\n )\n elif depth == posaxis + 1 and layout.is_union:\n if all(x.is_record for x in layout.contents):\n # First, find all ordered fields, regularising any index-of-record\n # such that we have record-of-index\n seen_fields = set()\n all_fields = []\n regularised_contents = []\n for content in layout.contents:\n # Ensure that we have record-of-index\n regularised_content = ak._do.recursively_apply(\n content, apply_displace_index\n )\n regularised_contents.append(regularised_content)\n\n # Find new fields\n for field in regularised_content.fields:\n if field not in seen_fields:\n seen_fields.add(field)\n all_fields.append(field)\n\n # Build unions for each field\n outer_field_contents = []\n for field in all_fields:\n field_tags = backend.index_nplike.asarray(layout.tags, copy=True)\n field_index = backend.index_nplike.asarray(layout.index, copy=True)\n\n # Build contents for union representing current field\n field_contents = [\n c.content(field)\n for c in regularised_contents\n if c.has_field(field)\n ]\n\n # Find the best location for option type.\n # We will potentially have fewer contents in this per-field union\n # than the original outer union-of-records, because some recordarrays\n # may not have the given field.\n tag_for_missing = 0\n for i, content in enumerate(field_contents):\n if content.is_option:\n tag_for_missing = i\n break\n\n # If at least one recordarray doesn't have this field, we add\n # a special option\n if len(field_contents) < len(regularised_contents):\n # Make the tagged content an option, growing by one to ensure we\n # have a known `None` value to index into\n tagged_content = field_contents[tag_for_missing]\n indexedoption_index = backend.index_nplike.arange(\n tagged_content.length + 1, dtype=np.int64\n )\n indexedoption_index[tagged_content.length] = -1\n field_contents[\n tag_for_missing\n ] = ak.contents.IndexedOptionArray.simplified(\n ak.index.Index64(indexedoption_index), tagged_content\n )\n\n # Now build contents for union, by looping over outermost index\n # Overwrite tags to adjust for new contents length\n # and use the tagged content for any missing values\n k = 0\n for j, content in enumerate(regularised_contents):\n tag_is_j = field_tags == j\n\n if content.has_field(field):\n # Rewrite tags to account for missing fields\n field_tags[tag_is_j] = k\n k += 1\n\n else:\n # Rewrite tags to point to option content\n field_tags[tag_is_j] = tag_for_missing\n # Point each value to missing value\n field_index[tag_is_j] = (\n field_contents[tag_for_missing].length - 1\n )\n\n outer_field_contents.append(\n ak.contents.UnionArray.simplified(\n ak.index.Index8(field_tags),\n ak.index.Index64(field_index),\n field_contents,\n )\n )\n return ak.contents.RecordArray(\n outer_field_contents, all_fields, 
backend=backend\n )\n\n out = ak._do.recursively_apply(layout, apply)\n return ak._util.wrap(out, highlevel=highlevel, behavior=behavior)\n", "path": "src/awkward/operations/ak_merge_union_of_records.py"}]} | 2,319 | 140 |
gh_patches_debug_20307 | rasdani/github-patches | git_diff | pyro-ppl__numpyro-1320 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
`numpyro.render_model` behaviour changed in 0.9.0
I noticed that the behaviour of `numpyro.render_model` appears to have changed since the latest release, specifically arrows signifying conditional dependence are not being rendered properly.
I haven't had time to dig into why this is yet, but here is a simple recreation
```python
import numpyro
import numpyro.distributions as dist
def model():
theta = numpyro.sample("theta", dist.Uniform())
with numpyro.plate("N", size=10):
numpyro.sample("y", dist.Bernoulli(theta))
if __name__ == "__main__":
g = numpyro.render_model(model)
g.render(f"model-{numpyro.__version__}", format="png")
```
Running this with 0.8.0 and 0.9.0 I get (respectively)


--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 # Copyright Contributors to the Pyro project.
2 # SPDX-License-Identifier: Apache-2.0
3
4 from __future__ import absolute_import, division, print_function
5
6 import os
7 import sys
8
9 from setuptools import find_packages, setup
10
11 PROJECT_PATH = os.path.dirname(os.path.abspath(__file__))
12 _jax_version_constraints = ">=0.2.13,<0.2.28"
13 _jaxlib_version_constraints = ">=0.1.65"
14
15 # Find version
16 for line in open(os.path.join(PROJECT_PATH, "numpyro", "version.py")):
17 if line.startswith("__version__ = "):
18 version = line.strip().split()[2][1:-1]
19
20 # READ README.md for long description on PyPi.
21 try:
22 long_description = open("README.md", encoding="utf-8").read()
23 except Exception as e:
24 sys.stderr.write("Failed to read README.md:\n {}\n".format(e))
25 sys.stderr.flush()
26 long_description = ""
27
28 setup(
29 name="numpyro",
30 version=version,
31 description="Pyro PPL on NumPy",
32 packages=find_packages(include=["numpyro", "numpyro.*"]),
33 url="https://github.com/pyro-ppl/numpyro",
34 author="Uber AI Labs",
35 install_requires=[
36 f"jax{_jax_version_constraints}",
37 f"jaxlib{_jaxlib_version_constraints}",
38 "multipledispatch",
39 "numpy",
40 "tqdm",
41 ],
42 extras_require={
43 "doc": [
44 "ipython", # sphinx needs this to render codes
45 "nbsphinx>=0.8.5",
46 "readthedocs-sphinx-search==0.1.0",
47 "sphinx",
48 "sphinx_rtd_theme",
49 "sphinx-gallery",
50 ],
51 "test": [
52 "black[jupyter]>=21.8b0",
53 "flake8",
54 "isort>=5.0",
55 "pytest>=4.1",
56 "pyro-api>=0.1.1",
57 "scipy>=1.6,<1.7",
58 ],
59 "dev": [
60 "dm-haiku",
61 "flax",
62 "funsor>=0.4.1",
63 "graphviz",
64 "jaxns==0.0.7",
65 "optax>=0.0.6",
66 "tensorflow_probability>=0.15.0",
67 ],
68 "examples": [
69 "arviz",
70 "jupyter",
71 "matplotlib",
72 "pandas",
73 "seaborn",
74 "scikit-learn",
75 "wordcloud",
76 ],
77 "cpu": f"jax[cpu]{_jax_version_constraints}",
78 # TPU and CUDA installations, currently require to add package repository URL, i.e.,
79 # pip install numpyro[cuda] -f https://storage.googleapis.com/jax-releases/jax_releases.html
80 "tpu": f"jax[tpu]{_jax_version_constraints}",
81 "cuda": f"jax[cuda]{_jax_version_constraints}",
82 },
83 long_description=long_description,
84 long_description_content_type="text/markdown",
85 keywords="probabilistic machine learning bayesian statistics",
86 license="Apache License 2.0",
87 classifiers=[
88 "Intended Audience :: Developers",
89 "Intended Audience :: Education",
90 "Intended Audience :: Science/Research",
91 "License :: OSI Approved :: Apache Software License",
92 "Operating System :: POSIX :: Linux",
93 "Operating System :: MacOS :: MacOS X",
94 "Programming Language :: Python :: 3.7",
95 "Programming Language :: Python :: 3.8",
96 "Programming Language :: Python :: 3.9",
97 "Programming Language :: Python :: 3.10",
98 ],
99 )
100
```
Path: `numpyro/ops/provenance.py`
Content:
```
1 # Copyright Contributors to the Pyro project.
2 # SPDX-License-Identifier: Apache-2.0
3
4 import jax
5 from jax.interpreters import partial_eval
6 from jax.linear_util import wrap_init
7
8
9 class _ProvenanceJaxprTrace(partial_eval.DynamicJaxprTrace):
10 """A JAX class to control the behavior of primitives on tracers."""
11
12 def process_primitive(self, primitive, tracers, params):
13 # remove "_provenance" dimension in arguments before executing the function
14 provenances = [
15 t.aval.named_shape.pop("_provenance", frozenset()) for t in tracers
16 ]
17 out_tracers = super().process_primitive(primitive, tracers, params)
18 # add "_provenance" dimensions to arguments again
19 for t, p in zip(tracers, provenances):
20 if p:
21 t.aval.named_shape["_provenance"] = p
22
23 # update outputs' provenance
24 out_provenance = frozenset().union(*provenances)
25 if out_provenance:
26 out_tracers = out_tracers if primitive.multiple_results else [out_tracers]
27 for t in out_tracers:
28 t.aval.named_shape["_provenance"] = out_provenance
29 out_tracers = out_tracers if primitive.multiple_results else out_tracers[0]
30 return out_tracers
31
32
33 class ProvenanceArray:
34 """
35 Provenance tracking implementation in JAX.
36
37 This class wraps an ndarray to track provenance through JAX ops,
38 where provenance is a user-defined frozenset of objects. The
39 provenance of the output arrays of any op is the union of provenances
40 of input arrays.
41
42 - To start tracking provenance in a function, wrap input arrays in
43 :class:`ProvenanceArray` with user-defined initial provenance,
44 then use :func:`eval_provenance` to get the provenance output array.
45 - To read the provenance of an ndarray use :func:`get_provenance` .
46
47 Example::
48
49 >>> a = ProvenanceArray(jnp.zeros(3), frozenset({"a"}))
50 >>> b = ProvenanceArray(jnp.ones(3), frozenset({"b"}))
51 >>> c = jnp.arange(3)
52 >>> f = lambda a, b, c: a + b + c
53 >>> o = eval_provenance(f, a, b, c)
54 >>> assert get_provenance(o) == frozenset({"a", "b"})
55
56 **References**
57
58 [1] David Wingate, Noah Goodman, Andreas Stuhlmüller, Jeffrey Siskind (2011)
59 Nonstandard Interpretations of Probabilistic Programs for Efficient Inference
60 http://papers.neurips.cc/paper/4309-nonstandard-interpretations-of-probabilistic-programs-for-efficient-inference.pdf
61
62 :param data: An initial data to start tracking. The data needs
63 to have attributes `shape` and `dtype`.
64 :param frozenset provenance: An initial provenance set.
65 """
66
67 def __init__(self, data, provenance=frozenset()):
68 self.shape = data.shape
69 self.dtype = data.dtype
70 self.named_shape = {"_provenance": provenance}
71
72
73 def get_provenance(data):
74 """
75 Reads the provenance of a recursive datastructure possibly containing ndarray.
76
77 :param data: An input data.
78 :returns: A provenance frozenset.
79 :rtype: frozenset
80 """
81 return jax.tree_util.tree_map(
82 lambda a: a.named_shape.get("_provenance", frozenset()), data
83 )
84
85
86 def eval_provenance(fun, *args, **kwargs):
87 """
88 Compute the provenance output of ``fun`` using JAX's abstract
89 interpretation machinery. There is no actual array computation performed.
90
91 :param fun: A callable to track provenance of its (keyword) arguments.
92 :param args: Positional arguments of `fun`.
93 :param kwargs: Keyword arguments of `fun`.
94 :returns: A pytree of :class:`ProvenanceArray`.
95 """
96 # flatten the function and its arguments
97 args_flat, in_tree = jax.tree_util.tree_flatten((args, kwargs))
98 wrapped_fun, out_tree = jax.api_util.flatten_fun(wrap_init(fun), in_tree)
99 fun = wrap_init(wrapped_fun.call_wrapped)
100 avals = jax.util.safe_map(jax.api_util.shaped_abstractify, args_flat)
101
102 # execute the function and trace provenance
103 with jax.core.new_main(_ProvenanceJaxprTrace, dynamic=True) as main:
104 main.jaxpr_stack = ()
105 out = partial_eval.trace_to_subjaxpr_dynamic(fun, main, avals)[1]
106
107 # unflatten the output and get its provenance
108 out = [jax.ShapeDtypeStruct(x.shape, x.dtype, x.named_shape) for x in out]
109 out = jax.tree_util.tree_unflatten(out_tree(), out)
110 return jax.tree_util.tree_map(
111 lambda x: ProvenanceArray(x, x.named_shape.get("_provenance", frozenset())),
112 out,
113 )
114
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/numpyro/ops/provenance.py b/numpyro/ops/provenance.py
--- a/numpyro/ops/provenance.py
+++ b/numpyro/ops/provenance.py
@@ -26,6 +26,9 @@
out_tracers = out_tracers if primitive.multiple_results else [out_tracers]
for t in out_tracers:
t.aval.named_shape["_provenance"] = out_provenance
+ # Also update provenance of the cached tracer -> aval dict.
+ aval_cache = self.frame.tracer_to_var[id(t)].aval
+ aval_cache.named_shape["_provenance"] = out_provenance
out_tracers = out_tracers if primitive.multiple_results else out_tracers[0]
return out_tracers
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -9,7 +9,7 @@
from setuptools import find_packages, setup
PROJECT_PATH = os.path.dirname(os.path.abspath(__file__))
-_jax_version_constraints = ">=0.2.13,<0.2.28"
+_jax_version_constraints = ">=0.2.13"
_jaxlib_version_constraints = ">=0.1.65"
# Find version
| {"golden_diff": "diff --git a/numpyro/ops/provenance.py b/numpyro/ops/provenance.py\n--- a/numpyro/ops/provenance.py\n+++ b/numpyro/ops/provenance.py\n@@ -26,6 +26,9 @@\n out_tracers = out_tracers if primitive.multiple_results else [out_tracers]\n for t in out_tracers:\n t.aval.named_shape[\"_provenance\"] = out_provenance\n+ # Also update provenance of the cached tracer -> aval dict.\n+ aval_cache = self.frame.tracer_to_var[id(t)].aval\n+ aval_cache.named_shape[\"_provenance\"] = out_provenance\n out_tracers = out_tracers if primitive.multiple_results else out_tracers[0]\n return out_tracers\n \ndiff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -9,7 +9,7 @@\n from setuptools import find_packages, setup\n \n PROJECT_PATH = os.path.dirname(os.path.abspath(__file__))\n-_jax_version_constraints = \">=0.2.13,<0.2.28\"\n+_jax_version_constraints = \">=0.2.13\"\n _jaxlib_version_constraints = \">=0.1.65\"\n \n # Find version\n", "issue": "`numpyro.render_model` behaviour changed in 0.9.0\nI noticed that the behaviour of `numpyro.render_model` appears to have changed since the latest release, specifically arrows signifying conditional dependence are not being rendered properly.\r\n\r\nI haven't had time to dig into why this is yet, but here is a simple recreation\r\n\r\n```python\r\nimport numpyro\r\nimport numpyro.distributions as dist\r\n\r\n\r\ndef model():\r\n theta = numpyro.sample(\"theta\", dist.Uniform())\r\n \r\n with numpyro.plate(\"N\", size=10):\r\n numpyro.sample(\"y\", dist.Bernoulli(theta))\r\n \r\n \r\nif __name__ == \"__main__\":\r\n g = numpyro.render_model(model)\r\n g.render(f\"model-{numpyro.__version__}\", format=\"png\")\r\n```\r\n\r\nRunning this with 0.8.0 and 0.9.0 I get (respectively)\r\n\r\n\r\n\r\n\r\n\r\n\n", "before_files": [{"content": "# Copyright Contributors to the Pyro project.\n# SPDX-License-Identifier: Apache-2.0\n\nfrom __future__ import absolute_import, division, print_function\n\nimport os\nimport sys\n\nfrom setuptools import find_packages, setup\n\nPROJECT_PATH = os.path.dirname(os.path.abspath(__file__))\n_jax_version_constraints = \">=0.2.13,<0.2.28\"\n_jaxlib_version_constraints = \">=0.1.65\"\n\n# Find version\nfor line in open(os.path.join(PROJECT_PATH, \"numpyro\", \"version.py\")):\n if line.startswith(\"__version__ = \"):\n version = line.strip().split()[2][1:-1]\n\n# READ README.md for long description on PyPi.\ntry:\n long_description = open(\"README.md\", encoding=\"utf-8\").read()\nexcept Exception as e:\n sys.stderr.write(\"Failed to read README.md:\\n {}\\n\".format(e))\n sys.stderr.flush()\n long_description = \"\"\n\nsetup(\n name=\"numpyro\",\n version=version,\n description=\"Pyro PPL on NumPy\",\n packages=find_packages(include=[\"numpyro\", \"numpyro.*\"]),\n url=\"https://github.com/pyro-ppl/numpyro\",\n author=\"Uber AI Labs\",\n install_requires=[\n f\"jax{_jax_version_constraints}\",\n f\"jaxlib{_jaxlib_version_constraints}\",\n \"multipledispatch\",\n \"numpy\",\n \"tqdm\",\n ],\n extras_require={\n \"doc\": [\n \"ipython\", # sphinx needs this to render codes\n \"nbsphinx>=0.8.5\",\n \"readthedocs-sphinx-search==0.1.0\",\n \"sphinx\",\n \"sphinx_rtd_theme\",\n \"sphinx-gallery\",\n ],\n \"test\": [\n \"black[jupyter]>=21.8b0\",\n \"flake8\",\n \"isort>=5.0\",\n \"pytest>=4.1\",\n \"pyro-api>=0.1.1\",\n \"scipy>=1.6,<1.7\",\n ],\n \"dev\": [\n \"dm-haiku\",\n \"flax\",\n \"funsor>=0.4.1\",\n \"graphviz\",\n \"jaxns==0.0.7\",\n \"optax>=0.0.6\",\n \"tensorflow_probability>=0.15.0\",\n ],\n \"examples\": [\n 
\"arviz\",\n \"jupyter\",\n \"matplotlib\",\n \"pandas\",\n \"seaborn\",\n \"scikit-learn\",\n \"wordcloud\",\n ],\n \"cpu\": f\"jax[cpu]{_jax_version_constraints}\",\n # TPU and CUDA installations, currently require to add package repository URL, i.e.,\n # pip install numpyro[cuda] -f https://storage.googleapis.com/jax-releases/jax_releases.html\n \"tpu\": f\"jax[tpu]{_jax_version_constraints}\",\n \"cuda\": f\"jax[cuda]{_jax_version_constraints}\",\n },\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n keywords=\"probabilistic machine learning bayesian statistics\",\n license=\"Apache License 2.0\",\n classifiers=[\n \"Intended Audience :: Developers\",\n \"Intended Audience :: Education\",\n \"Intended Audience :: Science/Research\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Operating System :: POSIX :: Linux\",\n \"Operating System :: MacOS :: MacOS X\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n ],\n)\n", "path": "setup.py"}, {"content": "# Copyright Contributors to the Pyro project.\n# SPDX-License-Identifier: Apache-2.0\n\nimport jax\nfrom jax.interpreters import partial_eval\nfrom jax.linear_util import wrap_init\n\n\nclass _ProvenanceJaxprTrace(partial_eval.DynamicJaxprTrace):\n \"\"\"A JAX class to control the behavior of primitives on tracers.\"\"\"\n\n def process_primitive(self, primitive, tracers, params):\n # remove \"_provenance\" dimension in arguments before executing the function\n provenances = [\n t.aval.named_shape.pop(\"_provenance\", frozenset()) for t in tracers\n ]\n out_tracers = super().process_primitive(primitive, tracers, params)\n # add \"_provenance\" dimensions to arguments again\n for t, p in zip(tracers, provenances):\n if p:\n t.aval.named_shape[\"_provenance\"] = p\n\n # update outputs' provenance\n out_provenance = frozenset().union(*provenances)\n if out_provenance:\n out_tracers = out_tracers if primitive.multiple_results else [out_tracers]\n for t in out_tracers:\n t.aval.named_shape[\"_provenance\"] = out_provenance\n out_tracers = out_tracers if primitive.multiple_results else out_tracers[0]\n return out_tracers\n\n\nclass ProvenanceArray:\n \"\"\"\n Provenance tracking implementation in JAX.\n\n This class wraps an ndarray to track provenance through JAX ops,\n where provenance is a user-defined frozenset of objects. The\n provenance of the output arrays of any op is the union of provenances\n of input arrays.\n\n - To start tracking provenance in a function, wrap input arrays in\n :class:`ProvenanceArray` with user-defined initial provenance,\n then use :func:`eval_provenance` to get the provenance output array.\n - To read the provenance of an ndarray use :func:`get_provenance` .\n\n Example::\n\n >>> a = ProvenanceArray(jnp.zeros(3), frozenset({\"a\"}))\n >>> b = ProvenanceArray(jnp.ones(3), frozenset({\"b\"}))\n >>> c = jnp.arange(3)\n >>> f = lambda a, b, c: a + b + c\n >>> o = eval_provenance(f, a, b, c)\n >>> assert get_provenance(o) == frozenset({\"a\", \"b\"})\n\n **References**\n\n [1] David Wingate, Noah Goodman, Andreas Stuhlm\u00fcller, Jeffrey Siskind (2011)\n Nonstandard Interpretations of Probabilistic Programs for Efficient Inference\n http://papers.neurips.cc/paper/4309-nonstandard-interpretations-of-probabilistic-programs-for-efficient-inference.pdf\n\n :param data: An initial data to start tracking. 
The data needs\n to have attributes `shape` and `dtype`.\n :param frozenset provenance: An initial provenance set.\n \"\"\"\n\n def __init__(self, data, provenance=frozenset()):\n self.shape = data.shape\n self.dtype = data.dtype\n self.named_shape = {\"_provenance\": provenance}\n\n\ndef get_provenance(data):\n \"\"\"\n Reads the provenance of a recursive datastructure possibly containing ndarray.\n\n :param data: An input data.\n :returns: A provenance frozenset.\n :rtype: frozenset\n \"\"\"\n return jax.tree_util.tree_map(\n lambda a: a.named_shape.get(\"_provenance\", frozenset()), data\n )\n\n\ndef eval_provenance(fun, *args, **kwargs):\n \"\"\"\n Compute the provenance output of ``fun`` using JAX's abstract\n interpretation machinery. There is no actual array computation performed.\n\n :param fun: A callable to track provenance of its (keyword) arguments.\n :param args: Positional arguments of `fun`.\n :param kwargs: Keyword arguments of `fun`.\n :returns: A pytree of :class:`ProvenanceArray`.\n \"\"\"\n # flatten the function and its arguments\n args_flat, in_tree = jax.tree_util.tree_flatten((args, kwargs))\n wrapped_fun, out_tree = jax.api_util.flatten_fun(wrap_init(fun), in_tree)\n fun = wrap_init(wrapped_fun.call_wrapped)\n avals = jax.util.safe_map(jax.api_util.shaped_abstractify, args_flat)\n\n # execute the function and trace provenance\n with jax.core.new_main(_ProvenanceJaxprTrace, dynamic=True) as main:\n main.jaxpr_stack = ()\n out = partial_eval.trace_to_subjaxpr_dynamic(fun, main, avals)[1]\n\n # unflatten the output and get its provenance\n out = [jax.ShapeDtypeStruct(x.shape, x.dtype, x.named_shape) for x in out]\n out = jax.tree_util.tree_unflatten(out_tree(), out)\n return jax.tree_util.tree_map(\n lambda x: ProvenanceArray(x, x.named_shape.get(\"_provenance\", frozenset())),\n out,\n )\n", "path": "numpyro/ops/provenance.py"}], "after_files": [{"content": "# Copyright Contributors to the Pyro project.\n# SPDX-License-Identifier: Apache-2.0\n\nfrom __future__ import absolute_import, division, print_function\n\nimport os\nimport sys\n\nfrom setuptools import find_packages, setup\n\nPROJECT_PATH = os.path.dirname(os.path.abspath(__file__))\n_jax_version_constraints = \">=0.2.13\"\n_jaxlib_version_constraints = \">=0.1.65\"\n\n# Find version\nfor line in open(os.path.join(PROJECT_PATH, \"numpyro\", \"version.py\")):\n if line.startswith(\"__version__ = \"):\n version = line.strip().split()[2][1:-1]\n\n# READ README.md for long description on PyPi.\ntry:\n long_description = open(\"README.md\", encoding=\"utf-8\").read()\nexcept Exception as e:\n sys.stderr.write(\"Failed to read README.md:\\n {}\\n\".format(e))\n sys.stderr.flush()\n long_description = \"\"\n\nsetup(\n name=\"numpyro\",\n version=version,\n description=\"Pyro PPL on NumPy\",\n packages=find_packages(include=[\"numpyro\", \"numpyro.*\"]),\n url=\"https://github.com/pyro-ppl/numpyro\",\n author=\"Uber AI Labs\",\n install_requires=[\n f\"jax{_jax_version_constraints}\",\n f\"jaxlib{_jaxlib_version_constraints}\",\n \"multipledispatch\",\n \"numpy\",\n \"tqdm\",\n ],\n extras_require={\n \"doc\": [\n \"ipython\", # sphinx needs this to render codes\n \"nbsphinx>=0.8.5\",\n \"readthedocs-sphinx-search==0.1.0\",\n \"sphinx\",\n \"sphinx_rtd_theme\",\n \"sphinx-gallery\",\n ],\n \"test\": [\n \"black[jupyter]>=21.8b0\",\n \"flake8\",\n \"isort>=5.0\",\n \"pytest>=4.1\",\n \"pyro-api>=0.1.1\",\n \"scipy>=1.6,<1.7\",\n ],\n \"dev\": [\n \"dm-haiku\",\n \"flax\",\n \"funsor>=0.4.1\",\n \"graphviz\",\n 
\"jaxns==0.0.7\",\n \"optax>=0.0.6\",\n \"tensorflow_probability>=0.15.0\",\n ],\n \"examples\": [\n \"arviz\",\n \"jupyter\",\n \"matplotlib\",\n \"pandas\",\n \"seaborn\",\n \"scikit-learn\",\n \"wordcloud\",\n ],\n \"cpu\": f\"jax[cpu]{_jax_version_constraints}\",\n # TPU and CUDA installations, currently require to add package repository URL, i.e.,\n # pip install numpyro[cuda] -f https://storage.googleapis.com/jax-releases/jax_releases.html\n \"tpu\": f\"jax[tpu]{_jax_version_constraints}\",\n \"cuda\": f\"jax[cuda]{_jax_version_constraints}\",\n },\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n keywords=\"probabilistic machine learning bayesian statistics\",\n license=\"Apache License 2.0\",\n classifiers=[\n \"Intended Audience :: Developers\",\n \"Intended Audience :: Education\",\n \"Intended Audience :: Science/Research\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Operating System :: POSIX :: Linux\",\n \"Operating System :: MacOS :: MacOS X\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n ],\n)\n", "path": "setup.py"}, {"content": "# Copyright Contributors to the Pyro project.\n# SPDX-License-Identifier: Apache-2.0\n\nimport jax\nfrom jax.interpreters import partial_eval\nfrom jax.linear_util import wrap_init\n\n\nclass _ProvenanceJaxprTrace(partial_eval.DynamicJaxprTrace):\n \"\"\"A JAX class to control the behavior of primitives on tracers.\"\"\"\n\n def process_primitive(self, primitive, tracers, params):\n # remove \"_provenance\" dimension in arguments before executing the function\n provenances = [\n t.aval.named_shape.pop(\"_provenance\", frozenset()) for t in tracers\n ]\n out_tracers = super().process_primitive(primitive, tracers, params)\n # add \"_provenance\" dimensions to arguments again\n for t, p in zip(tracers, provenances):\n if p:\n t.aval.named_shape[\"_provenance\"] = p\n\n # update outputs' provenance\n out_provenance = frozenset().union(*provenances)\n if out_provenance:\n out_tracers = out_tracers if primitive.multiple_results else [out_tracers]\n for t in out_tracers:\n t.aval.named_shape[\"_provenance\"] = out_provenance\n # Also update provenance of the cached tracer -> aval dict.\n aval_cache = self.frame.tracer_to_var[id(t)].aval\n aval_cache.named_shape[\"_provenance\"] = out_provenance\n out_tracers = out_tracers if primitive.multiple_results else out_tracers[0]\n return out_tracers\n\n\nclass ProvenanceArray:\n \"\"\"\n Provenance tracking implementation in JAX.\n\n This class wraps an ndarray to track provenance through JAX ops,\n where provenance is a user-defined frozenset of objects. 
The\n provenance of the output arrays of any op is the union of provenances\n of input arrays.\n\n - To start tracking provenance in a function, wrap input arrays in\n :class:`ProvenanceArray` with user-defined initial provenance,\n then use :func:`eval_provenance` to get the provenance output array.\n - To read the provenance of an ndarray use :func:`get_provenance` .\n\n Example::\n\n >>> a = ProvenanceArray(jnp.zeros(3), frozenset({\"a\"}))\n >>> b = ProvenanceArray(jnp.ones(3), frozenset({\"b\"}))\n >>> c = jnp.arange(3)\n >>> f = lambda a, b, c: a + b + c\n >>> o = eval_provenance(f, a, b, c)\n >>> assert get_provenance(o) == frozenset({\"a\", \"b\"})\n\n **References**\n\n [1] David Wingate, Noah Goodman, Andreas Stuhlm\u00fcller, Jeffrey Siskind (2011)\n Nonstandard Interpretations of Probabilistic Programs for Efficient Inference\n http://papers.neurips.cc/paper/4309-nonstandard-interpretations-of-probabilistic-programs-for-efficient-inference.pdf\n\n :param data: An initial data to start tracking. The data needs\n to have attributes `shape` and `dtype`.\n :param frozenset provenance: An initial provenance set.\n \"\"\"\n\n def __init__(self, data, provenance=frozenset()):\n self.shape = data.shape\n self.dtype = data.dtype\n self.named_shape = {\"_provenance\": provenance}\n\n\ndef get_provenance(data):\n \"\"\"\n Reads the provenance of a recursive datastructure possibly containing ndarray.\n\n :param data: An input data.\n :returns: A provenance frozenset.\n :rtype: frozenset\n \"\"\"\n return jax.tree_util.tree_map(\n lambda a: a.named_shape.get(\"_provenance\", frozenset()), data\n )\n\n\ndef eval_provenance(fun, *args, **kwargs):\n \"\"\"\n Compute the provenance output of ``fun`` using JAX's abstract\n interpretation machinery. There is no actual array computation performed.\n\n :param fun: A callable to track provenance of its (keyword) arguments.\n :param args: Positional arguments of `fun`.\n :param kwargs: Keyword arguments of `fun`.\n :returns: A pytree of :class:`ProvenanceArray`.\n \"\"\"\n # flatten the function and its arguments\n args_flat, in_tree = jax.tree_util.tree_flatten((args, kwargs))\n wrapped_fun, out_tree = jax.api_util.flatten_fun(wrap_init(fun), in_tree)\n fun = wrap_init(wrapped_fun.call_wrapped)\n avals = jax.util.safe_map(jax.api_util.shaped_abstractify, args_flat)\n\n # execute the function and trace provenance\n with jax.core.new_main(_ProvenanceJaxprTrace, dynamic=True) as main:\n main.jaxpr_stack = ()\n out = partial_eval.trace_to_subjaxpr_dynamic(fun, main, avals)[1]\n\n # unflatten the output and get its provenance\n out = [jax.ShapeDtypeStruct(x.shape, x.dtype, x.named_shape) for x in out]\n out = jax.tree_util.tree_unflatten(out_tree(), out)\n return jax.tree_util.tree_map(\n lambda x: ProvenanceArray(x, x.named_shape.get(\"_provenance\", frozenset())),\n out,\n )\n", "path": "numpyro/ops/provenance.py"}]} | 3,000 | 283 |
gh_patches_debug_29620 | rasdani/github-patches | git_diff | ansible__ansible-modules-core-3251 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
apache2_module hangs with "WARNING: The following essential module will be disabled."
I have this code in my playbook:
```
- apache2_module: name={{item}} state=absent
with_items:
- autoindex
- negotiation
```
However someone decided that autoindex and negotiation both are "essential", so when I try to disable them I get this warning:
```
# a2dismod autoindex
WARNING: The following essential module will be disabled.
This might result in unexpected behavior and should NOT be done
unless you know exactly what you are doing!
autoindex
To continue type in the phrase 'Yes, do as I say!' or retry by passing '-f':
```
and expects me to actually type "Yes, do as I say!" (WTF?!?) if I don't provide "-f". The ansible module does not provide that -f parameter, so it locks up.
As weird as that is, I think there _at least_ should be a "force=true" option in apache2_module - or -f should be default.
(Not sure if that's true for all "a2dismod" instances because there is a suspiciously low amount of Google results (none). It is at least there for apache2 currently installed from dotdeb.org)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `web_infrastructure/apache2_module.py`
Content:
```
1 #!/usr/bin/python
2 #coding: utf-8 -*-
3
4 # (c) 2013-2014, Christian Berendt <[email protected]>
5 #
6 # This module is free software: you can redistribute it and/or modify
7 # it under the terms of the GNU General Public License as published by
8 # the Free Software Foundation, either version 3 of the License, or
9 # (at your option) any later version.
10 #
11 # This software is distributed in the hope that it will be useful,
12 # but WITHOUT ANY WARRANTY; without even the implied warranty of
13 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 # GNU General Public License for more details.
15 #
16 # You should have received a copy of the GNU General Public License
17 # along with this software. If not, see <http://www.gnu.org/licenses/>.
18
19 DOCUMENTATION = '''
20 ---
21 module: apache2_module
22 version_added: 1.6
23 author: "Christian Berendt (@berendt)"
24 short_description: enables/disables a module of the Apache2 webserver
25 description:
26 - Enables or disables a specified module of the Apache2 webserver.
27 options:
28 name:
29 description:
30 - name of the module to enable/disable
31 required: true
32 state:
33 description:
34 - indicate the desired state of the resource
35 choices: ['present', 'absent']
36 default: present
37
38 requirements: ["a2enmod","a2dismod"]
39 '''
40
41 EXAMPLES = '''
42 # enables the Apache2 module "wsgi"
43 - apache2_module: state=present name=wsgi
44
45 # disables the Apache2 module "wsgi"
46 - apache2_module: state=absent name=wsgi
47 '''
48
49 import re
50
51 def _disable_module(module):
52 name = module.params['name']
53 a2dismod_binary = module.get_bin_path("a2dismod")
54 if a2dismod_binary is None:
55 module.fail_json(msg="a2dismod not found. Perhaps this system does not use a2dismod to manage apache")
56
57 result, stdout, stderr = module.run_command("%s %s" % (a2dismod_binary, name))
58
59 if re.match(r'.*\b' + name + r' already disabled', stdout, re.S|re.M):
60 module.exit_json(changed = False, result = "Success")
61 elif result != 0:
62 module.fail_json(msg="Failed to disable module %s: %s" % (name, stdout))
63 else:
64 module.exit_json(changed = True, result = "Disabled")
65
66 def _enable_module(module):
67 name = module.params['name']
68 a2enmod_binary = module.get_bin_path("a2enmod")
69 if a2enmod_binary is None:
70 module.fail_json(msg="a2enmod not found. Perhaps this system does not use a2enmod to manage apache")
71
72 result, stdout, stderr = module.run_command("%s %s" % (a2enmod_binary, name))
73
74 if re.match(r'.*\b' + name + r' already enabled', stdout, re.S|re.M):
75 module.exit_json(changed = False, result = "Success")
76 elif result != 0:
77 module.fail_json(msg="Failed to enable module %s: %s" % (name, stdout))
78 else:
79 module.exit_json(changed = True, result = "Enabled")
80
81 def main():
82 module = AnsibleModule(
83 argument_spec = dict(
84 name = dict(required=True),
85 state = dict(default='present', choices=['absent', 'present'])
86 ),
87 )
88
89 if module.params['state'] == 'present':
90 _enable_module(module)
91
92 if module.params['state'] == 'absent':
93 _disable_module(module)
94
95 # import module snippets
96 from ansible.module_utils.basic import *
97 if __name__ == '__main__':
98 main()
99
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/web_infrastructure/apache2_module.py b/web_infrastructure/apache2_module.py
--- a/web_infrastructure/apache2_module.py
+++ b/web_infrastructure/apache2_module.py
@@ -29,6 +29,12 @@
description:
- name of the module to enable/disable
required: true
+ force:
+ description:
+ - force disabling of default modules and override Debian warnings
+ required: false
+ choices: ['yes', 'no']
+ default: no
state:
description:
- indicate the desired state of the resource
@@ -50,10 +56,14 @@
def _disable_module(module):
name = module.params['name']
+ force = module.params['force']
a2dismod_binary = module.get_bin_path("a2dismod")
if a2dismod_binary is None:
module.fail_json(msg="a2dismod not found. Perhaps this system does not use a2dismod to manage apache")
+ if force:
+ a2dismod_binary += ' -f'
+
result, stdout, stderr = module.run_command("%s %s" % (a2dismod_binary, name))
if re.match(r'.*\b' + name + r' already disabled', stdout, re.S|re.M):
@@ -82,6 +92,7 @@
module = AnsibleModule(
argument_spec = dict(
name = dict(required=True),
+ force = dict(required=False, type='bool', default=False),
state = dict(default='present', choices=['absent', 'present'])
),
)
| {"golden_diff": "diff --git a/web_infrastructure/apache2_module.py b/web_infrastructure/apache2_module.py\n--- a/web_infrastructure/apache2_module.py\n+++ b/web_infrastructure/apache2_module.py\n@@ -29,6 +29,12 @@\n description:\n - name of the module to enable/disable\n required: true\n+ force:\n+ description:\n+ - force disabling of default modules and override Debian warnings\n+ required: false\n+ choices: ['yes', 'no']\n+ default: no\n state:\n description:\n - indicate the desired state of the resource\n@@ -50,10 +56,14 @@\n \n def _disable_module(module):\n name = module.params['name']\n+ force = module.params['force']\n a2dismod_binary = module.get_bin_path(\"a2dismod\")\n if a2dismod_binary is None:\n module.fail_json(msg=\"a2dismod not found. Perhaps this system does not use a2dismod to manage apache\")\n \n+ if force:\n+ a2dismod_binary += ' -f'\n+\n result, stdout, stderr = module.run_command(\"%s %s\" % (a2dismod_binary, name))\n \n if re.match(r'.*\\b' + name + r' already disabled', stdout, re.S|re.M):\n@@ -82,6 +92,7 @@\n module = AnsibleModule(\n argument_spec = dict(\n name = dict(required=True),\n+ force = dict(required=False, type='bool', default=False),\n state = dict(default='present', choices=['absent', 'present'])\n ),\n )\n", "issue": "apache2_module hangs with \"WARNING: The following essential module will be disabled.\"\nI have this code in my playbook:\n\n```\n- apache2_module: name={{item}} state=absent\n with_items:\n - autoindex\n - negotiation\n```\n\nHowever someone decided that autoindex and negotiation both are \"essential\", so when I try to disable them I get this warning:\n\n```\n# a2dismod autoindex\nWARNING: The following essential module will be disabled.\nThis might result in unexpected behavior and should NOT be done\nunless you know exactly what you are doing!\n autoindex\n\nTo continue type in the phrase 'Yes, do as I say!' or retry by passing '-f':\n```\n\nand expects me to actually type \"Yes, do as I say!\" (WTF?!?) if I don't provide \"-f\". The ansible module does not provide that -f parameter, so it locks up.\n\nAs weird as that is, I think there _at least_ should be a \"force=true\" option in apache2_module - or -f should be default.\n\n(Not sure if that's true for all \"a2dismod\" instances because there is a suspiciously low amount of Google results (none). It is at least there for apache2 currently installed from dotdeb.org)\n\n", "before_files": [{"content": "#!/usr/bin/python\n#coding: utf-8 -*-\n\n# (c) 2013-2014, Christian Berendt <[email protected]>\n#\n# This module is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This software is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this software. 
If not, see <http://www.gnu.org/licenses/>.\n\nDOCUMENTATION = '''\n---\nmodule: apache2_module\nversion_added: 1.6\nauthor: \"Christian Berendt (@berendt)\"\nshort_description: enables/disables a module of the Apache2 webserver\ndescription:\n - Enables or disables a specified module of the Apache2 webserver.\noptions:\n name:\n description:\n - name of the module to enable/disable\n required: true\n state:\n description:\n - indicate the desired state of the resource\n choices: ['present', 'absent']\n default: present\n\nrequirements: [\"a2enmod\",\"a2dismod\"]\n'''\n\nEXAMPLES = '''\n# enables the Apache2 module \"wsgi\"\n- apache2_module: state=present name=wsgi\n\n# disables the Apache2 module \"wsgi\"\n- apache2_module: state=absent name=wsgi\n'''\n\nimport re\n\ndef _disable_module(module):\n name = module.params['name']\n a2dismod_binary = module.get_bin_path(\"a2dismod\")\n if a2dismod_binary is None:\n module.fail_json(msg=\"a2dismod not found. Perhaps this system does not use a2dismod to manage apache\")\n\n result, stdout, stderr = module.run_command(\"%s %s\" % (a2dismod_binary, name))\n\n if re.match(r'.*\\b' + name + r' already disabled', stdout, re.S|re.M):\n module.exit_json(changed = False, result = \"Success\")\n elif result != 0:\n module.fail_json(msg=\"Failed to disable module %s: %s\" % (name, stdout))\n else:\n module.exit_json(changed = True, result = \"Disabled\")\n\ndef _enable_module(module):\n name = module.params['name']\n a2enmod_binary = module.get_bin_path(\"a2enmod\")\n if a2enmod_binary is None:\n module.fail_json(msg=\"a2enmod not found. Perhaps this system does not use a2enmod to manage apache\")\n\n result, stdout, stderr = module.run_command(\"%s %s\" % (a2enmod_binary, name))\n\n if re.match(r'.*\\b' + name + r' already enabled', stdout, re.S|re.M):\n module.exit_json(changed = False, result = \"Success\")\n elif result != 0:\n module.fail_json(msg=\"Failed to enable module %s: %s\" % (name, stdout))\n else:\n module.exit_json(changed = True, result = \"Enabled\")\n\ndef main():\n module = AnsibleModule(\n argument_spec = dict(\n name = dict(required=True),\n state = dict(default='present', choices=['absent', 'present'])\n ),\n )\n\n if module.params['state'] == 'present':\n _enable_module(module)\n\n if module.params['state'] == 'absent':\n _disable_module(module)\n\n# import module snippets\nfrom ansible.module_utils.basic import *\nif __name__ == '__main__':\n main()\n", "path": "web_infrastructure/apache2_module.py"}], "after_files": [{"content": "#!/usr/bin/python\n#coding: utf-8 -*-\n\n# (c) 2013-2014, Christian Berendt <[email protected]>\n#\n# This module is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This software is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this software. 
If not, see <http://www.gnu.org/licenses/>.\n\nDOCUMENTATION = '''\n---\nmodule: apache2_module\nversion_added: 1.6\nauthor: \"Christian Berendt (@berendt)\"\nshort_description: enables/disables a module of the Apache2 webserver\ndescription:\n - Enables or disables a specified module of the Apache2 webserver.\noptions:\n name:\n description:\n - name of the module to enable/disable\n required: true\n force:\n description:\n - force disabling of default modules and override Debian warnings\n required: false\n choices: ['yes', 'no']\n default: no\n state:\n description:\n - indicate the desired state of the resource\n choices: ['present', 'absent']\n default: present\n\nrequirements: [\"a2enmod\",\"a2dismod\"]\n'''\n\nEXAMPLES = '''\n# enables the Apache2 module \"wsgi\"\n- apache2_module: state=present name=wsgi\n\n# disables the Apache2 module \"wsgi\"\n- apache2_module: state=absent name=wsgi\n'''\n\nimport re\n\ndef _disable_module(module):\n name = module.params['name']\n force = module.params['force']\n a2dismod_binary = module.get_bin_path(\"a2dismod\")\n if a2dismod_binary is None:\n module.fail_json(msg=\"a2dismod not found. Perhaps this system does not use a2dismod to manage apache\")\n\n if force:\n a2dismod_binary += ' -f'\n\n result, stdout, stderr = module.run_command(\"%s %s\" % (a2dismod_binary, name))\n\n if re.match(r'.*\\b' + name + r' already disabled', stdout, re.S|re.M):\n module.exit_json(changed = False, result = \"Success\")\n elif result != 0:\n module.fail_json(msg=\"Failed to disable module %s: %s\" % (name, stdout))\n else:\n module.exit_json(changed = True, result = \"Disabled\")\n\ndef _enable_module(module):\n name = module.params['name']\n a2enmod_binary = module.get_bin_path(\"a2enmod\")\n if a2enmod_binary is None:\n module.fail_json(msg=\"a2enmod not found. Perhaps this system does not use a2enmod to manage apache\")\n\n result, stdout, stderr = module.run_command(\"%s %s\" % (a2enmod_binary, name))\n\n if re.match(r'.*\\b' + name + r' already enabled', stdout, re.S|re.M):\n module.exit_json(changed = False, result = \"Success\")\n elif result != 0:\n module.fail_json(msg=\"Failed to enable module %s: %s\" % (name, stdout))\n else:\n module.exit_json(changed = True, result = \"Enabled\")\n\ndef main():\n module = AnsibleModule(\n argument_spec = dict(\n name = dict(required=True),\n force = dict(required=False, type='bool', default=False),\n state = dict(default='present', choices=['absent', 'present'])\n ),\n )\n\n if module.params['state'] == 'present':\n _enable_module(module)\n\n if module.params['state'] == 'absent':\n _disable_module(module)\n\n# import module snippets\nfrom ansible.module_utils.basic import *\nif __name__ == '__main__':\n main()\n", "path": "web_infrastructure/apache2_module.py"}]} | 1,571 | 368 |
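
A minimal sketch of the behaviour the golden diff above adds: with `force=True` the module appends `-f` to `a2dismod`, so disabling an "essential" module no longer blocks on the interactive "Yes, do as I say!" prompt. The helper function below is illustrative only; the real module builds the string inline and passes it to `AnsibleModule.run_command()`.

```python
# Illustrative reconstruction of the command built by the patched module.
def build_a2dismod_command(name, force=False, a2dismod_binary="a2dismod"):
    if force:
        a2dismod_binary += " -f"  # mirrors the diff: skip the essential-module prompt
    return "%s %s" % (a2dismod_binary, name)

assert build_a2dismod_command("autoindex") == "a2dismod autoindex"
assert build_a2dismod_command("autoindex", force=True) == "a2dismod -f autoindex"
```

In a playbook the new option would presumably be used as `apache2_module: name=autoindex state=absent force=yes`, matching the `force` entry added to the argument spec.
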
gh_patches_debug_21796 | rasdani/github-patches | git_diff | psychopy__psychopy-4625 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
404 errors on some of the page in the HELP menus
Version used
- psychopy: 2021.2.3
https://www.psychopy.org/builder/builder.html
https://www.psychopy.org/api/api.html
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `psychopy/app/urls.py`
Content:
```
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3
4 """A central location to store information about urls
5 """
6 urls = dict()
7
8 # links based on string names
9 urls['builder'] = "https://www.psychopy.org/builder/builder.html"
10 urls['builder.loops'] = "https://www.psychopy.org/builder/flow.html#loops"
11 # NB. builder components get their urls defined by the component code
12 # (so a custom component can have a url)
13
14 urls['downloads'] = "https://github.com/psychopy/psychopy/releases"
15 urls['changelog'] = "https://www.psychopy.org/changelog.html"
16
17 general = "https://www.psychopy.org/general/"
18 urls['prefs'] = general + "prefs.html"
19 urls['prefs.general'] = general + "prefs.html#general-settings"
20 urls['prefs.app'] = general + "prefs.html#application-settings"
21 urls['prefs.coder'] = general + "prefs.html#coder-settings"
22 urls['prefs.builder'] = general + "prefs.html#builder-settings"
23 urls['prefs.connections'] = general + "prefs.html#connection-settings"
24
25 # links keyed by wxIDs (e.g. menu item IDs)
26 urls['psychopyHome'] = "https://www.psychopy.org/"
27 urls['psychopyReference'] = "https://www.psychopy.org/api/api.html"
28 urls['coderTutorial'] = "https://www.psychopy.org/coder/tutorial1.html"
29 urls['builderHelp'] = urls['builder']
30 urls['builderDemos'] = "http://code.google.com/p/psychopy/downloads/list?can=2&q=demos"
31 urls['projsAbout'] = "https://www.psychopy.org/general/projects.html"
32
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/psychopy/app/urls.py b/psychopy/app/urls.py
--- a/psychopy/app/urls.py
+++ b/psychopy/app/urls.py
@@ -6,7 +6,7 @@
urls = dict()
# links based on string names
-urls['builder'] = "https://www.psychopy.org/builder/builder.html"
+urls['builder'] = "https://www.psychopy.org/builder"
urls['builder.loops'] = "https://www.psychopy.org/builder/flow.html#loops"
# NB. builder components get their urls defined by the component code
# (so a custom component can have a url)
@@ -24,7 +24,7 @@
# links keyed by wxIDs (e.g. menu item IDs)
urls['psychopyHome'] = "https://www.psychopy.org/"
-urls['psychopyReference'] = "https://www.psychopy.org/api/api.html"
+urls['psychopyReference'] = "https://www.psychopy.org/api"
urls['coderTutorial'] = "https://www.psychopy.org/coder/tutorial1.html"
urls['builderHelp'] = urls['builder']
urls['builderDemos'] = "http://code.google.com/p/psychopy/downloads/list?can=2&q=demos"
| {"golden_diff": "diff --git a/psychopy/app/urls.py b/psychopy/app/urls.py\n--- a/psychopy/app/urls.py\n+++ b/psychopy/app/urls.py\n@@ -6,7 +6,7 @@\n urls = dict()\n \n # links based on string names\n-urls['builder'] = \"https://www.psychopy.org/builder/builder.html\"\n+urls['builder'] = \"https://www.psychopy.org/builder\"\n urls['builder.loops'] = \"https://www.psychopy.org/builder/flow.html#loops\"\n # NB. builder components get their urls defined by the component code\n # (so a custom component can have a url)\n@@ -24,7 +24,7 @@\n \n # links keyed by wxIDs (e.g. menu item IDs)\n urls['psychopyHome'] = \"https://www.psychopy.org/\"\n-urls['psychopyReference'] = \"https://www.psychopy.org/api/api.html\"\n+urls['psychopyReference'] = \"https://www.psychopy.org/api\"\n urls['coderTutorial'] = \"https://www.psychopy.org/coder/tutorial1.html\"\n urls['builderHelp'] = urls['builder']\n urls['builderDemos'] = \"http://code.google.com/p/psychopy/downloads/list?can=2&q=demos\"\n", "issue": "404 errors on some of the page in the HELP menus\nVersion used\r\n- psychopy: 2021.2.3\r\n\r\nhttps://www.psychopy.org/builder/builder.html\r\nhttps://www.psychopy.org/api/api.html\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"A central location to store information about urls\n\"\"\"\nurls = dict()\n\n# links based on string names\nurls['builder'] = \"https://www.psychopy.org/builder/builder.html\"\nurls['builder.loops'] = \"https://www.psychopy.org/builder/flow.html#loops\"\n# NB. builder components get their urls defined by the component code\n# (so a custom component can have a url)\n\nurls['downloads'] = \"https://github.com/psychopy/psychopy/releases\"\nurls['changelog'] = \"https://www.psychopy.org/changelog.html\"\n\ngeneral = \"https://www.psychopy.org/general/\"\nurls['prefs'] = general + \"prefs.html\"\nurls['prefs.general'] = general + \"prefs.html#general-settings\"\nurls['prefs.app'] = general + \"prefs.html#application-settings\"\nurls['prefs.coder'] = general + \"prefs.html#coder-settings\"\nurls['prefs.builder'] = general + \"prefs.html#builder-settings\"\nurls['prefs.connections'] = general + \"prefs.html#connection-settings\"\n\n# links keyed by wxIDs (e.g. menu item IDs)\nurls['psychopyHome'] = \"https://www.psychopy.org/\"\nurls['psychopyReference'] = \"https://www.psychopy.org/api/api.html\"\nurls['coderTutorial'] = \"https://www.psychopy.org/coder/tutorial1.html\"\nurls['builderHelp'] = urls['builder']\nurls['builderDemos'] = \"http://code.google.com/p/psychopy/downloads/list?can=2&q=demos\"\nurls['projsAbout'] = \"https://www.psychopy.org/general/projects.html\"\n", "path": "psychopy/app/urls.py"}], "after_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"A central location to store information about urls\n\"\"\"\nurls = dict()\n\n# links based on string names\nurls['builder'] = \"https://www.psychopy.org/builder\"\nurls['builder.loops'] = \"https://www.psychopy.org/builder/flow.html#loops\"\n# NB. 
builder components get their urls defined by the component code\n# (so a custom component can have a url)\n\nurls['downloads'] = \"https://github.com/psychopy/psychopy/releases\"\nurls['changelog'] = \"https://www.psychopy.org/changelog.html\"\n\ngeneral = \"https://www.psychopy.org/general/\"\nurls['prefs'] = general + \"prefs.html\"\nurls['prefs.general'] = general + \"prefs.html#general-settings\"\nurls['prefs.app'] = general + \"prefs.html#application-settings\"\nurls['prefs.coder'] = general + \"prefs.html#coder-settings\"\nurls['prefs.builder'] = general + \"prefs.html#builder-settings\"\nurls['prefs.connections'] = general + \"prefs.html#connection-settings\"\n\n# links keyed by wxIDs (e.g. menu item IDs)\nurls['psychopyHome'] = \"https://www.psychopy.org/\"\nurls['psychopyReference'] = \"https://www.psychopy.org/api\"\nurls['coderTutorial'] = \"https://www.psychopy.org/coder/tutorial1.html\"\nurls['builderHelp'] = urls['builder']\nurls['builderDemos'] = \"http://code.google.com/p/psychopy/downloads/list?can=2&q=demos\"\nurls['projsAbout'] = \"https://www.psychopy.org/general/projects.html\"\n", "path": "psychopy/app/urls.py"}]} | 739 | 285 |
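
The patch in this row only swaps two documentation URLs. A rough way to catch such dead links automatically is sketched below; it is not part of the patch, the import path is assumed from `psychopy/app/urls.py`, and it relies on the third-party `requests` library.

```python
# Illustrative link check: HEAD every help URL and fail on 4xx/5xx responses.
import requests
from psychopy.app.urls import urls

for key, url in sorted(urls.items()):
    status = requests.head(url, allow_redirects=True, timeout=10).status_code
    assert status < 400, "%s -> %s returned %s" % (key, url, status)
```
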
gh_patches_debug_54129 | rasdani/github-patches | git_diff | Gallopsled__pwntools-1252 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
'from pwn import *' fails when one of the command line args is a Base64 string (or has more than one equal sign)
The argument parsing functionality invoked by `from pwn import *` causes an error when one of the command line arguments has more than one equal sign:
`echo "from pwn import *\nprint('hello world')" > test.py`
`python test.py afdsa==`
```
Traceback (most recent call last):
File "test.py", line 1, in <module>
from pwn import *
File "/usr/local/lib/python2.7/site-packages/pwn/__init__.py", line 6, in <module>
pwnlib.args.initialize()
File "/usr/local/lib/python2.7/site-packages/pwnlib/args.py", line 197, in initialize
arg, value = arg.split('=')
ValueError: too many values to unpack
```
https://github.com/Gallopsled/pwntools/blob/dev/pwnlib/args.py#L197 probably just needs to be changed to `if arg.count("=") == 1:`
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pwnlib/args.py`
Content:
```
1
2 #!/usr/bin/env python2
3 """
4 Pwntools exposes several magic command-line arguments and environment
5 variables when operating in `from pwn import *` mode.
6
7 The arguments extracted from the command-line and removed from ``sys.argv``.
8
9 Arguments can be set by appending them to the command-line, or setting
10 them in the environment prefixed by ``PWNLIB_``.
11
12 The easiest example is to enable more verbose debugging. Just set ``DEBUG``.
13
14 .. code-block:: bash
15
16 $ PWNLIB_DEBUG=1 python exploit.py
17 $ python exploit.py DEBUG
18
19 These arguments are automatically extracted, regardless of their name, and
20 exposed via :mod:`pwnlib.args.args`, which is exposed as the global variable
21 :data:`args`. Arguments which ``pwntools`` reserves internally are not exposed
22 this way.
23
24 .. code-block:: bash
25
26 $ python -c 'from pwn import *; print args' A=1 B=Hello HOST=1.2.3.4 DEBUG
27 defaultdict(<type 'str'>, {'A': '1', 'HOST': '1.2.3.4', 'B': 'Hello'})
28
29 This is very useful for conditional code, for example determining whether to
30 run an exploit locally or to connect to a remote server. Arguments which are
31 not specified evaluate to an empty string.
32
33 .. code-block:: python
34
35 if args['REMOTE']:
36 io = remote('exploitme.com', 4141)
37 else:
38 io = process('./pwnable')
39
40 Arguments can also be accessed directly with the dot operator, e.g.:
41
42 .. code-block:: python
43
44 if args.REMOTE:
45 ...
46
47 Any undefined arguments evaluate to an empty string, ``''``.
48
49 The full list of supported "magic arguments" and their effects are listed
50 below.
51
52 """
53 from __future__ import absolute_import
54
55 import collections
56 import logging
57 import os
58 import string
59 import sys
60
61 from pwnlib import term
62 from pwnlib.context import context
63
64 class PwnlibArgs(collections.defaultdict):
65 def __getattr__(self, attr):
66 return self[attr]
67
68 args = PwnlibArgs(str)
69 term_mode = True
70 env_prefix = 'PWNLIB_'
71 free_form = True
72
73 # Check to see if we were invoked as one of the 'pwn xxx' scripts.
74 # If so, we don't want to remove e.g. "SYS_" from the end of the command
75 # line, as this breaks things like constgrep.
76 import pwnlib.commandline
77 basename = os.path.basename(sys.argv[0])
78
79 if basename == 'pwn' or basename in pwnlib.commandline.__all__:
80 free_form = False
81
82
83 def isident(s):
84 """
85 Helper function to check whether a string is a valid identifier,
86 as passed in on the command-line.
87 """
88 first = string.uppercase + '_'
89 body = string.digits + first
90 if not s:
91 return False
92 if s[0] not in first:
93 return False
94 if not all(c in body for c in s[1:]):
95 return False
96 return True
97
98 def asbool(s):
99 """
100 Convert a string to its boolean value
101 """
102 if s.lower() == 'true':
103 return True
104 elif s.lower() == 'false':
105 return False
106 elif s.isdigit():
107 return bool(int(s))
108 else:
109 raise ValueError('must be integer or boolean: %r' % s)
110
111 def LOG_LEVEL(x):
112 """Sets the logging verbosity used via ``context.log_level``,
113 e.g. ``LOG_LEVEL=debug``.
114 """
115 with context.local(log_level=x):
116 context.defaults['log_level']=context.log_level
117
118 def LOG_FILE(x):
119 """Sets a log file to be used via ``context.log_file``, e.g.
120 ``LOG_FILE=./log.txt``"""
121 context.log_file=x
122
123 def SILENT(x):
124 """Sets the logging verbosity to ``error`` which silences most
125 output."""
126 LOG_LEVEL('error')
127
128 def DEBUG(x):
129 """Sets the logging verbosity to ``debug`` which displays much
130 more information, including logging each byte sent by tubes."""
131 LOG_LEVEL('debug')
132
133 def NOTERM(v):
134 """Disables pretty terminal settings and animations."""
135 if asbool(v):
136 global term_mode
137 term_mode = False
138
139 def TIMEOUT(v):
140 """Sets a timeout for tube operations (in seconds) via
141 ``context.timeout``, e.g. ``TIMEOUT=30``"""
142 context.defaults['timeout'] = int(v)
143
144 def RANDOMIZE(v):
145 """Enables randomization of various pieces via ``context.randomize``"""
146 context.defaults['randomize'] = asbool(v)
147
148 def NOASLR(v):
149 """Disables ASLR via ``context.aslr``"""
150 context.defaults['aslr'] = not asbool(v)
151
152 def NOPTRACE(v):
153 """Disables facilities which require ``ptrace`` such as ``gdb.attach()``
154 statements, via ``context.noptrace``."""
155 context.defaults['noptrace'] = asbool(v)
156
157 def STDERR(v):
158 """Sends logging to ``stderr`` by default, instead of ``stdout``"""
159 context.log_console = sys.stderr
160
161 hooks = {
162 'LOG_LEVEL': LOG_LEVEL,
163 'LOG_FILE': LOG_FILE,
164 'DEBUG': DEBUG,
165 'NOTERM': NOTERM,
166 'SILENT': SILENT,
167 'RANDOMIZE': RANDOMIZE,
168 'TIMEOUT': TIMEOUT,
169 'NOASLR': NOASLR,
170 'NOPTRACE': NOPTRACE,
171 'STDERR': STDERR,
172 }
173
174 def initialize():
175 global args, term_mode
176
177 # Hack for readthedocs.org
178 if 'READTHEDOCS' in os.environ:
179 os.environ['PWNLIB_NOTERM'] = '1'
180
181 for k, v in os.environ.items():
182 if not k.startswith(env_prefix):
183 continue
184 k = k[len(env_prefix):]
185
186 if k in hooks:
187 hooks[k](v)
188 elif isident(k):
189 args[k] = v
190
191 argv = sys.argv[:]
192 for arg in sys.argv[:]:
193 orig = arg
194 value = 'True'
195
196 if '=' in arg:
197 arg, value = arg.split('=')
198
199 if arg in hooks:
200 sys.argv.remove(orig)
201 hooks[arg](value)
202
203 elif free_form and isident(arg):
204 sys.argv.remove(orig)
205 args[arg] = value
206
207 if term_mode:
208 term.init()
209
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pwnlib/args.py b/pwnlib/args.py
--- a/pwnlib/args.py
+++ b/pwnlib/args.py
@@ -194,7 +194,7 @@
value = 'True'
if '=' in arg:
- arg, value = arg.split('=')
+ arg, value = arg.split('=', 1)
if arg in hooks:
sys.argv.remove(orig)
| {"golden_diff": "diff --git a/pwnlib/args.py b/pwnlib/args.py\n--- a/pwnlib/args.py\n+++ b/pwnlib/args.py\n@@ -194,7 +194,7 @@\n value = 'True'\n \n if '=' in arg:\n- arg, value = arg.split('=')\n+ arg, value = arg.split('=', 1)\n \n if arg in hooks:\n sys.argv.remove(orig)\n", "issue": "'from pwn import *' fails when one of the command line args is a Base64 string (or has more than one equal sign)\nThe argument parsing functionality invoked by `from pwn import *` causes an error when one of the command line arguments has more than one equal sign:\r\n`echo \"from pwn import *\\nprint('hello world')\" > test.py`\r\n`python test.py afdsa==`\r\n\r\n```\r\nTraceback (most recent call last):\r\n File \"test.py\", line 1, in <module>\r\n from pwn import *\r\n File \"/usr/local/lib/python2.7/site-packages/pwn/__init__.py\", line 6, in <module>\r\n pwnlib.args.initialize()\r\n File \"/usr/local/lib/python2.7/site-packages/pwnlib/args.py\", line 197, in initialize\r\n arg, value = arg.split('=')\r\nValueError: too many values to unpack\r\n```\r\n\r\nhttps://github.com/Gallopsled/pwntools/blob/dev/pwnlib/args.py#L197 probably just needs to be changed to `if arg.count(\"=\") == 1:`\n", "before_files": [{"content": "\n#!/usr/bin/env python2\n\"\"\"\nPwntools exposes several magic command-line arguments and environment\nvariables when operating in `from pwn import *` mode.\n\nThe arguments extracted from the command-line and removed from ``sys.argv``.\n\nArguments can be set by appending them to the command-line, or setting\nthem in the environment prefixed by ``PWNLIB_``.\n\nThe easiest example is to enable more verbose debugging. Just set ``DEBUG``.\n\n.. code-block:: bash\n\n $ PWNLIB_DEBUG=1 python exploit.py\n $ python exploit.py DEBUG\n\nThese arguments are automatically extracted, regardless of their name, and\nexposed via :mod:`pwnlib.args.args`, which is exposed as the global variable\n:data:`args`. Arguments which ``pwntools`` reserves internally are not exposed\nthis way.\n\n.. code-block:: bash\n\n $ python -c 'from pwn import *; print args' A=1 B=Hello HOST=1.2.3.4 DEBUG\n defaultdict(<type 'str'>, {'A': '1', 'HOST': '1.2.3.4', 'B': 'Hello'})\n\nThis is very useful for conditional code, for example determining whether to\nrun an exploit locally or to connect to a remote server. Arguments which are\nnot specified evaluate to an empty string.\n\n.. code-block:: python\n\n if args['REMOTE']:\n io = remote('exploitme.com', 4141)\n else:\n io = process('./pwnable')\n\nArguments can also be accessed directly with the dot operator, e.g.:\n\n.. code-block:: python\n\n if args.REMOTE:\n ...\n\nAny undefined arguments evaluate to an empty string, ``''``.\n\nThe full list of supported \"magic arguments\" and their effects are listed\nbelow.\n\n\"\"\"\nfrom __future__ import absolute_import\n\nimport collections\nimport logging\nimport os\nimport string\nimport sys\n\nfrom pwnlib import term\nfrom pwnlib.context import context\n\nclass PwnlibArgs(collections.defaultdict):\n def __getattr__(self, attr):\n return self[attr]\n\nargs = PwnlibArgs(str)\nterm_mode = True\nenv_prefix = 'PWNLIB_'\nfree_form = True\n\n# Check to see if we were invoked as one of the 'pwn xxx' scripts.\n# If so, we don't want to remove e.g. 
\"SYS_\" from the end of the command\n# line, as this breaks things like constgrep.\nimport pwnlib.commandline\nbasename = os.path.basename(sys.argv[0])\n\nif basename == 'pwn' or basename in pwnlib.commandline.__all__:\n free_form = False\n\n\ndef isident(s):\n \"\"\"\n Helper function to check whether a string is a valid identifier,\n as passed in on the command-line.\n \"\"\"\n first = string.uppercase + '_'\n body = string.digits + first\n if not s:\n return False\n if s[0] not in first:\n return False\n if not all(c in body for c in s[1:]):\n return False\n return True\n\ndef asbool(s):\n \"\"\"\n Convert a string to its boolean value\n \"\"\"\n if s.lower() == 'true':\n return True\n elif s.lower() == 'false':\n return False\n elif s.isdigit():\n return bool(int(s))\n else:\n raise ValueError('must be integer or boolean: %r' % s)\n\ndef LOG_LEVEL(x):\n \"\"\"Sets the logging verbosity used via ``context.log_level``,\n e.g. ``LOG_LEVEL=debug``.\n \"\"\"\n with context.local(log_level=x):\n context.defaults['log_level']=context.log_level\n\ndef LOG_FILE(x):\n \"\"\"Sets a log file to be used via ``context.log_file``, e.g.\n ``LOG_FILE=./log.txt``\"\"\"\n context.log_file=x\n\ndef SILENT(x):\n \"\"\"Sets the logging verbosity to ``error`` which silences most\n output.\"\"\"\n LOG_LEVEL('error')\n\ndef DEBUG(x):\n \"\"\"Sets the logging verbosity to ``debug`` which displays much\n more information, including logging each byte sent by tubes.\"\"\"\n LOG_LEVEL('debug')\n\ndef NOTERM(v):\n \"\"\"Disables pretty terminal settings and animations.\"\"\"\n if asbool(v):\n global term_mode\n term_mode = False\n\ndef TIMEOUT(v):\n \"\"\"Sets a timeout for tube operations (in seconds) via\n ``context.timeout``, e.g. ``TIMEOUT=30``\"\"\"\n context.defaults['timeout'] = int(v)\n\ndef RANDOMIZE(v):\n \"\"\"Enables randomization of various pieces via ``context.randomize``\"\"\"\n context.defaults['randomize'] = asbool(v)\n\ndef NOASLR(v):\n \"\"\"Disables ASLR via ``context.aslr``\"\"\"\n context.defaults['aslr'] = not asbool(v)\n\ndef NOPTRACE(v):\n \"\"\"Disables facilities which require ``ptrace`` such as ``gdb.attach()``\n statements, via ``context.noptrace``.\"\"\"\n context.defaults['noptrace'] = asbool(v)\n\ndef STDERR(v):\n \"\"\"Sends logging to ``stderr`` by default, instead of ``stdout``\"\"\"\n context.log_console = sys.stderr\n\nhooks = {\n 'LOG_LEVEL': LOG_LEVEL,\n 'LOG_FILE': LOG_FILE,\n 'DEBUG': DEBUG,\n 'NOTERM': NOTERM,\n 'SILENT': SILENT,\n 'RANDOMIZE': RANDOMIZE,\n 'TIMEOUT': TIMEOUT,\n 'NOASLR': NOASLR,\n 'NOPTRACE': NOPTRACE,\n 'STDERR': STDERR,\n}\n\ndef initialize():\n global args, term_mode\n\n # Hack for readthedocs.org\n if 'READTHEDOCS' in os.environ:\n os.environ['PWNLIB_NOTERM'] = '1'\n\n for k, v in os.environ.items():\n if not k.startswith(env_prefix):\n continue\n k = k[len(env_prefix):]\n\n if k in hooks:\n hooks[k](v)\n elif isident(k):\n args[k] = v\n\n argv = sys.argv[:]\n for arg in sys.argv[:]:\n orig = arg\n value = 'True'\n\n if '=' in arg:\n arg, value = arg.split('=')\n\n if arg in hooks:\n sys.argv.remove(orig)\n hooks[arg](value)\n\n elif free_form and isident(arg):\n sys.argv.remove(orig)\n args[arg] = value\n\n if term_mode:\n term.init()\n", "path": "pwnlib/args.py"}], "after_files": [{"content": "\n#!/usr/bin/env python2\n\"\"\"\nPwntools exposes several magic command-line arguments and environment\nvariables when operating in `from pwn import *` mode.\n\nThe arguments extracted from the command-line and removed from ``sys.argv``.\n\nArguments can be 
set by appending them to the command-line, or setting\nthem in the environment prefixed by ``PWNLIB_``.\n\nThe easiest example is to enable more verbose debugging. Just set ``DEBUG``.\n\n.. code-block:: bash\n\n $ PWNLIB_DEBUG=1 python exploit.py\n $ python exploit.py DEBUG\n\nThese arguments are automatically extracted, regardless of their name, and\nexposed via :mod:`pwnlib.args.args`, which is exposed as the global variable\n:data:`args`. Arguments which ``pwntools`` reserves internally are not exposed\nthis way.\n\n.. code-block:: bash\n\n $ python -c 'from pwn import *; print args' A=1 B=Hello HOST=1.2.3.4 DEBUG\n defaultdict(<type 'str'>, {'A': '1', 'HOST': '1.2.3.4', 'B': 'Hello'})\n\nThis is very useful for conditional code, for example determining whether to\nrun an exploit locally or to connect to a remote server. Arguments which are\nnot specified evaluate to an empty string.\n\n.. code-block:: python\n\n if args['REMOTE']:\n io = remote('exploitme.com', 4141)\n else:\n io = process('./pwnable')\n\nArguments can also be accessed directly with the dot operator, e.g.:\n\n.. code-block:: python\n\n if args.REMOTE:\n ...\n\nAny undefined arguments evaluate to an empty string, ``''``.\n\nThe full list of supported \"magic arguments\" and their effects are listed\nbelow.\n\n\"\"\"\nfrom __future__ import absolute_import\n\nimport collections\nimport logging\nimport os\nimport string\nimport sys\n\nfrom pwnlib import term\nfrom pwnlib.context import context\n\nclass PwnlibArgs(collections.defaultdict):\n def __getattr__(self, attr):\n return self[attr]\n\nargs = PwnlibArgs(str)\nterm_mode = True\nenv_prefix = 'PWNLIB_'\nfree_form = True\n\n# Check to see if we were invoked as one of the 'pwn xxx' scripts.\n# If so, we don't want to remove e.g. \"SYS_\" from the end of the command\n# line, as this breaks things like constgrep.\nimport pwnlib.commandline\nbasename = os.path.basename(sys.argv[0])\n\nif basename == 'pwn' or basename in pwnlib.commandline.__all__:\n free_form = False\n\n\ndef isident(s):\n \"\"\"\n Helper function to check whether a string is a valid identifier,\n as passed in on the command-line.\n \"\"\"\n first = string.uppercase + '_'\n body = string.digits + first\n if not s:\n return False\n if s[0] not in first:\n return False\n if not all(c in body for c in s[1:]):\n return False\n return True\n\ndef asbool(s):\n \"\"\"\n Convert a string to its boolean value\n \"\"\"\n if s.lower() == 'true':\n return True\n elif s.lower() == 'false':\n return False\n elif s.isdigit():\n return bool(int(s))\n else:\n raise ValueError('must be integer or boolean: %r' % s)\n\ndef LOG_LEVEL(x):\n \"\"\"Sets the logging verbosity used via ``context.log_level``,\n e.g. ``LOG_LEVEL=debug``.\n \"\"\"\n with context.local(log_level=x):\n context.defaults['log_level']=context.log_level\n\ndef LOG_FILE(x):\n \"\"\"Sets a log file to be used via ``context.log_file``, e.g.\n ``LOG_FILE=./log.txt``\"\"\"\n context.log_file=x\n\ndef SILENT(x):\n \"\"\"Sets the logging verbosity to ``error`` which silences most\n output.\"\"\"\n LOG_LEVEL('error')\n\ndef DEBUG(x):\n \"\"\"Sets the logging verbosity to ``debug`` which displays much\n more information, including logging each byte sent by tubes.\"\"\"\n LOG_LEVEL('debug')\n\ndef NOTERM(v):\n \"\"\"Disables pretty terminal settings and animations.\"\"\"\n if asbool(v):\n global term_mode\n term_mode = False\n\ndef TIMEOUT(v):\n \"\"\"Sets a timeout for tube operations (in seconds) via\n ``context.timeout``, e.g. 
``TIMEOUT=30``\"\"\"\n context.defaults['timeout'] = int(v)\n\ndef RANDOMIZE(v):\n \"\"\"Enables randomization of various pieces via ``context.randomize``\"\"\"\n context.defaults['randomize'] = asbool(v)\n\ndef NOASLR(v):\n \"\"\"Disables ASLR via ``context.aslr``\"\"\"\n context.defaults['aslr'] = not asbool(v)\n\ndef NOPTRACE(v):\n \"\"\"Disables facilities which require ``ptrace`` such as ``gdb.attach()``\n statements, via ``context.noptrace``.\"\"\"\n context.defaults['noptrace'] = asbool(v)\n\ndef STDERR(v):\n \"\"\"Sends logging to ``stderr`` by default, instead of ``stdout``\"\"\"\n context.log_console = sys.stderr\n\nhooks = {\n 'LOG_LEVEL': LOG_LEVEL,\n 'LOG_FILE': LOG_FILE,\n 'DEBUG': DEBUG,\n 'NOTERM': NOTERM,\n 'SILENT': SILENT,\n 'RANDOMIZE': RANDOMIZE,\n 'TIMEOUT': TIMEOUT,\n 'NOASLR': NOASLR,\n 'NOPTRACE': NOPTRACE,\n 'STDERR': STDERR,\n}\n\ndef initialize():\n global args, term_mode\n\n # Hack for readthedocs.org\n if 'READTHEDOCS' in os.environ:\n os.environ['PWNLIB_NOTERM'] = '1'\n\n for k, v in os.environ.items():\n if not k.startswith(env_prefix):\n continue\n k = k[len(env_prefix):]\n\n if k in hooks:\n hooks[k](v)\n elif isident(k):\n args[k] = v\n\n argv = sys.argv[:]\n for arg in sys.argv[:]:\n orig = arg\n value = 'True'\n\n if '=' in arg:\n arg, value = arg.split('=', 1)\n\n if arg in hooks:\n sys.argv.remove(orig)\n hooks[arg](value)\n\n elif free_form and isident(arg):\n sys.argv.remove(orig)\n args[arg] = value\n\n if term_mode:\n term.init()\n", "path": "pwnlib/args.py"}]} | 2,455 | 99 |
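
The golden diff in this row is a single `maxsplit` argument. A standalone sketch of why it matters; the `TOKEN=afdsa==` string is a made-up example (the issue's reproducer passed a bare `afdsa==`):

```python
# A value containing '=' (e.g. base64 padding) breaks the unbounded split.
arg = "TOKEN=afdsa=="

try:
    key, value = arg.split("=")      # old code in pwnlib/args.py
except ValueError as err:
    print("old behaviour:", err)      # too many values to unpack

key, value = arg.split("=", 1)        # patched code: split only on the first '='
assert (key, value) == ("TOKEN", "afdsa==")
```
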
gh_patches_debug_905 | rasdani/github-patches | git_diff | googleapis__google-cloud-python-6262 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Redis: regen README.rst (DO NOT MERGE)
This PR was generated using Autosynth. :rainbow:
Here's the log from Synthtool:
```
synthtool > Cloning googleapis.
synthtool > Running generator for google/cloud/redis/artman_redis_v1beta1.yaml.
synthtool > Ensuring dependencies.
synthtool > Pulling artman image.
synthtool > Generated code into /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/python/redis-v1beta1.
synthtool > Running generator for google/cloud/redis/artman_redis_v1.yaml.
synthtool > Ensuring dependencies.
synthtool > Pulling artman image.
synthtool > Generated code into /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/python/redis-v1.
synthtool > Replaced 'resources of the form:\\n ``' in google/cloud/redis_v1/gapic/cloud_redis_client.py.
synthtool > Replaced 'resources of the form:\\n ``' in google/cloud/redis_v1beta1/gapic/cloud_redis_client.py.
synthtool > Replaced '\n parent \\(str\\): Required. The resource name of the instance location using the form:\n ::\n\n `projects/{project_id}/locations/{location_id}`\n where ``location_id`` refers to a GCP region' in google/cloud/redis_v1/gapic/cloud_redis_client.py.
synthtool > Replaced '\n parent \\(str\\): Required. The resource name of the instance location using the form:\n ::\n\n `projects/{project_id}/locations/{location_id}`\n where ``location_id`` refers to a GCP region' in google/cloud/redis_v1beta1/gapic/cloud_redis_client.py.
synthtool > Replaced '\n with the following restrictions:\n\n \\* Must contain only lowercase letters, numbers, and hyphens\\.' in google/cloud/redis_v1/gapic/cloud_redis_client.py.
synthtool > Replaced '\n with the following restrictions:\n\n \\* Must contain only lowercase letters, numbers, and hyphens\\.' in google/cloud/redis_v1beta1/gapic/cloud_redis_client.py.
synthtool > Replaced '\n name \\(str\\): Required. Redis instance resource name using the form:\n ::\n\n `projects/{project_id}/locations/{location_id}/instances/{instance_id}`\n where ``location_id`` refers to a GCP region' in google/cloud/redis_v1/gapic/cloud_redis_client.py.
synthtool > Replaced '\n name \\(str\\): Required. Redis instance resource name using the form:\n ::\n\n `projects/{project_id}/locations/{location_id}/instances/{instance_id}`\n where ``location_id`` refers to a GCP region' in google/cloud/redis_v1beta1/gapic/cloud_redis_client.py.
synthtool > Replaced '\n fields from ``Instance``:\n\n \\* ``displayName``\n \\* ``labels``\n \\* ``memorySizeGb``\n \\* ``redisConfig``' in google/cloud/redis_v1/gapic/cloud_redis_client.py.
synthtool > Replaced '(release_status = )(.*)$' in setup.py.
synthtool > Replaced '.. _Enable the Google Cloud Memorystore for Redis API.: https://cloud.google.com/redis' in README.rst.
synthtool > Replaced 'https://cloud.google.com/redis' in README.rst.
synthtool > Replaced 'https://googlecloudplatform.github.io/google-cloud-python/stable/redis/usage.html' in README.rst.
synthtool > Replaced 'https://googlecloudplatform.github.io/google-cloud-python/stable/core/auth.html' in README.rst.
synthtool > Cleaned up 1 temporary directories.
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `redis/synth.py`
Content:
```
1 # Copyright 2018 Google LLC
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """This script is used to synthesize generated parts of this library."""
16
17 import synthtool as s
18 import synthtool.gcp as gcp
19 import logging
20
21 logging.basicConfig(level=logging.DEBUG)
22
23 gapic = gcp.GAPICGenerator()
24 common = gcp.CommonTemplates()
25 excludes = [
26 'setup.py',
27 'nox.py',
28 'docs/conf.py',
29 'docs/index.rst',
30 ]
31
32 for version in ['v1beta1', 'v1']:
33 library = gapic.py_library(
34 'redis', version,
35 config_path=f'artman_redis_{version}.yaml')
36
37 s.copy(library, excludes=excludes)
38
39
40 # Fix docstrings
41 s.replace(
42 'google/cloud/**/cloud_redis_client.py',
43 r'resources of the form:\n ``',
44 r'resources of the form:\n\n ``',)
45
46 s.replace(
47 'google/cloud/**/cloud_redis_client.py',
48 r"""
49 parent \(str\): Required. The resource name of the instance location using the form:
50 ::
51
52 `projects/{project_id}/locations/{location_id}`
53 where ``location_id`` refers to a GCP region""",
54
55 r"""
56 parent (str): Required. The resource name of the instance location using the form ``projects/{project_id}/locations/{location_id}``
57 where ``location_id`` refers to a GCP region""",)
58
59
60 s.replace(
61 'google/cloud/**/cloud_redis_client.py',
62 r"""
63 with the following restrictions:
64
65 \* Must contain only lowercase letters, numbers, and hyphens\.""",
66 r"""
67 with the following restrictions:
68 * Must contain only lowercase letters, numbers, and hyphens.""")
69
70 s.replace(
71 'google/cloud/**/cloud_redis_client.py',
72 r"""
73 name \(str\): Required. Redis instance resource name using the form:
74 ::
75
76 `projects/{project_id}/locations/{location_id}/instances/{instance_id}`
77 where ``location_id`` refers to a GCP region""",
78 r"""
79 name (str): Required. Redis instance resource name using the form ``projects/{project_id}/locations/{location_id}/instances/{instance_id}```
80 where ``location_id`` refers to a GCP region""")
81
82 s.replace(
83 'google/cloud/**/cloud_redis_client.py',
84 r"""
85 fields from ``Instance``:
86
87 \* ``displayName``
88 \* ``labels``
89 \* ``memorySizeGb``
90 \* ``redisConfig``""",
91
92 r"""
93 fields from ``Instance``: ``displayName``, ``labels``, ``memorySizeGb``, and ``redisConfig``.""",)
94
95 # Set Release Status
96 release_status = 'Development Status :: 3 - Alpha'
97 s.replace('setup.py',
98 '(release_status = )(.*)$',
99 f"\\1'{release_status}'")
100
101 # Fix the enable API link
102 s.replace(
103 'README.rst',
104 r'.. _Enable the Google Cloud Memorystore for Redis API.: https://cloud.google.com/redis',
105 '.. _Enable the Google Cloud Memorystore for Redis API.: https://console.cloud.google.com/apis/'
106 'library/redis.googleapis.com')
107
108 # Fix link to product page
109 s.replace(
110 'README.rst',
111 r'https://cloud.google.com/redis',
112 'https://cloud.google.com/memorystore/')
113
114 # Fix link to Client Library Documentation
115 s.replace(
116 'README.rst',
117 r'https://googlecloudplatform.github.io/google-cloud-python/stable/redis/usage.html',
118 'https://googlecloudplatform.github.io/google-cloud-python/latest/redis/index.html')
119
120 # Fix link to Auth instructions
121 s.replace(
122 'README.rst',
123 r'https://googlecloudplatform.github.io/google-cloud-python/stable/core/auth.html',
124 'https://googlecloudplatform.github.io/google-cloud-python/latest/core/auth.html')
125
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/redis/synth.py b/redis/synth.py
--- a/redis/synth.py
+++ b/redis/synth.py
@@ -23,8 +23,9 @@
gapic = gcp.GAPICGenerator()
common = gcp.CommonTemplates()
excludes = [
+ 'README.rst',
'setup.py',
- 'nox.py',
+ 'nox*.py',
'docs/conf.py',
'docs/index.rst',
]
| {"golden_diff": "diff --git a/redis/synth.py b/redis/synth.py\n--- a/redis/synth.py\n+++ b/redis/synth.py\n@@ -23,8 +23,9 @@\n gapic = gcp.GAPICGenerator()\n common = gcp.CommonTemplates()\n excludes = [\n+ 'README.rst',\n 'setup.py',\n- 'nox.py',\n+ 'nox*.py',\n 'docs/conf.py',\n 'docs/index.rst',\n ]\n", "issue": "Redis: regen README.rst (DO NOT MERGE)\nThis PR was generated using Autosynth. :rainbow:\n\nHere's the log from Synthtool:\n```\nsynthtool > Cloning googleapis.\nsynthtool > Running generator for google/cloud/redis/artman_redis_v1beta1.yaml.\nsynthtool > Ensuring dependencies.\nsynthtool > Pulling artman image.\nsynthtool > Generated code into /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/python/redis-v1beta1.\nsynthtool > Running generator for google/cloud/redis/artman_redis_v1.yaml.\nsynthtool > Ensuring dependencies.\nsynthtool > Pulling artman image.\nsynthtool > Generated code into /home/kbuilder/.cache/synthtool/googleapis/artman-genfiles/python/redis-v1.\nsynthtool > Replaced 'resources of the form:\\\\n ``' in google/cloud/redis_v1/gapic/cloud_redis_client.py.\nsynthtool > Replaced 'resources of the form:\\\\n ``' in google/cloud/redis_v1beta1/gapic/cloud_redis_client.py.\nsynthtool > Replaced '\\n parent \\\\(str\\\\): Required. The resource name of the instance location using the form:\\n ::\\n\\n `projects/{project_id}/locations/{location_id}`\\n where ``location_id`` refers to a GCP region' in google/cloud/redis_v1/gapic/cloud_redis_client.py.\nsynthtool > Replaced '\\n parent \\\\(str\\\\): Required. The resource name of the instance location using the form:\\n ::\\n\\n `projects/{project_id}/locations/{location_id}`\\n where ``location_id`` refers to a GCP region' in google/cloud/redis_v1beta1/gapic/cloud_redis_client.py.\nsynthtool > Replaced '\\n with the following restrictions:\\n\\n \\\\* Must contain only lowercase letters, numbers, and hyphens\\\\.' in google/cloud/redis_v1/gapic/cloud_redis_client.py.\nsynthtool > Replaced '\\n with the following restrictions:\\n\\n \\\\* Must contain only lowercase letters, numbers, and hyphens\\\\.' in google/cloud/redis_v1beta1/gapic/cloud_redis_client.py.\nsynthtool > Replaced '\\n name \\\\(str\\\\): Required. Redis instance resource name using the form:\\n ::\\n\\n `projects/{project_id}/locations/{location_id}/instances/{instance_id}`\\n where ``location_id`` refers to a GCP region' in google/cloud/redis_v1/gapic/cloud_redis_client.py.\nsynthtool > Replaced '\\n name \\\\(str\\\\): Required. Redis instance resource name using the form:\\n ::\\n\\n `projects/{project_id}/locations/{location_id}/instances/{instance_id}`\\n where ``location_id`` refers to a GCP region' in google/cloud/redis_v1beta1/gapic/cloud_redis_client.py.\nsynthtool > Replaced '\\n fields from ``Instance``:\\n\\n \\\\* ``displayName``\\n \\\\* ``labels``\\n \\\\* ``memorySizeGb``\\n \\\\* ``redisConfig``' in google/cloud/redis_v1/gapic/cloud_redis_client.py.\nsynthtool > Replaced '(release_status = )(.*)$' in setup.py.\nsynthtool > Replaced '.. 
_Enable the Google Cloud Memorystore for Redis API.: https://cloud.google.com/redis' in README.rst.\nsynthtool > Replaced 'https://cloud.google.com/redis' in README.rst.\nsynthtool > Replaced 'https://googlecloudplatform.github.io/google-cloud-python/stable/redis/usage.html' in README.rst.\nsynthtool > Replaced 'https://googlecloudplatform.github.io/google-cloud-python/stable/core/auth.html' in README.rst.\nsynthtool > Cleaned up 1 temporary directories.\n\n```\n", "before_files": [{"content": "# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"This script is used to synthesize generated parts of this library.\"\"\"\n\nimport synthtool as s\nimport synthtool.gcp as gcp\nimport logging\n\nlogging.basicConfig(level=logging.DEBUG)\n\ngapic = gcp.GAPICGenerator()\ncommon = gcp.CommonTemplates()\nexcludes = [\n 'setup.py',\n 'nox.py',\n 'docs/conf.py',\n 'docs/index.rst',\n]\n\nfor version in ['v1beta1', 'v1']:\n library = gapic.py_library(\n 'redis', version,\n config_path=f'artman_redis_{version}.yaml')\n\n s.copy(library, excludes=excludes)\n\n\n# Fix docstrings\ns.replace(\n 'google/cloud/**/cloud_redis_client.py',\n r'resources of the form:\\n ``',\n r'resources of the form:\\n\\n ``',)\n\ns.replace(\n 'google/cloud/**/cloud_redis_client.py',\n r\"\"\"\n parent \\(str\\): Required. The resource name of the instance location using the form:\n ::\n\n `projects/{project_id}/locations/{location_id}`\n where ``location_id`` refers to a GCP region\"\"\",\n\n r\"\"\"\n parent (str): Required. The resource name of the instance location using the form ``projects/{project_id}/locations/{location_id}``\n where ``location_id`` refers to a GCP region\"\"\",)\n\n\ns.replace(\n 'google/cloud/**/cloud_redis_client.py',\n r\"\"\"\n with the following restrictions:\n\n \\* Must contain only lowercase letters, numbers, and hyphens\\.\"\"\",\n r\"\"\"\n with the following restrictions:\n * Must contain only lowercase letters, numbers, and hyphens.\"\"\")\n\ns.replace(\n 'google/cloud/**/cloud_redis_client.py',\n r\"\"\"\n name \\(str\\): Required. Redis instance resource name using the form:\n ::\n\n `projects/{project_id}/locations/{location_id}/instances/{instance_id}`\n where ``location_id`` refers to a GCP region\"\"\",\n r\"\"\"\n name (str): Required. Redis instance resource name using the form ``projects/{project_id}/locations/{location_id}/instances/{instance_id}```\n where ``location_id`` refers to a GCP region\"\"\")\n\ns.replace(\n 'google/cloud/**/cloud_redis_client.py',\n r\"\"\"\n fields from ``Instance``:\n\n \\* ``displayName``\n \\* ``labels``\n \\* ``memorySizeGb``\n \\* ``redisConfig``\"\"\",\n\n r\"\"\"\n fields from ``Instance``: ``displayName``, ``labels``, ``memorySizeGb``, and ``redisConfig``.\"\"\",)\n\n# Set Release Status\nrelease_status = 'Development Status :: 3 - Alpha'\ns.replace('setup.py',\n '(release_status = )(.*)$',\n f\"\\\\1'{release_status}'\")\n\n# Fix the enable API link\ns.replace(\n 'README.rst',\n r'.. 
_Enable the Google Cloud Memorystore for Redis API.: https://cloud.google.com/redis',\n '.. _Enable the Google Cloud Memorystore for Redis API.: https://console.cloud.google.com/apis/'\n 'library/redis.googleapis.com')\n\n# Fix link to product page\ns.replace(\n 'README.rst',\n r'https://cloud.google.com/redis',\n 'https://cloud.google.com/memorystore/')\n\n# Fix link to Client Library Documentation\ns.replace(\n 'README.rst',\n r'https://googlecloudplatform.github.io/google-cloud-python/stable/redis/usage.html',\n 'https://googlecloudplatform.github.io/google-cloud-python/latest/redis/index.html')\n\n# Fix link to Auth instructions\ns.replace(\n 'README.rst',\n r'https://googlecloudplatform.github.io/google-cloud-python/stable/core/auth.html',\n 'https://googlecloudplatform.github.io/google-cloud-python/latest/core/auth.html')\n", "path": "redis/synth.py"}], "after_files": [{"content": "# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"This script is used to synthesize generated parts of this library.\"\"\"\n\nimport synthtool as s\nimport synthtool.gcp as gcp\nimport logging\n\nlogging.basicConfig(level=logging.DEBUG)\n\ngapic = gcp.GAPICGenerator()\ncommon = gcp.CommonTemplates()\nexcludes = [\n 'README.rst',\n 'setup.py',\n 'nox*.py',\n 'docs/conf.py',\n 'docs/index.rst',\n]\n\nfor version in ['v1beta1', 'v1']:\n library = gapic.py_library(\n 'redis', version,\n config_path=f'artman_redis_{version}.yaml')\n\n s.copy(library, excludes=excludes)\n\n\n# Fix docstrings\ns.replace(\n 'google/cloud/**/cloud_redis_client.py',\n r'resources of the form:\\n ``',\n r'resources of the form:\\n\\n ``',)\n\ns.replace(\n 'google/cloud/**/cloud_redis_client.py',\n r\"\"\"\n parent \\(str\\): Required. The resource name of the instance location using the form:\n ::\n\n `projects/{project_id}/locations/{location_id}`\n where ``location_id`` refers to a GCP region\"\"\",\n\n r\"\"\"\n parent (str): Required. The resource name of the instance location using the form ``projects/{project_id}/locations/{location_id}``\n where ``location_id`` refers to a GCP region\"\"\",)\n\n\ns.replace(\n 'google/cloud/**/cloud_redis_client.py',\n r\"\"\"\n with the following restrictions:\n\n \\* Must contain only lowercase letters, numbers, and hyphens\\.\"\"\",\n r\"\"\"\n with the following restrictions:\n * Must contain only lowercase letters, numbers, and hyphens.\"\"\")\n\ns.replace(\n 'google/cloud/**/cloud_redis_client.py',\n r\"\"\"\n name \\(str\\): Required. Redis instance resource name using the form:\n ::\n\n `projects/{project_id}/locations/{location_id}/instances/{instance_id}`\n where ``location_id`` refers to a GCP region\"\"\",\n r\"\"\"\n name (str): Required. 
Redis instance resource name using the form ``projects/{project_id}/locations/{location_id}/instances/{instance_id}```\n where ``location_id`` refers to a GCP region\"\"\")\n\ns.replace(\n 'google/cloud/**/cloud_redis_client.py',\n r\"\"\"\n fields from ``Instance``:\n\n \\* ``displayName``\n \\* ``labels``\n \\* ``memorySizeGb``\n \\* ``redisConfig``\"\"\",\n\n r\"\"\"\n fields from ``Instance``: ``displayName``, ``labels``, ``memorySizeGb``, and ``redisConfig``.\"\"\",)\n\n# Set Release Status\nrelease_status = 'Development Status :: 3 - Alpha'\ns.replace('setup.py',\n '(release_status = )(.*)$',\n f\"\\\\1'{release_status}'\")\n\n# Fix the enable API link\ns.replace(\n 'README.rst',\n r'.. _Enable the Google Cloud Memorystore for Redis API.: https://cloud.google.com/redis',\n '.. _Enable the Google Cloud Memorystore for Redis API.: https://console.cloud.google.com/apis/'\n 'library/redis.googleapis.com')\n\n# Fix link to product page\ns.replace(\n 'README.rst',\n r'https://cloud.google.com/redis',\n 'https://cloud.google.com/memorystore/')\n\n# Fix link to Client Library Documentation\ns.replace(\n 'README.rst',\n r'https://googlecloudplatform.github.io/google-cloud-python/stable/redis/usage.html',\n 'https://googlecloudplatform.github.io/google-cloud-python/latest/redis/index.html')\n\n# Fix link to Auth instructions\ns.replace(\n 'README.rst',\n r'https://googlecloudplatform.github.io/google-cloud-python/stable/core/auth.html',\n 'https://googlecloudplatform.github.io/google-cloud-python/latest/core/auth.html')\n", "path": "redis/synth.py"}]} | 2,349 | 103 |
gh_patches_debug_36007 | rasdani/github-patches | git_diff | arviz-devs__arviz-414 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
arviz-darkgrid not working in example from quickstart
This command is not being recognized.
I installed the program from github on Mac OS 10.13 using macports Python3.5
az.style.use('arviz-darkgrid')
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 import codecs
2 import shutil
3 import os
4 import re
5
6 import setuptools
7 from setuptools import setup, find_packages
8 from setuptools.command.install import install
9 from setuptools.command.develop import develop
10
11
12 PROJECT_ROOT = os.path.dirname(os.path.realpath(__file__))
13 REQUIREMENTS_FILE = os.path.join(PROJECT_ROOT, 'requirements.txt')
14 README_FILE = os.path.join(PROJECT_ROOT, 'README.md')
15 VERSION_FILE = os.path.join(PROJECT_ROOT, 'arviz', '__init__.py')
16
17
18 # Ensure matplotlib dependencies are available to copy
19 # styles over
20 setuptools.dist.Distribution().fetch_build_eggs(['matplotlib>=3.0'])
21
22 def get_requirements():
23 with codecs.open(REQUIREMENTS_FILE) as buff:
24 return buff.read().splitlines()
25
26
27 def get_long_description():
28 with codecs.open(README_FILE, 'rt') as buff:
29 return buff.read()
30
31
32 def copy_styles():
33 from matplotlib import get_configdir
34 sd = os.path.join(get_configdir(), "stylelib")
35 source = os.path.dirname(os.path.realpath(__file__))
36 lsd = os.path.join(source, 'arviz', 'plots', 'styles')
37 styles = [f for f in os.listdir(lsd)]
38 if not os.path.isdir(sd):
39 os.makedirs(sd)
40 for s in styles:
41 shutil.copy(os.path.join(lsd, s), os.path.join(sd, s))
42
43
44 class DevelopStyles(develop):
45 def run(self):
46 copy_styles()
47 develop.run(self)
48
49 class InstallStyles(install):
50 def run(self):
51 copy_styles()
52 install.run(self)
53
54 def get_version():
55 lines = open(VERSION_FILE, 'rt').readlines()
56 version_regex = r"^__version__ = ['\"]([^'\"]*)['\"]"
57 for line in lines:
58 mo = re.search(version_regex, line, re.M)
59 if mo:
60 return mo.group(1)
61 raise RuntimeError('Unable to find version in %s.' % (VERSION_FILE,))
62
63 setup(
64 name='arviz',
65 version=get_version(),
66 description='Exploratory analysis of Bayesian models',
67 author='ArviZ Developers',
68 url="http://github.com/arviz-devs/arviz",
69 packages=find_packages(),
70 install_requires=get_requirements(),
71 long_description=get_long_description(),
72 long_description_content_type='text/markdown',
73 include_package_data=True,
74 cmdclass={
75 'develop': DevelopStyles,
76 'install': InstallStyles,
77 },
78 )
79
```
Path: `arviz/__init__.py`
Content:
```
1 # pylint: disable=wildcard-import,invalid-name,wrong-import-position
2 """ArviZ is a library for exploratory analysis of Bayesian models."""
3 __version__ = "0.2.1"
4
5 import logging
6 from matplotlib.pyplot import style
7
8 # Configure logging before importing arviz internals
9 _log = logging.getLogger("arviz")
10
11 if not logging.root.handlers:
12 handler = logging.StreamHandler()
13 _log.setLevel(logging.INFO)
14 _log.addHandler(handler)
15
16 from .data import *
17 from .plots import *
18 from .stats import *
19
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/arviz/__init__.py b/arviz/__init__.py
--- a/arviz/__init__.py
+++ b/arviz/__init__.py
@@ -2,9 +2,15 @@
"""ArviZ is a library for exploratory analysis of Bayesian models."""
__version__ = "0.2.1"
+import os
import logging
from matplotlib.pyplot import style
+# add ArviZ's styles to matplotlib's styles
+arviz_style_path = os.path.join(os.path.dirname(__file__), "plots", "styles")
+style.core.USER_LIBRARY_PATHS.append(arviz_style_path)
+style.core.reload_library()
+
# Configure logging before importing arviz internals
_log = logging.getLogger("arviz")
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -1,5 +1,4 @@
import codecs
-import shutil
import os
import re
@@ -15,10 +14,6 @@
VERSION_FILE = os.path.join(PROJECT_ROOT, 'arviz', '__init__.py')
-# Ensure matplotlib dependencies are available to copy
-# styles over
-setuptools.dist.Distribution().fetch_build_eggs(['matplotlib>=3.0'])
-
def get_requirements():
with codecs.open(REQUIREMENTS_FILE) as buff:
return buff.read().splitlines()
@@ -28,29 +23,6 @@
with codecs.open(README_FILE, 'rt') as buff:
return buff.read()
-
-def copy_styles():
- from matplotlib import get_configdir
- sd = os.path.join(get_configdir(), "stylelib")
- source = os.path.dirname(os.path.realpath(__file__))
- lsd = os.path.join(source, 'arviz', 'plots', 'styles')
- styles = [f for f in os.listdir(lsd)]
- if not os.path.isdir(sd):
- os.makedirs(sd)
- for s in styles:
- shutil.copy(os.path.join(lsd, s), os.path.join(sd, s))
-
-
-class DevelopStyles(develop):
- def run(self):
- copy_styles()
- develop.run(self)
-
-class InstallStyles(install):
- def run(self):
- copy_styles()
- install.run(self)
-
def get_version():
lines = open(VERSION_FILE, 'rt').readlines()
version_regex = r"^__version__ = ['\"]([^'\"]*)['\"]"
@@ -71,8 +43,4 @@
long_description=get_long_description(),
long_description_content_type='text/markdown',
include_package_data=True,
- cmdclass={
- 'develop': DevelopStyles,
- 'install': InstallStyles,
- },
)
| {"golden_diff": "diff --git a/arviz/__init__.py b/arviz/__init__.py\n--- a/arviz/__init__.py\n+++ b/arviz/__init__.py\n@@ -2,9 +2,15 @@\n \"\"\"ArviZ is a library for exploratory analysis of Bayesian models.\"\"\"\n __version__ = \"0.2.1\"\n \n+import os\n import logging\n from matplotlib.pyplot import style\n \n+# add ArviZ's styles to matplotlib's styles\n+arviz_style_path = os.path.join(os.path.dirname(__file__), \"plots\", \"styles\")\n+style.core.USER_LIBRARY_PATHS.append(arviz_style_path)\n+style.core.reload_library()\n+\n # Configure logging before importing arviz internals\n _log = logging.getLogger(\"arviz\")\n \ndiff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -1,5 +1,4 @@\n import codecs\n-import shutil\n import os\n import re\n \n@@ -15,10 +14,6 @@\n VERSION_FILE = os.path.join(PROJECT_ROOT, 'arviz', '__init__.py')\n \n \n-# Ensure matplotlib dependencies are available to copy\n-# styles over\n-setuptools.dist.Distribution().fetch_build_eggs(['matplotlib>=3.0'])\n-\n def get_requirements():\n with codecs.open(REQUIREMENTS_FILE) as buff:\n return buff.read().splitlines()\n@@ -28,29 +23,6 @@\n with codecs.open(README_FILE, 'rt') as buff:\n return buff.read()\n \n-\n-def copy_styles():\n- from matplotlib import get_configdir\n- sd = os.path.join(get_configdir(), \"stylelib\")\n- source = os.path.dirname(os.path.realpath(__file__))\n- lsd = os.path.join(source, 'arviz', 'plots', 'styles')\n- styles = [f for f in os.listdir(lsd)]\n- if not os.path.isdir(sd):\n- os.makedirs(sd)\n- for s in styles:\n- shutil.copy(os.path.join(lsd, s), os.path.join(sd, s))\n-\n-\n-class DevelopStyles(develop):\n- def run(self):\n- copy_styles()\n- develop.run(self)\n-\n-class InstallStyles(install):\n- def run(self):\n- copy_styles()\n- install.run(self)\n-\n def get_version():\n lines = open(VERSION_FILE, 'rt').readlines()\n version_regex = r\"^__version__ = ['\\\"]([^'\\\"]*)['\\\"]\"\n@@ -71,8 +43,4 @@\n long_description=get_long_description(),\n long_description_content_type='text/markdown',\n include_package_data=True,\n- cmdclass={\n- 'develop': DevelopStyles,\n- 'install': InstallStyles,\n- },\n )\n", "issue": "arviz-darkgrid not working in example from quickstart\nThis command is not being recognized.\r\nI installed the program from github on Mac OS 10.13 using macports Python3.5 \r\n\r\naz.style.use('arviz-darkgrid')\n", "before_files": [{"content": "import codecs\nimport shutil\nimport os\nimport re\n\nimport setuptools\nfrom setuptools import setup, find_packages\nfrom setuptools.command.install import install\nfrom setuptools.command.develop import develop\n\n\nPROJECT_ROOT = os.path.dirname(os.path.realpath(__file__))\nREQUIREMENTS_FILE = os.path.join(PROJECT_ROOT, 'requirements.txt')\nREADME_FILE = os.path.join(PROJECT_ROOT, 'README.md')\nVERSION_FILE = os.path.join(PROJECT_ROOT, 'arviz', '__init__.py')\n\n\n# Ensure matplotlib dependencies are available to copy\n# styles over\nsetuptools.dist.Distribution().fetch_build_eggs(['matplotlib>=3.0'])\n\ndef get_requirements():\n with codecs.open(REQUIREMENTS_FILE) as buff:\n return buff.read().splitlines()\n\n\ndef get_long_description():\n with codecs.open(README_FILE, 'rt') as buff:\n return buff.read()\n\n\ndef copy_styles():\n from matplotlib import get_configdir\n sd = os.path.join(get_configdir(), \"stylelib\")\n source = os.path.dirname(os.path.realpath(__file__))\n lsd = os.path.join(source, 'arviz', 'plots', 'styles')\n styles = [f for f in os.listdir(lsd)]\n if not os.path.isdir(sd):\n os.makedirs(sd)\n for s in 
styles:\n shutil.copy(os.path.join(lsd, s), os.path.join(sd, s))\n\n\nclass DevelopStyles(develop):\n def run(self):\n copy_styles()\n develop.run(self)\n\nclass InstallStyles(install):\n def run(self):\n copy_styles()\n install.run(self)\n\ndef get_version():\n lines = open(VERSION_FILE, 'rt').readlines()\n version_regex = r\"^__version__ = ['\\\"]([^'\\\"]*)['\\\"]\"\n for line in lines:\n mo = re.search(version_regex, line, re.M)\n if mo:\n return mo.group(1)\n raise RuntimeError('Unable to find version in %s.' % (VERSION_FILE,))\n\nsetup(\n name='arviz',\n version=get_version(),\n description='Exploratory analysis of Bayesian models',\n author='ArviZ Developers',\n url=\"http://github.com/arviz-devs/arviz\",\n packages=find_packages(),\n install_requires=get_requirements(),\n long_description=get_long_description(),\n long_description_content_type='text/markdown',\n include_package_data=True,\n cmdclass={\n 'develop': DevelopStyles,\n 'install': InstallStyles,\n },\n)\n", "path": "setup.py"}, {"content": "# pylint: disable=wildcard-import,invalid-name,wrong-import-position\n\"\"\"ArviZ is a library for exploratory analysis of Bayesian models.\"\"\"\n__version__ = \"0.2.1\"\n\nimport logging\nfrom matplotlib.pyplot import style\n\n# Configure logging before importing arviz internals\n_log = logging.getLogger(\"arviz\")\n\nif not logging.root.handlers:\n handler = logging.StreamHandler()\n _log.setLevel(logging.INFO)\n _log.addHandler(handler)\n\nfrom .data import *\nfrom .plots import *\nfrom .stats import *\n", "path": "arviz/__init__.py"}], "after_files": [{"content": "import codecs\nimport os\nimport re\n\nimport setuptools\nfrom setuptools import setup, find_packages\nfrom setuptools.command.install import install\nfrom setuptools.command.develop import develop\n\n\nPROJECT_ROOT = os.path.dirname(os.path.realpath(__file__))\nREQUIREMENTS_FILE = os.path.join(PROJECT_ROOT, 'requirements.txt')\nREADME_FILE = os.path.join(PROJECT_ROOT, 'README.md')\nVERSION_FILE = os.path.join(PROJECT_ROOT, 'arviz', '__init__.py')\n\n\ndef get_requirements():\n with codecs.open(REQUIREMENTS_FILE) as buff:\n return buff.read().splitlines()\n\n\ndef get_long_description():\n with codecs.open(README_FILE, 'rt') as buff:\n return buff.read()\n\ndef get_version():\n lines = open(VERSION_FILE, 'rt').readlines()\n version_regex = r\"^__version__ = ['\\\"]([^'\\\"]*)['\\\"]\"\n for line in lines:\n mo = re.search(version_regex, line, re.M)\n if mo:\n return mo.group(1)\n raise RuntimeError('Unable to find version in %s.' 
% (VERSION_FILE,))\n\nsetup(\n name='arviz',\n version=get_version(),\n description='Exploratory analysis of Bayesian models',\n author='ArviZ Developers',\n url=\"http://github.com/arviz-devs/arviz\",\n packages=find_packages(),\n install_requires=get_requirements(),\n long_description=get_long_description(),\n long_description_content_type='text/markdown',\n include_package_data=True,\n)\n", "path": "setup.py"}, {"content": "# pylint: disable=wildcard-import,invalid-name,wrong-import-position\n\"\"\"ArviZ is a library for exploratory analysis of Bayesian models.\"\"\"\n__version__ = \"0.2.1\"\n\nimport os\nimport logging\nfrom matplotlib.pyplot import style\n\n# add ArviZ's styles to matplotlib's styles\narviz_style_path = os.path.join(os.path.dirname(__file__), \"plots\", \"styles\")\nstyle.core.USER_LIBRARY_PATHS.append(arviz_style_path)\nstyle.core.reload_library()\n\n# Configure logging before importing arviz internals\n_log = logging.getLogger(\"arviz\")\n\nif not logging.root.handlers:\n handler = logging.StreamHandler()\n _log.setLevel(logging.INFO)\n _log.addHandler(handler)\n\nfrom .data import *\nfrom .plots import *\nfrom .stats import *\n", "path": "arviz/__init__.py"}]} | 1,138 | 588 |
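The patch for this entry drops the install-time copying of `.mplstyle` files into the user's matplotlib configuration directory and instead registers the bundled styles when `arviz` is imported, by appending the package's `plots/styles` directory to `style.core.USER_LIBRARY_PATHS` and calling `style.core.reload_library()`. The behaviour the fix aims for can be sketched as follows (an illustrative check, assuming the patched `arviz/__init__.py` above):

```python
import matplotlib.pyplot as plt
import arviz as az  # import registers arviz's bundled styles with matplotlib

# After import, the bundled style names should be visible to matplotlib,
# so the quickstart call from the issue works without copying any files.
assert "arviz-darkgrid" in plt.style.available

az.style.use("arviz-darkgrid")  # az.style is matplotlib.pyplot.style re-exported
```

Registration at import time avoids depending on the `setup.py` install hooks ever running (a wheel-based install, for example, never executes a custom `install` cmdclass), which is presumably why the copy-at-install approach could leave `arviz-darkgrid` unregistered.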
gh_patches_debug_30651 | rasdani/github-patches | git_diff | googleapis__google-cloud-python-8481 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Firestore: sync shim module with 'firestore_v1' package.
See https://github.com/googleapis/google-cloud-python/issues/7533#issuecomment-495917295 &ff.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `firestore/google/cloud/firestore.py`
Content:
```
1 # Copyright 2017 Google LLC All rights reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """Python idiomatic client for Google Cloud Firestore."""
16
17
18 from google.cloud.firestore_v1 import __version__
19 from google.cloud.firestore_v1 import Client
20 from google.cloud.firestore_v1 import CollectionReference
21 from google.cloud.firestore_v1 import DELETE_FIELD
22 from google.cloud.firestore_v1 import DocumentReference
23 from google.cloud.firestore_v1 import DocumentSnapshot
24 from google.cloud.firestore_v1 import enums
25 from google.cloud.firestore_v1 import ExistsOption
26 from google.cloud.firestore_v1 import GeoPoint
27 from google.cloud.firestore_v1 import LastUpdateOption
28 from google.cloud.firestore_v1 import Query
29 from google.cloud.firestore_v1 import ReadAfterWriteError
30 from google.cloud.firestore_v1 import SERVER_TIMESTAMP
31 from google.cloud.firestore_v1 import Transaction
32 from google.cloud.firestore_v1 import transactional
33 from google.cloud.firestore_v1 import types
34 from google.cloud.firestore_v1 import Watch
35 from google.cloud.firestore_v1 import WriteBatch
36 from google.cloud.firestore_v1 import WriteOption
37
38
39 __all__ = [
40 "__version__",
41 "Client",
42 "CollectionReference",
43 "DELETE_FIELD",
44 "DocumentReference",
45 "DocumentSnapshot",
46 "enums",
47 "ExistsOption",
48 "GeoPoint",
49 "LastUpdateOption",
50 "Query",
51 "ReadAfterWriteError",
52 "SERVER_TIMESTAMP",
53 "Transaction",
54 "transactional",
55 "types",
56 "Watch",
57 "WriteBatch",
58 "WriteOption",
59 ]
60
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/firestore/google/cloud/firestore.py b/firestore/google/cloud/firestore.py
--- a/firestore/google/cloud/firestore.py
+++ b/firestore/google/cloud/firestore.py
@@ -16,6 +16,8 @@
from google.cloud.firestore_v1 import __version__
+from google.cloud.firestore_v1 import ArrayRemove
+from google.cloud.firestore_v1 import ArrayUnion
from google.cloud.firestore_v1 import Client
from google.cloud.firestore_v1 import CollectionReference
from google.cloud.firestore_v1 import DELETE_FIELD
@@ -24,7 +26,10 @@
from google.cloud.firestore_v1 import enums
from google.cloud.firestore_v1 import ExistsOption
from google.cloud.firestore_v1 import GeoPoint
+from google.cloud.firestore_v1 import Increment
from google.cloud.firestore_v1 import LastUpdateOption
+from google.cloud.firestore_v1 import Maximum
+from google.cloud.firestore_v1 import Minimum
from google.cloud.firestore_v1 import Query
from google.cloud.firestore_v1 import ReadAfterWriteError
from google.cloud.firestore_v1 import SERVER_TIMESTAMP
@@ -38,6 +43,8 @@
__all__ = [
"__version__",
+ "ArrayRemove",
+ "ArrayUnion",
"Client",
"CollectionReference",
"DELETE_FIELD",
@@ -46,7 +53,10 @@
"enums",
"ExistsOption",
"GeoPoint",
+ "Increment",
"LastUpdateOption",
+ "Maximum",
+ "Minimum",
"Query",
"ReadAfterWriteError",
"SERVER_TIMESTAMP",
| {"golden_diff": "diff --git a/firestore/google/cloud/firestore.py b/firestore/google/cloud/firestore.py\n--- a/firestore/google/cloud/firestore.py\n+++ b/firestore/google/cloud/firestore.py\n@@ -16,6 +16,8 @@\n \n \n from google.cloud.firestore_v1 import __version__\n+from google.cloud.firestore_v1 import ArrayRemove\n+from google.cloud.firestore_v1 import ArrayUnion\n from google.cloud.firestore_v1 import Client\n from google.cloud.firestore_v1 import CollectionReference\n from google.cloud.firestore_v1 import DELETE_FIELD\n@@ -24,7 +26,10 @@\n from google.cloud.firestore_v1 import enums\n from google.cloud.firestore_v1 import ExistsOption\n from google.cloud.firestore_v1 import GeoPoint\n+from google.cloud.firestore_v1 import Increment\n from google.cloud.firestore_v1 import LastUpdateOption\n+from google.cloud.firestore_v1 import Maximum\n+from google.cloud.firestore_v1 import Minimum\n from google.cloud.firestore_v1 import Query\n from google.cloud.firestore_v1 import ReadAfterWriteError\n from google.cloud.firestore_v1 import SERVER_TIMESTAMP\n@@ -38,6 +43,8 @@\n \n __all__ = [\n \"__version__\",\n+ \"ArrayRemove\",\n+ \"ArrayUnion\",\n \"Client\",\n \"CollectionReference\",\n \"DELETE_FIELD\",\n@@ -46,7 +53,10 @@\n \"enums\",\n \"ExistsOption\",\n \"GeoPoint\",\n+ \"Increment\",\n \"LastUpdateOption\",\n+ \"Maximum\",\n+ \"Minimum\",\n \"Query\",\n \"ReadAfterWriteError\",\n \"SERVER_TIMESTAMP\",\n", "issue": "Firestore: sync shim module with 'firestore_v1' package.\nSee https://github.com/googleapis/google-cloud-python/issues/7533#issuecomment-495917295 &ff.\n", "before_files": [{"content": "# Copyright 2017 Google LLC All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Python idiomatic client for Google Cloud Firestore.\"\"\"\n\n\nfrom google.cloud.firestore_v1 import __version__\nfrom google.cloud.firestore_v1 import Client\nfrom google.cloud.firestore_v1 import CollectionReference\nfrom google.cloud.firestore_v1 import DELETE_FIELD\nfrom google.cloud.firestore_v1 import DocumentReference\nfrom google.cloud.firestore_v1 import DocumentSnapshot\nfrom google.cloud.firestore_v1 import enums\nfrom google.cloud.firestore_v1 import ExistsOption\nfrom google.cloud.firestore_v1 import GeoPoint\nfrom google.cloud.firestore_v1 import LastUpdateOption\nfrom google.cloud.firestore_v1 import Query\nfrom google.cloud.firestore_v1 import ReadAfterWriteError\nfrom google.cloud.firestore_v1 import SERVER_TIMESTAMP\nfrom google.cloud.firestore_v1 import Transaction\nfrom google.cloud.firestore_v1 import transactional\nfrom google.cloud.firestore_v1 import types\nfrom google.cloud.firestore_v1 import Watch\nfrom google.cloud.firestore_v1 import WriteBatch\nfrom google.cloud.firestore_v1 import WriteOption\n\n\n__all__ = [\n \"__version__\",\n \"Client\",\n \"CollectionReference\",\n \"DELETE_FIELD\",\n \"DocumentReference\",\n \"DocumentSnapshot\",\n \"enums\",\n \"ExistsOption\",\n \"GeoPoint\",\n \"LastUpdateOption\",\n \"Query\",\n \"ReadAfterWriteError\",\n 
\"SERVER_TIMESTAMP\",\n \"Transaction\",\n \"transactional\",\n \"types\",\n \"Watch\",\n \"WriteBatch\",\n \"WriteOption\",\n]\n", "path": "firestore/google/cloud/firestore.py"}], "after_files": [{"content": "# Copyright 2017 Google LLC All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Python idiomatic client for Google Cloud Firestore.\"\"\"\n\n\nfrom google.cloud.firestore_v1 import __version__\nfrom google.cloud.firestore_v1 import ArrayRemove\nfrom google.cloud.firestore_v1 import ArrayUnion\nfrom google.cloud.firestore_v1 import Client\nfrom google.cloud.firestore_v1 import CollectionReference\nfrom google.cloud.firestore_v1 import DELETE_FIELD\nfrom google.cloud.firestore_v1 import DocumentReference\nfrom google.cloud.firestore_v1 import DocumentSnapshot\nfrom google.cloud.firestore_v1 import enums\nfrom google.cloud.firestore_v1 import ExistsOption\nfrom google.cloud.firestore_v1 import GeoPoint\nfrom google.cloud.firestore_v1 import Increment\nfrom google.cloud.firestore_v1 import LastUpdateOption\nfrom google.cloud.firestore_v1 import Maximum\nfrom google.cloud.firestore_v1 import Minimum\nfrom google.cloud.firestore_v1 import Query\nfrom google.cloud.firestore_v1 import ReadAfterWriteError\nfrom google.cloud.firestore_v1 import SERVER_TIMESTAMP\nfrom google.cloud.firestore_v1 import Transaction\nfrom google.cloud.firestore_v1 import transactional\nfrom google.cloud.firestore_v1 import types\nfrom google.cloud.firestore_v1 import Watch\nfrom google.cloud.firestore_v1 import WriteBatch\nfrom google.cloud.firestore_v1 import WriteOption\n\n\n__all__ = [\n \"__version__\",\n \"ArrayRemove\",\n \"ArrayUnion\",\n \"Client\",\n \"CollectionReference\",\n \"DELETE_FIELD\",\n \"DocumentReference\",\n \"DocumentSnapshot\",\n \"enums\",\n \"ExistsOption\",\n \"GeoPoint\",\n \"Increment\",\n \"LastUpdateOption\",\n \"Maximum\",\n \"Minimum\",\n \"Query\",\n \"ReadAfterWriteError\",\n \"SERVER_TIMESTAMP\",\n \"Transaction\",\n \"transactional\",\n \"types\",\n \"Watch\",\n \"WriteBatch\",\n \"WriteOption\",\n]\n", "path": "firestore/google/cloud/firestore.py"}]} | 851 | 341 |
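The patch for this entry only touches the `google.cloud.firestore` shim module, re-exporting the field-transform helpers (`ArrayRemove`, `ArrayUnion`, `Increment`, `Maximum`, `Minimum`) that `google.cloud.firestore_v1` already provides and listing them in `__all__`. Since every name in the shim is imported straight from `firestore_v1`, a simple sync check can be sketched like this (illustrative only, not part of the patch):

```python
from google.cloud import firestore, firestore_v1

# Every name the shim promises in __all__ is re-exported from firestore_v1,
# so both modules should expose it; anything listed here indicates drift.
missing = [name for name in firestore.__all__ if not hasattr(firestore_v1, name)]
assert not missing, f"shim names absent from firestore_v1: {missing}"
```

A guard along these lines is the kind of check that keeps the shim from drifting out of sync again, which is what the linked issue asks for.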
gh_patches_debug_35046 | rasdani/github-patches | git_diff | biolab__orange3-5423 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
DBSCAN changes instance ids
The output of DBSCAN loses traceability of data instances. The following workflow should explain the problem:

t-SNE should mark all data instances, as the data subset is the same as the data, but instead reports that "No subset data instances appear in input data".

Similar is of course true with any selection of the data that comes from DBSCAN. My original intention was to send the DBSCAN data to the boxplot and then show the selection from the box plot, say, the clusters with interesting class-distribution, as a data subset to the t-SNE. But then I get, of course, the same warning in t-SNE, reporting the mismatch of the data subset and data provided to this widget.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `Orange/widgets/unsupervised/owdbscan.py`
Content:
```
1 import sys
2 from itertools import chain
3
4 import numpy as np
5 from AnyQt.QtWidgets import QApplication
6 from AnyQt.QtGui import QColor
7 from sklearn.metrics import pairwise_distances
8
9 from Orange.preprocess import Normalize, Continuize, SklImpute
10 from Orange.widgets import widget, gui
11 from Orange.widgets.utils.slidergraph import SliderGraph
12 from Orange.widgets.settings import Setting
13 from Orange.data import Table, Domain, DiscreteVariable
14 from Orange.data.util import get_unique_names
15 from Orange.clustering import DBSCAN
16 from Orange.widgets.utils.annotated_data import ANNOTATED_DATA_SIGNAL_NAME
17 from Orange.widgets.utils.signals import Input, Output
18 from Orange.widgets.utils.state_summary import format_summary_details
19 from Orange.widgets.widget import Msg
20
21
22 DEFAULT_CUT_POINT = 0.1
23 PREPROCESSORS = [Continuize(), Normalize(), SklImpute()]
24 EPS_BOTTOM_LIMIT = 0.01
25
26
27 def get_kth_distances(data, metric, k=5):
28 """
29 The function computes the epsilon parameter for DBSCAN through method
30 proposed in the paper.
31 Parameters
32 ----------
33 data : Orange.data.Table
34 Visualisation coordinates - embeddings
35 metric : callable or str
36 The metric to compute the distance.
37 k : int
38 Number kth observed neighbour
39
40 Returns
41 -------
42 np.ndarray
43 Epsilon parameter for DBSCAN
44 """
45 x = data.X
46 if x.shape[0] > 1000: # subsample
47 x = x[np.random.randint(x.shape[0], size=1000), :]
48
49 dist = pairwise_distances(x, metric=metric)
50 k = min(k+1, len(data) - 1) # k+1 since first one is item itself
51 kth_point = np.argpartition(dist, k, axis=1)[:, k]
52 kth_dist = np.sort(dist[np.arange(0, len(kth_point)), kth_point])[::-1]
53
54 return kth_dist
55
56
57 class OWDBSCAN(widget.OWWidget):
58 name = "DBSCAN"
59 description = "Density-based spatial clustering."
60 icon = "icons/DBSCAN.svg"
61 priority = 2150
62
63 class Inputs:
64 data = Input("Data", Table)
65
66 class Outputs:
67 annotated_data = Output(ANNOTATED_DATA_SIGNAL_NAME, Table)
68
69 class Error(widget.OWWidget.Error):
70 not_enough_instances = Msg("Not enough unique data instances. "
71 "At least two are required.")
72
73 METRICS = [
74 ("Euclidean", "euclidean"),
75 ("Manhattan", "cityblock"),
76 ("Cosine", "cosine")
77 ]
78
79 min_samples = Setting(4)
80 eps = Setting(0.5)
81 metric_idx = Setting(0)
82 auto_commit = Setting(True)
83 k_distances = None
84 cut_point = None
85
86 def __init__(self):
87 super().__init__()
88
89 self.data = None
90 self.data_normalized = None
91 self.db = None
92 self.model = None
93 self._set_input_summary(None)
94 self._set_output_summary(None)
95
96 box = gui.widgetBox(self.controlArea, "Parameters")
97 gui.spin(box, self, "min_samples", 1, 100, 1,
98 callback=self._min_samples_changed,
99 label="Core point neighbors")
100 gui.doubleSpin(box, self, "eps", EPS_BOTTOM_LIMIT, 1000, 0.01,
101 callback=self._eps_changed,
102 label="Neighborhood distance")
103
104 box = gui.widgetBox(self.controlArea, self.tr("Distance Metric"))
105 gui.comboBox(box, self, "metric_idx",
106 items=list(zip(*self.METRICS))[0],
107 callback=self._metirc_changed)
108
109 gui.auto_apply(self.buttonsArea, self, "auto_commit")
110 gui.rubber(self.controlArea)
111
112 self.controlArea.layout().addStretch()
113
114 self.plot = SliderGraph(
115 x_axis_label="Data items sorted by score",
116 y_axis_label="Distance to the k-th nearest neighbour",
117 callback=self._on_cut_changed
118 )
119
120 self.mainArea.layout().addWidget(self.plot)
121
122 def check_data_size(self, data):
123 if data is None:
124 return False
125 if len(data) < 2:
126 self.Error.not_enough_instances()
127 return False
128 return True
129
130 def commit(self):
131 self.cluster()
132
133 def cluster(self):
134 if not self.check_data_size(self.data):
135 return
136 self.model = DBSCAN(
137 eps=self.eps,
138 min_samples=self.min_samples,
139 metric=self.METRICS[self.metric_idx][1]
140 ).get_model(self.data_normalized)
141 self.send_data()
142
143 def _compute_and_plot(self, cut_point=None):
144 self._compute_kdistances()
145 if cut_point is None:
146 self._compute_cut_point()
147 self._plot_graph()
148
149 def _plot_graph(self):
150 nonzero = np.sum(self.k_distances > EPS_BOTTOM_LIMIT)
151 self.plot.update(np.arange(len(self.k_distances)),
152 [self.k_distances],
153 colors=[QColor('red')],
154 cutpoint_x=self.cut_point,
155 selection_limit=(0, nonzero - 1))
156
157 def _compute_kdistances(self):
158 self.k_distances = get_kth_distances(
159 self.data_normalized, metric=self.METRICS[self.metric_idx][1],
160 k=self.min_samples
161 )
162
163 def _compute_cut_point(self):
164 self.cut_point = int(DEFAULT_CUT_POINT * len(self.k_distances))
165 self.eps = self.k_distances[self.cut_point]
166
167 if self.eps < EPS_BOTTOM_LIMIT:
168 self.eps = np.min(
169 self.k_distances[self.k_distances >= EPS_BOTTOM_LIMIT])
170 self.cut_point = self._find_nearest_dist(self.eps)
171
172 @Inputs.data
173 def set_data(self, data):
174 self.Error.clear()
175 self._set_input_summary(data)
176 if not self.check_data_size(data):
177 data = None
178 self.data = self.data_normalized = data
179 if self.data is None:
180 self._set_output_summary(None)
181 self.Outputs.annotated_data.send(None)
182 self.plot.clear_plot()
183 return
184
185 if self.data is None:
186 return
187
188 # preprocess data
189 for pp in PREPROCESSORS:
190 self.data_normalized = pp(self.data_normalized)
191
192 self._compute_and_plot()
193 self.unconditional_commit()
194
195 def send_data(self):
196 model = self.model
197
198 clusters = [c if c >= 0 else np.nan for c in model.labels]
199 k = len(set(clusters) - {np.nan})
200 clusters = np.array(clusters).reshape(len(self.data), 1)
201 core_samples = set(model.projector.core_sample_indices_)
202 in_core = np.array([1 if (i in core_samples) else 0
203 for i in range(len(self.data))])
204 in_core = in_core.reshape(len(self.data), 1)
205
206 domain = self.data.domain
207 attributes, classes = domain.attributes, domain.class_vars
208 meta_attrs = domain.metas
209 names = [var.name for var in chain(attributes, classes, meta_attrs) if var]
210
211 u_clust_var = get_unique_names(names, "Cluster")
212 clust_var = DiscreteVariable(
213 u_clust_var, values=["C%d" % (x + 1) for x in range(k)])
214
215 u_in_core = get_unique_names(names + [u_clust_var], "DBSCAN Core")
216 in_core_var = DiscreteVariable(u_in_core, values=("0", "1"))
217
218 x, y, metas = self.data.X, self.data.Y, self.data.metas
219
220 meta_attrs += (clust_var, )
221 metas = np.hstack((metas, clusters))
222 meta_attrs += (in_core_var, )
223 metas = np.hstack((metas, in_core))
224
225 domain = Domain(attributes, classes, meta_attrs)
226 new_table = Table(domain, x, y, metas, self.data.W)
227
228 self._set_output_summary(new_table)
229 self.Outputs.annotated_data.send(new_table)
230
231 def _set_input_summary(self, data):
232 summary = len(data) if data else self.info.NoInput
233 details = format_summary_details(data) if data else ""
234 self.info.set_input_summary(summary, details)
235
236 def _set_output_summary(self, output):
237 summary = len(output) if output else self.info.NoOutput
238 details = format_summary_details(output) if output else ""
239 self.info.set_output_summary(summary, details)
240
241 def _invalidate(self):
242 self.commit()
243
244 def _find_nearest_dist(self, value):
245 array = np.asarray(self.k_distances)
246 idx = (np.abs(array - value)).argmin()
247 return idx
248
249 def _eps_changed(self):
250 # find the closest value to eps
251 if self.data is None:
252 return
253 self.cut_point = self._find_nearest_dist(self.eps)
254 self.plot.set_cut_point(self.cut_point)
255 self._invalidate()
256
257 def _metirc_changed(self):
258 if self.data is not None:
259 self._compute_and_plot()
260 self._invalidate()
261
262 def _on_cut_changed(self, value):
263 # cut changed by means of a cut line over the scree plot.
264 self.cut_point = value
265 self.eps = self.k_distances[value]
266
267 self.commit()
268
269 def _min_samples_changed(self):
270 if self.data is None:
271 return
272 self._compute_and_plot(cut_point=self.cut_point)
273 self._invalidate()
274
275
276 if __name__ == "__main__":
277 a = QApplication(sys.argv)
278 ow = OWDBSCAN()
279 d = Table("iris.tab")
280 ow.set_data(d)
281 ow.show()
282 a.exec()
283 ow.saveSettings()
284
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/Orange/widgets/unsupervised/owdbscan.py b/Orange/widgets/unsupervised/owdbscan.py
--- a/Orange/widgets/unsupervised/owdbscan.py
+++ b/Orange/widgets/unsupervised/owdbscan.py
@@ -10,7 +10,7 @@
from Orange.widgets import widget, gui
from Orange.widgets.utils.slidergraph import SliderGraph
from Orange.widgets.settings import Setting
-from Orange.data import Table, Domain, DiscreteVariable
+from Orange.data import Table, DiscreteVariable
from Orange.data.util import get_unique_names
from Orange.clustering import DBSCAN
from Orange.widgets.utils.annotated_data import ANNOTATED_DATA_SIGNAL_NAME
@@ -197,11 +197,10 @@
clusters = [c if c >= 0 else np.nan for c in model.labels]
k = len(set(clusters) - {np.nan})
- clusters = np.array(clusters).reshape(len(self.data), 1)
+ clusters = np.array(clusters)
core_samples = set(model.projector.core_sample_indices_)
in_core = np.array([1 if (i in core_samples) else 0
for i in range(len(self.data))])
- in_core = in_core.reshape(len(self.data), 1)
domain = self.data.domain
attributes, classes = domain.attributes, domain.class_vars
@@ -215,15 +214,8 @@
u_in_core = get_unique_names(names + [u_clust_var], "DBSCAN Core")
in_core_var = DiscreteVariable(u_in_core, values=("0", "1"))
- x, y, metas = self.data.X, self.data.Y, self.data.metas
-
- meta_attrs += (clust_var, )
- metas = np.hstack((metas, clusters))
- meta_attrs += (in_core_var, )
- metas = np.hstack((metas, in_core))
-
- domain = Domain(attributes, classes, meta_attrs)
- new_table = Table(domain, x, y, metas, self.data.W)
+ new_table = self.data.add_column(clust_var, clusters, to_metas=True)
+ new_table = new_table.add_column(in_core_var, in_core, to_metas=True)
self._set_output_summary(new_table)
self.Outputs.annotated_data.send(new_table)
| {"golden_diff": "diff --git a/Orange/widgets/unsupervised/owdbscan.py b/Orange/widgets/unsupervised/owdbscan.py\n--- a/Orange/widgets/unsupervised/owdbscan.py\n+++ b/Orange/widgets/unsupervised/owdbscan.py\n@@ -10,7 +10,7 @@\n from Orange.widgets import widget, gui\n from Orange.widgets.utils.slidergraph import SliderGraph\n from Orange.widgets.settings import Setting\n-from Orange.data import Table, Domain, DiscreteVariable\n+from Orange.data import Table, DiscreteVariable\n from Orange.data.util import get_unique_names\n from Orange.clustering import DBSCAN\n from Orange.widgets.utils.annotated_data import ANNOTATED_DATA_SIGNAL_NAME\n@@ -197,11 +197,10 @@\n \n clusters = [c if c >= 0 else np.nan for c in model.labels]\n k = len(set(clusters) - {np.nan})\n- clusters = np.array(clusters).reshape(len(self.data), 1)\n+ clusters = np.array(clusters)\n core_samples = set(model.projector.core_sample_indices_)\n in_core = np.array([1 if (i in core_samples) else 0\n for i in range(len(self.data))])\n- in_core = in_core.reshape(len(self.data), 1)\n \n domain = self.data.domain\n attributes, classes = domain.attributes, domain.class_vars\n@@ -215,15 +214,8 @@\n u_in_core = get_unique_names(names + [u_clust_var], \"DBSCAN Core\")\n in_core_var = DiscreteVariable(u_in_core, values=(\"0\", \"1\"))\n \n- x, y, metas = self.data.X, self.data.Y, self.data.metas\n-\n- meta_attrs += (clust_var, )\n- metas = np.hstack((metas, clusters))\n- meta_attrs += (in_core_var, )\n- metas = np.hstack((metas, in_core))\n-\n- domain = Domain(attributes, classes, meta_attrs)\n- new_table = Table(domain, x, y, metas, self.data.W)\n+ new_table = self.data.add_column(clust_var, clusters, to_metas=True)\n+ new_table = new_table.add_column(in_core_var, in_core, to_metas=True)\n \n self._set_output_summary(new_table)\n self.Outputs.annotated_data.send(new_table)\n", "issue": "DBSCAN changes instance ids\nThe output of DBSCAN looses traceability of data instances. The following workflow should explain the problem:\r\n\r\n\r\n\r\nt-SNE should mark all data instances, as the data subset is the same as the data, but instead reports that \"No subset data instances appear in input data\". \r\n\r\n\r\n\r\nSimilar is of course true with any selection of the data that comes from DBSCAN. My original intention was to send the DBSCAN data to the boxplot and then show the selection from the box plot, sat, the clusters with interesting class-distribution, as a data subset to the t-SNE. 
But then I get, of course, the same warning in t-SNE, reporting the mismatch of the data subset and data provided to this widget.\n", "before_files": [{"content": "import sys\nfrom itertools import chain\n\nimport numpy as np\nfrom AnyQt.QtWidgets import QApplication\nfrom AnyQt.QtGui import QColor\nfrom sklearn.metrics import pairwise_distances\n\nfrom Orange.preprocess import Normalize, Continuize, SklImpute\nfrom Orange.widgets import widget, gui\nfrom Orange.widgets.utils.slidergraph import SliderGraph\nfrom Orange.widgets.settings import Setting\nfrom Orange.data import Table, Domain, DiscreteVariable\nfrom Orange.data.util import get_unique_names\nfrom Orange.clustering import DBSCAN\nfrom Orange.widgets.utils.annotated_data import ANNOTATED_DATA_SIGNAL_NAME\nfrom Orange.widgets.utils.signals import Input, Output\nfrom Orange.widgets.utils.state_summary import format_summary_details\nfrom Orange.widgets.widget import Msg\n\n\nDEFAULT_CUT_POINT = 0.1\nPREPROCESSORS = [Continuize(), Normalize(), SklImpute()]\nEPS_BOTTOM_LIMIT = 0.01\n\n\ndef get_kth_distances(data, metric, k=5):\n \"\"\"\n The function computes the epsilon parameter for DBSCAN through method\n proposed in the paper.\n Parameters\n ----------\n data : Orange.data.Table\n Visualisation coordinates - embeddings\n metric : callable or str\n The metric to compute the distance.\n k : int\n Number kth observed neighbour\n\n Returns\n -------\n np.ndarray\n Epsilon parameter for DBSCAN\n \"\"\"\n x = data.X\n if x.shape[0] > 1000: # subsample\n x = x[np.random.randint(x.shape[0], size=1000), :]\n\n dist = pairwise_distances(x, metric=metric)\n k = min(k+1, len(data) - 1) # k+1 since first one is item itself\n kth_point = np.argpartition(dist, k, axis=1)[:, k]\n kth_dist = np.sort(dist[np.arange(0, len(kth_point)), kth_point])[::-1]\n\n return kth_dist\n\n\nclass OWDBSCAN(widget.OWWidget):\n name = \"DBSCAN\"\n description = \"Density-based spatial clustering.\"\n icon = \"icons/DBSCAN.svg\"\n priority = 2150\n\n class Inputs:\n data = Input(\"Data\", Table)\n\n class Outputs:\n annotated_data = Output(ANNOTATED_DATA_SIGNAL_NAME, Table)\n\n class Error(widget.OWWidget.Error):\n not_enough_instances = Msg(\"Not enough unique data instances. 
\"\n \"At least two are required.\")\n\n METRICS = [\n (\"Euclidean\", \"euclidean\"),\n (\"Manhattan\", \"cityblock\"),\n (\"Cosine\", \"cosine\")\n ]\n\n min_samples = Setting(4)\n eps = Setting(0.5)\n metric_idx = Setting(0)\n auto_commit = Setting(True)\n k_distances = None\n cut_point = None\n\n def __init__(self):\n super().__init__()\n\n self.data = None\n self.data_normalized = None\n self.db = None\n self.model = None\n self._set_input_summary(None)\n self._set_output_summary(None)\n\n box = gui.widgetBox(self.controlArea, \"Parameters\")\n gui.spin(box, self, \"min_samples\", 1, 100, 1,\n callback=self._min_samples_changed,\n label=\"Core point neighbors\")\n gui.doubleSpin(box, self, \"eps\", EPS_BOTTOM_LIMIT, 1000, 0.01,\n callback=self._eps_changed,\n label=\"Neighborhood distance\")\n\n box = gui.widgetBox(self.controlArea, self.tr(\"Distance Metric\"))\n gui.comboBox(box, self, \"metric_idx\",\n items=list(zip(*self.METRICS))[0],\n callback=self._metirc_changed)\n\n gui.auto_apply(self.buttonsArea, self, \"auto_commit\")\n gui.rubber(self.controlArea)\n\n self.controlArea.layout().addStretch()\n\n self.plot = SliderGraph(\n x_axis_label=\"Data items sorted by score\",\n y_axis_label=\"Distance to the k-th nearest neighbour\",\n callback=self._on_cut_changed\n )\n\n self.mainArea.layout().addWidget(self.plot)\n\n def check_data_size(self, data):\n if data is None:\n return False\n if len(data) < 2:\n self.Error.not_enough_instances()\n return False\n return True\n\n def commit(self):\n self.cluster()\n\n def cluster(self):\n if not self.check_data_size(self.data):\n return\n self.model = DBSCAN(\n eps=self.eps,\n min_samples=self.min_samples,\n metric=self.METRICS[self.metric_idx][1]\n ).get_model(self.data_normalized)\n self.send_data()\n\n def _compute_and_plot(self, cut_point=None):\n self._compute_kdistances()\n if cut_point is None:\n self._compute_cut_point()\n self._plot_graph()\n\n def _plot_graph(self):\n nonzero = np.sum(self.k_distances > EPS_BOTTOM_LIMIT)\n self.plot.update(np.arange(len(self.k_distances)),\n [self.k_distances],\n colors=[QColor('red')],\n cutpoint_x=self.cut_point,\n selection_limit=(0, nonzero - 1))\n\n def _compute_kdistances(self):\n self.k_distances = get_kth_distances(\n self.data_normalized, metric=self.METRICS[self.metric_idx][1],\n k=self.min_samples\n )\n\n def _compute_cut_point(self):\n self.cut_point = int(DEFAULT_CUT_POINT * len(self.k_distances))\n self.eps = self.k_distances[self.cut_point]\n\n if self.eps < EPS_BOTTOM_LIMIT:\n self.eps = np.min(\n self.k_distances[self.k_distances >= EPS_BOTTOM_LIMIT])\n self.cut_point = self._find_nearest_dist(self.eps)\n\n @Inputs.data\n def set_data(self, data):\n self.Error.clear()\n self._set_input_summary(data)\n if not self.check_data_size(data):\n data = None\n self.data = self.data_normalized = data\n if self.data is None:\n self._set_output_summary(None)\n self.Outputs.annotated_data.send(None)\n self.plot.clear_plot()\n return\n\n if self.data is None:\n return\n\n # preprocess data\n for pp in PREPROCESSORS:\n self.data_normalized = pp(self.data_normalized)\n\n self._compute_and_plot()\n self.unconditional_commit()\n\n def send_data(self):\n model = self.model\n\n clusters = [c if c >= 0 else np.nan for c in model.labels]\n k = len(set(clusters) - {np.nan})\n clusters = np.array(clusters).reshape(len(self.data), 1)\n core_samples = set(model.projector.core_sample_indices_)\n in_core = np.array([1 if (i in core_samples) else 0\n for i in range(len(self.data))])\n in_core = 
in_core.reshape(len(self.data), 1)\n\n domain = self.data.domain\n attributes, classes = domain.attributes, domain.class_vars\n meta_attrs = domain.metas\n names = [var.name for var in chain(attributes, classes, meta_attrs) if var]\n\n u_clust_var = get_unique_names(names, \"Cluster\")\n clust_var = DiscreteVariable(\n u_clust_var, values=[\"C%d\" % (x + 1) for x in range(k)])\n\n u_in_core = get_unique_names(names + [u_clust_var], \"DBSCAN Core\")\n in_core_var = DiscreteVariable(u_in_core, values=(\"0\", \"1\"))\n\n x, y, metas = self.data.X, self.data.Y, self.data.metas\n\n meta_attrs += (clust_var, )\n metas = np.hstack((metas, clusters))\n meta_attrs += (in_core_var, )\n metas = np.hstack((metas, in_core))\n\n domain = Domain(attributes, classes, meta_attrs)\n new_table = Table(domain, x, y, metas, self.data.W)\n\n self._set_output_summary(new_table)\n self.Outputs.annotated_data.send(new_table)\n\n def _set_input_summary(self, data):\n summary = len(data) if data else self.info.NoInput\n details = format_summary_details(data) if data else \"\"\n self.info.set_input_summary(summary, details)\n\n def _set_output_summary(self, output):\n summary = len(output) if output else self.info.NoOutput\n details = format_summary_details(output) if output else \"\"\n self.info.set_output_summary(summary, details)\n\n def _invalidate(self):\n self.commit()\n\n def _find_nearest_dist(self, value):\n array = np.asarray(self.k_distances)\n idx = (np.abs(array - value)).argmin()\n return idx\n\n def _eps_changed(self):\n # find the closest value to eps\n if self.data is None:\n return\n self.cut_point = self._find_nearest_dist(self.eps)\n self.plot.set_cut_point(self.cut_point)\n self._invalidate()\n\n def _metirc_changed(self):\n if self.data is not None:\n self._compute_and_plot()\n self._invalidate()\n\n def _on_cut_changed(self, value):\n # cut changed by means of a cut line over the scree plot.\n self.cut_point = value\n self.eps = self.k_distances[value]\n\n self.commit()\n\n def _min_samples_changed(self):\n if self.data is None:\n return\n self._compute_and_plot(cut_point=self.cut_point)\n self._invalidate()\n\n\nif __name__ == \"__main__\":\n a = QApplication(sys.argv)\n ow = OWDBSCAN()\n d = Table(\"iris.tab\")\n ow.set_data(d)\n ow.show()\n a.exec()\n ow.saveSettings()\n", "path": "Orange/widgets/unsupervised/owdbscan.py"}], "after_files": [{"content": "import sys\nfrom itertools import chain\n\nimport numpy as np\nfrom AnyQt.QtWidgets import QApplication\nfrom AnyQt.QtGui import QColor\nfrom sklearn.metrics import pairwise_distances\n\nfrom Orange.preprocess import Normalize, Continuize, SklImpute\nfrom Orange.widgets import widget, gui\nfrom Orange.widgets.utils.slidergraph import SliderGraph\nfrom Orange.widgets.settings import Setting\nfrom Orange.data import Table, DiscreteVariable\nfrom Orange.data.util import get_unique_names\nfrom Orange.clustering import DBSCAN\nfrom Orange.widgets.utils.annotated_data import ANNOTATED_DATA_SIGNAL_NAME\nfrom Orange.widgets.utils.signals import Input, Output\nfrom Orange.widgets.utils.state_summary import format_summary_details\nfrom Orange.widgets.widget import Msg\n\n\nDEFAULT_CUT_POINT = 0.1\nPREPROCESSORS = [Continuize(), Normalize(), SklImpute()]\nEPS_BOTTOM_LIMIT = 0.01\n\n\ndef get_kth_distances(data, metric, k=5):\n \"\"\"\n The function computes the epsilon parameter for DBSCAN through method\n proposed in the paper.\n Parameters\n ----------\n data : Orange.data.Table\n Visualisation coordinates - embeddings\n metric : callable or str\n The 
metric to compute the distance.\n k : int\n Number kth observed neighbour\n\n Returns\n -------\n np.ndarray\n Epsilon parameter for DBSCAN\n \"\"\"\n x = data.X\n if x.shape[0] > 1000: # subsample\n x = x[np.random.randint(x.shape[0], size=1000), :]\n\n dist = pairwise_distances(x, metric=metric)\n k = min(k+1, len(data) - 1) # k+1 since first one is item itself\n kth_point = np.argpartition(dist, k, axis=1)[:, k]\n kth_dist = np.sort(dist[np.arange(0, len(kth_point)), kth_point])[::-1]\n\n return kth_dist\n\n\nclass OWDBSCAN(widget.OWWidget):\n name = \"DBSCAN\"\n description = \"Density-based spatial clustering.\"\n icon = \"icons/DBSCAN.svg\"\n priority = 2150\n\n class Inputs:\n data = Input(\"Data\", Table)\n\n class Outputs:\n annotated_data = Output(ANNOTATED_DATA_SIGNAL_NAME, Table)\n\n class Error(widget.OWWidget.Error):\n not_enough_instances = Msg(\"Not enough unique data instances. \"\n \"At least two are required.\")\n\n METRICS = [\n (\"Euclidean\", \"euclidean\"),\n (\"Manhattan\", \"cityblock\"),\n (\"Cosine\", \"cosine\")\n ]\n\n min_samples = Setting(4)\n eps = Setting(0.5)\n metric_idx = Setting(0)\n auto_commit = Setting(True)\n k_distances = None\n cut_point = None\n\n def __init__(self):\n super().__init__()\n\n self.data = None\n self.data_normalized = None\n self.db = None\n self.model = None\n self._set_input_summary(None)\n self._set_output_summary(None)\n\n box = gui.widgetBox(self.controlArea, \"Parameters\")\n gui.spin(box, self, \"min_samples\", 1, 100, 1,\n callback=self._min_samples_changed,\n label=\"Core point neighbors\")\n gui.doubleSpin(box, self, \"eps\", EPS_BOTTOM_LIMIT, 1000, 0.01,\n callback=self._eps_changed,\n label=\"Neighborhood distance\")\n\n box = gui.widgetBox(self.controlArea, self.tr(\"Distance Metric\"))\n gui.comboBox(box, self, \"metric_idx\",\n items=list(zip(*self.METRICS))[0],\n callback=self._metirc_changed)\n\n gui.auto_apply(self.buttonsArea, self, \"auto_commit\")\n gui.rubber(self.controlArea)\n\n self.controlArea.layout().addStretch()\n\n self.plot = SliderGraph(\n x_axis_label=\"Data items sorted by score\",\n y_axis_label=\"Distance to the k-th nearest neighbour\",\n callback=self._on_cut_changed\n )\n\n self.mainArea.layout().addWidget(self.plot)\n\n def check_data_size(self, data):\n if data is None:\n return False\n if len(data) < 2:\n self.Error.not_enough_instances()\n return False\n return True\n\n def commit(self):\n self.cluster()\n\n def cluster(self):\n if not self.check_data_size(self.data):\n return\n self.model = DBSCAN(\n eps=self.eps,\n min_samples=self.min_samples,\n metric=self.METRICS[self.metric_idx][1]\n ).get_model(self.data_normalized)\n self.send_data()\n\n def _compute_and_plot(self, cut_point=None):\n self._compute_kdistances()\n if cut_point is None:\n self._compute_cut_point()\n self._plot_graph()\n\n def _plot_graph(self):\n nonzero = np.sum(self.k_distances > EPS_BOTTOM_LIMIT)\n self.plot.update(np.arange(len(self.k_distances)),\n [self.k_distances],\n colors=[QColor('red')],\n cutpoint_x=self.cut_point,\n selection_limit=(0, nonzero - 1))\n\n def _compute_kdistances(self):\n self.k_distances = get_kth_distances(\n self.data_normalized, metric=self.METRICS[self.metric_idx][1],\n k=self.min_samples\n )\n\n def _compute_cut_point(self):\n self.cut_point = int(DEFAULT_CUT_POINT * len(self.k_distances))\n self.eps = self.k_distances[self.cut_point]\n\n if self.eps < EPS_BOTTOM_LIMIT:\n self.eps = np.min(\n self.k_distances[self.k_distances >= EPS_BOTTOM_LIMIT])\n self.cut_point = 
self._find_nearest_dist(self.eps)\n\n @Inputs.data\n def set_data(self, data):\n self.Error.clear()\n self._set_input_summary(data)\n if not self.check_data_size(data):\n data = None\n self.data = self.data_normalized = data\n if self.data is None:\n self._set_output_summary(None)\n self.Outputs.annotated_data.send(None)\n self.plot.clear_plot()\n return\n\n if self.data is None:\n return\n\n # preprocess data\n for pp in PREPROCESSORS:\n self.data_normalized = pp(self.data_normalized)\n\n self._compute_and_plot()\n self.unconditional_commit()\n\n def send_data(self):\n model = self.model\n\n clusters = [c if c >= 0 else np.nan for c in model.labels]\n k = len(set(clusters) - {np.nan})\n clusters = np.array(clusters)\n core_samples = set(model.projector.core_sample_indices_)\n in_core = np.array([1 if (i in core_samples) else 0\n for i in range(len(self.data))])\n\n domain = self.data.domain\n attributes, classes = domain.attributes, domain.class_vars\n meta_attrs = domain.metas\n names = [var.name for var in chain(attributes, classes, meta_attrs) if var]\n\n u_clust_var = get_unique_names(names, \"Cluster\")\n clust_var = DiscreteVariable(\n u_clust_var, values=[\"C%d\" % (x + 1) for x in range(k)])\n\n u_in_core = get_unique_names(names + [u_clust_var], \"DBSCAN Core\")\n in_core_var = DiscreteVariable(u_in_core, values=(\"0\", \"1\"))\n\n new_table = self.data.add_column(clust_var, clusters, to_metas=True)\n new_table = new_table.add_column(in_core_var, in_core, to_metas=True)\n\n self._set_output_summary(new_table)\n self.Outputs.annotated_data.send(new_table)\n\n def _set_input_summary(self, data):\n summary = len(data) if data else self.info.NoInput\n details = format_summary_details(data) if data else \"\"\n self.info.set_input_summary(summary, details)\n\n def _set_output_summary(self, output):\n summary = len(output) if output else self.info.NoOutput\n details = format_summary_details(output) if output else \"\"\n self.info.set_output_summary(summary, details)\n\n def _invalidate(self):\n self.commit()\n\n def _find_nearest_dist(self, value):\n array = np.asarray(self.k_distances)\n idx = (np.abs(array - value)).argmin()\n return idx\n\n def _eps_changed(self):\n # find the closest value to eps\n if self.data is None:\n return\n self.cut_point = self._find_nearest_dist(self.eps)\n self.plot.set_cut_point(self.cut_point)\n self._invalidate()\n\n def _metirc_changed(self):\n if self.data is not None:\n self._compute_and_plot()\n self._invalidate()\n\n def _on_cut_changed(self, value):\n # cut changed by means of a cut line over the scree plot.\n self.cut_point = value\n self.eps = self.k_distances[value]\n\n self.commit()\n\n def _min_samples_changed(self):\n if self.data is None:\n return\n self._compute_and_plot(cut_point=self.cut_point)\n self._invalidate()\n\n\nif __name__ == \"__main__\":\n a = QApplication(sys.argv)\n ow = OWDBSCAN()\n d = Table(\"iris.tab\")\n ow.set_data(d)\n ow.show()\n a.exec()\n ow.saveSettings()\n", "path": "Orange/widgets/unsupervised/owdbscan.py"}]} | 3,396 | 529 |
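The patch for this entry replaces the manual `Domain`/`Table` reconstruction in `send_data` with two `Table.add_column` calls, so the output table keeps the instance ids of the input rows and downstream widgets such as t-SNE can match selections and subsets against the original data again. The intended behaviour can be sketched with the same API the golden diff uses (illustrative values; `iris.tab` stands in for any input table):

```python
import numpy as np
from Orange.data import Table, DiscreteVariable

data = Table("iris.tab")
labels = np.zeros(len(data))                      # stand-in for DBSCAN cluster labels
clust_var = DiscreteVariable("Cluster", values=["C1"])

# add_column annotates the existing table instead of rebuilding it, so row
# identity survives and instance ids still line up with the original input.
annotated = data.add_column(clust_var, labels, to_metas=True)
assert np.array_equal(annotated.ids, data.ids)
```

Rebuilding the table with `Table(domain, x, y, metas, W)`, as the old code did, assigns fresh instance ids, which — per the issue — is why the subset matching in the reported workflow failed.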