Dataset schema:
- title: string, length 2-169
- diff: string, length 235-19.5k
- body: string, length 0-30.5k
- url: string, length 48-84
- created_at: string, length 20
- closed_at: string, length 20
- merged_at: string, length 20
- updated_at: string, length 20
- diff_len: float64, range 101-3.99k
- repo_name: string, 83 distinct values
- __index_level_0__: int64, range 15-52.7k
Add `macos-latest` runner for CoreML benchmarks
diff --git a/models/common.py b/models/common.py index 2b61307ad46..825a4c4e263 100644 --- a/models/common.py +++ b/models/common.py @@ -514,8 +514,7 @@ def forward(self, im, augment=False, visualize=False): conf, cls = y['confidence'].max(1), y['confidence'].argmax(1).astype(np.float) y = np.concatenate((box, conf.reshape(-1, 1), cls.reshape(-1, 1)), 1) else: - k = 'var_' + str(sorted(int(k.replace('var_', '')) for k in y)[-1]) # output key - y = y[k] # output + y = list(reversed(y.values())) # reversed for segmentation models (pred, proto) elif self.paddle: # PaddlePaddle im = im.cpu().numpy().astype(np.float32) self.input_handle.copy_from_cpu(im)
Signed-off-by: Glenn Jocher <[email protected]> <!-- Thank you for submitting a YOLOv5 🚀 Pull Request! We want to make contributing to YOLOv5 as easy and transparent as possible. A few tips to get you started: - Search existing YOLOv5 [PRs](https://github.com/ultralytics/yolov5/pull) to see if a similar PR already exists. - Link this PR to a YOLOv5 [issue](https://github.com/ultralytics/yolov5/issues) to help us understand what bug fix or feature is being implemented. - Provide before and after profiling/inference/training results to help us quantify the improvement your PR provides (if applicable). Please see our ✅ [Contributing Guide](https://github.com/ultralytics/yolov5/blob/master/CONTRIBUTING.md) for more details. --> ## 🛠️ PR Summary <sub>Made with ❤️ by [Ultralytics Actions](https://github.com/ultralytics/actions)<sub> ### 🌟 Summary Enhanced model output handling for segmentation in YOLOv5. ### 📊 Key Changes - Simplified the logic for handling outputs in models that include segmentation tasks. - The previous method of identifying the last layer's output by key has been replaced. - Outputs are now retrieved by reversing the order of model values which is particularly relevant for certain segmentation models. ### 🎯 Purpose & Impact - The change aims to streamline the post-processing step in YOLOv5's forward pass, especially for models that perform segmentation tasks. - This can potentially make the code easier to maintain and understand, as it removes the need for dynamically constructing the key based on the outputs available. - Users can expect a more consistent handling of outputs, which may, in turn, enhance the usability of YOLOv5 for various computer vision tasks including segmentation. - There might be a performance improvement in scenarios where segmentation model outputs are used, due to a more direct access to the required output tensors. 🚀
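A minimal sketch of the behavioral difference, using a hypothetical two-entry output dict (the real `y` comes from the CoreML runtime; `reversed()` over dict views assumes Python 3.8+):

```python
# Hypothetical CoreML-style output dict; insertion order (proto, pred) assumed.
y = {"var_1363": "proto", "var_1364": "pred"}

# Old behavior: keep only the output with the highest numeric key.
k = "var_" + str(sorted(int(k.replace("var_", "")) for k in y)[-1])
assert y[k] == "pred"  # the proto tensor was dropped

# New behavior: keep every output, reversed, so segmentation models
# yield (pred, proto) instead of a single tensor.
assert list(reversed(y.values())) == ["pred", "proto"]
```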
https://api.github.com/repos/ultralytics/yolov5/pulls/9453
2022-09-17T10:41:28Z
2022-09-17T22:57:49Z
2022-09-17T22:57:49Z
2024-01-19T05:51:28Z
220
ultralytics/yolov5
25,577
Change TWISTED_REACTOR in the default template.
diff --git a/docs/topics/settings.rst b/docs/topics/settings.rst index a711fd197ab..0b1ef71cfa3 100644 --- a/docs/topics/settings.rst +++ b/docs/topics/settings.rst @@ -1642,6 +1642,11 @@ install the default reactor defined by Twisted for the current platform. This is to maintain backward compatibility and avoid possible problems caused by using a non-default reactor. +.. versionchanged:: VERSION + The :command:`startproject` command now sets this setting to + ``twisted.internet.asyncioreactor.AsyncioSelectorReactor`` in the generated + ``settings.py`` file. + For additional information, see :doc:`core/howto/choosing-reactor`. diff --git a/scrapy/templates/project/module/settings.py.tmpl b/scrapy/templates/project/module/settings.py.tmpl index 5e541e2c0bb..c0c34e986cb 100644 --- a/scrapy/templates/project/module/settings.py.tmpl +++ b/scrapy/templates/project/module/settings.py.tmpl @@ -89,3 +89,4 @@ ROBOTSTXT_OBEY = True # Set settings whose default value is deprecated to a future-proof value REQUEST_FINGERPRINTER_IMPLEMENTATION = 'VERSION' +TWISTED_REACTOR = 'twisted.internet.asyncioreactor.AsyncioSelectorReactor' diff --git a/tests/test_commands.py b/tests/test_commands.py index 76d5f3935b4..eaca41102b9 100644 --- a/tests/test_commands.py +++ b/tests/test_commands.py @@ -689,8 +689,15 @@ def test_asyncio_enabled_true(self): ]) self.assertIn("Using reactor: twisted.internet.asyncioreactor.AsyncioSelectorReactor", log) - def test_asyncio_enabled_false(self): + def test_asyncio_enabled_default(self): log = self.get_log(self.debug_log_spider, args=[]) + self.assertIn("Using reactor: twisted.internet.asyncioreactor.AsyncioSelectorReactor", log) + + def test_asyncio_enabled_false(self): + log = self.get_log(self.debug_log_spider, args=[ + '-s', 'TWISTED_REACTOR=twisted.internet.selectreactor.SelectReactor' + ]) + self.assertIn("Using reactor: twisted.internet.selectreactor.SelectReactor", log) self.assertNotIn("Using reactor: twisted.internet.asyncioreactor.AsyncioSelectorReactor", log) @mark.skipif(sys.implementation.name == 'pypy', reason='uvloop does not support pypy properly')
Fixes #5590
https://api.github.com/repos/scrapy/scrapy/pulls/5679
2022-10-14T16:04:51Z
2022-10-15T09:54:59Z
2022-10-15T09:54:59Z
2022-10-15T09:55:03Z
567
scrapy/scrapy
34,207
[autoparallel] add gpt2 performance test
diff --git a/tests/test_auto_parallel/test_tensor_shard/test_gpt/gpt_modules.py b/tests/test_auto_parallel/test_tensor_shard/test_gpt/gpt_modules.py index b66ad1949d49..22a2371311f9 100644 --- a/tests/test_auto_parallel/test_tensor_shard/test_gpt/gpt_modules.py +++ b/tests/test_auto_parallel/test_tensor_shard/test_gpt/gpt_modules.py @@ -113,6 +113,7 @@ def forward( attention_mask: Optional[torch.FloatTensor] = None, head_mask: Optional[torch.FloatTensor] = None, ) -> Tuple[Union[torch.Tensor, Tuple[torch.Tensor]], ...]: + # query, key, value = self.c_attn(hidden_states).split(self.split_size, dim=2) qkv = self.c_attn(hidden_states) @@ -187,7 +188,6 @@ def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, - token_type_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, ) -> Union[Tuple, BaseModelOutputWithPastAndCrossAttentions]: input_shape = input_ids.size() @@ -196,8 +196,6 @@ def forward( device = input_ids.device - token_type_ids = token_type_ids.view(-1, input_shape[-1]) - past_length = 0 past_key_values = tuple([None] * len(self.h)) @@ -223,9 +221,6 @@ def forward( # add_2 hidden_states = inputs_embeds + position_embeds - token_type_embeds = self.wte(token_type_ids) - hidden_states = hidden_states + token_type_embeds - # comment to run pipeline # add_3 output_shape = input_shape + (hidden_states.size(-1),) @@ -239,3 +234,46 @@ def forward( hidden_states = hidden_states.view(output_shape) return hidden_states + + +class GPT2LMHeadModel(GPT2PreTrainedModel): + _keys_to_ignore_on_load_missing = [r"attn.masked_bias", r"attn.bias", r"lm_head.weight"] + + def __init__(self, config): + super().__init__(config) + self.transformer = GPT2Model(config) + self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False) + + # Model parallel + self.model_parallel = False + self.device_map = None + + # Initialize weights and apply final processing + self.post_init() + + def forward( + self, + input_ids: Optional[torch.LongTensor] = None, + attention_mask: Optional[torch.FloatTensor] = None, + ): + transformer_outputs = self.transformer( + input_ids=input_ids, + attention_mask=attention_mask, + ) + + lm_logits = self.lm_head(transformer_outputs) + + return lm_logits + + +class GPTLMLoss(nn.Module): + + def __init__(self): + super().__init__() + self.loss_fn = nn.CrossEntropyLoss() + + def forward(self, logits, labels): + shift_logits = logits[..., :-1, :].contiguous() + shift_labels = labels[..., 1:].contiguous() + # Flatten the tokens + return self.loss_fn(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1)) diff --git a/tests/test_auto_parallel/test_tensor_shard/test_gpt/test_gpt2_performance.py b/tests/test_auto_parallel/test_tensor_shard/test_gpt/test_gpt2_performance.py new file mode 100644 index 000000000000..87155307fe0f --- /dev/null +++ b/tests/test_auto_parallel/test_tensor_shard/test_gpt/test_gpt2_performance.py @@ -0,0 +1,159 @@ +import copy +import random +from functools import partial +from time import time +from typing import Dict, Optional, Tuple, Union + +import numpy as np +import psutil +import pytest +import torch +import torch.multiprocessing as mp +import torch.nn as nn +import transformers +from torch.fx import GraphModule +from torch.profiler import ProfilerActivity, profile, record_function, schedule, tensorboard_trace_handler + +from colossalai.auto_parallel.passes.runtime_apply_pass import runtime_apply_pass +from 
colossalai.auto_parallel.passes.runtime_preparation_pass import runtime_preparation_pass +from colossalai.auto_parallel.tensor_shard.constants import BATCHNORM_MODULE_OP +from colossalai.auto_parallel.tensor_shard.sharding_strategy import ShardingSpec +from colossalai.auto_parallel.tensor_shard.solver import ( + CostGraph, + GraphAnalyser, + Solver, + SolverOptions, + StrategiesConstructor, +) +from colossalai.device.device_mesh import DeviceMesh +from colossalai.fx.tracer.tracer import ColoTracer +from colossalai.initialize import launch, launch_from_torch +from colossalai.logging import disable_existing_loggers, get_dist_logger +from colossalai.tensor.shape_consistency import ShapeConsistencyManager, to_global +from colossalai.testing import assert_close, assert_close_loose, parameterize, rerun_if_address_is_in_use +from colossalai.testing.pytest_wrapper import run_on_environment_flag +from colossalai.utils import free_port +from tests.test_auto_parallel.test_tensor_shard.test_gpt.gpt_modules import GPT2LMHeadModel, GPTLMLoss + +BATCH_SIZE = 128 +SEQ_LENGTH = 128 +HIDDEN_DIM = 4096 +NUM_HEADS = 32 +NUM_LAYERS = 4 +VOCAB_SIZE = 50257 +NUM_STEPS = 10 + + +def get_cpu_mem(): + return psutil.Process().memory_info().rss / 1024**2 + + +def get_gpu_mem(): + return torch.cuda.memory_allocated() / 1024**2 + + +def get_mem_info(prefix=''): + return f'{prefix}GPU memory usage: {get_gpu_mem():.2f} MB, CPU memory usage: {get_cpu_mem():.2f} MB' + + +def get_tflops(model_numel, batch_size, seq_len, step_time): + return model_numel * batch_size * seq_len * 8 / 1e12 / (step_time + 1e-12) + + +# Randomly Generated Data +def get_data(batch_size, seq_len, vocab_size): + input_ids = torch.randint(0, vocab_size, (batch_size, seq_len), device=torch.cuda.current_device()) + attention_mask = torch.ones_like(input_ids) + return input_ids, attention_mask + + +def main(): + disable_existing_loggers() + launch_from_torch(config={}) + logger = get_dist_logger() + config = transformers.GPT2Config(n_position=SEQ_LENGTH, n_layer=NUM_LAYERS, n_head=NUM_HEADS, n_embd=HIDDEN_DIM) + + model = GPT2LMHeadModel(config=config).to('cuda') + + input_ids = torch.zeros((BATCH_SIZE, SEQ_LENGTH), dtype=torch.int64) + attention_mask = torch.zeros((BATCH_SIZE, SEQ_LENGTH), dtype=torch.int64) + + meta_input_sample = { + 'input_ids': input_ids.to('meta'), + 'attention_mask': attention_mask.to('meta'), + } + + physical_mesh_id = torch.arange(0, 4) + mesh_shape = (2, 2) + # [[0, 1] + # [2, 3]] + device_mesh = DeviceMesh(physical_mesh_id, mesh_shape, init_process_group=True) + shape_consistency_manager = ShapeConsistencyManager() + + tracer = ColoTracer() + + graph = tracer.trace(root=model, meta_args=meta_input_sample) + gm = GraphModule(model, graph, model.__class__.__name__) + gm.recompile() + + graph_analyser = GraphAnalyser(gm) + liveness_list = graph_analyser.liveness_analysis() + solver_options = SolverOptions() + strategies_constructor = StrategiesConstructor(graph, device_mesh, solver_options) + strategies_constructor.build_strategies_and_cost() + + cost_graph = CostGraph(strategies_constructor.leaf_strategies) + cost_graph.simplify_graph() + solver = Solver(gm.graph, strategies_constructor, cost_graph, graph_analyser, memory_budget=-1) + ret = solver.call_solver_serialized_args() + + solution = list(ret[0]) + print(solution) + gm, sharding_spec_dict, origin_spec_dict, comm_actions_dict = runtime_preparation_pass( + gm, solution, device_mesh, strategies_constructor) + gm = runtime_apply_pass(gm) + gm.recompile() + # 
*******************strategy selected******************* + print("*******************strategy selected*******************") + strategies_list = solution + + nodes = [strategies_vector.node for strategies_vector in strategies_constructor.leaf_strategies] + for index, node in enumerate(nodes): + print(node.name, node.strategies_vector[strategies_list[index]].name) + + # build criterion + criterion = GPTLMLoss() + + optimizer = torch.optim.Adam(gm.parameters(), lr=0.01) + numel = sum([p.numel() for p in model.parameters()]) + logger.info(get_mem_info(prefix='After init model, '), ranks=[0]) + get_tflops_func = partial(get_tflops, numel, BATCH_SIZE, SEQ_LENGTH) + torch.cuda.synchronize() + model.train() + # with profile(activities=[ProfilerActivity.CPU, ProfilerActivity.CUDA], + # schedule=schedule(wait=1, warmup=2, active=2), + # on_trace_ready=tensorboard_trace_handler(f'log/dummy_data/bs128_seq128_new'), + # record_shapes=True, + # profile_memory=True) as prof: + # with torch.profiler.profile(activities=[torch.profiler.ProfilerActivity.CPU, torch.profiler.ProfilerActivity.CUDA]) as prof: + for n in range(10): + # we just use randomly generated data here + input_ids, attn_mask = get_data(BATCH_SIZE, SEQ_LENGTH, VOCAB_SIZE) + optimizer.zero_grad() + start = time() + outputs = gm(input_ids, attn_mask, sharding_spec_dict, origin_spec_dict, comm_actions_dict) + loss = criterion(outputs, input_ids) + loss.backward() + optimizer.step() + # prof.step() + torch.cuda.synchronize() + step_time = time() - start + logger.info( + f'[{n+1}/{NUM_STEPS}] Loss:{loss.item():.3f}, Step time: {step_time:.3f}s, TFLOPS: {get_tflops_func(step_time):.3f}', + ranks=[0]) + # print(prof.key_averages().table(sort_by="self_cuda_time_total", row_limit=10)) + torch.cuda.synchronize() + + +if __name__ == '__main__': + main()
### What does this PR do 1. To compare performance with the GPT-2 demo in CAI, `GPT2LMHeadModel` is added to gpt_modules.py. 2. Adds a GPT-2 performance test; we can use this test to measure the auto-parallel system's performance on the GPT-2 model.
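For reference, a worked example of the throughput estimate the test logs; the function is copied from the diff, and reading the constant 8 as combined forward/backward (+ recompute) FLOPs per parameter per token is an assumption, not something the PR states:

```python
def get_tflops(model_numel, batch_size, seq_len, step_time):
    # Copied from the diff; 8 ~ FLOPs per parameter per processed token
    # across forward + backward (+ recompute) -- assumed interpretation.
    return model_numel * batch_size * seq_len * 8 / 1e12 / (step_time + 1e-12)

# A 1e9-parameter model at batch 128, sequence length 128, 2.0 s per step:
print(f"{get_tflops(1e9, 128, 128, 2.0):.1f} TFLOPS")  # -> 65.5 TFLOPS
```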
https://api.github.com/repos/hpcaitech/ColossalAI/pulls/2194
2022-12-25T10:58:22Z
2022-12-26T13:56:59Z
2022-12-26T13:56:59Z
2022-12-26T13:56:59Z
2,509
hpcaitech/ColossalAI
11,065
Rewrite LazyFrames.__getitem__ to only decompress if needed
diff --git a/gym/wrappers/frame_stack.py b/gym/wrappers/frame_stack.py index c51c61efb09..30ca76c909c 100644 --- a/gym/wrappers/frame_stack.py +++ b/gym/wrappers/frame_stack.py @@ -6,57 +6,62 @@ class LazyFrames(object): - r"""Ensures common frames are only stored once to optimize memory use. + r"""Ensures common frames are only stored once to optimize memory use. - To further reduce the memory use, it is optionally to turn on lz4 to + To further reduce the memory use, it is optionally to turn on lz4 to compress the observations. .. note:: - This object should only be converted to numpy array just before forward pass. + This object should only be converted to numpy array just before forward pass. + + Args: + lz4_compress (bool): use lz4 to compress the frames internally """ + __slots__ = ('frame_shape', 'dtype', 'shape', 'lz4_compress', '_frames') + def __init__(self, frames, lz4_compress=False): if lz4_compress: from lz4.block import compress - self.frame_shape = frames[0].shape + self.frame_shape = tuple(frames[0].shape) self.dtype = frames[0].dtype + self.shape = (len(frames),) + self.frame_shape frames = [compress(frame) for frame in frames] self._frames = frames self.lz4_compress = lz4_compress def __array__(self, dtype=None): - if self.lz4_compress: - from lz4.block import decompress - frames = [np.frombuffer(decompress(frame), dtype=self.dtype).reshape(self.frame_shape) for frame in self._frames] - else: - frames = self._frames - out = np.stack(frames, axis=0) + arr = self[:] if dtype is not None: - out = out.astype(dtype) - return out + return arr.astype(dtype) + return arr def __len__(self): - return len(self.__array__()) + return self.shape[0] - def __getitem__(self, i): - return self.__array__()[i] + def __getitem__(self, int_or_slice): + if isinstance(int_or_slice, int): + return self._check_decompress(self._frames[int_or_slice]) # single frame + return np.stack([self._check_decompress(f) for f in self._frames[int_or_slice]], axis=0) def __eq__(self, other): return self.__array__() == other - @property - def shape(self): - return self.__array__().shape + def _check_decompress(self, frame): + if self.lz4_compress: + from lz4.block import decompress + return np.frombuffer(decompress(frame), dtype=self.dtype).reshape(self.frame_shape) + return frame class FrameStack(Wrapper): - r"""Observation wrapper that stacks the observations in a rolling manner. + r"""Observation wrapper that stacks the observations in a rolling manner. For example, if the number of stacks is 4, then the returned observation contains the most recent 4 observations. For environment 'Pendulum-v0', the original observation is an array with shape [3], so if we stack 4 observations, the processed observation - has shape [3, 4]. + has shape [4, 3]. .. note:: @@ -65,7 +70,7 @@ class FrameStack(Wrapper): .. note:: The observation space must be `Box` type. If one uses `Dict` - as observation space, it should apply `FlattenDictWrapper` at first. + as observation space, it should apply `FlattenDictWrapper` at first. Example:: @@ -78,6 +83,7 @@ class FrameStack(Wrapper): Args: env (Env): environment object num_stack (int): number of stacks + lz4_compress (bool): use lz4 to compress the frames internally """ def __init__(self, env, num_stack, lz4_compress=False):
This is a relatively small PR. It adds the `LazyFrames.last_frame` property for efficient extraction of the last frame contained in a `LazyFrames` frame stack. This is particularly useful when you want to insert frames into an experience-replay buffer without duplication. A counter-argument against adding this property might be to say that this functionality is already covered by `__getitem__`, e.g. `lazy_frames[-1]`. The added benefit of using `last_frame` instead is that it skips the `__array__` call. Oh and I also added some lines of documentation for `lz4_compress`.
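A usage sketch of the rewritten indexing; the import path assumes the package layout in this diff:

```python
import numpy as np
from gym.wrappers.frame_stack import LazyFrames  # path as in this diff

frames = [np.zeros((84, 84), dtype=np.uint8) for _ in range(4)]
lf = LazyFrames(frames)   # lz4_compress defaults to False

last = lf[-1]             # int index: one frame returned, no full stack built
stack = lf[:]             # slice: np.stack over just the selected frames
assert last.shape == (84, 84)
assert stack.shape == (4, 84, 84)
assert len(lf) == 4       # answered from the precomputed shape attribute
```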
https://api.github.com/repos/openai/gym/pulls/1906
2020-05-08T02:07:45Z
2020-06-05T22:01:05Z
2020-06-05T22:01:05Z
2020-06-06T00:00:10Z
964
openai/gym
5,914
Allow BackMsg<->ForwardMsg association for easier performance testing
diff --git a/lib/streamlit/runtime/app_session.py b/lib/streamlit/runtime/app_session.py index d1493ec7bd6c..0fda069e1c22 100644 --- a/lib/streamlit/runtime/app_session.py +++ b/lib/streamlit/runtime/app_session.py @@ -153,6 +153,8 @@ def __init__( self._session_state = SessionState() self._user_info = user_info + self._debug_last_backmsg_id: Optional[str] = None + LOGGER.debug("AppSession initialized (id=%s)", self.id) def flush_browser_queue(self) -> List[ForwardMsg]: @@ -213,6 +215,9 @@ def _enqueue_forward_msg(self, msg: ForwardMsg) -> None: if not config.get_option("client.displayEnabled"): return + if self._debug_last_backmsg_id: + msg.debug_last_backmsg_id = self._debug_last_backmsg_id + self._session_data.enqueue(msg) if self._message_enqueued_callback: self._message_enqueued_callback() @@ -223,6 +228,9 @@ def handle_backmsg(self, msg: BackMsg) -> None: msg_type = msg.WhichOneof("type") if msg_type == "rerun_script": + if msg.debug_last_backmsg_id: + self._debug_last_backmsg_id = msg.debug_last_backmsg_id + self._handle_rerun_script_request(msg.rerun_script) elif msg_type == "load_git_info": self._handle_git_information_request() @@ -487,6 +495,8 @@ def _handle_scriptrunner_event_on_main_thread( ) self._enqueue_forward_msg(script_finished_msg) + self._debug_last_backmsg_id = None + if script_succeeded: # The script completed successfully: update our # LocalSourcesWatcher to account for any source code changes diff --git a/lib/tests/streamlit/runtime/app_session_test.py b/lib/tests/streamlit/runtime/app_session_test.py index d696f8ff0d4d..02b18b029e96 100644 --- a/lib/tests/streamlit/runtime/app_session_test.py +++ b/lib/tests/streamlit/runtime/app_session_test.py @@ -255,6 +255,21 @@ def test_ignore_events_from_noncurrent_scriptrunner(self, mock_enqueue: MagicMoc ) mock_enqueue.assert_not_called() + @patch("streamlit.runtime.app_session.ScriptRunner", MagicMock(spec=ScriptRunner)) + @patch("streamlit.runtime.app_session.AppSession._enqueue_forward_msg", MagicMock()) + def test_resets_debug_last_backmsg_id_on_script_finished(self): + session = _create_test_session() + session._create_scriptrunner(initial_rerun_data=RerunData()) + session._debug_last_backmsg_id = "some_backmsg_id" + + session._handle_scriptrunner_event_on_main_thread( + sender=session._scriptrunner, + event=ScriptRunnerEvent.SCRIPT_STOPPED_WITH_SUCCESS, + forward_msg=ForwardMsg(), + ) + + self.assertIsNone(session._debug_last_backmsg_id) + def test_passes_client_state_on_run_on_save(self): session = _create_test_session() session._run_on_save = True @@ -315,6 +330,15 @@ def test_deregisters_pages_watcher_on_shutdown(self, patched_on_pages_changed): session._on_pages_changed ) + def test_tags_fwd_msgs_with_last_backmsg_id_if_set(self): + session = _create_test_session() + session._debug_last_backmsg_id = "some backmsg id" + + msg = ForwardMsg() + session._enqueue_forward_msg(msg) + + self.assertEqual(msg.debug_last_backmsg_id, "some backmsg id") + def _mock_get_options_for_section(overrides=None) -> Callable[..., Any]: if not overrides: @@ -544,6 +568,15 @@ async def test_handle_backmsg_handles_exceptions(self): handle_clear_cache_request.assert_called_once() handle_backmsg_exception.assert_called_once_with(error) + @patch("streamlit.runtime.app_session.AppSession._create_scriptrunner", MagicMock()) + async def test_handle_backmsg_handles_debug_ids(self): + session = _create_test_session(asyncio.get_running_loop()) + msg = BackMsg( + rerun_script=session._client_state, debug_last_backmsg_id="some backmsg" + ) + 
session.handle_backmsg(msg) + self.assertEqual(session._debug_last_backmsg_id, "some backmsg") + class PopulateCustomThemeMsgTest(unittest.TestCase): @patch("streamlit.runtime.app_session.config") diff --git a/proto/streamlit/proto/BackMsg.proto b/proto/streamlit/proto/BackMsg.proto index 0b7d0102fe9b..a437f15b5aea 100644 --- a/proto/streamlit/proto/BackMsg.proto +++ b/proto/streamlit/proto/BackMsg.proto @@ -54,5 +54,10 @@ message BackMsg { bool load_git_info = 12; } + // An ID used to associate this BackMsg with the corresponding ForwardMsgs + // that are sent to the client due to it. As its name suggests, this field + // should only be used for testing. + string debug_last_backmsg_id = 13; + reserved 1, 2, 3, 4, 8, 9; } diff --git a/proto/streamlit/proto/ForwardMsg.proto b/proto/streamlit/proto/ForwardMsg.proto index 1dc36828b9de..b72399935f84 100644 --- a/proto/streamlit/proto/ForwardMsg.proto +++ b/proto/streamlit/proto/ForwardMsg.proto @@ -71,8 +71,13 @@ message ForwardMsg { string ref_hash = 11; } + // The ID of the last BackMsg that we received before sending this + // ForwardMsg. As its name suggests, this field should only be used for + // testing. + string debug_last_backmsg_id = 17; + reserved 7, 8; - // Next: 17 + // Next: 18 } // ForwardMsgMetadata contains all data that does _not_ get hashed (or cached)
## 📚 Context For some performance work that we're doing, we'd like to be able to more easily associate `ForwardMsg`s with the `BackMsg`s that triggered the script run that created them. In order to do this, we allow a new `debug_last_backmsg_id` field to be set in a `BackMsg` so that all `ForwardMsg`s that are sent from the time the `BackMsg` is received to the end of the corresponding script run are tagged with the same ID. Note that these proto fields are only intended to be used for testing, and because of this their names begin with the `debug_` prefix. In particular, we never expect a real Streamlit web client to ever set this field. - What kind of change does this PR introduce? - [x] Other, please describe: Test infrastructure additions ## 🧪 Testing Done - [x] Added/Updated unit tests
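A sketch of how a test harness could tag a rerun request; the generated-proto import paths follow Streamlit's usual `streamlit.proto.*_pb2` layout and are an assumption here:

```python
# Assumed generated-proto modules; the field names come from the diff.
from streamlit.proto.BackMsg_pb2 import BackMsg
from streamlit.proto.ClientState_pb2 import ClientState

msg = BackMsg(rerun_script=ClientState(), debug_last_backmsg_id="perf-probe-1")
# After the server handles this BackMsg, every ForwardMsg enqueued until the
# script run finishes carries debug_last_backmsg_id == "perf-probe-1".
```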
https://api.github.com/repos/streamlit/streamlit/pulls/5169
2022-08-16T01:35:06Z
2022-08-18T22:11:41Z
2022-08-18T22:11:41Z
2023-05-26T23:34:13Z
1,386
streamlit/streamlit
22,232
Add typing to binary_exponentiation_3.py
diff --git a/maths/binary_exponentiation_3.py b/maths/binary_exponentiation_3.py index dd4e70e74129..9cd143e09207 100644 --- a/maths/binary_exponentiation_3.py +++ b/maths/binary_exponentiation_3.py @@ -11,7 +11,7 @@ """ -def b_expo(a, b): +def b_expo(a: int, b: int) -> int: res = 1 while b > 0: if b & 1: @@ -23,7 +23,7 @@ def b_expo(a, b): return res -def b_expo_mod(a, b, c): +def b_expo_mod(a: int, b: int, c: int) -> int: res = 1 while b > 0: if b & 1:
### Describe your change: * [ ] Add an algorithm? * [ ] Fix a bug or typo in an existing algorithm? * [x] Documentation change? ### Checklist: * [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md). * [x] This pull request is all my own work -- I have not plagiarized. * [x] I know that pull requests will not be merged if they fail the automated tests. * [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms. * [x] All new Python files are placed inside an existing directory. * [x] All filenames are in all lowercase characters with no spaces or dashes. * [x] All functions and variable names follow Python naming conventions. * [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html). * [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing. * [x] All new algorithms include at least one URL that points to Wikipedia or another similar explanation. * [x] If this pull request resolves one or more open issues then the description above includes the issue number(s) with a [closing keyword](https://docs.github.com/en/issues/tracking-your-work-with-issues/linking-a-pull-request-to-an-issue): "Fixes #ISSUE-NUMBER".
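For context, the annotated function from the diff with a couple of quick checks:

```python
def b_expo(a: int, b: int) -> int:
    # Binary exponentiation (as in the diff): O(log b) multiplications.
    res = 1
    while b > 0:
        if b & 1:    # lowest bit of the exponent set -> fold in current power
            res *= a
        a *= a       # square the base
        b >>= 1      # shift the exponent right by one bit
    return res

assert b_expo(2, 10) == 1024
assert b_expo(3, 5) == 243
```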
https://api.github.com/repos/TheAlgorithms/Python/pulls/9477
2023-10-02T14:31:26Z
2023-10-02T14:41:34Z
2023-10-02T14:41:34Z
2023-10-02T14:41:39Z
203
TheAlgorithms/Python
29,924
Fix misplaced paren in docs
diff --git a/flask/ctx.py b/flask/ctx.py index 8472c920c2..ec8e787eb0 100644 --- a/flask/ctx.py +++ b/flask/ctx.py @@ -170,7 +170,7 @@ def __init__(self, username, remote_addr=None): self.remote_addr = remote_addr Alternatively you can also just test any of the context bound objects - (such as :class:`request` or :class:`g` for truthness):: + (such as :class:`request` or :class:`g`) for truthness:: class User(db.Model):
https://api.github.com/repos/pallets/flask/pulls/3196
2019-05-13T21:12:29Z
2019-05-16T15:28:59Z
2019-05-16T15:28:59Z
2020-11-14T02:21:37Z
145
pallets/flask
20,026
[Core] Reduce surfacing scary `Failed to get the resource load:` messages upon node failures
diff --git a/src/ray/gcs/gcs_server/gcs_server.cc b/src/ray/gcs/gcs_server/gcs_server.cc index 26fefdb2666ce..fc08dfe1fde8b 100644 --- a/src/ray/gcs/gcs_server/gcs_server.cc +++ b/src/ray/gcs/gcs_server/gcs_server.cc @@ -292,8 +292,8 @@ void GcsServer::InitGcsResourceManager(const GcsInitData &gcs_init_data) { if (status.ok()) { gcs_resource_manager_->UpdateResourceLoads(load.resources()); } else { - RAY_LOG(ERROR) << "Failed to get the resource load: " - << status.ToString(); + RAY_LOG_EVERY_N(WARNING, 10) + << "Failed to get the resource load: " << status.ToString(); } }); }
<!-- Thank you for your contribution! Please review https://github.com/ray-project/ray/blob/master/CONTRIBUTING.rst before opening a pull request. --> <!-- Please add a reviewer to the assignee section when you create a PR. If you don't have access to it, we will shortly find a reviewer and assign them to your PR. --> ## Why are these changes needed? Node failure logs become extremely spammy with `Failed to get the resource load:` messages. This PR removes them from driver-side logs and prints them less often. ## Related issue number <!-- For example: "Closes #1234" --> ## Checks - [ ] I've run `scripts/format.sh` to lint the changes in this PR. - [ ] I've included any doc changes needed for https://docs.ray.io/en/master/. - [ ] I've made sure the tests are passing. Note that there might be a few flaky tests, see the recent failures at https://flakey-tests.ray.io/ - Testing Strategy - [ ] Unit tests - [ ] Release tests - [ ] This PR is not tested :(
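For illustration, a Python analog of the every-N throttling the diff switches to; this sketches the pattern only and is not Ray's actual `RAY_LOG_EVERY_N` macro:

```python
import logging
from collections import defaultdict

_counts = defaultdict(int)

def log_every_n(key: str, n: int, message: str) -> None:
    # Emit only the 1st, (n+1)th, (2n+1)th, ... occurrence per key (n > 1).
    _counts[key] += 1
    if _counts[key] % n == 1:
        logging.warning(message)

for _ in range(30):
    log_every_n("resource_load", 10, "Failed to get the resource load: ...")
# -> 3 warnings emitted instead of 30
```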
https://api.github.com/repos/ray-project/ray/pulls/27094
2022-07-27T14:56:00Z
2022-07-27T16:35:57Z
2022-07-27T16:35:57Z
2022-07-27T16:35:57Z
196
ray-project/ray
19,375
refs(py3): Upgrade celery to 3.1.25
diff --git a/requirements-base.txt b/requirements-base.txt index 411b2b70e43b6..66d0a3ec1d2f8 100644 --- a/requirements-base.txt +++ b/requirements-base.txt @@ -1,7 +1,7 @@ beautifulsoup4>=4.7.1,<4.8 boto3>=1.4.1,<1.4.6 botocore<1.5.71 -celery>=3.1.8,<3.1.19 +celery>=3.1.25,<4.0.0 click>=5.0,<7.0 confluent-kafka==0.11.5 croniter>=0.3.34,<0.4.0 @@ -24,7 +24,7 @@ google-cloud-storage==1.13.3 googleapis-common-protos==1.6.0 ipaddress>=1.0.16,<1.1.0 ; python_version < "3.3" jsonschema==2.6.0 -kombu==3.0.35 +kombu==3.0.37 lxml>=4.3.3,<4.4.0 maxminddb==1.4.1 mistune>0.7,<0.9
We want to get to 3.1.25 so that we have forwards compatibility with the celery 4.x message protocol, since a version of 4.x is required for python 3. This version requires: - billiard == 3.3.0.23. We don't even pin this here, but we're already pinned to this version in getsentry, so should be safe. - kombu == 3.0.37. Bumping this here. - librabbitmq==1.6.1. We're already pinned to this so should be fine. Not sure if this is a great test since we mostly run things in process if we use celery. Will have to do some manual testing too. I've done some basic manual testing. Task format doesn't look to have changed at all, and we're still able to fire/run tasks.
https://api.github.com/repos/getsentry/sentry/pulls/19753
2020-07-07T19:40:21Z
2020-07-08T20:19:03Z
2020-07-08T20:19:03Z
2023-04-03T23:39:21Z
293
getsentry/sentry
44,492
Made some grammatical corrections in README.MD
diff --git a/exercises/devops/README.md b/exercises/devops/README.md index 988a84cea..96648d944 100644 --- a/exercises/devops/README.md +++ b/exercises/devops/README.md @@ -72,7 +72,7 @@ A couple of thoughts: #### Tooling <details> -<summary>What are you taking into consideration when choosing a tool/technology?</summary><br><b> +<summary>What do you take into consideration when choosing a tool/technology?</summary><br><b> A few ideas to think about: @@ -220,7 +220,7 @@ Read more [here](https://en.wikipedia.org/wiki/Software_repository) </b></details> <details> -<summary>What is caching? How does it works? Why is it important?</summary><br><b> +<summary>What is caching? How does it work? Why is it important?</summary><br><b> Caching is fast access to frequently used resources which are computationally expensive or IO intensive and do not change often. There can be several layers of cache that can start from CPU caches to distributed cache systems. Common ones are in memory caching and distributed caching. <br/> Caches are typically data structures that contains some data, such as a hashtable or dictionary. However, any data structure can provide caching capabilities, like set, sorted set, sorted dictionary etc. While, caching is used in many applications, they can create subtle bugs if not implemented correctly or used correctly. For example,cache invalidation, expiration or updating is usually quite challenging and hard. </b></details> @@ -239,11 +239,11 @@ Reliability, when used in DevOps context, is the ability of a system to recover </b></details> <details> -<summary>What "Availability" means? What means are there to track Availability of a service?</summary><br><b> +<summary>What does "Availability" mean? What means are there to track Availability of a service?</summary><br><b> </b></details> <details> -<summary>Why 100% availability isn't a target? Why most companies or teams set it to be 99%.X?</summary><br><b> +<summary>Why isn't 100% availability a target? Why do most companies or teams set it to be 99%.X?</summary><br><b> </b></details> <details> @@ -251,7 +251,7 @@ Reliability, when used in DevOps context, is the ability of a system to recover </b></details> <details> -<summary>How a web server works?</summary><br><b> +<summary>How does a web server work?</summary><br><b> <a href="https://developer.mozilla.org/en-US/docs/Learn/Common_questions/What_is_a_web_server" title="Click here to redirect to MDN official page" style="background-color:#FFFFFF;color:#000000;text-decoration:none">According to MDN Web Docs -</a> We can understand web servers using two view points, which is: @@ -286,7 +286,7 @@ This communcation between web browser and web server happens in the following wa </b></details> <details> -<summary>Describe me the architecture of service/app/project/... you designed and/or implemented</summary><br><b> +<summary>Describe the architecture of service/app/project/... you designed and/or implemented</summary><br><b> </b></details> <details> @@ -322,7 +322,7 @@ IAC (infrastructure as code) is a declarative approach of defining infrastructur </b></details> <details> -<summary>What benefits infrastructure-as-code has?</summary><br><b> +<summary>What benefits does infrastructure-as-code have?</summary><br><b> - fully automated process of provisioning, modifying and deleting your infrastructure - version control for your infrastructure which allows you to quickly rollback to previous versions @@ -364,7 +364,7 @@ Build artifacts are usually stored in a repository. 
They can be used in release </b></details> <details> -<summary>What do you think about the following sentence?: "implementing or practicing DevOps leads to more secure software"</summary><br><b> +<summary>What do you think about the following sentence?: "Implementing or practicing DevOps leads to more secure software"</summary><br><b> </b></details> <details>
No changes to meaning.
https://api.github.com/repos/bregman-arie/devops-exercises/pulls/243
2022-05-19T08:00:50Z
2022-05-19T08:57:02Z
2022-05-19T08:57:02Z
2022-05-19T08:57:02Z
982
bregman-arie/devops-exercises
17,518
We don't try to add location blocks through a mechanism that checks REPEATABLE_DIRECTIVES, and it wouldn't work as an accurate check even if we did, so just remove it
diff --git a/certbot-nginx/certbot_nginx/parser.py b/certbot-nginx/certbot_nginx/parser.py index 3dc70f19bd1..5a6e9ceb5fd 100644 --- a/certbot-nginx/certbot_nginx/parser.py +++ b/certbot-nginx/certbot_nginx/parser.py @@ -559,7 +559,7 @@ def _update_or_add_directives(directives, insert_at_top, block): INCLUDE = 'include' -REPEATABLE_DIRECTIVES = set(['server_name', 'listen', INCLUDE, 'location', 'rewrite']) +REPEATABLE_DIRECTIVES = set(['server_name', 'listen', INCLUDE, 'rewrite']) COMMENT = ' managed by Certbot' COMMENT_BLOCK = [' ', '#', COMMENT]
Part of #5438.
https://api.github.com/repos/certbot/certbot/pulls/5787
2018-03-23T23:32:52Z
2018-03-27T22:25:35Z
2018-03-27T22:25:35Z
2018-03-27T22:25:35Z
177
certbot/certbot
3,527
[docs] fix commonmarker security issue
diff --git a/docs/Gemfile.lock b/docs/Gemfile.lock index d2a0939d71a2..0534d934144c 100644 --- a/docs/Gemfile.lock +++ b/docs/Gemfile.lock @@ -14,7 +14,7 @@ GEM execjs coffee-script-source (1.11.1) colorator (1.1.0) - commonmarker (0.17.13) + commonmarker (0.23.4) ruby-enum (~> 0.5) concurrent-ruby (1.1.10) dnsruby (1.61.9)
reference: https://github.com/github/cmark-gfm/security/advisories/GHSA-mc3g-88wq-6f4x
https://api.github.com/repos/microsoft/DeepSpeed/pulls/1892
2022-04-15T16:57:54Z
2022-04-15T19:11:45Z
2022-04-15T19:11:45Z
2022-04-15T19:11:48Z
146
microsoft/DeepSpeed
10,751
Add Keras utility for making user programs deterministic.
diff --git a/keras/api/golden/v2/tensorflow.keras.utils.pbtxt b/keras/api/golden/v2/tensorflow.keras.utils.pbtxt index 4796cb3fbe0..ad46dc1c3b5 100644 --- a/keras/api/golden/v2/tensorflow.keras.utils.pbtxt +++ b/keras/api/golden/v2/tensorflow.keras.utils.pbtxt @@ -100,6 +100,10 @@ tf_module { name: "serialize_keras_object" argspec: "args=[\'instance\'], varargs=None, keywords=None, defaults=None" } + member_method { + name: "set_random_seed" + argspec: "args=[\'seed\'], varargs=None, keywords=None, defaults=None" + } member_method { name: "text_dataset_from_directory" argspec: "args=[\'directory\', \'labels\', \'label_mode\', \'class_names\', \'batch_size\', \'max_length\', \'shuffle\', \'seed\', \'validation_split\', \'subset\', \'follow_links\'], varargs=None, keywords=None, defaults=[\'inferred\', \'int\', \'None\', \'32\', \'None\', \'True\', \'None\', \'None\', \'None\', \'False\'], " diff --git a/keras/utils/tf_utils.py b/keras/utils/tf_utils.py index f3fc6ebb952..4c681756874 100644 --- a/keras/utils/tf_utils.py +++ b/keras/utils/tf_utils.py @@ -18,6 +18,7 @@ import collections import copy +import random import numpy as np from tensorflow.python.framework import ops from keras import backend @@ -27,6 +28,38 @@ from tensorflow.python.util.tf_export import keras_export +@keras_export('keras.utils.set_random_seed', v1=[]) +def set_random_seed(seed): + """Sets all random seeds for the program (Python, NumPy, and TensorFlow). + + You can use this utility to make almost any Keras program fully deterministic. + Some limitations apply in cases where network communications are involved + (e.g. parameter server distribution), which creates additional sources of + randomness, or when certain non-deterministic cuDNN ops are involved. + + Calling this utility is equivalent to the following: + + ```python + import random + import numpy as np + import tensorflow as tf + random.seed(seed) + np.random.seed(seed) + tf.random.set_seed(seed) + ``` + + Arguments: + seed: Integer, the random seed to use. + """ + if not isinstance(seed, int): + raise ValueError( + 'Expected `seed` argument to be an integer. ' + f'Received: seed={seed} (of type {type(seed)})') + random.seed(seed) + np.random.seed(seed) + tf.random.set_seed(seed) + + def is_tensor_or_tensor_list(v): v = tf.nest.flatten(v) if v and isinstance(v[0], tf.Tensor): diff --git a/keras/utils/tf_utils_test.py b/keras/utils/tf_utils_test.py index 945fdaba54d..dd35adddd20 100644 --- a/keras/utils/tf_utils_test.py +++ b/keras/utils/tf_utils_test.py @@ -17,7 +17,7 @@ import tensorflow.compat.v2 as tf from absl.testing import parameterized - +import numpy as np import keras from keras import combinations from keras.utils import tf_utils @@ -228,5 +228,26 @@ def test_is_extension_type_return_false_for_list(self): tensor = [1., 2., 3.] self.assertFalse(tf_utils.is_extension_type(tensor)) + +class TestRandomSeedSetting(tf.test.TestCase): + + def test_seeds(self): + def get_model_output(): + model = keras.Sequential([ + keras.layers.Dense(10), + keras.layers.Dropout(0.5), + keras.layers.Dense(10), + ]) + x = np.random.random((32, 10)).astype('float32') + ds = tf.data.Dataset.from_tensor_slices(x).shuffle(32).batch(16) + return model.predict(ds) + + tf_utils.set_random_seed(42) + y1 = get_model_output() + tf_utils.set_random_seed(42) + y2 = get_model_output() + self.assertAllClose(y1, y2, atol=1e-6) + + if __name__ == '__main__': tf.test.main()
Add Keras utility for making user programs deterministic.
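A usage sketch, assuming a TensorFlow build where `tf.keras.utils.set_random_seed` is exported as added in this PR:

```python
import numpy as np
import tensorflow as tf

tf.keras.utils.set_random_seed(42)        # seeds random, numpy and tf at once
a = tf.random.uniform((2, 2))
tf.keras.utils.set_random_seed(42)
b = tf.random.uniform((2, 2))
assert np.allclose(a.numpy(), b.numpy())  # identical draws after re-seeding
```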
https://api.github.com/repos/keras-team/keras/pulls/15243
2021-08-24T21:54:52Z
2021-08-25T17:35:45Z
2021-08-25T17:35:45Z
2021-08-25T17:35:45Z
1,029
keras-team/keras
47,489
refactor(makefile): :recycle: remove repetitive command
diff --git a/Makefile b/Makefile index 98d9213f59..65ee829ec0 100644 --- a/Makefile +++ b/Makefile @@ -5,7 +5,7 @@ COLOR_RESET=\033[0m COLOR_CYAN=\033[1;36m COLOR_GREEN=\033[1;32m -.PHONY: help install dev-install run +.PHONY: help install run .DEFAULT_GOAL := help @@ -17,11 +17,8 @@ help: @echo "Please use 'make <target>' where <target> is one of the following:" @echo " help Return this message with usage instructions." @echo " install Will install the dependencies and create a virtual environment." - @echo " dev-install Will install the dev dependencies too." @echo " run <folder_name> Runs GPT Engineer on the folder with the given name." -dev-install: install - install: create-venv upgrade-pip install-dependencies install-pre-commit farewell create-venv:
Removing the repetitive command `dev-install` because it does the same as `install`. It does not matter whether you run `make install` or `make dev-install`; both do the same thing.
https://api.github.com/repos/gpt-engineer-org/gpt-engineer/pulls/451
2023-06-30T10:35:45Z
2023-07-02T13:22:07Z
2023-07-02T13:22:07Z
2023-07-02T13:22:07Z
245
gpt-engineer-org/gpt-engineer
33,076
add alias for use on mondays
diff --git a/README.md b/README.md index 29f409302..8845cf769 100644 --- a/README.md +++ b/README.md @@ -63,6 +63,8 @@ And add to `.bashrc` or `.zshrc`: ```bash alias fuck='$(thefuck $(fc -ln -1))' +# When you really need it +alias FUCK='fuck' ``` Or in `config.fish`:
https://api.github.com/repos/nvbn/thefuck/pulls/14
2015-04-17T23:31:23Z
2015-04-18T19:20:00Z
2015-04-18T19:20:00Z
2015-04-18T19:20:00Z
106
nvbn/thefuck
30,702
Strip slashes around baseUrlPath value
diff --git a/lib/streamlit/server/server_util.py b/lib/streamlit/server/server_util.py index bad079046bca..e00f56744387 100644 --- a/lib/streamlit/server/server_util.py +++ b/lib/streamlit/server/server_util.py @@ -149,5 +149,5 @@ def _get_s3_url_host_if_manually_set(): def make_url_path_regex(*path): """Get a regex of the form ^/foo/bar/baz/?$ for a path (foo, bar, baz).""" - path = [x for x in path if x] # Filter out falsy components. + path = [x.strip("/") for x in path if x] # Filter out falsy components. return r"^/%s/?$" % "/".join(path)
**Issue:** #521 **Description:** When constructing Tornado routes, strip any forward-slash characters on the value of server.baseUrlPath. Otherwise, routes with double slashes are generated, making them not match anything. The Report class already strips slashes when generating URLs, so the URLs printed to the console don't look any different. Given an invocation like `streamlit run --server.baseUrlPath /abc sa/gui.py`, here are the routes that were generated. ``` [ ('^//abc/stream/?$', …), ('^//abc/healthz/?$', …), ('^//abc/debugz/?$', …), ('^//abc/metrics/?$', …), ('^//abc/message/?$', …), ('^//abc/(.*)/?$', …) ] ``` Stripping slashes gives the correct routes. ``` [ ('^/abc/stream/?$', …), ('^/abc/healthz/?$', …), ('^/abc/debugz/?$', …), ('^/abc/metrics/?$', …), ('^/abc/message/?$', …), ('^/abc/(.*)/?$', …) ] ``` **Contribution License Agreement** By submitting this pull request you agree that all contributions to this project are made under the Apache 2.0 license.
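A before/after check using the function from the diff:

```python
def make_url_path_regex(*path):
    """Get a regex of the form ^/foo/bar/baz/?$ for a path (foo, bar, baz)."""
    path = [x.strip("/") for x in path if x]  # filter falsy, strip slashes (the fix)
    return r"^/%s/?$" % "/".join(path)

# Before the fix, "/abc" produced the unmatchable '^//abc/stream/?$';
# stripping yields the intended route:
assert make_url_path_regex("/abc", "stream") == r"^/abc/stream/?$"
```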
https://api.github.com/repos/streamlit/streamlit/pulls/523
2019-10-24T03:06:59Z
2019-10-24T23:54:14Z
2019-10-24T23:54:14Z
2019-10-25T01:06:25Z
174
streamlit/streamlit
22,537
gh-102356: Add trashcan macros to filter object dealloc
diff --git a/Lib/test/test_builtin.py b/Lib/test/test_builtin.py index 9e19af0ae90fc1..e7a79bc13b7f3d 100644 --- a/Lib/test/test_builtin.py +++ b/Lib/test/test_builtin.py @@ -926,6 +926,16 @@ def test_filter_pickle(self): f2 = filter(filter_char, "abcdeabcde") self.check_iter_pickle(f1, list(f2), proto) + def test_filter_dealloc(self): + # Tests recursive deallocation of nested filter objects using the + # thrashcan mechanism. See gh-102356 for more details. + max_iters = 1000000 + i = filter(bool, range(max_iters)) + for _ in range(max_iters): + i = filter(bool, i) + del i + gc.collect() + def test_getattr(self): self.assertTrue(getattr(sys, 'stdout') is sys.stdout) self.assertRaises(TypeError, getattr) diff --git a/Misc/ACKS b/Misc/ACKS index c591cd3bfe4b9e..7bbde3af99782b 100644 --- a/Misc/ACKS +++ b/Misc/ACKS @@ -637,6 +637,7 @@ Tim Golden Yonatan Goldschmidt Mark Gollahon Mikhail Golubev +Marta Gómez Macías Guilherme Gonçalves Tiago Gonçalves Chris Gonnerman diff --git a/Misc/NEWS.d/next/Core and Builtins/2023-03-04-20-56-12.gh-issue-102356.07KvUd.rst b/Misc/NEWS.d/next/Core and Builtins/2023-03-04-20-56-12.gh-issue-102356.07KvUd.rst new file mode 100644 index 00000000000000..c03fd5266bc301 --- /dev/null +++ b/Misc/NEWS.d/next/Core and Builtins/2023-03-04-20-56-12.gh-issue-102356.07KvUd.rst @@ -0,0 +1,2 @@ +Fix a bug that caused a crash when deallocating deeply nested filter +objects. Patch by Marta Gómez Macías. diff --git a/Python/bltinmodule.c b/Python/bltinmodule.c index 53439ab16040c4..12ca0ba6c4873c 100644 --- a/Python/bltinmodule.c +++ b/Python/bltinmodule.c @@ -553,9 +553,11 @@ static void filter_dealloc(filterobject *lz) { PyObject_GC_UnTrack(lz); + Py_TRASHCAN_BEGIN(lz, filter_dealloc) Py_XDECREF(lz->func); Py_XDECREF(lz->it); Py_TYPE(lz)->tp_free(lz); + Py_TRASHCAN_END } static int
<!-- Thanks for your contribution! Please read this comment in its entirety. It's quite important. # Pull Request title It should be in the following format: ``` gh-NNNNN: Summary of the changes made ``` Where: gh-NNNNN refers to the GitHub issue number. Most PRs will require an issue number. Trivial changes, like fixing a typo, do not need an issue. # Backport Pull Request title If this is a backport PR (PR made against branches other than `main`), please ensure that the PR title is in the following format: ``` [X.Y] <title from the original PR> (GH-NNNN) ``` Where: [X.Y] is the branch name, e.g. [3.6]. GH-NNNN refers to the PR number from `main`. --> <!-- gh-issue-number: gh-102356 --> * Issue: gh-102356 <!-- /gh-issue-number -->
https://api.github.com/repos/python/cpython/pulls/102426
2023-03-04T19:57:48Z
2023-03-05T11:00:42Z
2023-03-05T11:00:42Z
2023-03-09T10:43:22Z
674
python/cpython
3,803
Fix PR08, RT02, RT03, and SA01 on pandas.Index.fillna
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index aa22527d8c2d7..d2887c6652635 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -2126,13 +2126,18 @@ def fillna(self, value=None, downcast=None): Scalar value to use to fill holes (e.g. 0). This value cannot be a list-likes. downcast : dict, default is None - a dict of item->dtype of what to downcast if possible, + A dict of item->dtype of what to downcast if possible, or the string 'infer' which will try to downcast to an appropriate equal type (e.g. float64 to int64 if possible). Returns ------- - filled : Index + Index + + See Also + -------- + DataFrame.fillna : Fill NaN values of a DataFrame. + Series.fillna : Fill NaN Values of a Series. """ self._assert_can_do_op(value) if self.hasnans:
- [x] closes #https://github.com/pandanistas/pandanistas_sprint_ui2020/issues/5 - [ ] tests added / passed - [x] passes `black pandas` - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/32355
2020-02-29T05:15:36Z
2020-03-04T15:41:32Z
2020-03-04T15:41:32Z
2020-03-04T15:41:38Z
255
pandas-dev/pandas
44,930
Update miaopai api
diff --git a/src/you_get/extractors/yixia.py b/src/you_get/extractors/yixia.py index ff45730d66..d3d1ef350e 100644 --- a/src/you_get/extractors/yixia.py +++ b/src/you_get/extractors/yixia.py @@ -7,6 +7,24 @@ from json import loads import re +#---------------------------------------------------------------------- +def miaopai_download_by_smid(smid, output_dir = '.', merge = True, info_only = False): + """""" + api_endpoint = 'https://n.miaopai.com/api/aj_media/info.json?smid={smid}'.format(smid = smid) + + html = get_content(api_endpoint) + + api_content = loads(html) + + video_url = api_content['data']['meta_data'][0]['play_urls']['l'] + title = api_content['data']['description'] + + type, ext, size = url_info(video_url) + + print_info(site_info, title, type, size) + if not info_only: + download_urls([video_url], title, ext, size, output_dir, merge=merge) + #---------------------------------------------------------------------- def yixia_miaopai_download_by_scid(scid, output_dir = '.', merge = True, info_only = False): """""" @@ -47,7 +65,11 @@ def yixia_xiaokaxiu_download_by_scid(scid, output_dir = '.', merge = True, info_ def yixia_download(url, output_dir = '.', merge = True, info_only = False, **kwargs): """wrapper""" hostname = urlparse(url).hostname - if 'miaopai.com' in hostname: #Miaopai + if 'n.miaopai.com' == hostname: + smid = match1(url, r'n\.miaopai\.com/media/([^.]+)') + miaopai_download_by_smid(smid, output_dir, merge, info_only) + return + elif 'miaopai.com' in hostname: #Miaopai yixia_download_by_scid = yixia_miaopai_download_by_scid site_info = "Yixia Miaopai"
Improves Miaopai (秒拍) scraping.
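A sketch of the new URL handling with a hypothetical URL; the regex and API endpoint are copied from the diff:

```python
import re

url = "https://n.miaopai.com/media/abc123.html"  # hypothetical example URL
smid = re.search(r"n\.miaopai\.com/media/([^.]+)", url).group(1)
api_endpoint = f"https://n.miaopai.com/api/aj_media/info.json?smid={smid}"
assert smid == "abc123"
```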
https://api.github.com/repos/soimort/you-get/pulls/2648
2018-10-25T03:15:08Z
2018-10-28T12:28:12Z
2018-10-28T12:28:12Z
2018-10-28T12:28:22Z
508
soimort/you-get
21,123
Implement CIDR forbidden blacklist
diff --git a/shadowsocks/asyncdns.py b/shadowsocks/asyncdns.py index 6f60dc983..6fee6b986 100644 --- a/shadowsocks/asyncdns.py +++ b/shadowsocks/asyncdns.py @@ -233,18 +233,6 @@ def parse_response(data): return None -def is_ip(address): - for family in (socket.AF_INET, socket.AF_INET6): - try: - if type(address) != str: - address = address.decode('utf8') - socket.inet_pton(family, address) - return family - except (TypeError, ValueError, OSError, IOError): - pass - return False - - def is_valid_hostname(hostname): if len(hostname) > 255: return False @@ -296,7 +284,7 @@ def _parse_resolv(self): parts = line.split() if len(parts) >= 2: server = parts[1] - if is_ip(server) == socket.AF_INET: + if common.is_ip(server) == socket.AF_INET: if type(server) != str: server = server.decode('utf8') self._servers.append(server) @@ -316,7 +304,7 @@ def _parse_hosts(self): parts = line.split() if len(parts) >= 2: ip = parts[0] - if is_ip(ip): + if common.is_ip(ip): for i in range(1, len(parts)): hostname = parts[i] if hostname: @@ -423,7 +411,7 @@ def resolve(self, hostname, callback): hostname = hostname.encode('utf8') if not hostname: callback(None, Exception('empty hostname')) - elif is_ip(hostname): + elif common.is_ip(hostname): callback((hostname, hostname), None) elif hostname in self._hosts: logging.debug('hit hosts: %s', hostname) diff --git a/shadowsocks/common.py b/shadowsocks/common.py index e4f698c0a..0c4e27857 100644 --- a/shadowsocks/common.py +++ b/shadowsocks/common.py @@ -101,6 +101,18 @@ def inet_pton(family, addr): raise RuntimeError("What family?") +def is_ip(address): + for family in (socket.AF_INET, socket.AF_INET6): + try: + if type(address) != str: + address = address.decode('utf8') + inet_pton(family, address) + return family + except (TypeError, ValueError, OSError, IOError): + pass + return False + + def patch_socket(): if not hasattr(socket, 'inet_pton'): socket.inet_pton = inet_pton @@ -172,6 +184,61 @@ def parse_header(data): return addrtype, to_bytes(dest_addr), dest_port, header_length +class IPNetwork(object): + ADDRLENGTH = {socket.AF_INET: 32, socket.AF_INET6: 128, False: 0} + + def __init__(self, addrs): + self._network_list_v4 = [] + self._network_list_v6 = [] + if type(addrs) == str: + addrs = addrs.split(',') + list(map(self.add_network, addrs)) + + def add_network(self, addr): + if addr is "": + return + block = addr.split('/') + addr_family = is_ip(block[0]) + addr_len = IPNetwork.ADDRLENGTH[addr_family] + if addr_family is socket.AF_INET: + ip, = struct.unpack("!I", socket.inet_aton(block[0])) + elif addr_family is socket.AF_INET6: + hi, lo = struct.unpack("!QQ", inet_pton(addr_family, block[0])) + ip = (hi << 64) | lo + else: + raise SyntaxError("Not a valid CIDR notation: %s" % addr) + if len(block) is 1: + prefix_size = 0 + while (ip & 1) == 0 and ip is not 0: + ip >>= 1 + prefix_size += 1 + logging.warn("You did't specify CIDR routing prefix size for %s, " + "implicit treated as %s/%d" % (addr, addr, addr_len)) + elif block[1].isdigit() and int(block[1]) <= addr_len: + prefix_size = addr_len - int(block[1]) + ip >>= prefix_size + else: + raise SyntaxError("Not a valid CIDR notation: %s" % addr) + if addr_family is socket.AF_INET: + self._network_list_v4.append((ip, prefix_size)) + else: + self._network_list_v6.append((ip, prefix_size)) + + def __contains__(self, addr): + addr_family = is_ip(addr) + if addr_family is socket.AF_INET: + ip, = struct.unpack("!I", socket.inet_aton(addr)) + return any(map(lambda n_ps: n_ps[0] == ip >> n_ps[1], + 
self._network_list_v4)) + elif addr_family is socket.AF_INET6: + hi, lo = struct.unpack("!QQ", inet_pton(addr_family, addr)) + ip = (hi << 64) | lo + return any(map(lambda n_ps: n_ps[0] == ip >> n_ps[1], + self._network_list_v6)) + else: + return False + + def test_inet_conv(): ipv4 = b'8.8.4.4' b = inet_pton(socket.AF_INET, ipv4) @@ -198,7 +265,23 @@ def test_pack_header(): assert pack_addr(b'www.google.com') == b'\x03\x0ewww.google.com' +def test_ip_network(): + ip_network = IPNetwork('127.0.0.0/24,::ff:1/112,::1,192.168.1.1,192.0.2.0') + assert '127.0.0.1' in ip_network + assert '127.0.1.1' not in ip_network + assert ':ff:ffff' in ip_network + assert '::ffff:1' not in ip_network + assert '::1' in ip_network + assert '::2' not in ip_network + assert '192.168.1.1' in ip_network + assert '192.168.1.2' not in ip_network + assert '192.0.2.1' in ip_network + assert '192.0.3.1' in ip_network # 192.0.2.0 is treated as 192.0.2.0/23 + assert 'www.google.com' not in ip_network + + if __name__ == '__main__': test_inet_conv() test_parse_header() test_pack_header() + test_ip_network() diff --git a/shadowsocks/utils.py b/shadowsocks/utils.py index a51c9650e..6ea3daaaf 100644 --- a/shadowsocks/utils.py +++ b/shadowsocks/utils.py @@ -29,7 +29,7 @@ import sys import getopt import logging -from shadowsocks.common import to_bytes, to_str +from shadowsocks.common import to_bytes, to_str, IPNetwork VERBOSE_LEVEL = 5 @@ -193,6 +193,8 @@ def get_config(is_local): sys.exit(2) else: config['server'] = config.get('server', '0.0.0.0') + config['forbidden_ip'] = \ + IPNetwork(config.get('forbidden_ip', '127.0.0.0/8,::1/128')) config['server_port'] = config.get('server_port', 8388) if is_local and not config.get('password', None): diff --git a/tests/test_large_file.sh b/tests/test_large_file.sh index e8acd79f7..33bcb590f 100755 --- a/tests/test_large_file.sh +++ b/tests/test_large_file.sh @@ -8,7 +8,7 @@ mkdir -p tmp $PYTHON shadowsocks/local.py -c tests/aes.json & LOCAL=$! -$PYTHON shadowsocks/server.py -c tests/aes.json & +$PYTHON shadowsocks/server.py -c tests/aes.json --forbidden-ip "" & SERVER=$! sleep 3
In these commits I implemented a simple IPNetwork class, giving shadowsocks the ability to ban subnets using CIDR notation.
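A worked sketch of the containment arithmetic the IPNetwork class performs for IPv4: a network is stored as `(ip >> host_bits, host_bits)`, and membership tests whether a candidate address shares that prefix:

```python
import socket
import struct

network, = struct.unpack("!I", socket.inet_aton("127.0.0.0"))
host_bits = 32 - 24                       # a /24 leaves 8 host bits
stored = (network >> host_bits, host_bits)

inside, = struct.unpack("!I", socket.inet_aton("127.0.0.1"))
assert inside >> stored[1] == stored[0]   # 127.0.0.1 in 127.0.0.0/24

outside, = struct.unpack("!I", socket.inet_aton("127.0.1.1"))
assert outside >> stored[1] != stored[0]  # 127.0.1.1 not in 127.0.0.0/24
```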
https://api.github.com/repos/shadowsocks/shadowsocks/pulls/279
2015-01-31T15:06:56Z
2015-02-01T01:00:08Z
2015-02-01T01:00:08Z
2015-02-01T01:00:31Z
1,933
shadowsocks/shadowsocks
24,632
Add ZAP FileUpload AddOn to Tools (Upload Insecure Files Page)
diff --git a/Upload Insecure Files/README.md b/Upload Insecure Files/README.md index e5593850f4..a0eff1656c 100644 --- a/Upload Insecure Files/README.md +++ b/Upload Insecure Files/README.md @@ -21,6 +21,7 @@ Uploaded files may pose a significant risk if not handled correctly. A remote at ## Tools - [Fuxploider](https://github.com/almandin/fuxploider) - [Burp > Upload Scanner](https://portswigger.net/bappstore/b2244cbb6953442cb3c82fa0a0d908fa) +- [ZAP > FileUpload AddOn](https://www.zaproxy.org/blog/2021-08-20-zap-fileupload-addon/) ## Exploits
https://api.github.com/repos/swisskyrepo/PayloadsAllTheThings/pulls/448
2021-10-20T00:08:27Z
2022-05-01T11:00:25Z
2022-05-01T11:00:25Z
2022-05-01T11:00:25Z
187
swisskyrepo/PayloadsAllTheThings
8,819
Objects365 images GB vs zips GB
diff --git a/data/Objects365.yaml b/data/Objects365.yaml index 8e6326b3859..334c23c359c 100644 --- a/data/Objects365.yaml +++ b/data/Objects365.yaml @@ -4,7 +4,7 @@ # parent # ├── yolov5 # └── datasets -# └── Objects365 ← downloads here (712 GB) +# └── Objects365 ← downloads here (712 GB = 367G data + 345G zips) # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
@kalenmike ## 🛠️ PR Summary <sub>Made with ❤️ by [Ultralytics Actions](https://github.com/ultralytics/actions)<sub> ### 🌟 Summary Enhancements to Objects365 dataset configuration in YOLOv5. ### 📊 Key Changes - Updated the Objects365.yaml file, potentially refining the dataset paths or structure. - The specifics of the change are not clear from the provided diff excerpt, as the actual lines altered are not shown. ### 🎯 Purpose & Impact - 🎨 Improves data handling for the Objects365 dataset within the YOLOv5 framework by possibly fixing issues or optimizing the dataset referencing method. - 👩‍💻 Allows users of YOLOv5 to train models more effectively using Objects365, either through more efficient access to data or more accurate dataset division into train/val/test sets. - 🚀 May lead to better performance and convenience for researchers and developers working on object detection tasks using this dataset.
https://api.github.com/repos/ultralytics/yolov5/pulls/7335
2022-04-07T14:11:39Z
2022-04-07T14:12:45Z
2022-04-07T14:12:45Z
2024-01-19T11:33:11Z
170
ultralytics/yolov5
25,211
URL Reversing in Quickstart Doc #779
diff --git a/docs/quickstart.rst b/docs/quickstart.rst index 3cb9b2f765..a10149ae08 100644 --- a/docs/quickstart.rst +++ b/docs/quickstart.rst @@ -236,8 +236,9 @@ below. It tells Flask to behave as though it is handling a request, even though we are interacting with it through a Python shell. Have a look at the explanation below. :ref:`context-locals`). -Why would you want to build URLs instead of hard-coding them into your -templates? There are three good reasons for this: +Why would you want to build URLs using the URL reversing function :func:`~flask.url_for` +instead of hard-coding them into your templates? There are three good reasons +for this: 1. Reversing is often more descriptive than hard-coding the URLs. More importantly, it allows you to change URLs in one go, without having to
Updated quickstart to make clear that URL reversing refers to the use of url_for() to build the URL. Issue #779
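For readers following along, this is the standard Flask pattern the doc change describes: `url_for()` reverses an endpoint name into a URL (generic usage, not code from the PR):

```python
from flask import Flask, url_for

app = Flask(__name__)

@app.route('/user/<username>')
def profile(username):
    return f'Hello, {username}!'

# test_request_context() lets us call url_for() outside a real request,
# just as the quickstart itself does.
with app.test_request_context():
    print(url_for('profile', username='john'))  # -> /user/john
```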
https://api.github.com/repos/pallets/flask/pulls/897
2013-10-30T00:45:43Z
2014-02-09T13:20:46Z
2014-02-09T13:20:46Z
2020-11-14T05:07:58Z
225
pallets/flask
20,598
[gemini] polish stateful_tensor_mgr
diff --git a/colossalai/gemini/stateful_tensor_mgr.py b/colossalai/gemini/stateful_tensor_mgr.py index 15f1217109d2..9ee1a6805c35 100644 --- a/colossalai/gemini/stateful_tensor_mgr.py +++ b/colossalai/gemini/stateful_tensor_mgr.py @@ -6,7 +6,6 @@ from colossalai.gemini.stateful_tensor import StatefulTensor, TensorState from colossalai.gemini.tensor_placement_policy import TensorPlacementPolicy from typing import List -from colossalai.logging import get_dist_logger class StatefulTensorMgr(object): @@ -20,23 +19,30 @@ class StatefulTensorMgr(object): def __init__(self, tensor_placement_policy: TensorPlacementPolicy) -> None: self._tensor_placement_policy: TensorPlacementPolicy = tensor_placement_policy self._stateful_tensor_list: List[StatefulTensor] = [] - self._logger = get_dist_logger("StatefulTensorMgr") - - self._warmup = True self._compute_list: List[StatefulTensor] = [] self._compute_idx: int = -1 self._cpu_gpu_move_volume = 0 + self._warmup = True - def register_stateful_param(self, param) -> None: - from colossalai.zero.sharded_param.sharded_param import ShardedParamV2 - assert isinstance(param, ShardedParamV2) - for t in param.get_payload_tensors(): + def register_stateful_tensor_list(self, tensor_list: List[StatefulTensor]) -> None: + assert self._stateful_tensor_list == [], "Can't register stateful tensors for manager twice" + self._stateful_tensor_list = tensor_list + for t in self._stateful_tensor_list: assert isinstance(t, StatefulTensor) - self._stateful_tensor_list.append(t) t.trans_state = types.MethodType(functools.partial(self._trans_state, t.trans_state), t) + def start_iter(self): + pass + + def finish_iter(self): + """This function must be called when each iteration finishes + """ + self._warmup = False + self._compute_idx = -1 + self._cpu_gpu_move_volume = 0 + def adjust_layout(self) -> None: """ Adjust the layout of statefuil tensor according to the information provided by mem_stats_collector, which should belongs to a Sharded Model. 
@@ -63,21 +69,14 @@ def adjust_layout(self) -> None: compute_list=self._compute_list, compute_idx=self._compute_idx) # move COMPUTE tensors to CUDA + self._cpu_gpu_move_volume += cuda_demand for t in move_to_cuda_tensor_list: colo_model_data_tensor_move_inline(t, get_current_device()) - self._cpu_gpu_move_volume += t.payload_size @property def cpu_gpu_move_volume(self): return self._cpu_gpu_move_volume - def reset(self): - """This function must be called when each iteration finishes - """ - self._warmup = False - self._compute_idx = -1 - self._cpu_gpu_move_volume = 0 - def _trans_state(self, trans_state_func, stateful_tensor, state): trans_state_func(state) if state == TensorState.COMPUTE: diff --git a/colossalai/zero/sharded_model/sharded_model_v2.py b/colossalai/zero/sharded_model/sharded_model_v2.py index 0f958aaea81a..cc37ddf17af2 100644 --- a/colossalai/zero/sharded_model/sharded_model_v2.py +++ b/colossalai/zero/sharded_model/sharded_model_v2.py @@ -111,10 +111,10 @@ def __init__(self, self._memstats_collector = None self._tensor_placement_policy: TensorPlacementPolicy = TensorPlacementPolicyFactory.create( tensor_placement_policy)(mem_stats_collector=self._memstats_collector) + self._stateful_tensor_mgr = StatefulTensorMgr(self._tensor_placement_policy) - for param in module.parameters(): - if hasattr(param, 'colo_attr'): - self._stateful_tensor_mgr.register_stateful_param(param.colo_attr) + param_tensor_list = [p.colo_attr.sharded_data_tensor for p in module.parameters() if hasattr(p, 'colo_attr')] + self._stateful_tensor_mgr.register_stateful_tensor_list(param_tensor_list) # Register hooks self._ophook_list = [ @@ -198,6 +198,8 @@ def _pre_forward_operations(self): if hasattr(p, 'colo_attr'): p.colo_attr.sharded_data_tensor.trans_state(TensorState.HOLD) + self._stateful_tensor_mgr.start_iter() + def _post_forward_operations(self): for p in self.module.parameters(): if hasattr(p, 'colo_attr'): diff --git a/colossalai/zero/utils/zero_hook.py b/colossalai/zero/utils/zero_hook.py index 5aa9da15829a..38461703097d 100644 --- a/colossalai/zero/utils/zero_hook.py +++ b/colossalai/zero/utils/zero_hook.py @@ -115,4 +115,4 @@ def post_iter(self): if self._stateful_tensor_mgr: self.logger.info( f"CPU-GPU data moving this iteration {self._stateful_tensor_mgr.cpu_gpu_move_volume/1e9} GB", ranks=[0]) - self._stateful_tensor_mgr.reset() + self._stateful_tensor_mgr.finish_iter()
https://api.github.com/repos/hpcaitech/ColossalAI/pulls/876
2022-04-26T05:30:16Z
2022-04-26T07:05:04Z
2022-04-26T07:05:04Z
2022-04-26T07:05:04Z
1,259
hpcaitech/ColossalAI
10,974
404 error on a book link
diff --git a/books.md b/books.md index 3d4b47ee..e4fcf397 100644 --- a/books.md +++ b/books.md @@ -102,7 +102,6 @@ The following is a list of free and/or open source books on machine learning, st ## Linear Algebra -* [Linear Algebra and its applications by Gilbert strang](http://www.math.hcmus.edu.vn/~bxthang/Linear%20algebra%20and%20its%20applications.pdf) * [The Matrix Cookbook](https://www.math.uwaterloo.ca/~hwolkowi/matrixcookbook.pdf) * [Linear Algebra by Shilov](https://cosmathclub.files.wordpress.com/2014/10/georgi-shilov-linear-algebra4.pdf) * [Linear Algebra Done Wrong](https://www.math.brown.edu/~treil/papers/LADW/LADW.html)
It seems that the professor no longer works at that university, because his home page also gives a 404: http://www.math.hcmus.edu.vn/~bxthang/
https://api.github.com/repos/josephmisiti/awesome-machine-learning/pulls/745
2020-10-11T22:22:59Z
2020-10-12T14:15:22Z
2020-10-12T14:15:22Z
2020-10-12T14:15:22Z
197
josephmisiti/awesome-machine-learning
52,231
Filter warnings about tail unrecognized file system from test logs
diff --git a/localstack/utils/testutil.py b/localstack/utils/testutil.py index cf32d468fb10a..8ff69114b2d73 100644 --- a/localstack/utils/testutil.py +++ b/localstack/utils/testutil.py @@ -605,6 +605,9 @@ def get_log_events(function_name, delay_time): or "START" in raw_message or "END" in raw_message or "REPORT" in raw_message + # necessary until tail is updated in docker images. See this PR: + # http://git.savannah.gnu.org/gitweb/?p=coreutils.git;a=commitdiff;h=v8.24-111-g1118f32 + or "tail: unrecognized file system type" in raw_message or regex_filter and not re.search(regex_filter, raw_message) ):
Due to the bug fixed by this coreutils commit (http://git.savannah.gnu.org/gitweb/?p=coreutils.git;a=commitdiff;h=v8.24-111-g1118f32), this log message can appear when tail is executed in the context of a Lambda execution. This PR filters that message out until we can update the tail command in the Lambda Docker images.
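Distilled, the filter amounts to dropping known platform noise before applying the user's regex. A sketch of the condition, not the exact `testutil` code:

```python
import re

def keep_message(raw_message, regex_filter=None):
    # Drop Lambda platform noise, including the coreutils tail warning.
    noise = ('START', 'END', 'REPORT',
             'tail: unrecognized file system type')
    if any(marker in raw_message for marker in noise):
        return False
    # Then keep only messages matching the optional regex filter.
    return not regex_filter or bool(re.search(regex_filter, raw_message))
```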
https://api.github.com/repos/localstack/localstack/pulls/5050
2021-12-01T16:07:18Z
2021-12-02T09:48:53Z
2021-12-02T09:48:53Z
2021-12-02T09:48:56Z
190
localstack/localstack
28,582
chore(deps): bump jinja2 from 2.11.1 to 2.11.3
diff --git a/poetry.lock b/poetry.lock index bc2cf5b43..d9debe339 100644 --- a/poetry.lock +++ b/poetry.lock @@ -149,7 +149,7 @@ xdg_home = ["appdirs (>=1.4.0)"] [[package]] name = "jinja2" -version = "2.11.1" +version = "2.11.3" description = "A very fast and expressive template engine." category = "main" optional = false @@ -402,8 +402,8 @@ isort = [ {file = "isort-4.3.21.tar.gz", hash = "sha256:54da7e92468955c4fceacd0c86bd0ec997b0e1ee80d97f67c35a78b719dccab1"}, ] jinja2 = [ - {file = "Jinja2-2.11.1-py2.py3-none-any.whl", hash = "sha256:b0eaf100007721b5c16c1fc1eecb87409464edc10469ddc9a22a27a99123be49"}, - {file = "Jinja2-2.11.1.tar.gz", hash = "sha256:93187ffbc7808079673ef52771baa950426fd664d3aad1d0fa3e95644360e250"}, + {file = "Jinja2-2.11.3-py2.py3-none-any.whl", hash = "sha256:03e47ad063331dd6a3f04a43eddca8a966a26ba0c5b7207a9a9e4e08f1b29419"}, + {file = "Jinja2-2.11.3.tar.gz", hash = "sha256:a6d58433de0ae800347cab1fa3043cebbabe8baa9d29e668f1c768cb87a333c6"}, ] lazy-object-proxy = [ {file = "lazy-object-proxy-1.4.3.tar.gz", hash = "sha256:f3900e8a5de27447acbf900b4750b0ddfd7ec1ea7fbaf11dfa911141bc522af0"}, @@ -447,20 +447,39 @@ markupsafe = [ {file = "MarkupSafe-1.1.1-cp35-cp35m-win32.whl", hash = "sha256:6dd73240d2af64df90aa7c4e7481e23825ea70af4b4922f8ede5b9e35f78a3b1"}, {file = "MarkupSafe-1.1.1-cp35-cp35m-win_amd64.whl", hash = "sha256:9add70b36c5666a2ed02b43b335fe19002ee5235efd4b8a89bfcf9005bebac0d"}, {file = "MarkupSafe-1.1.1-cp36-cp36m-macosx_10_6_intel.whl", hash = "sha256:24982cc2533820871eba85ba648cd53d8623687ff11cbb805be4ff7b4c971aff"}, + {file = "MarkupSafe-1.1.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:d53bc011414228441014aa71dbec320c66468c1030aae3a6e29778a3382d96e5"}, {file = "MarkupSafe-1.1.1-cp36-cp36m-manylinux1_i686.whl", hash = "sha256:00bc623926325b26bb9605ae9eae8a215691f33cae5df11ca5424f06f2d1f473"}, {file = "MarkupSafe-1.1.1-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:717ba8fe3ae9cc0006d7c451f0bb265ee07739daf76355d06366154ee68d221e"}, + {file = "MarkupSafe-1.1.1-cp36-cp36m-manylinux2010_i686.whl", hash = "sha256:3b8a6499709d29c2e2399569d96719a1b21dcd94410a586a18526b143ec8470f"}, + {file = "MarkupSafe-1.1.1-cp36-cp36m-manylinux2010_x86_64.whl", hash = "sha256:84dee80c15f1b560d55bcfe6d47b27d070b4681c699c572af2e3c7cc90a3b8e0"}, + {file = "MarkupSafe-1.1.1-cp36-cp36m-manylinux2014_aarch64.whl", hash = "sha256:b1dba4527182c95a0db8b6060cc98ac49b9e2f5e64320e2b56e47cb2831978c7"}, {file = "MarkupSafe-1.1.1-cp36-cp36m-win32.whl", hash = "sha256:535f6fc4d397c1563d08b88e485c3496cf5784e927af890fb3c3aac7f933ec66"}, {file = "MarkupSafe-1.1.1-cp36-cp36m-win_amd64.whl", hash = "sha256:b1282f8c00509d99fef04d8ba936b156d419be841854fe901d8ae224c59f0be5"}, {file = "MarkupSafe-1.1.1-cp37-cp37m-macosx_10_6_intel.whl", hash = "sha256:8defac2f2ccd6805ebf65f5eeb132adcf2ab57aa11fdf4c0dd5169a004710e7d"}, + {file = "MarkupSafe-1.1.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:bf5aa3cbcfdf57fa2ee9cd1822c862ef23037f5c832ad09cfea57fa846dec193"}, {file = "MarkupSafe-1.1.1-cp37-cp37m-manylinux1_i686.whl", hash = "sha256:46c99d2de99945ec5cb54f23c8cd5689f6d7177305ebff350a58ce5f8de1669e"}, {file = "MarkupSafe-1.1.1-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:ba59edeaa2fc6114428f1637ffff42da1e311e29382d81b339c1817d37ec93c6"}, + {file = "MarkupSafe-1.1.1-cp37-cp37m-manylinux2010_i686.whl", hash = "sha256:6fffc775d90dcc9aed1b89219549b329a9250d918fd0b8fa8d93d154918422e1"}, + {file = "MarkupSafe-1.1.1-cp37-cp37m-manylinux2010_x86_64.whl", hash = 
"sha256:a6a744282b7718a2a62d2ed9d993cad6f5f585605ad352c11de459f4108df0a1"}, + {file = "MarkupSafe-1.1.1-cp37-cp37m-manylinux2014_aarch64.whl", hash = "sha256:195d7d2c4fbb0ee8139a6cf67194f3973a6b3042d742ebe0a9ed36d8b6f0c07f"}, {file = "MarkupSafe-1.1.1-cp37-cp37m-win32.whl", hash = "sha256:b00c1de48212e4cc9603895652c5c410df699856a2853135b3967591e4beebc2"}, {file = "MarkupSafe-1.1.1-cp37-cp37m-win_amd64.whl", hash = "sha256:9bf40443012702a1d2070043cb6291650a0841ece432556f784f004937f0f32c"}, {file = "MarkupSafe-1.1.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:6788b695d50a51edb699cb55e35487e430fa21f1ed838122d722e0ff0ac5ba15"}, {file = "MarkupSafe-1.1.1-cp38-cp38-manylinux1_i686.whl", hash = "sha256:cdb132fc825c38e1aeec2c8aa9338310d29d337bebbd7baa06889d09a60a1fa2"}, {file = "MarkupSafe-1.1.1-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:13d3144e1e340870b25e7b10b98d779608c02016d5184cfb9927a9f10c689f42"}, + {file = "MarkupSafe-1.1.1-cp38-cp38-manylinux2010_i686.whl", hash = "sha256:acf08ac40292838b3cbbb06cfe9b2cb9ec78fce8baca31ddb87aaac2e2dc3bc2"}, + {file = "MarkupSafe-1.1.1-cp38-cp38-manylinux2010_x86_64.whl", hash = "sha256:d9be0ba6c527163cbed5e0857c451fcd092ce83947944d6c14bc95441203f032"}, + {file = "MarkupSafe-1.1.1-cp38-cp38-manylinux2014_aarch64.whl", hash = "sha256:caabedc8323f1e93231b52fc32bdcde6db817623d33e100708d9a68e1f53b26b"}, {file = "MarkupSafe-1.1.1-cp38-cp38-win32.whl", hash = "sha256:596510de112c685489095da617b5bcbbac7dd6384aeebeda4df6025d0256a81b"}, {file = "MarkupSafe-1.1.1-cp38-cp38-win_amd64.whl", hash = "sha256:e8313f01ba26fbbe36c7be1966a7b7424942f670f38e666995b88d012765b9be"}, + {file = "MarkupSafe-1.1.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:d73a845f227b0bfe8a7455ee623525ee656a9e2e749e4742706d80a6065d5e2c"}, + {file = "MarkupSafe-1.1.1-cp39-cp39-manylinux1_i686.whl", hash = "sha256:98bae9582248d6cf62321dcb52aaf5d9adf0bad3b40582925ef7c7f0ed85fceb"}, + {file = "MarkupSafe-1.1.1-cp39-cp39-manylinux1_x86_64.whl", hash = "sha256:2beec1e0de6924ea551859edb9e7679da6e4870d32cb766240ce17e0a0ba2014"}, + {file = "MarkupSafe-1.1.1-cp39-cp39-manylinux2010_i686.whl", hash = "sha256:7fed13866cf14bba33e7176717346713881f56d9d2bcebab207f7a036f41b850"}, + {file = "MarkupSafe-1.1.1-cp39-cp39-manylinux2010_x86_64.whl", hash = "sha256:6f1e273a344928347c1290119b493a1f0303c52f5a5eae5f16d74f48c15d4a85"}, + {file = "MarkupSafe-1.1.1-cp39-cp39-manylinux2014_aarch64.whl", hash = "sha256:feb7b34d6325451ef96bc0e36e1a6c0c1c64bc1fbec4b854f4529e51887b1621"}, + {file = "MarkupSafe-1.1.1-cp39-cp39-win32.whl", hash = "sha256:22c178a091fc6630d0d045bdb5992d2dfe14e3259760e713c490da5323866c39"}, + {file = "MarkupSafe-1.1.1-cp39-cp39-win_amd64.whl", hash = "sha256:b7d644ddb4dbd407d31ffb699f1d140bc35478da613b441c582aeb7c43838dd8"}, {file = "MarkupSafe-1.1.1.tar.gz", hash = "sha256:29872e92839765e546828bb7754a68c418d927cd064fd4708fab9fe9c8bb116b"}, ] mccabe = [
Bumps [jinja2](https://github.com/pallets/jinja) from 2.11.1 to 2.11.3. <details> <summary>Release notes</summary> <p><em>Sourced from <a href="https://github.com/pallets/jinja/releases">jinja2's releases</a>.</em></p> <blockquote> <h2>2.11.3</h2> <p>This contains a fix for a speed issue with the <code>urlize</code> filter. <code>urlize</code> is likely to be called on untrusted user input. For certain inputs some of the regular expressions used to parse the text could take a very long time due to backtracking. As part of the fix, the email matching became slightly stricter. The various speedups apply to <code>urlize</code> in general, not just the specific input cases.</p> <ul> <li>PyPI: <a href="https://pypi.org/project/Jinja2/2.11.3/">https://pypi.org/project/Jinja2/2.11.3/</a></li> <li>Changes: <a href="https://jinja.palletsprojects.com/en/2.11.x/changelog/#version-2-11-3">https://jinja.palletsprojects.com/en/2.11.x/changelog/#version-2-11-3</a></li> </ul> <h2>2.11.2</h2> <ul> <li>Changelog: <a href="https://jinja.palletsprojects.com/en/2.11.x/changelog/#version-2-11-2">https://jinja.palletsprojects.com/en/2.11.x/changelog/#version-2-11-2</a></li> </ul> </blockquote> </details> <details> <summary>Changelog</summary> <p><em>Sourced from <a href="https://github.com/pallets/jinja/blob/master/CHANGES.rst">jinja2's changelog</a>.</em></p> <blockquote> <h2>Version 2.11.3</h2> <p>Released 2021-01-31</p> <ul> <li>Improve the speed of the <code>urlize</code> filter by reducing regex backtracking. Email matching requires a word character at the start of the domain part, and only word characters in the TLD. :pr:<code>1343</code></li> </ul> <h2>Version 2.11.2</h2> <p>Released 2020-04-13</p> <ul> <li>Fix a bug that caused callable objects with <code>__getattr__</code>, like :class:<code>~unittest.mock.Mock</code> to be treated as a :func:<code>contextfunction</code>. :issue:<code>1145</code></li> <li>Update <code>wordcount</code> filter to trigger :class:<code>Undefined</code> methods by wrapping the input in :func:<code>soft_str</code>. :pr:<code>1160</code></li> <li>Fix a hang when displaying tracebacks on Python 32-bit. :issue:<code>1162</code></li> <li>Showing an undefined error for an object that raises <code>AttributeError</code> on access doesn't cause a recursion error. :issue:<code>1177</code></li> <li>Revert changes to :class:<code>~loaders.PackageLoader</code> from 2.10 which removed the dependency on setuptools and pkg_resources, and added limited support for namespace packages. The changes caused issues when using Pytest. Due to the difficulty in supporting Python 2 and :pep:<code>451</code> simultaneously, the changes are reverted until 3.0. :pr:<code>1182</code></li> <li>Fix line numbers in error messages when newlines are stripped. :pr:<code>1178</code></li> <li>The special <code>namespace()</code> assignment object in templates works in async environments. :issue:<code>1180</code></li> <li>Fix whitespace being removed before tags in the middle of lines when <code>lstrip_blocks</code> is enabled. :issue:<code>1138</code></li> <li>:class:<code>~nativetypes.NativeEnvironment</code> doesn't evaluate intermediate strings during rendering. This prevents early evaluation which could change the value of an expression. 
:issue:<code>1186</code></li> </ul> </blockquote> </details> <details> <summary>Commits</summary> <ul> <li><a href="https://github.com/pallets/jinja/commit/cf215390d4a4d6f0a4de27e2687eed176878f13d"><code>cf21539</code></a> release version 2.11.3</li> <li><a href="https://github.com/pallets/jinja/commit/15ef8f09b659f9100610583938005a7a10472d4d"><code>15ef8f0</code></a> Merge pull request <a href="https://github-redirect.dependabot.com/pallets/jinja/issues/1343">#1343</a> from pallets/urlize-speedup</li> <li><a href="https://github.com/pallets/jinja/commit/ef658dc3b6389b091d608e710a810ce8b87995b3"><code>ef658dc</code></a> speed up urlize matching</li> <li><a href="https://github.com/pallets/jinja/commit/eeca0fecc3318d43f61bc340ad61db641b861ade"><code>eeca0fe</code></a> Merge pull request <a href="https://github-redirect.dependabot.com/pallets/jinja/issues/1207">#1207</a> from mhansen/patch-1</li> <li><a href="https://github.com/pallets/jinja/commit/2dd769111cbb1a2637f805b3b4c652ec8096d371"><code>2dd7691</code></a> Merge pull request <a href="https://github-redirect.dependabot.com/pallets/jinja/issues/1209">#1209</a> from mhansen/patch-3</li> <li><a href="https://github.com/pallets/jinja/commit/48929401db7228db04dfd8e88115dd5c30dc2d86"><code>4892940</code></a> do_dictsort: update example ready to copy/paste</li> <li><a href="https://github.com/pallets/jinja/commit/7db7d336ba12574e6205fdd929386fd529e3fad4"><code>7db7d33</code></a> api.rst: bugfix in docs, import PackageLoader</li> <li><a href="https://github.com/pallets/jinja/commit/9ec465baefe32e305bd4e61da49e6c39360c194e"><code>9ec465b</code></a> fix changelog header</li> <li><a href="https://github.com/pallets/jinja/commit/737a4cd41d09878e7e6c584a2062f5853dc30150"><code>737a4cd</code></a> release version 2.11.2</li> <li><a href="https://github.com/pallets/jinja/commit/179df6b54e87b3d420cabf65fc07b2605ffc05f8"><code>179df6b</code></a> Merge pull request <a href="https://github-redirect.dependabot.com/pallets/jinja/issues/1190">#1190</a> from pallets/native-eval</li> <li>Additional commits viewable in <a href="https://github.com/pallets/jinja/compare/2.11.1...2.11.3">compare view</a></li> </ul> </details> <br /> [![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=jinja2&package-manager=pip&previous-version=2.11.1&new-version=2.11.3)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) --- <details> <summary>Dependabot commands and options</summary> <br /> You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot merge` will merge this PR after your CI passes on it - `@dependabot squash and merge` will squash and merge this PR after your CI passes on it - `@dependabot cancel merge` will cancel a previously requested merge and block automerging - `@dependabot reopen` will reopen this PR if it is closed - `@dependabot close` will close this PR and stop Dependabot recreating it. 
You can achieve the same result by closing it manually - `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself) </details>
https://api.github.com/repos/mingrammer/diagrams/pulls/456
2021-02-01T08:57:46Z
2021-02-10T19:48:28Z
2021-02-10T19:48:28Z
2021-02-10T19:48:38Z
3,201
mingrammer/diagrams
52,571
gtk_tray: prefer pygtk
diff --git a/code/default/launcher/gtk_tray.py b/code/default/launcher/gtk_tray.py index 9fba8e87a9..bf0bb3eab5 100644 --- a/code/default/launcher/gtk_tray.py +++ b/code/default/launcher/gtk_tray.py @@ -24,6 +24,13 @@ enable_appind = True try: + import pygtk + pygtk.require('2.0') + import gtk + import gtk.gdk as gdk + use_gi = False + xlog.info('Using PyGTK as the GUI Backend.') +except: import gi gi.require_version('Gtk', '3.0') gi.require_version('Gdk', '3.0') @@ -31,13 +38,6 @@ from gi.repository import Gdk as gdk use_gi = True xlog.info('Using PyGObject as the GUI Backend.') -except: - import pygtk - pygtk.require('2.0') - import gtk - import gtk.gdk as gdk - use_gi = False - xlog.info('Using PyGTK as the GUI Backend.') gdk.threads_init() @@ -93,6 +93,7 @@ def __init__(self): xlog.info('AppIndicator found and used.') else: self.trayicon = self.gtk_trayicon(logo_filename) + xlog.info('Gtk.StatusIcon used.') def appind_trayicon(self, logo_filename): trayicon = new_appindicator('XX-Net', 'indicator-messages', appind_category) diff --git a/code/default/launcher/start.py b/code/default/launcher/start.py index 4fb66d79f3..8e6dd051eb 100644 --- a/code/default/launcher/start.py +++ b/code/default/launcher/start.py @@ -116,7 +116,7 @@ def has_pygtk(): except: return False - if X_is_running() and (has_gi() or has_pygtk()): + if X_is_running() and (has_pygtk() or has_gi()): from gtk_tray import sys_tray else: from non_tray import sys_tray
While #9994 is still being revised: the PyGObject-based implementation causes the settings page to fail to open on some systems, for unknown reasons, so this switches the default back to PyGTK. fix #10046 fix #10080 fix #10112 fix #10113 fix #10193
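The distilled pattern is simple import-order preference: try the favored backend first and fall back in the except branch. A sketch only; the real module catches any exception, not just ImportError:

```python
try:
    # Preferred backend: PyGTK (GTK 2)
    import pygtk
    pygtk.require('2.0')
    import gtk
    use_gi = False
except ImportError:
    # Fallback backend: PyGObject (GTK 3)
    import gi
    gi.require_version('Gtk', '3.0')
    from gi.repository import Gtk as gtk
    use_gi = True
```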
https://api.github.com/repos/XX-net/XX-Net/pulls/10225
2018-03-27T05:51:47Z
2018-03-27T06:54:19Z
2018-03-27T06:54:18Z
2018-03-27T12:37:32Z
503
XX-net/XX-Net
17,075
Fix the text.Span.__repr__ method
diff --git a/rich/text.py b/rich/text.py index 0ecdeae4b..52ecd1604 100644 --- a/rich/text.py +++ b/rich/text.py @@ -57,7 +57,7 @@ def __repr__(self) -> str: return ( f"Span({self.start}, {self.end}, {self.style!r})" if (isinstance(self.style, Style) and self.style._meta) - else f"Span({self.start}, {self.end}, {str(self.style)!r})" + else f"Span({self.start}, {self.end}, {repr(self.style)})" ) def __bool__(self) -> bool:
Fixes Issue #1676

## Type of changes

- [x] Bug fix
- [x] Tests

## Checklist

- [x] I've run the latest [black](https://github.com/psf/black) with default args on new code.
- [x] I've updated CHANGELOG.md and CONTRIBUTORS.md where appropriate.
- [x] I've added tests for new code.
- [x] I accept that @willmcgugan may be pedantic in the code review.
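The one-line fix is easiest to see with a toy object whose `__str__` and `__repr__` differ: `{str(style)!r}` quotes the human-readable form, while `{repr(style)}` preserves the real repr. The `Style` class below is a hypothetical stand-in, not rich's actual class:

```python
class Style:
    # Hypothetical stand-in for rich.style.Style, just to show the difference.
    def __str__(self):
        return "bold red"

    def __repr__(self):
        return "Style('bold red')"

style = Style()
print(f"Span(0, 4, {str(style)!r})")  # Span(0, 4, 'bold red')        -- before
print(f"Span(0, 4, {repr(style)})")   # Span(0, 4, Style('bold red')) -- after
```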
https://api.github.com/repos/Textualize/rich/pulls/1677
2021-11-10T20:24:05Z
2021-11-15T15:46:41Z
2021-11-15T15:46:41Z
2021-11-15T16:26:41Z
156
Textualize/rich
48,546
Add Ocelot gateway
diff --git a/diagrams/onprem/network.py b/diagrams/onprem/network.py index 82a614991..67f5e7124 100644 --- a/diagrams/onprem/network.py +++ b/diagrams/onprem/network.py @@ -52,6 +52,10 @@ class Nginx(_Network): _icon = "nginx.png" +class Ocelot(_Network): + _icon = "ocelot.png" + + class Pfsense(_Network): _icon = "pfsense.png" diff --git a/docs/nodes/onprem.md b/docs/nodes/onprem.md index 165490668..454b6baef 100644 --- a/docs/nodes/onprem.md +++ b/docs/nodes/onprem.md @@ -134,6 +134,7 @@ Node classes list of onprem provider. - **diagrams.onprem.network.Kong** - **diagrams.onprem.network.Linkerd** - **diagrams.onprem.network.Nginx** +- **diagrams.onprem.network.Ocelot** - **diagrams.onprem.network.Pfsense**, **PFSense** (alias) - **diagrams.onprem.network.Pomerium** - **diagrams.onprem.network.Tomcat** diff --git a/resources/onprem/network/ocelot.png b/resources/onprem/network/ocelot.png new file mode 100644 index 000000000..121605730 Binary files /dev/null and b/resources/onprem/network/ocelot.png differ
https://api.github.com/repos/mingrammer/diagrams/pulls/263
2020-09-04T18:29:10Z
2020-09-05T15:43:32Z
2020-09-05T15:43:32Z
2020-09-05T15:43:32Z
353
mingrammer/diagrams
52,569
Add `DATASETS_DIR` global in general.py
diff --git a/utils/datasets.py b/utils/datasets.py index 1026731f3a4..b3e0dbb3523 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -27,7 +27,7 @@ from tqdm import tqdm from utils.augmentations import Albumentations, augment_hsv, copy_paste, letterbox, mixup, random_perspective -from utils.general import (LOGGER, NUM_THREADS, check_dataset, check_requirements, check_yaml, clean_str, +from utils.general import (DATASETS_DIR, LOGGER, NUM_THREADS, check_dataset, check_requirements, check_yaml, clean_str, segments2boxes, xyn2xy, xywh2xyxy, xywhn2xyxy, xyxy2xywhn) from utils.torch_utils import torch_distributed_zero_first @@ -817,15 +817,15 @@ def create_folder(path='./new'): os.makedirs(path) # make new output folder -def flatten_recursive(path='../datasets/coco128'): +def flatten_recursive(path=DATASETS_DIR / 'coco128'): # Flatten a recursive directory by bringing all files to top level - new_path = Path(path + '_flat') + new_path = Path(str(path) + '_flat') create_folder(new_path) for file in tqdm(glob.glob(str(Path(path)) + '/**/*.*', recursive=True)): shutil.copyfile(file, new_path / Path(file).name) -def extract_boxes(path='../datasets/coco128'): # from utils.datasets import *; extract_boxes() +def extract_boxes(path=DATASETS_DIR / 'coco128'): # from utils.datasets import *; extract_boxes() # Convert detection dataset into classification dataset, with one directory per class path = Path(path) # images dir shutil.rmtree(path / 'classifier') if (path / 'classifier').is_dir() else None # remove existing @@ -859,7 +859,7 @@ def extract_boxes(path='../datasets/coco128'): # from utils.datasets import *; assert cv2.imwrite(str(f), im[b[1]:b[3], b[0]:b[2]]), f'box failure in {f}' -def autosplit(path='../datasets/coco128/images', weights=(0.9, 0.1, 0.0), annotated_only=False): +def autosplit(path=DATASETS_DIR / 'coco128/images', weights=(0.9, 0.1, 0.0), annotated_only=False): """ Autosplit a dataset into train/val/test splits and save path/autosplit_*.txt files Usage: from utils.datasets import *; autosplit() Arguments @@ -939,7 +939,7 @@ def dataset_stats(path='coco128.yaml', autodownload=False, verbose=False, profil """ Return dataset statistics dictionary with images and instances counts per split per class To run in parent directory: export PYTHONPATH="$PWD/yolov5" Usage1: from utils.datasets import *; dataset_stats('coco128.yaml', autodownload=True) - Usage2: from utils.datasets import *; dataset_stats('../datasets/coco128_with_yaml.zip') + Usage2: from utils.datasets import *; dataset_stats('path/to/coco128_with_yaml.zip') Arguments path: Path to data.yaml or data.zip (with data.yaml inside data.zip) autodownload: Attempt to download dataset if not found locally diff --git a/utils/general.py b/utils/general.py index 030539d7670..d4ca6e2736d 100755 --- a/utils/general.py +++ b/utils/general.py @@ -35,6 +35,7 @@ # Settings FILE = Path(__file__).resolve() ROOT = FILE.parents[1] # YOLOv5 root directory +DATASETS_DIR = ROOT.parent / 'datasets' # YOLOv5 datasets directory NUM_THREADS = min(8, max(1, os.cpu_count() - 1)) # number of YOLOv5 multiprocessing threads VERBOSE = str(os.getenv('YOLOv5_VERBOSE', True)).lower() == 'true' # global verbose mode FONT = 'Arial.ttf' # https://ultralytics.com/assets/Arial.ttf @@ -398,8 +399,8 @@ def check_dataset(data, autodownload=True): # Download (optional) extract_dir = '' if isinstance(data, (str, Path)) and str(data).endswith('.zip'): # i.e. 
gs://bucket/dir/coco128.zip - download(data, dir='../datasets', unzip=True, delete=False, curl=False, threads=1) - data = next((Path('../datasets') / Path(data).stem).rglob('*.yaml')) + download(data, dir=DATASETS_DIR, unzip=True, delete=False, curl=False, threads=1) + data = next((DATASETS_DIR / Path(data).stem).rglob('*.yaml')) extract_dir, autodownload = data.parent, False # Read yaml (optional)
HUB fix for bug introduced in #6489

## 🛠️ PR Summary
<sub>Made with ❤️ by [Ultralytics Actions](https://github.com/ultralytics/actions)</sub>

### 🌟 Summary
Refactoring dataset paths to use a standardized directory variable.

### 📊 Key Changes
- Introduced the `DATASETS_DIR` variable in `utils/general.py` to set a standard directory for datasets.
- Replaced hardcoded dataset paths in `utils/datasets.py` with references to `DATASETS_DIR`.

### 🎯 Purpose & Impact
- **Consistency:** Centralizes the datasets directory path, making the code cleaner and more maintainable.
- **Usability:** Simplifies path management for those who clone the repo and work with dataset directories.
- **Reliability:** Reduces the chance of path-related errors when users manage multiple datasets.
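The mechanism is plain pathlib arithmetic: resolve the repo root from the current file, then point one level up to a sibling `datasets` directory. A sketch mirroring the diff:

```python
from pathlib import Path

FILE = Path(__file__).resolve()
ROOT = FILE.parents[1]                    # YOLOv5 root directory
DATASETS_DIR = ROOT.parent / 'datasets'   # sibling datasets directory

print(DATASETS_DIR / 'coco128' / 'images')
```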
https://api.github.com/repos/ultralytics/yolov5/pulls/6578
2022-02-08T20:46:10Z
2022-02-08T21:20:44Z
2022-02-08T21:20:44Z
2024-01-19T12:59:42Z
1,114
ultralytics/yolov5
25,591
[BUG FIX] Deadlock issues when using multiprocessing
diff --git a/llama_index/ingestion/pipeline.py b/llama_index/ingestion/pipeline.py index 6e84cc4b3d053..fee44d6b20694 100644 --- a/llama_index/ingestion/pipeline.py +++ b/llama_index/ingestion/pipeline.py @@ -447,7 +447,7 @@ def run( "Setting `num_workers` down to the maximum CPU count." ) - with multiprocessing.Pool(num_workers) as p: + with multiprocessing.get_context("spawn").Pool(num_workers) as p: node_batches = self._node_batcher( num_batches=num_workers, nodes=nodes_to_run ) diff --git a/llama_index/readers/file/base.py b/llama_index/readers/file/base.py index 1c8664b89ecde..7f72d9406e54a 100644 --- a/llama_index/readers/file/base.py +++ b/llama_index/readers/file/base.py @@ -361,7 +361,7 @@ def load_data( "Specified num_workers exceed number of CPUs in the system. " "Setting `num_workers` down to the maximum CPU count." ) - with multiprocessing.Pool(num_workers) as p: + with multiprocessing.get_context("spawn").Pool(num_workers) as p: results = p.starmap( SimpleDirectoryReader.load_file, zip(
# Description

- Multiprocessing defaults to the "fork" start method (except on Windows).
- This can create a deadlock if the fork happens at the wrong time and a worker process tries to acquire a lock that was held at fork time.
- To fix this, we set the context to "spawn", which starts each worker process in a fresh Python interpreter rather than forking. For more information see [here](https://pythonspeed.com/articles/python-multiprocessing/) and [here](https://stackoverflow.com/questions/43818519/what-is-the-meaning-of-context-argument-in-multiprocessing-pool-pool)

Fixes #10104

## Type of Change

Please delete options that are not relevant.

- [x] Bug fix (non-breaking change which fixes an issue)

# How Has This Been Tested?

Please describe the tests that you ran to verify your changes. Provide instructions so we can reproduce. Please also list any relevant details for your test configuration

- [x] Ran previous parallel processing nbs and confirmed similar wall times are achieved under "spawn" context
- [x] I stared at the code and made sure it makes sense
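A minimal, self-contained illustration of the change (the worker function and pool size are simplified; the real pipeline batches nodes across workers):

```python
import multiprocessing

def square(x):
    return x * x

if __name__ == "__main__":
    # "spawn" starts each worker in a fresh interpreter instead of fork()ing,
    # so workers cannot inherit a lock that happened to be held at fork time.
    with multiprocessing.get_context("spawn").Pool(4) as pool:
        print(pool.map(square, range(8)))
```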
https://api.github.com/repos/run-llama/llama_index/pulls/10125
2024-01-18T15:20:48Z
2024-01-18T15:53:27Z
2024-01-18T15:53:27Z
2024-01-18T15:53:28Z
319
run-llama/llama_index
5,944
[AIRFLOW-4570] Remove future library (drop Python 2 support)
diff --git a/airflow/api/auth/backend/kerberos_auth.py b/airflow/api/auth/backend/kerberos_auth.py index f95f4d257191c..8a6a76f018a78 100644 --- a/airflow/api/auth/backend/kerberos_auth.py +++ b/airflow/api/auth/backend/kerberos_auth.py @@ -24,8 +24,6 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -from future.standard_library import install_aliases - from airflow.utils.log.logging_mixin import LoggingMixin import kerberos @@ -43,8 +41,6 @@ from requests_kerberos import HTTPKerberosAuth from socket import getfqdn -install_aliases() - client_auth = HTTPKerberosAuth(service='airflow') _SERVICE_NAME = None diff --git a/airflow/api/client/json_client.py b/airflow/api/client/json_client.py index e05913ebb4743..2b8440813a8d4 100644 --- a/airflow/api/client/json_client.py +++ b/airflow/api/client/json_client.py @@ -17,7 +17,8 @@ # specific language governing permissions and limitations # under the License. -from future.moves.urllib.parse import urljoin +from urllib.parse import urljoin + import requests from airflow.api.client import api_client diff --git a/airflow/configuration.py b/airflow/configuration.py index 7e0f8ad8a5855..af68c6bb77dd2 100644 --- a/airflow/configuration.py +++ b/airflow/configuration.py @@ -21,7 +21,6 @@ from builtins import str from collections import OrderedDict import copy -from future import standard_library import os import pathlib import shlex @@ -36,8 +35,6 @@ from airflow.exceptions import AirflowConfigException from airflow.utils.log.logging_mixin import LoggingMixin -standard_library.install_aliases() - log = LoggingMixin().log # show Airflow's deprecation warnings diff --git a/airflow/contrib/auth/backends/ldap_auth.py b/airflow/contrib/auth/backends/ldap_auth.py index 1639f43aec307..6729f9df2cee8 100644 --- a/airflow/contrib/auth/backends/ldap_auth.py +++ b/airflow/contrib/auth/backends/ldap_auth.py @@ -16,7 +16,6 @@ # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. 
-from future.utils import native import flask_login from flask_login import login_required, current_user, logout_user # noqa: F401 @@ -75,7 +74,7 @@ def get_ldap_connection(dn=None, password=None): use_ssl=True, tls=tls_configuration) - conn = Connection(server, native(dn), native(password)) + conn = Connection(server, dn, password) if not conn.bind(): log.error("Cannot bind to ldap server: %s ", conn.last_error) @@ -87,8 +86,7 @@ def get_ldap_connection(dn=None, password=None): def group_contains_user(conn, search_base, group_filter, user_name_attr, username): search_filter = '(&({0}))'.format(group_filter) - if not conn.search(native(search_base), native(search_filter), - attributes=[native(user_name_attr)]): + if not conn.search(search_base, search_filter, attributes=[user_name_attr]): log.warning("Unable to find group for %s %s", search_base, search_filter) else: for entry in conn.entries: @@ -105,8 +103,7 @@ def groups_user(conn, search_base, user_filter, user_name_att, username): memberof_attr = configuration.conf.get("ldap", "group_member_attr") except Exception: memberof_attr = "memberOf" - res = conn.search(native(search_base), native(search_filter), - attributes=[native(memberof_attr)]) + res = conn.search(search_base, search_filter, attributes=[memberof_attr]) if not res: log.info("Cannot find user %s", username) raise AuthenticationError("Invalid username or password") @@ -210,9 +207,7 @@ def try_login(username, password): # todo: BASE or ONELEVEL? - res = conn.search(native(configuration.conf.get("ldap", "basedn")), - native(search_filter), - search_scope=native(search_scope)) + res = conn.search(configuration.conf.get("ldap", "basedn"), search_filter, search_scope=search_scope) # todo: use list or result? if not res: diff --git a/airflow/models/dag.py b/airflow/models/dag.py index 3c337d97c432b..b25d6c2d57250 100644 --- a/airflow/models/dag.py +++ b/airflow/models/dag.py @@ -34,7 +34,6 @@ import six from croniter import croniter from dateutil.relativedelta import relativedelta -from future.standard_library import install_aliases from sqlalchemy import Column, String, Boolean, Integer, Text, func, or_ from airflow import configuration, settings, utils @@ -54,8 +53,6 @@ from airflow.utils.sqlalchemy import UtcDateTime, Interval from airflow.utils.state import State -install_aliases() - ScheduleInterval = Union[str, timedelta, relativedelta] diff --git a/airflow/www/utils.py b/airflow/www/utils.py index e097499e38c07..39498691e98a8 100644 --- a/airflow/www/utils.py +++ b/airflow/www/utils.py @@ -17,9 +17,6 @@ # specific language governing permissions and limitations # under the License. -from future import standard_library # noqa -standard_library.install_aliases() # noqa - import inspect import json import time diff --git a/setup.py b/setup.py index 6970bcd326671..7a67635c711e9 100644 --- a/setup.py +++ b/setup.py @@ -323,7 +323,6 @@ def do_setup(): 'flask-swagger==0.2.13', 'flask-wtf>=0.14.2, <0.15', 'funcsigs==1.0.0', - 'future>=0.16.0, <0.17', 'gitpython>=2.0.2', 'gunicorn>=19.5.0, <20.0', 'iso8601>=0.1.12',
Make sure you have checked _all_ steps below.

### Jira

- [x] My PR addresses the following [Airflow Jira](https://issues.apache.org/jira/browse/AIRFLOW/) issues and references them in the PR title. For example, "\[AIRFLOW-XXX\] My Airflow PR"
  - https://issues.apache.org/jira/browse/AIRFLOW-4570
  - In case you are fixing a typo in the documentation you can prepend your commit with \[AIRFLOW-XXX\]; code changes always need a Jira issue.
  - In case you are proposing a fundamental code change, you need to create an Airflow Improvement Proposal ([AIP](https://cwiki.apache.org/confluence/display/AIRFLOW/Airflow+Improvements+Proposals)).
  - In case you are adding a dependency, check if the license complies with the [ASF 3rd Party License Policy](https://www.apache.org/legal/resolved.html#category-x).

### Description

- [x] Here are some details about my PR, including screenshots of any UI changes: Remove all usage of the future library because we're dropping Python 2 support.

### Tests

- [x] My PR adds the following unit tests __OR__ does not need testing for this extremely good reason:

### Commits

- [x] My commits all reference Jira issues in their subject lines, and I have squashed multiple commits if they address the same issue. In addition, my commits follow the guidelines from "[How to write a good git commit message](http://chris.beams.io/posts/git-commit/)":
  1. Subject is separated from body by a blank line
  1. Subject is limited to 50 characters (not including Jira issue reference)
  1. Subject does not end with a period
  1. Subject uses the imperative mood ("add", not "adding")
  1. Body wraps at 72 characters
  1. Body explains "what" and "why", not "how"

### Documentation

- [x] In case of new functionality, my PR adds documentation that describes how to use it.
  - All the public functions and the classes in the PR contain docstrings that explain what they do
  - If you implement backwards-incompatible changes, please leave a note in [Updating.md](https://github.com/apache/airflow/blob/master/UPDATING.md) so we can assign it to an appropriate release

### Code Quality

- [x] Passes `flake8`
https://api.github.com/repos/apache/airflow/pulls/5324
2019-05-25T15:25:52Z
2019-05-25T16:58:34Z
2019-05-25T16:58:34Z
2019-05-25T16:58:34Z
1,485
apache/airflow
14,740
fix: fix syntax errors
diff --git a/modules/upscaler.py b/modules/upscaler.py index 3aee69db8d2..9d13ee99324 100644 --- a/modules/upscaler.py +++ b/modules/upscaler.py @@ -20,7 +20,7 @@ class Upscaler: filter = None model = None user_path = None - scalers: [] + scalers: list = [] tile = True def __init__(self, create_dirs=False):
## Description

Python syntax error.

## Screenshots/videos:

![image](https://github.com/AUTOMATIC1111/stable-diffusion-webui/assets/40930677/243aa6ca-f863-478f-a169-a433e774d711)

## Checklist:

- [ ] I have read [contributing wiki page](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Contributing)
- [ ] I have performed a self-review of my own code
- [ ] My code follows the [style guidelines](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Contributing#code-style)
- [ ] My code passes [tests](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Tests)
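For the record, `scalers: []` actually parses: it is a bare annotation (with `[]` as a nonsensical annotation expression) rather than a literal SyntaxError, but it never creates the attribute, which is the real bug. A small demo, illustrative only:

```python
class Before:
    scalers: []          # bare annotation: [] is recorded, no attribute is set

class After:
    scalers: list = []   # annotated assignment: the attribute actually exists

print(hasattr(Before, "scalers"))  # False -- accessing it raises AttributeError
print(After.scalers)               # []
```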
https://api.github.com/repos/AUTOMATIC1111/stable-diffusion-webui/pulls/15179
2024-03-08T09:27:18Z
2024-03-16T15:45:01Z
2024-03-16T15:45:01Z
2024-03-16T15:45:01Z
114
AUTOMATIC1111/stable-diffusion-webui
40,047
Add reddit back in
diff --git a/removed_sites.json b/removed_sites.json index 65345fed8..3f5fcfbce 100644 --- a/removed_sites.json +++ b/removed_sites.json @@ -646,13 +646,6 @@ "username_claimed": "Matt-Riggsby", "username_unclaimed": "noonewouldeverusethis7" }, - "Reddit": { - "errorType": "status_code", - "url": "https://www.reddit.com/user/{}", - "urlMain": "https://www.reddit.com/", - "username_claimed": "blue", - "username_unclaimed": "noonewouldeverusethis7" - }, "SparkPeople": { "errorMsg": "We couldn't find that user", "errorType": "message", diff --git a/removed_sites.md b/removed_sites.md index 48f678f65..a44773634 100644 --- a/removed_sites.md +++ b/removed_sites.md @@ -1259,19 +1259,6 @@ As of 2021-09-04, Quora returns false positives. } ``` -### Reddit - -As of 2021-09-04, Reddit returns false positives. -``` - "Reddit": { - "errorType": "status_code", - "url": "https://www.reddit.com/user/{}", - "urlMain": "https://www.reddit.com/", - "username_claimed": "blue", - "username_unclaimed": "noonewouldeverusethis7" - }, -``` - ### SparkPeople As of 2021-09-04, SparkPeople returns false positives. ``` diff --git a/sherlock/resources/data.json b/sherlock/resources/data.json index 5903f794b..03a5405ab 100644 --- a/sherlock/resources/data.json +++ b/sherlock/resources/data.json @@ -1197,6 +1197,17 @@ "username_claimed": "blue", "username_unclaimed": "noonewouldeverusethis77777" }, + "Reddit": { + "errorMsg": "Sorry, nobody on Reddit goes by that name.", + "errorType": "message", + "headers" : { + "accept-language": "en-US,en;q=0.9" + }, + "url": "https://www.reddit.com/user/{}", + "urlMain": "https://www.reddit.com/", + "username_claimed": "blue", + "username_unclaimed": "noonewouldeverusethis7" +}, "Repl.it": { "errorType": "status_code", "url": "https://repl.it/@{}", diff --git a/sites.md b/sites.md index 89a2695a4..756bc7172 100644 --- a/sites.md +++ b/sites.md @@ -1,4 +1,4 @@ -## List Of Supported Sites (283 Sites In Total!) +## List Of Supported Sites (284 Sites In Total!) 1. [2Dimensions](https://2Dimensions.com/) 1. [3dnews](http://forum.3dnews.ru/) 1. [7Cups](https://www.7cups.com/) @@ -156,6 +156,7 @@ 1. [Rajce.net](https://www.rajce.idnes.cz/) 1. [Rate Your Music](https://rateyourmusic.com/) 1. [Redbubble](https://www.redbubble.com/) +1. [Reddit](https://www.reddit.com/) 1. [Repl.it](https://repl.it/) 1. [ResearchGate](https://www.researchgate.net/) 1. [ReverbNation](https://www.reverbnation.com/)
Hello, the issue with Reddit seems to be that it now returns a 404 status only if you pass a cookie in the headers, and it always expects a language header, returning the error message in the corresponding language. This PR should fix the issue and make the Reddit check work again.
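Roughly what the new detection boils down to: pinning the language so the error string is predictable. The request below is illustrative only, and Reddit's behaviour may have changed since this was written:

```python
import requests

# Pin the language so the "no such user" message comes back in English.
headers = {"accept-language": "en-US,en;q=0.9"}
resp = requests.get("https://www.reddit.com/user/noonewouldeverusethis7",
                    headers=headers)
# sherlock can now match on a fixed error message instead of the status code:
print("Sorry, nobody on Reddit goes by that name." in resp.text)
```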
https://api.github.com/repos/sherlock-project/sherlock/pulls/1132
2021-09-05T20:17:15Z
2021-09-06T05:13:02Z
2021-09-06T05:13:02Z
2021-09-06T08:06:15Z
861
sherlock-project/sherlock
36,319
Fixed #28889 -- Prevented double submission of admin forms.
diff --git a/django/contrib/admin/static/admin/js/change_form.js b/django/contrib/admin/static/admin/js/change_form.js index 96a4c62ef4c35..0ba16efef9818 100644 --- a/django/contrib/admin/static/admin/js/change_form.js +++ b/django/contrib/admin/static/admin/js/change_form.js @@ -1,9 +1,23 @@ +/*global gettext*/ 'use strict'; { const inputTags = ['BUTTON', 'INPUT', 'SELECT', 'TEXTAREA']; const modelName = document.getElementById('django-admin-form-add-constants').dataset.modelName; + let submitted = false; + if (modelName) { const form = document.getElementById(modelName + '_form'); + + form.addEventListener('submit', (event) => { + event.preventDefault(); + if (submitted) { + const answer = window.confirm(gettext('You have already submitted this form. Are you sure you want to submit it again?')); + if (!answer) {return;} + }; + event.target.submit(); + submitted = true; + }); + for (const element of form.elements) { // HTMLElement.offsetParent returns null when the element is not // rendered. diff --git a/tests/admin_views/test_prevent_double_submission.py b/tests/admin_views/test_prevent_double_submission.py new file mode 100644 index 0000000000000..1052a977bab50 --- /dev/null +++ b/tests/admin_views/test_prevent_double_submission.py @@ -0,0 +1,105 @@ +from django.contrib.admin.tests import AdminSeleniumTestCase +from django.contrib.auth.models import User +from django.test import override_settings +from django.urls import reverse + +from .models import Bookmark + + +@override_settings(ROOT_URLCONF="admin_views.urls") +class SeleniumTests(AdminSeleniumTestCase): + available_apps = ["admin_views"] + AdminSeleniumTestCase.available_apps + + def setUp(self): + self.BOOKMARK_ADD_URL = reverse("admin:admin_views_bookmark_add") + self.BOOKMARK_LIST_URL = reverse("admin:admin_views_bookmark_changelist") + self.ALERT_MESSAGE = ( + "You have already submitted this form. " + "Are you sure you want to submit it again?" 
+ ) + self.superuser = User.objects.create_superuser( + username="super", + password="secret", + email="[email protected]", + ) + self.admin_login( + username="super", + password="secret", + login_url=reverse("admin:index"), + ) + + def test_single_submit_click_is_success_without_alert(self): + from selenium.webdriver.common.by import By + + self.selenium.get(self.live_server_url + self.BOOKMARK_ADD_URL) + input_ = self.selenium.find_element(By.ID, "id_name") + input_.send_keys("Bookmark name") + save_button = self.selenium.find_element(By.CSS_SELECTOR, "input[name=_save]") + save_button.click() + self.assertEqual( + self.selenium.current_url, self.live_server_url + self.BOOKMARK_LIST_URL + ) + self.assertEqual(Bookmark.objects.count(), 1) + + def _double_click_submit(self): + from selenium.webdriver.common.action_chains import ActionChains + from selenium.webdriver.common.by import By + + self.selenium.get(self.live_server_url + self.BOOKMARK_ADD_URL) + input_ = self.selenium.find_element(By.ID, "id_name") + input_.send_keys("Bookmark name") + save_button = self.selenium.find_element(By.CSS_SELECTOR, "input[name=_save]") + ActionChains(self.selenium).double_click(save_button).perform() + + def test_confirm_double_submit_alert(self): + self._double_click_submit() + alert = self.selenium.switch_to.alert + self.assertEqual(alert.text, self.ALERT_MESSAGE) + alert.accept() + self.wait_page_ready() + + OBJECTS_CREATED = 1 + if self.browser == "chrome": + OBJECTS_CREATED = 2 + elif self.browser == "firefox": + pass + + self.assertEqual(Bookmark.objects.count(), OBJECTS_CREATED) + + def test_cancel_double_submit_alert(self): + self._double_click_submit() + alert = self.selenium.switch_to.alert + self.assertEqual(alert.text, self.ALERT_MESSAGE) + alert.dismiss() + self.wait_page_ready() + self.assertEqual(Bookmark.objects.count(), 1) + + def test_submit_and_go_back(self): + from selenium.webdriver.common.by import By + + self.selenium.get(self.live_server_url + self.BOOKMARK_ADD_URL) + input_ = self.selenium.find_element(By.ID, "id_name") + input_.send_keys("Bookmark name") + + # submit by first time + save_button = self.selenium.find_element(By.CSS_SELECTOR, "input[name=_save]") + save_button.click() + self.assertEqual(Bookmark.objects.count(), 1) + self.assertEqual( + self.selenium.current_url, self.live_server_url + self.BOOKMARK_LIST_URL + ) + + # go back + self.selenium.back() + self.assertEqual( + self.selenium.current_url, self.live_server_url + self.BOOKMARK_ADD_URL + ) + + # submit again + input_ = self.selenium.find_element(By.ID, "id_name") + input_.clear() + input_.send_keys("Other bookmark name") + save_button = self.selenium.find_element(By.CSS_SELECTOR, "input[name=_save]") + save_button.click() + + self.assertEqual(Bookmark.objects.count(), 2)
This is the "You already submitted (alert)" approach for [ticket #28889](https://code.djangoproject.com/ticket/28889).

To run the tests, please do:
`./runtests.py admin_views.test_prevent_double_submission --selenium=chrome,firefox`

Thanks!
https://api.github.com/repos/django/django/pulls/15217
2021-12-20T03:10:54Z
2022-02-23T13:57:34Z
2022-02-23T13:57:34Z
2022-02-24T13:47:23Z
1,248
django/django
51,327
Disable CPU inference on PRs
diff --git a/.github/workflows/cpu-inference.yml b/.github/workflows/cpu-inference.yml index 2cf7c02923b9..521fe2b5bea4 100644 --- a/.github/workflows/cpu-inference.yml +++ b/.github/workflows/cpu-inference.yml @@ -1,13 +1,7 @@ name: cpu-inference on: - pull_request: - paths-ignore: - - 'docs/**' - - 'blogs/**' workflow_dispatch: - merge_group: - branches: [ master ] concurrency: group: ${{ github.workflow }}-${{ github.ref }}
The workflow is currently paused in the GitHub UI, so this change has no immediate impact on current behavior.
https://api.github.com/repos/microsoft/DeepSpeed/pulls/4590
2023-10-31T16:49:32Z
2023-10-31T18:15:43Z
2023-10-31T18:15:43Z
2024-02-28T18:14:36Z
142
microsoft/DeepSpeed
10,819
Bump tailwindcss from 3.3.2 to 3.3.3 in /website
diff --git a/website/package-lock.json b/website/package-lock.json index 7ddd66afcb..91203feaf5 100644 --- a/website/package-lock.json +++ b/website/package-lock.json @@ -64,7 +64,7 @@ "sharp": "^0.32.1", "simplebar-react": "^3.2.4", "swr": "^2.2.0", - "tailwindcss": "^3.3.2", + "tailwindcss": "^3.3.3", "use-debounce": "^9.0.4", "usehooks-ts": "^2.9.1" }, @@ -30379,9 +30379,9 @@ } }, "node_modules/tailwindcss": { - "version": "3.3.2", - "resolved": "https://registry.npmjs.org/tailwindcss/-/tailwindcss-3.3.2.tgz", - "integrity": "sha512-9jPkMiIBXvPc2KywkraqsUfbfj+dHDb+JPWtSJa9MLFdrPyazI7q6WX2sUrm7R9eVR7qqv3Pas7EvQFzxKnI6w==", + "version": "3.3.3", + "resolved": "https://registry.npmjs.org/tailwindcss/-/tailwindcss-3.3.3.tgz", + "integrity": "sha512-A0KgSkef7eE4Mf+nKJ83i75TMyq8HqY3qmFIJSWy8bNt0v1lG7jUcpGpoTFxAwYcWOphcTBLPPJg+bDfhDf52w==", "dependencies": { "@alloc/quick-lru": "^5.2.0", "arg": "^5.0.2", @@ -30403,7 +30403,6 @@ "postcss-load-config": "^4.0.1", "postcss-nested": "^6.0.1", "postcss-selector-parser": "^6.0.11", - "postcss-value-parser": "^4.2.0", "resolve": "^1.22.2", "sucrase": "^3.32.0" }, diff --git a/website/package.json b/website/package.json index ba736a83a4..b26f604ffd 100644 --- a/website/package.json +++ b/website/package.json @@ -85,7 +85,7 @@ "sharp": "^0.32.1", "simplebar-react": "^3.2.4", "swr": "^2.2.0", - "tailwindcss": "^3.3.2", + "tailwindcss": "^3.3.3", "use-debounce": "^9.0.4", "usehooks-ts": "^2.9.1" },
Bumps [tailwindcss](https://github.com/tailwindlabs/tailwindcss) from 3.3.2 to 3.3.3.

<details>
<summary>Release notes</summary>
<p><em>Sourced from <a href="https://github.com/tailwindlabs/tailwindcss/releases">tailwindcss's releases</a>.</em></p>
<blockquote>
<h2>v3.3.3</h2>
<h3>Fixed</h3>
<ul>
<li>Fix issue where some pseudo-element variants generated the wrong selector (<a href="https://redirect.github.com/tailwindlabs/tailwindcss/pull/10943">#10943</a>, <a href="https://redirect.github.com/tailwindlabs/tailwindcss/pull/10962">#10962</a>, <a href="https://redirect.github.com/tailwindlabs/tailwindcss/pull/11111">#11111</a>)</li>
<li>Make font settings propagate into buttons, inputs, etc. (<a href="https://redirect.github.com/tailwindlabs/tailwindcss/pull/10940">#10940</a>)</li>
<li>Fix parsing of <code>theme()</code> inside <code>calc()</code> when there are no spaces around operators (<a href="https://redirect.github.com/tailwindlabs/tailwindcss/pull/11157">#11157</a>)</li>
<li>Ensure <code>repeating-conic-gradient</code> is detected as an image (<a href="https://redirect.github.com/tailwindlabs/tailwindcss/pull/11180">#11180</a>)</li>
<li>Move unknown pseudo-elements outside of <code>:is</code> by default (<a href="https://redirect.github.com/tailwindlabs/tailwindcss/pull/11345">#11345</a>)</li>
<li>Escape animation names when prefixes contain special characters (<a href="https://redirect.github.com/tailwindlabs/tailwindcss/pull/11470">#11470</a>)</li>
<li>Don't prefix arbitrary classes in <code>group</code> and <code>peer</code> variants (<a href="https://redirect.github.com/tailwindlabs/tailwindcss/pull/11454">#11454</a>)</li>
<li>Sort classes using position of first matching rule (<a href="https://redirect.github.com/tailwindlabs/tailwindcss/pull/11504">#11504</a>)</li>
<li>Allow variant to be an at-rule without a prelude (<a href="https://redirect.github.com/tailwindlabs/tailwindcss/pull/11589">#11589</a>)</li>
<li>Make PostCSS plugin async to improve performance (<a href="https://redirect.github.com/tailwindlabs/tailwindcss/pull/11548">#11548</a>)</li>
<li>Don’t error when a config file is missing (<a href="https://github.com/tailwindlabs/tailwindcss/commit/f97759f808d15ace66647b1405744fcf95a392e5">f97759f</a>)</li>
</ul>
<h3>Added</h3>
<ul>
<li>Add <code>aria-busy</code> utility (<a href="https://redirect.github.com/tailwindlabs/tailwindcss/pull/10966">#10966</a>)</li>
</ul>
<h3>Changed</h3>
<ul>
<li>Reset padding for <code>&lt;dialog&gt;</code> elements in preflight (<a href="https://redirect.github.com/tailwindlabs/tailwindcss/pull/11069">#11069</a>)</li>
</ul>
</blockquote>
</details>

<details>
<summary>Commits</summary>
<ul>
<li><a href="https://github.com/tailwindlabs/tailwindcss/commit/0bd81a06c499be58bf87ca16481333823d86e828"><code>0bd81a0</code></a> 3.3.3</li>
<li><a href="https://github.com/tailwindlabs/tailwindcss/commit/6a6ceb524016efcab107fdb0ac9adfe6cf9b2cf3"><code>6a6ceb5</code></a> Update changelog</li>
<li><a href="https://github.com/tailwindlabs/tailwindcss/commit/f97759f808d15ace66647b1405744fcf95a392e5"><code>f97759f</code></a> Don’t error when a config file is missing</li>
<li><a href="https://github.com/tailwindlabs/tailwindcss/commit/005c1be2edf85c9ace9d441cb6b8a9c973491566"><code>005c1be</code></a> Don't prefix arbitrary classes in <code>peer</code>/<code>group</code> variants (<a href="https://redirect.github.com/tailwindlabs/tailwindcss/issues/11454">#11454</a>)</li>
<li><a href="https://github.com/tailwindlabs/tailwindcss/commit/5b9cbb3a81c1557a60f0f0142fedd6159bf09ff7"><code>5b9cbb3</code></a> Make PostCSS plugin async to improve performance (<a href="https://redirect.github.com/tailwindlabs/tailwindcss/issues/11548">#11548</a>)</li>
<li><a href="https://github.com/tailwindlabs/tailwindcss/commit/1c9bb387e1a081d2a8ea7bf7aab413a212afd99d"><code>1c9bb38</code></a> Allow variant to be an at-rule without a prelude (<a href="https://redirect.github.com/tailwindlabs/tailwindcss/issues/11589">#11589</a>)</li>
<li><a href="https://github.com/tailwindlabs/tailwindcss/commit/80f3e85fa079036949a203054a6924d8e87ac3be"><code>80f3e85</code></a> Sort classes using position of first matching rule (<a href="https://redirect.github.com/tailwindlabs/tailwindcss/issues/11504">#11504</a>)</li>
<li><a href="https://github.com/tailwindlabs/tailwindcss/commit/243226887f16166fe504ad543fbb203fc46a4130"><code>2432268</code></a> Reset dialog element styles (<a href="https://redirect.github.com/tailwindlabs/tailwindcss/issues/11069">#11069</a>)</li>
<li><a href="https://github.com/tailwindlabs/tailwindcss/commit/b885fff544b1a109ba72a4a3f0e6b2a3a6cbcfe0"><code>b885fff</code></a> Add <code>aria-busy</code> utility (<a href="https://redirect.github.com/tailwindlabs/tailwindcss/issues/10966">#10966</a>)</li>
<li><a href="https://github.com/tailwindlabs/tailwindcss/commit/1fb7486165bc633dc53b6c1d10328ccd09a4b13b"><code>1fb7486</code></a> Make font settings propagate into buttons, inputs, etc. (<a href="https://redirect.github.com/tailwindlabs/tailwindcss/issues/10940">#10940</a>)</li>
<li>Additional commits viewable in <a href="https://github.com/tailwindlabs/tailwindcss/compare/v3.3.2...v3.3.3">compare view</a></li>
</ul>
</details>
<br />

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=tailwindcss&package-manager=npm_and_yarn&previous-version=3.3.2&new-version=3.3.3)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores)
https://api.github.com/repos/LAION-AI/Open-Assistant/pulls/3601
2023-07-24T18:54:40Z
2023-07-24T20:53:26Z
2023-07-24T20:53:26Z
2023-07-24T20:53:27Z
687
LAION-AI/Open-Assistant
37,795
A solution to xelatex error converting to xdv
diff --git a/manimlib/ctex_template.tex b/manimlib/ctex_template.tex index 41edd9683e..016bc3ccd6 100644 --- a/manimlib/ctex_template.tex +++ b/manimlib/ctex_template.tex @@ -1,4 +1,5 @@ \documentclass[preview]{standalone} +\usepackage[UTF8]{ctex} \usepackage[english]{babel} \usepackage{amsmath} @@ -16,7 +17,6 @@ \usepackage{xcolor} \usepackage{microtype} %\DisableLigatures{encoding = *, family = * } -\usepackage[UTF8]{ctex} \linespread{1} \begin{document}
This may solve some xelatex errors, such as #1055, #1027, etc. If `\usepackage[UTF8]{ctex}` is not placed at the top of `ctex_template.tex`, it can cause a xelatex error even when TeX Live or MiKTeX is installed correctly.
https://api.github.com/repos/3b1b/manim/pulls/1187
2020-07-28T15:13:00Z
2020-09-30T12:54:47Z
2020-09-30T12:54:47Z
2020-09-30T12:54:48Z
167
3b1b/manim
18,584
Save a csv containing the loss while training
diff --git a/modules/hypernetworks/hypernetwork.py b/modules/hypernetworks/hypernetwork.py index b6c06d49e6c..2751a8c8d14 100644 --- a/modules/hypernetworks/hypernetwork.py +++ b/modules/hypernetworks/hypernetwork.py @@ -5,6 +5,7 @@ import sys import traceback import tqdm +import csv import torch @@ -174,7 +175,7 @@ def attention_CrossAttention_forward(self, x, context=None, mask=None): return self.to_out(out) -def train_hypernetwork(hypernetwork_name, learn_rate, data_root, log_directory, steps, create_image_every, save_hypernetwork_every, template_file, preview_image_prompt): +def train_hypernetwork(hypernetwork_name, learn_rate, data_root, log_directory, steps, create_image_every, save_hypernetwork_every, write_csv_every, template_file, preview_image_prompt): assert hypernetwork_name, 'hypernetwork not selected' path = shared.hypernetworks.get(hypernetwork_name, None) @@ -256,6 +257,20 @@ def train_hypernetwork(hypernetwork_name, learn_rate, data_root, log_directory, last_saved_file = os.path.join(hypernetwork_dir, f'{hypernetwork_name}-{hypernetwork.step}.pt') hypernetwork.save(last_saved_file) + if write_csv_every > 0 and hypernetwork_dir is not None and hypernetwork.step % write_csv_every == 0: + write_csv_header = False if os.path.exists(os.path.join(hypernetwork_dir, "hypernetwork_loss.csv")) else True + + with open(os.path.join(hypernetwork_dir, "hypernetwork_loss.csv"), "a+") as fout: + + csv_writer = csv.DictWriter(fout, fieldnames=["step", "loss", "learn_rate"]) + + if write_csv_header: + csv_writer.writeheader() + + csv_writer.writerow({"step": hypernetwork.step, + "loss": f"{losses.mean():.7f}", + "learn_rate": scheduler.learn_rate}) + if hypernetwork.step > 0 and images_dir is not None and hypernetwork.step % create_image_every == 0: last_saved_image = os.path.join(images_dir, f'{hypernetwork_name}-{hypernetwork.step}.png') diff --git a/modules/textual_inversion/textual_inversion.py b/modules/textual_inversion/textual_inversion.py index fa0e33a2ae0..b83df079dde 100644 --- a/modules/textual_inversion/textual_inversion.py +++ b/modules/textual_inversion/textual_inversion.py @@ -6,6 +6,7 @@ import tqdm import html import datetime +import csv from PIL import Image, PngImagePlugin @@ -172,7 +173,7 @@ def create_embedding(name, num_vectors_per_token, init_text='*'): return fn -def train_embedding(embedding_name, learn_rate, data_root, log_directory, training_width, training_height, steps, create_image_every, save_embedding_every, template_file, save_image_with_stored_embedding, preview_image_prompt): +def train_embedding(embedding_name, learn_rate, data_root, log_directory, training_width, training_height, steps, create_image_every, save_embedding_every, write_csv_every, template_file, save_image_with_stored_embedding, preview_image_prompt): assert embedding_name, 'embedding not selected' shared.state.textinfo = "Initializing textual inversion training..." 
@@ -256,6 +257,21 @@ def train_embedding(embedding_name, learn_rate, data_root, log_directory, traini last_saved_file = os.path.join(embedding_dir, f'{embedding_name}-{embedding.step}.pt') embedding.save(last_saved_file) + if write_csv_every > 0 and log_directory is not None and embedding.step % write_csv_every == 0: + write_csv_header = False if os.path.exists(os.path.join(log_directory, "textual_inversion_loss.csv")) else True + + with open(os.path.join(log_directory, "textual_inversion_loss.csv"), "a+") as fout: + + csv_writer = csv.DictWriter(fout, fieldnames=["epoch", "epoch_step", "loss", "learn_rate"]) + + if write_csv_header: + csv_writer.writeheader() + + csv_writer.writerow({"epoch": epoch_num + 1, + "epoch_step": epoch_step - 1, + "loss": f"{losses.mean():.7f}", + "learn_rate": scheduler.learn_rate}) + if embedding.step > 0 and images_dir is not None and embedding.step % create_image_every == 0: last_saved_image = os.path.join(images_dir, f'{embedding_name}-{embedding.step}.png') diff --git a/modules/ui.py b/modules/ui.py index e07ee0e1d77..1195c2f12ed 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -1096,6 +1096,7 @@ def create_ui(wrap_gradio_gpu_call): training_height = gr.Slider(minimum=64, maximum=2048, step=64, label="Height", value=512) steps = gr.Number(label='Max steps', value=100000, precision=0) create_image_every = gr.Number(label='Save an image to log directory every N steps, 0 to disable', value=500, precision=0) + write_csv_every = gr.Number(label='Save an csv containing the loss to log directory every N steps, 0 to disable', value=500, precision=0) save_embedding_every = gr.Number(label='Save a copy of embedding to log directory every N steps, 0 to disable', value=500, precision=0) save_image_with_stored_embedding = gr.Checkbox(label='Save images with embedding in PNG chunks', value=True) preview_image_prompt = gr.Textbox(label='Preview prompt', value="") @@ -1174,6 +1175,7 @@ def create_ui(wrap_gradio_gpu_call): steps, create_image_every, save_embedding_every, + write_csv_every, template_file, save_image_with_stored_embedding, preview_image_prompt, @@ -1195,6 +1197,7 @@ def create_ui(wrap_gradio_gpu_call): steps, create_image_every, save_embedding_every, + write_csv_every, template_file, preview_image_prompt, ],
**Describe what this pull request is trying to achieve.**

This will make it so that the loss is written to a CSV file along with the epoch and epoch step in the case of textual inversion, and just the step for hypernetworks. Using this, one can graph the loss and see how well the model is learning. How often the loss is written can be configured in the options.

**Additional notes and description of your changes**

To understand exactly what loss means, [this page](https://developers.google.com/machine-learning/crash-course/descending-into-ml/training-and-loss) has a simple explanation of loss in machine learning. To make a graph in LibreOffice, don't forget to check `First column as label` in `data range`.

**Environment this was tested in**

OS: Linux, Ubuntu 22.04 LTS
Graphics card: GeForce RTX 3060

**Screenshots or videos of your changes**

![image](https://user-images.githubusercontent.com/12272837/195453616-8453351a-966d-4ed7-bde7-728cf56246fe.png)
![image](https://user-images.githubusercontent.com/12272837/195454028-caf12273-962c-4389-9ea0-536f95ecb91a.png)
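For reference, the appending pattern the diff uses, as a standalone sketch (not part of the PR; `log_loss` is a hypothetical wrapper, and the file and field names mirror the hypernetwork branch of the diff):

```python
# Minimal sketch of the CSV pattern in the diff: write the header only on
# first creation, then append one row per logging step.
import csv
import os

def log_loss(log_dir, step, loss, learn_rate):
    path = os.path.join(log_dir, "hypernetwork_loss.csv")
    write_header = not os.path.exists(path)
    with open(path, "a+", newline="") as fout:
        writer = csv.DictWriter(fout, fieldnames=["step", "loss", "learn_rate"])
        if write_header:
            writer.writeheader()
        writer.writerow({"step": step,
                         "loss": f"{loss:.7f}",
                         "learn_rate": learn_rate})
```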
https://api.github.com/repos/AUTOMATIC1111/stable-diffusion-webui/pulls/2430
2022-10-12T21:48:00Z
2022-10-14T19:44:00Z
2022-10-14T19:44:00Z
2022-10-14T19:44:00Z
1,428
AUTOMATIC1111/stable-diffusion-webui
39,907
Fix the missing use_zero_copy_run parameter in the whl package
diff --git a/paddleocr.py b/paddleocr.py index 65bca7ae24..d3d73cb1b9 100644 --- a/paddleocr.py +++ b/paddleocr.py @@ -129,6 +129,7 @@ def str2bool(v): parser.add_argument("--det", type=str2bool, default=True) parser.add_argument("--rec", type=str2bool, default=True) + parser.add_argument("--use_zero_copy_run", type=bool, default=False) return parser.parse_args() @@ -209,4 +210,4 @@ def main(): print(img_path) result = ocr_engine.ocr(img_path, det=args.det, rec=args.rec) for line in result: - print(line) + print(line) \ No newline at end of file
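A side note on the added flag (illustrative only, not part of the PR): argparse's `type=bool` is a common pitfall, since `bool("False")` is `True`; the file's existing `str2bool` helper exists precisely to avoid this.

```python
# Demonstration of the type=bool pitfall: any non-empty string value
# enables the flag, regardless of its content.
import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--use_zero_copy_run", type=bool, default=False)
print(parser.parse_args(["--use_zero_copy_run", "False"]))
# Namespace(use_zero_copy_run=True)
```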
https://api.github.com/repos/PaddlePaddle/PaddleOCR/pulls/623
2020-08-27T07:12:12Z
2020-08-28T03:55:24Z
2020-08-28T03:55:24Z
2020-08-31T03:05:03Z
182
PaddlePaddle/PaddleOCR
41,952
Resolve CVE-2023-32681 by accepting requests>=2.31.0
diff --git a/setup.py b/setup.py index 8b2fd8361934d..8d807b1912f84 100644 --- a/setup.py +++ b/setup.py @@ -20,7 +20,7 @@ "tenacity>=8.2.0,<9.0.0", "openai>=0.26.4", "pandas", - "requests<2.30.0", + "urllib3<2", "fsspec>=2023.5.0", "typing-inspect==0.8.0", "typing_extensions==4.5.0",
Accepting `requests>=2.31.0` to resolve [CVE-2023-32681 (Unintended leak of Proxy-Authorization header)](https://github.com/psf/requests/security/advisories/GHSA-j8r2-6x86-q33q). `requests` was previously pinned to `<2.30.0` by #2099 because requests 2.30.0 broke `download_loader` in llama_hub. I think the root cause of the breakage is that requests 2.30.0 started supporting urllib3 2.0, which may contain minor breaking changes. As per the [Release History of `requests`](https://github.com/psf/requests/blob/main/HISTORY.md), we can stay on urllib3 1.x with `requests>=2.30.0`. Hence the better solution is to pin `urllib3<2` instead of pinning `requests<2.30.0`.
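A sketch of the resulting constraint (hypothetical excerpt; the actual change in the diff edits the install list in `setup.py`):

```python
# Hypothetical excerpt of setup.py's install_requires after this change:
# keep urllib3 on the 1.x line instead of capping requests itself.
install_requires = [
    "urllib3<2",  # replaces the previous "requests<2.30.0" pin
    # ... remaining dependencies unchanged ...
]
```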
https://api.github.com/repos/run-llama/llama_index/pulls/3833
2023-05-24T02:32:06Z
2023-05-25T19:09:33Z
2023-05-25T19:09:33Z
2023-05-25T21:32:07Z
143
run-llama/llama_index
6,172
Added emoji zwj sequences
diff --git a/blns.json b/blns.json index 75d5973..4a3a85d 100644 --- a/blns.json +++ b/blns.json @@ -156,6 +156,7 @@ "🐵 🙈 🙉 🙊", "❤️ 💔 💌 💕 💞 💓 💗 💖 💘 💝 💟 💜 💛 💚 💙", "✋🏿 💪🏿 👐🏿 🙌🏿 👏🏿 🙏🏿", + "👨‍👩‍👦 👨‍👩‍👧‍👦 👨‍👨‍👦 👩‍👩‍👧 👨‍👦 👨‍👧‍👦 👩‍👦 👩‍👧‍👦", "🚾 🆒 🆓 🆕 🆖 🆗 🆙 🏧", "0️⃣ 1️⃣ 2️⃣ 3️⃣ 4️⃣ 5️⃣ 6️⃣ 7️⃣ 8️⃣ 9️⃣ 🔟", "🇺🇸🇷🇺🇸 🇦🇫🇦🇲🇸", diff --git a/blns.txt b/blns.txt index 280a977..1facd28 100644 --- a/blns.txt +++ b/blns.txt @@ -259,6 +259,7 @@ __ロ(,_,*) 🐵 🙈 🙉 🙊 ❤️ 💔 💌 💕 💞 💓 💗 💖 💘 💝 💟 💜 💛 💚 💙 ✋🏿 💪🏿 👐🏿 🙌🏿 👏🏿 🙏🏿 +👨‍👩‍👦 👨‍👩‍👧‍👦 👨‍👨‍👦 👩‍👩‍👧 👨‍👦 👨‍👧‍👦 👩‍👦 👩‍👧‍👦 🚾 🆒 🆓 🆕 🆖 🆗 🆙 🏧 0️⃣ 1️⃣ 2️⃣ 3️⃣ 4️⃣ 5️⃣ 6️⃣ 7️⃣ 8️⃣ 9️⃣ 🔟
The following are the recommended emoji ZWJ sequences, which use a U+200D ZERO WIDTH JOINER (ZWJ) to join the characters into a single glyph if available. When not available, the ZWJ characters are ignored and a fallback sequence of separate emoji is displayed. The example sequences consist of up to 7 characters per glyph and could create issues with string length calculations and layout. https://unicode.org/emoji/charts/emoji-zwj-sequences.html
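A quick Python illustration of the length pitfall described above (not part of the list itself):

```python
# A ZWJ family emoji is several code points joined by U+200D but is
# rendered as a single glyph, so naive length checks can be surprising.
family = "\U0001F468\u200D\U0001F469\u200D\U0001F466"  # 👨‍👩‍👦 (man, ZWJ, woman, ZWJ, boy)
print(len(family))                  # 5 code points
print(len(family.encode("utf-8")))  # 18 bytes
```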
https://api.github.com/repos/minimaxir/big-list-of-naughty-strings/pulls/211
2020-05-24T17:42:29Z
2020-05-26T02:39:04Z
2020-05-26T02:39:04Z
2020-05-26T02:39:04Z
645
minimaxir/big-list-of-naughty-strings
4,804
Raise InvalidUrl if host starts with '.' character.
diff --git a/requests/models.py b/requests/models.py index e7d292d580..d86388182b 100644 --- a/requests/models.py +++ b/requests/models.py @@ -403,7 +403,7 @@ def prepare_url(self, url, params): host = self._get_idna_encoded_host(host) except UnicodeError: raise InvalidURL('URL has an invalid label.') - elif host.startswith(u'*'): + elif host.startswith((u'*', u'.')): raise InvalidURL('URL has an invalid label.') # Carefully reconstruct the network location diff --git a/tests/test_requests.py b/tests/test_requests.py index 463e8bf47a..29b3aca84e 100644 --- a/tests/test_requests.py +++ b/tests/test_requests.py @@ -81,6 +81,8 @@ def test_entry_points(self): (InvalidSchema, 'localhost.localdomain:3128/'), (InvalidSchema, '10.122.1.1:3128/'), (InvalidURL, 'http://'), + (InvalidURL, 'http://*example.com'), + (InvalidURL, 'http://.example.com'), )) def test_invalid_url(self, exception, url): with pytest.raises(exception):
Closes #5367

Attempting to get `http://.example.com` results in a `UnicodeError`, but it should raise `InvalidURL`, just as attempting to get `http://*example.com` does. I've added `InvalidURL` tests for `http://.example.com` and `http://*example.com`.
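A condensed sketch of what the new test cases assert (assumes a requests build with this patch applied):

```python
# With this patch, both leading-'*' and leading-'.' hosts fail fast with
# InvalidURL instead of leaking a UnicodeError.
import pytest
import requests
from requests.exceptions import InvalidURL

for url in ("http://*example.com", "http://.example.com"):
    with pytest.raises(InvalidURL):
        requests.get(url)
```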
https://api.github.com/repos/psf/requests/pulls/5414
2020-04-05T16:52:04Z
2021-12-29T01:39:37Z
2021-12-29T01:39:36Z
2022-03-29T08:05:38Z
284
psf/requests
32,043
Suppress `torch` AMP-CPU warnings
diff --git a/utils/torch_utils.py b/utils/torch_utils.py index ca91ff6b4ad..c5257c6ebfe 100644 --- a/utils/torch_utils.py +++ b/utils/torch_utils.py @@ -9,6 +9,7 @@ import platform import subprocess import time +import warnings from contextlib import contextmanager from copy import deepcopy from pathlib import Path @@ -25,6 +26,9 @@ except ImportError: thop = None +# Suppress PyTorch warnings +warnings.filterwarnings('ignore', message='User provided device_type of \'cuda\', but CUDA is not available. Disabling') + @contextmanager def torch_distributed_zero_first(local_rank: int): @@ -293,13 +297,9 @@ def __call__(self, epoch, fitness): class ModelEMA: - """ Model Exponential Moving Average from https://github.com/rwightman/pytorch-image-models - Keep a moving average of everything in the model state_dict (parameters and buffers). - This is intended to allow functionality like - https://www.tensorflow.org/api_docs/python/tf/train/ExponentialMovingAverage - A smoothed version of the weights is necessary for some training schemes to perform well. - This class is sensitive where it is initialized in the sequence of model init, - GPU assignment and distributed training wrappers. + """ Updated Exponential Moving Average (EMA) from https://github.com/rwightman/pytorch-image-models + Keeps a moving average of everything in the model state_dict (parameters and buffers) + For EMA details see https://www.tensorflow.org/api_docs/python/tf/train/ExponentialMovingAverage """ def __init__(self, model, decay=0.9999, updates=0):
This is a torch bug, but they seem unable or unwilling to fix it, so I'm adding a suppression in YOLOv5. Resolves https://github.com/ultralytics/yolov5/issues/6692

## 🛠️ PR Summary

<sub>Made with ❤️ by [Ultralytics Actions](https://github.com/ultralytics/actions)</sub>

### 🌟 Summary

Enhancement of model training stability by suppressing irrelevant PyTorch warnings and refining ModelEMA documentation.

### 📊 Key Changes

- 🚫 Added code to suppress specific PyTorch warnings regarding CUDA availability.
- 📝 Updated comments for the `ModelEMA` class to better explain its purpose and provide references.

### 🎯 Purpose & Impact

- 🔇 Suppressing non-critical warnings helps declutter the command output during training, reducing confusion for users when the warning is not pertinent to their use case.
- 📖 Improved documentation for `ModelEMA` provides clearer guidance on its use and significance for developers using this module in their training pipelines.
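Standalone, the suppression technique looks like this (a minimal sketch; the message string is copied from the patch):

```python
# Filter one specific warning by its message text rather than silencing a
# whole category, so unrelated torch warnings still surface.
import warnings

warnings.filterwarnings(
    "ignore",
    message="User provided device_type of 'cuda', but CUDA is not available. Disabling")
```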
https://api.github.com/repos/ultralytics/yolov5/pulls/6706
2022-02-19T15:44:34Z
2022-02-19T15:48:34Z
2022-02-19T15:48:34Z
2024-01-19T12:43:30Z
394
ultralytics/yolov5
25,216
[extractor/rai] fix for #4690
diff --git a/yt_dlp/extractor/rai.py b/yt_dlp/extractor/rai.py index dc911069dc4..6ed8227eb6f 100644 --- a/yt_dlp/extractor/rai.py +++ b/yt_dlp/extractor/rai.py @@ -156,7 +156,7 @@ def get_format_info(tbr): br = int_or_none(tbr) if len(fmts) == 1 and not br: br = fmts[0].get('tbr') - if br or 0 > 300: + if br and br > 300: tbr = compat_str(math.floor(br / 100) * 100) else: tbr = '250'
### Description of your *pull request* and other information

A silly error I introduced in a previous PR. Fixes #4690

### Before submitting a *pull request* make sure you have:

- [x] At least skimmed through [contributing guidelines](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#developer-instructions) including [yt-dlp coding conventions](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#yt-dlp-coding-conventions)
- [x] [Searched](https://github.com/yt-dlp/yt-dlp/search?q=is%3Apr&type=Issues) the bugtracker for similar pull requests
- [x] Checked the code with [flake8](https://pypi.python.org/pypi/flake8) and [ran relevant tests](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#developer-instructions)

### In order to be accepted and merged into yt-dlp each piece of code must be in public domain or released under [Unlicense](http://unlicense.org/). Check one of the following options:

- [x] I am the original author of this code and I am willing to release it under [Unlicense](http://unlicense.org/)

### What is the purpose of your *pull request*?

- [x] Fix or improvement to an extractor (Make sure to add/update tests)
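For clarity, the fix is a pure operator-precedence issue (illustrative snippet, not part of the patch):

```python
# Comparison binds tighter than `or`, so the old test parsed as
# `br or (0 > 300)` and passed for any truthy bitrate.
br = 250
print(bool(br or 0 > 300))    # True  (old, buggy condition)
print(bool(br and br > 300))  # False (fixed condition)
```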
https://api.github.com/repos/yt-dlp/yt-dlp/pulls/4700
2022-08-18T20:57:18Z
2022-08-18T20:58:59Z
2022-08-18T20:58:59Z
2022-10-20T09:49:29Z
171
yt-dlp/yt-dlp
7,630
Fix capitalization of Dropbox
diff --git a/README.md b/README.md index eb64419c48..266f8b168d 100644 --- a/README.md +++ b/README.md @@ -1723,7 +1723,7 @@ Handy metrics based on numbers above: | Amazon | [Amazon architecture](http://highscalability.com/amazon-architecture) | | Cinchcast | [Producing 1,500 hours of audio every day](http://highscalability.com/blog/2012/7/16/cinchcast-architecture-producing-1500-hours-of-audio-every-d.html) | | DataSift | [Realtime datamining At 120,000 tweets per second](http://highscalability.com/blog/2011/11/29/datasift-architecture-realtime-datamining-at-120000-tweets-p.html) | -| DropBox | [How we've scaled Dropbox](https://www.youtube.com/watch?v=PE4gwstWhmc) | +| Dropbox | [How we've scaled Dropbox](https://www.youtube.com/watch?v=PE4gwstWhmc) | | ESPN | [Operating At 100,000 duh nuh nuhs per second](http://highscalability.com/blog/2013/11/4/espns-architecture-at-scale-operating-at-100000-duh-nuh-nuhs.html) | | Google | [Google architecture](http://highscalability.com/google-architecture) | | Instagram | [14 million users, terabytes of photos](http://highscalability.com/blog/2011/12/6/instagram-architecture-14-million-users-terabytes-of-photos.html)<br/>[What powers Instagram](http://instagram-engineering.tumblr.com/post/13649370142/what-powers-instagram-hundreds-of-instances) |
Dropbox is officially one word according to the company. This makes the entry in the table match all other mentions of the company in README.md
https://api.github.com/repos/donnemartin/system-design-primer/pulls/511
2021-02-23T03:06:20Z
2021-05-09T18:20:50Z
2021-05-09T18:20:50Z
2021-05-09T18:21:00Z
389
donnemartin/system-design-primer
36,723
Fixing link to C.146 to be valid, and a link to ??? to be unlinked
diff --git a/CppCoreGuidelines.md b/CppCoreGuidelines.md index 989d59aaf..7d02e9e96 100644 --- a/CppCoreGuidelines.md +++ b/CppCoreGuidelines.md @@ -7376,8 +7376,9 @@ the former (`dynamic_cast`) is far harder to implement correctly in general. Consider: struct B { - const char* name {"B"}; - virtual const char* id() const { return name; } // if pb1->id() == pb2->id() *pb1 is the same type as *pb2 + const char* name {"B"}; + // if pb1->id() == pb2->id() *pb1 is the same type as *pb2 + virtual const char* id() const { return name; } // ... }; @@ -7601,7 +7602,7 @@ give a wrong result (especially as a hierarchy is modified during maintenance). ##### Enforcement -See [C.146] and [???] +See [C.146](#Rh-dynamic_cast) and ??? ## <a name="SS-overload"></a>C.over: Overloading and overloaded operators @@ -11399,7 +11400,7 @@ If you feel the need for a lot of casts, there may be a fundamental design probl ##### Alternatives -Casts are widely (mis) used. Modern C++ has constructs that eliminats the need for casts in many contexts, such as +Casts are widely (mis) used. Modern C++ has constructs that eliminates the need for casts in many contexts, such as * Use templates * Use `std::variant` @@ -13261,7 +13262,7 @@ The code determining whether to `join()` or `detach()` may be complicated and ev // ... should I join here? ... } -This seriously complicted lifetime analysis, and in not too unlikely cases make lifetime analysis impossible. +This seriously complicated lifetime analysis, and in not too unlikely cases make lifetime analysis impossible. This implies that we cannot safely refer to local objects in `use()` from the thread or refer to local objects in the thread from `use()`. ##### Note @@ -13275,7 +13276,7 @@ Because of old code and third party libraries using `std::thread` this rule can ##### Enforcement -Flag uses of 'std::thread': +Flag uses of `std::thread`: * Suggest use of `gsl::joining_thread`. * Suggest ["exporting ownership"](#Rconc-detached_thread) to an enclosing scope if it detaches. @@ -13286,7 +13287,7 @@ Flag uses of 'std::thread': ##### Reason Often, the need to outlive the scope of its creation is inherent in the `thread`s task, -but implementing that idea by `detach` makes it harder monitor and communicat with the detached thread. +but implementing that idea by `detach` makes it harder monitor and communicate with the detached thread. In particular, it is harder (though not impossible) to ensure that the thread completed as expected or lived for as long as expected. ##### Example @@ -13303,9 +13304,9 @@ In particular, it is harder (though not impossible) to ensure that the thread co This is a reasonable use of a thread, for which `detach()` is commonly used. There are problems, though. How do we monitor the detached thread to see if it is alive? -Something might go wrong with the heartbeat, and loosing a haertbeat can be very serious in a system for which it is needed. -So, we need to communicate with the haertbeat thread -(e.g., through a stream of messages or notification events using a `conrition_variable`). +Something might go wrong with the heartbeat, and loosing a heartbeat can be very serious in a system for which it is needed. +So, we need to communicate with the heartbeat thread +(e.g., through a stream of messages or notification events using a `condition_variable`). An alternative, and usually superior solution is to control its lifetime by placing it in a scope outside its point of creation (or activation). 
For example: @@ -13324,7 +13325,8 @@ Sometimes, we need to separate the point of creation from the point of ownership void use() { - tick_toc = make_unique(gsl::joining_thread,heartbeat); // heartbeat is meant to run as long as tick_tock lives + // heartbeat is meant to run as long as tick_tock lives + tick_toc = make_unique(gsl::joining_thread, heartbeat); // ... } @@ -19569,7 +19571,7 @@ for example, `Expects(p!=nullptr)` will become `[[expects: p!=nullptr]]`. * `narrow` // `narrow<T>(x)` is `static_cast<T>(x)` if `static_cast<T>(x) == x` or it throws `narrowing_error` * `[[implicit]]` // "Marker" to put on single-argument constructors to explicitly make them non-explicit. * `move_owner` // `p = move_owner(q)` means `p = q` but ??? -* `joining_thread` // a RAII style versin of `std::thread` that joins. +* `joining_thread` // a RAII style version of `std::thread` that joins. ## <a name="SS-gsl-concepts"></a>GSL.concept: Concepts diff --git a/scripts/hunspell/isocpp.dic b/scripts/hunspell/isocpp.dic index bbedd95c5..da01ce655 100644 --- a/scripts/hunspell/isocpp.dic +++ b/scripts/hunspell/isocpp.dic @@ -522,6 +522,7 @@ thread2 Tjark tmp TMP +tock TODO toolchains TotallyOrdered
Fixing things so that the script tests pass
https://api.github.com/repos/isocpp/CppCoreGuidelines/pulls/934
2017-05-23T06:16:11Z
2017-05-24T02:55:07Z
2017-05-24T02:55:07Z
2017-05-24T02:55:07Z
1,298
isocpp/CppCoreGuidelines
15,651
Improve no_command
diff --git a/tests/rules/test_no_command.py b/tests/rules/test_no_command.py index 559837881..0839213f6 100644 --- a/tests/rules/test_no_command.py +++ b/tests/rules/test_no_command.py @@ -9,16 +9,27 @@ def command_found(): return b'''No command 'aptget' found, did you mean: Command 'apt-get' from package 'apt' (main) + Command 'not-installed' from package 'derp' (main) + Command 'not-really-used' from package 'whatever' (main) aptget: command not found ''' [email protected] +def uninstalled_command_found(): + return b'''No command 'pish' found, did you mean: + Command 'vish' from package 'vish' (universe) + Command 'wish' from package 'tk' (main) + Command 'fish' from package 'fish' (universe) + Command 'pdsh' from package 'pdsh' (universe) +pish: command not found +''' + @pytest.fixture def command_not_found(): return b'''No command 'vom' found, but there are 19 similar ones vom: command not found ''' - @pytest.fixture def bins_exists(request): p = patch('thefuck.rules.no_command.which', @@ -26,6 +37,26 @@ def bins_exists(request): p.start() request.addfinalizer(p.stop) [email protected] +def bin_might_exist(request): + def side_effect(name): + return name in ['not-really-used', 'apt-get', '/usr/lib/command-not-found', 'test'] + p = patch('thefuck.rules.no_command.which', + side_effect = side_effect) + p.start() + request.addfinalizer(p.stop) + + [email protected] +def patch_history(request): + def side_effect(name): + return 2 if name == 'not-really-used' else 12 + p = patch('thefuck.rules.no_command._count_history_uses', + side_effect = side_effect) + p.start() + request.addfinalizer(p.stop) + + @pytest.fixture def settings(): @@ -34,12 +65,12 @@ class _Settings(object): return _Settings [email protected]('bins_exists') -def test_match(command_found, command_not_found, settings): [email protected]('bin_might_exist', 'patch_history') +def test_match(command_found, command_not_found, uninstalled_command_found, settings): with patch('thefuck.rules.no_command.Popen') as Popen: Popen.return_value.stderr.read.return_value = command_found assert match(Command('aptget install vim', '', ''), settings) - Popen.assert_called_once_with('/usr/lib/command-not-found aptget', + Popen.assert_called_with('/usr/lib/command-not-found aptget', shell=True, stderr=PIPE) Popen.return_value.stderr.read.return_value = command_not_found assert not match(Command('ls', '', ''), settings) @@ -48,11 +79,14 @@ def test_match(command_found, command_not_found, settings): Popen.return_value.stderr.read.return_value = command_found assert match(Command('sudo aptget install vim', '', ''), Mock(command_not_found='test')) - Popen.assert_called_once_with('test aptget', + Popen.assert_called_with('test aptget', shell=True, stderr=PIPE) + with patch('thefuck.rules.no_command.Popen') as Popen: + Popen.return_value.stderr.read.return_value = uninstalled_command_found + assert not match(Command('pish bla blah', '', ''), settings) [email protected]('bins_exists') [email protected]('bin_might_exist', 'patch_history') def test_get_new_command(command_found): with patch('thefuck.rules.no_command._get_output', return_value=command_found.decode()): diff --git a/thefuck/rules/no_command.py b/thefuck/rules/no_command.py index cf0583752..0b3f9037d 100644 --- a/thefuck/rules/no_command.py +++ b/thefuck/rules/no_command.py @@ -2,7 +2,6 @@ import re from thefuck.utils import which, wrap_settings - local_settings = {'command_not_found': '/usr/lib/command-not-found'} @@ -12,12 +11,28 @@ def _get_output(command, settings): result = 
Popen(check_script, shell=True, stderr=PIPE) return result.stderr.read().decode() +def _count_history_uses(name): + script = "history | egrep '\\b{}\\b' | wc -l".format(name) + result = Popen(script, shell=True, + stdout=PIPE) + return int(result.stdout.read()) + +def _get_candidate_commands(command, settings): + output = _get_output(command, settings) + if "No command" in output and "from package" in output: + fixed_names = re.findall(r"Command '([^']*)' from package", + output) + return [name for name in fixed_names if which(name)] + return [] + + @wrap_settings(local_settings) def match(command, settings): if which(settings.command_not_found): output = _get_output(command, settings) - return "No command" in output and "from package" in output + return len(_get_candidate_commands(command, settings)) != 0 + @wrap_settings(local_settings) @@ -25,6 +40,7 @@ def get_new_command(command, settings): output = _get_output(command, settings) broken_name = re.findall(r"No command '([^']*)' found", output)[0] - fixed_name = re.findall(r"Command '([^']*)' from package", - output)[0] - return command.script.replace(broken_name, fixed_name, 1) + candidates = _get_candidate_commands(command, settings) + fixed_name = sorted(candidates, key=_count_history_uses, reverse=True)[0] + return command.script.replace(broken_name, fixed_name) +
This PR fixes a couple of limitations of the no_command rule: `/usr/lib/command-not-found` suggests commands from uninstalled packages, so we filter out commands that aren't installed.

In many cases taking the first installed suggestion is inappropriate -- for example, if the broken command is `bish`, on my system two of the suggestions are installed: `wish` and `bash`. Which one did I mean? We can guess by counting the number of occurrences in my shell history:

```sh
➜ thefuck git:(improve-no-command) history | egrep '\bbash\b' | wc -l
35
➜ thefuck git:(improve-no-command) history | egrep '\bwish\b' | wc -l
3
```

which tells us that I probably want `bash` (I don't even know what `wish` is, it's just in my history because I `which`ed it and `grep`ed for it a couple of times).

I've updated the tests to check these cases and continue to work with the original cases.
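The selection rule in `get_new_command` boils down to a sort by usage count; a stubbed-out sketch (counts hard-coded from the `bish` example above):

```python
# Among installed suggestions, prefer the command that appears most often
# in shell history; counts here are stubbed from the example output.
history_counts = {"wish": 3, "bash": 35}
candidates = ["wish", "bash"]
fixed = sorted(candidates, key=history_counts.get, reverse=True)[0]
print(fixed)  # bash
```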
https://api.github.com/repos/nvbn/thefuck/pulls/36
2015-04-18T21:41:45Z
2015-04-19T12:51:39Z
2015-04-19T12:51:39Z
2015-04-20T20:31:39Z
1,364
nvbn/thefuck
30,655
Revert "move text encoder to GPU (#360)"
diff --git a/fooocus_version.py b/fooocus_version.py index 024361b73..e83c613ba 100644 --- a/fooocus_version.py +++ b/fooocus_version.py @@ -1 +1 @@ -version = '2.0.4' +version = '2.0.3' diff --git a/modules/patch.py b/modules/patch.py index 09ec6fc26..084749566 100644 --- a/modules/patch.py +++ b/modules/patch.py @@ -70,8 +70,7 @@ def sdxl_encode_adm_patched(self, **kwargs): def text_encoder_device_patched(): - # Fooocus's style system uses text encoder much more times than comfy so this makes things much faster. - return comfy.model_management.get_torch_device() + return torch.device("cpu") def patch_all():
This reverts commit 7700276b5013ff6a4ce9776bfe6b6919326654b0. This seems to influence result quality.
https://api.github.com/repos/lllyasviel/Fooocus/pulls/361
2023-09-13T11:55:07Z
2023-09-13T11:55:22Z
2023-09-13T11:55:22Z
2023-09-13T12:00:59Z
192
lllyasviel/Fooocus
7,215
Upgrade pinned Python dependencies
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 3fad8b28a1d85..638e07407f84e 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -11,7 +11,7 @@ repos: - id: ruff-format - repo: https://github.com/pre-commit/pre-commit-hooks - rev: v4.5.0 + rev: v4.6.0 hooks: - id: end-of-file-fixer - id: trailing-whitespace diff --git a/requirements-base-runtime.txt b/requirements-base-runtime.txt index a848b1d303b4a..0c655b14c3f0a 100644 --- a/requirements-base-runtime.txt +++ b/requirements-base-runtime.txt @@ -52,7 +52,7 @@ dnspython==2.6.1 # via localstack-core (pyproject.toml) docker==6.1.3 # via localstack-core (pyproject.toml) -flask==3.0.2 +flask==3.0.3 # via # localstack-core (pyproject.toml) # quart @@ -168,7 +168,7 @@ stevedore==5.2.0 # plux tailer==0.4.1 # via localstack-core (pyproject.toml) -typing-extensions==4.10.0 +typing-extensions==4.11.0 # via # localstack-twisted # readerwriterlock diff --git a/requirements-dev.txt b/requirements-dev.txt index c2d787a04ac1f..1976cfb3dce11 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ -37,9 +37,9 @@ aws-cdk-asset-kubectl-v20==2.1.2 # via aws-cdk-lib aws-cdk-asset-node-proxy-agent-v6==2.0.3 # via aws-cdk-lib -aws-cdk-lib==2.135.0 +aws-cdk-lib==2.136.0 # via localstack-core -aws-sam-translator==1.86.0 +aws-sam-translator==1.87.0 # via # cfn-lint # localstack-core @@ -92,7 +92,7 @@ cffi==1.16.0 # via cryptography cfgv==3.4.0 # via pre-commit -cfn-lint==0.86.1 +cfn-lint==0.86.2 # via moto-ext charset-normalizer==3.3.2 # via requests @@ -127,7 +127,7 @@ cython==3.0.10 # via localstack-core (pyproject.toml) decorator==5.1.1 # via jsonpath-rw -deepdiff==6.7.1 +deepdiff==7.0.1 # via # localstack-core # localstack-snapshot @@ -156,7 +156,7 @@ docutils==0.16 # via awscli filelock==3.13.3 # via virtualenv -flask==3.0.2 +flask==3.0.3 # via # localstack-core # quart @@ -218,7 +218,7 @@ joserfc==0.9.0 # via moto-ext jschema-to-python==1.2.3 # via cfn-lint -jsii==1.96.0 +jsii==1.97.0 # via # aws-cdk-asset-awscli-v1 # aws-cdk-asset-kubectl-v20 @@ -279,7 +279,7 @@ mpmath==1.3.0 # via sympy multipart==0.2.4 # via moto-ext -networkx==3.2.1 +networkx==3.3 # via # cfn-lint # localstack-core (pyproject.toml) @@ -490,7 +490,7 @@ typeguard==2.13.3 # aws-cdk-lib # constructs # jsii -typing-extensions==4.10.0 +typing-extensions==4.11.0 # via # aws-sam-translator # jsii diff --git a/requirements-runtime.txt b/requirements-runtime.txt index d8efd434ec59b..8630e86553e8c 100644 --- a/requirements-runtime.txt +++ b/requirements-runtime.txt @@ -27,7 +27,7 @@ attrs==23.2.0 # localstack-twisted # referencing # sarif-om -aws-sam-translator==1.86.0 +aws-sam-translator==1.87.0 # via # cfn-lint # localstack-core (pyproject.toml) @@ -73,7 +73,7 @@ certifi==2024.2.2 # requests cffi==1.16.0 # via cryptography -cfn-lint==0.86.1 +cfn-lint==0.86.2 # via moto-ext charset-normalizer==3.3.2 # via requests @@ -117,7 +117,7 @@ docker==6.1.3 # moto-ext docutils==0.16 # via awscli -flask==3.0.2 +flask==3.0.3 # via # localstack-core # quart @@ -216,7 +216,7 @@ mpmath==1.3.0 # via sympy multipart==0.2.4 # via moto-ext -networkx==3.2.1 +networkx==3.3 # via cfn-lint openapi-schema-validator==0.6.2 # via openapi-spec-validator @@ -363,7 +363,7 @@ tailer==0.4.1 # via # localstack-core # localstack-core (pyproject.toml) -typing-extensions==4.10.0 +typing-extensions==4.11.0 # via # aws-sam-translator # localstack-twisted diff --git a/requirements-test.txt b/requirements-test.txt index 3f605da692398..2048464648a87 100644 --- a/requirements-test.txt +++ 
b/requirements-test.txt @@ -37,9 +37,9 @@ aws-cdk-asset-kubectl-v20==2.1.2 # via aws-cdk-lib aws-cdk-asset-node-proxy-agent-v6==2.0.3 # via aws-cdk-lib -aws-cdk-lib==2.135.0 +aws-cdk-lib==2.136.0 # via localstack-core (pyproject.toml) -aws-sam-translator==1.86.0 +aws-sam-translator==1.87.0 # via # cfn-lint # localstack-core @@ -90,7 +90,7 @@ certifi==2024.2.2 # requests cffi==1.16.0 # via cryptography -cfn-lint==0.86.1 +cfn-lint==0.86.2 # via moto-ext charset-normalizer==3.3.2 # via requests @@ -119,7 +119,7 @@ cryptography==42.0.5 # pyopenssl decorator==5.1.1 # via jsonpath-rw -deepdiff==6.7.1 +deepdiff==7.0.1 # via # localstack-core (pyproject.toml) # localstack-snapshot @@ -142,7 +142,7 @@ docker==6.1.3 # moto-ext docutils==0.16 # via awscli -flask==3.0.2 +flask==3.0.3 # via # localstack-core # quart @@ -202,7 +202,7 @@ joserfc==0.9.0 # via moto-ext jschema-to-python==1.2.3 # via cfn-lint -jsii==1.96.0 +jsii==1.97.0 # via # aws-cdk-asset-awscli-v1 # aws-cdk-asset-kubectl-v20 @@ -263,7 +263,7 @@ mpmath==1.3.0 # via sympy multipart==0.2.4 # via moto-ext -networkx==3.2.1 +networkx==3.3 # via cfn-lint openapi-schema-validator==0.6.2 # via openapi-spec-validator @@ -453,7 +453,7 @@ typeguard==2.13.3 # aws-cdk-lib # constructs # jsii -typing-extensions==4.10.0 +typing-extensions==4.11.0 # via # aws-sam-translator # jsii diff --git a/requirements-typehint.txt b/requirements-typehint.txt index 9469165dc781d..898a39e022056 100644 --- a/requirements-typehint.txt +++ b/requirements-typehint.txt @@ -37,9 +37,9 @@ aws-cdk-asset-kubectl-v20==2.1.2 # via aws-cdk-lib aws-cdk-asset-node-proxy-agent-v6==2.0.3 # via aws-cdk-lib -aws-cdk-lib==2.135.0 +aws-cdk-lib==2.136.0 # via localstack-core -aws-sam-translator==1.86.0 +aws-sam-translator==1.87.0 # via # cfn-lint # localstack-core @@ -96,7 +96,7 @@ cffi==1.16.0 # via cryptography cfgv==3.4.0 # via pre-commit -cfn-lint==0.86.1 +cfn-lint==0.86.2 # via moto-ext charset-normalizer==3.3.2 # via requests @@ -131,7 +131,7 @@ cython==3.0.10 # via localstack-core decorator==5.1.1 # via jsonpath-rw -deepdiff==6.7.1 +deepdiff==7.0.1 # via # localstack-core # localstack-snapshot @@ -160,7 +160,7 @@ docutils==0.16 # via awscli filelock==3.13.3 # via virtualenv -flask==3.0.2 +flask==3.0.3 # via # localstack-core # quart @@ -222,7 +222,7 @@ joserfc==0.9.0 # via moto-ext jschema-to-python==1.2.3 # via cfn-lint -jsii==1.96.0 +jsii==1.97.0 # via # aws-cdk-asset-awscli-v1 # aws-cdk-asset-kubectl-v20 @@ -313,7 +313,7 @@ mypy-boto3-ce==1.34.71 # via boto3-stubs mypy-boto3-cloudcontrol==1.34.0 # via boto3-stubs -mypy-boto3-cloudformation==1.34.66 +mypy-boto3-cloudformation==1.34.77 # via boto3-stubs mypy-boto3-cloudfront==1.34.0 # via boto3-stubs @@ -329,13 +329,13 @@ mypy-boto3-cognito-idp==1.34.59 # via boto3-stubs mypy-boto3-dms==1.34.0 # via boto3-stubs -mypy-boto3-docdb==1.34.13 +mypy-boto3-docdb==1.34.77 # via boto3-stubs mypy-boto3-dynamodb==1.34.67 # via boto3-stubs mypy-boto3-dynamodbstreams==1.34.0 # via boto3-stubs -mypy-boto3-ec2==1.34.73 +mypy-boto3-ec2==1.34.78 # via boto3-stubs mypy-boto3-ecr==1.34.0 # via boto3-stubs @@ -391,7 +391,7 @@ mypy-boto3-kms==1.34.65 # via boto3-stubs mypy-boto3-lakeformation==1.34.7 # via boto3-stubs -mypy-boto3-lambda==1.34.58 +mypy-boto3-lambda==1.34.77 # via boto3-stubs mypy-boto3-logs==1.34.66 # via boto3-stubs @@ -427,7 +427,7 @@ mypy-boto3-redshift==1.34.57 # via boto3-stubs mypy-boto3-redshift-data==1.34.0 # via boto3-stubs -mypy-boto3-resource-groups==1.34.0 +mypy-boto3-resource-groups==1.34.79 # via boto3-stubs 
mypy-boto3-resourcegroupstaggingapi==1.34.0 # via boto3-stubs @@ -475,7 +475,7 @@ mypy-boto3-wafv2==1.34.58 # via boto3-stubs mypy-boto3-xray==1.34.0 # via boto3-stubs -networkx==3.2.1 +networkx==3.3 # via # cfn-lint # localstack-core @@ -690,7 +690,7 @@ types-awscrt==0.20.5 # via botocore-stubs types-s3transfer==0.10.0 # via boto3-stubs -typing-extensions==4.10.0 +typing-extensions==4.11.0 # via # aws-sam-translator # boto3-stubs
This PR upgrades all the pinned Python dependencies.
https://api.github.com/repos/localstack/localstack/pulls/10619
2024-04-09T05:02:30Z
2024-04-09T06:37:50Z
2024-04-09T06:37:50Z
2024-04-09T06:37:51Z
3,441
localstack/localstack
28,542
[3.8] bpo-38239: Fix test_gdb for Link Time Optimization (LTO) (GH-16422)
diff --git a/Lib/test/test_gdb.py b/Lib/test/test_gdb.py index e07d3273a4552d..e1060330550e34 100644 --- a/Lib/test/test_gdb.py +++ b/Lib/test/test_gdb.py @@ -255,8 +255,15 @@ def get_gdb_repr(self, source, # gdb can insert additional '\n' and space characters in various places # in its output, depending on the width of the terminal it's connected # to (using its "wrap_here" function) - m = re.match(r'.*#0\s+builtin_id\s+\(self\=.*,\s+v=\s*(.*?)?\)\s+at\s+\S*Python/bltinmodule.c.*', - gdb_output, re.DOTALL) + m = re.search( + # Match '#0 builtin_id(self=..., v=...)' + r'#0\s+builtin_id\s+\(self\=.*,\s+v=\s*(.*?)?\)' + # Match ' at Python/bltinmodule.c'. + # bpo-38239: builtin_id() is defined in Python/bltinmodule.c, + # but accept any "Directory\file.c" to support Link Time + # Optimization (LTO). + r'\s+at\s+\S*[A-Za-z]+/[A-Za-z0-9_-]+\.c', + gdb_output, re.DOTALL) if not m: self.fail('Unexpected gdb output: %r\n%s' % (gdb_output, gdb_output)) return m.group(1), gdb_output diff --git a/Misc/NEWS.d/next/Tests/2019-09-26-15-48-36.bpo-38239.MfoVzY.rst b/Misc/NEWS.d/next/Tests/2019-09-26-15-48-36.bpo-38239.MfoVzY.rst new file mode 100644 index 00000000000000..f79da29fa18288 --- /dev/null +++ b/Misc/NEWS.d/next/Tests/2019-09-26-15-48-36.bpo-38239.MfoVzY.rst @@ -0,0 +1 @@ +Fix test_gdb for Link Time Optimization (LTO) builds.
(cherry picked from commit 64b4a3a2deabcd4103fac2759a311fe94159b4d1) Co-authored-by: Victor Stinner <[email protected]> <!-- issue-number: [bpo-38239](https://bugs.python.org/issue38239) --> https://bugs.python.org/issue38239 <!-- /issue-number -->
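A small self-check of the relaxed pattern (the frame string below is made-up gdb output, not from the test suite):

```python
# The new pattern accepts any "Directory/file.c" location, since LTO can
# attribute builtin_id to a file other than Python/bltinmodule.c.
import re

pattern = (r'#0\s+builtin_id\s+\(self\=.*,\s+v=\s*(.*?)?\)'
           r'\s+at\s+\S*[A-Za-z]+/[A-Za-z0-9_-]+\.c')
frame = "#0 builtin_id (self=<module>, v=42) at Python/bltinmodule.c:1207"
match = re.search(pattern, frame, re.DOTALL)
print(match.group(1))  # 42
```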
https://api.github.com/repos/python/cpython/pulls/16424
2019-09-26T14:54:28Z
2019-09-26T15:13:40Z
2019-09-26T15:13:40Z
2019-09-26T15:34:07Z
523
python/cpython
4,468
Use new SensorDeviceClass enum in bmp280
diff --git a/homeassistant/components/bmp280/sensor.py b/homeassistant/components/bmp280/sensor.py index 21ab71e5ce63..3721bffaf682 100644 --- a/homeassistant/components/bmp280/sensor.py +++ b/homeassistant/components/bmp280/sensor.py @@ -8,9 +8,8 @@ import voluptuous as vol from homeassistant.components.sensor import ( - DEVICE_CLASS_PRESSURE, - DEVICE_CLASS_TEMPERATURE, PLATFORM_SCHEMA, + SensorDeviceClass, SensorEntity, ) from homeassistant.const import CONF_NAME, PRESSURE_HPA, TEMP_CELSIUS @@ -87,7 +86,7 @@ class Bmp280TemperatureSensor(Bmp280Sensor): def __init__(self, bmp280: Adafruit_BMP280_I2C, name: str) -> None: """Initialize the entity.""" super().__init__( - bmp280, f"{name} Temperature", TEMP_CELSIUS, DEVICE_CLASS_TEMPERATURE + bmp280, f"{name} Temperature", TEMP_CELSIUS, SensorDeviceClass.TEMPERATURE ) @Throttle(MIN_TIME_BETWEEN_UPDATES) @@ -112,7 +111,7 @@ class Bmp280PressureSensor(Bmp280Sensor): def __init__(self, bmp280: Adafruit_BMP280_I2C, name: str) -> None: """Initialize the entity.""" super().__init__( - bmp280, f"{name} Pressure", PRESSURE_HPA, DEVICE_CLASS_PRESSURE + bmp280, f"{name} Pressure", PRESSURE_HPA, SensorDeviceClass.PRESSURE ) @Throttle(MIN_TIME_BETWEEN_UPDATES)
## Proposed change

Migrate the `bmp280` sensor platform from the `DEVICE_CLASS_TEMPERATURE` / `DEVICE_CLASS_PRESSURE` constants to the new `SensorDeviceClass` enum.

## Type of change

- [x] Code quality improvements to existing code or addition of tests
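For context, the substitution pattern the diff applies (a sketch; assumes a Home Assistant dev environment and that the enum values match the old string constants):

```python
# SensorDeviceClass is a string-backed enum, so swapping it in for the
# old DEVICE_CLASS_* constants leaves the emitted device_class unchanged.
from homeassistant.components.sensor import SensorDeviceClass

assert SensorDeviceClass.TEMPERATURE == "temperature"
assert SensorDeviceClass.PRESSURE == "pressure"
```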
https://api.github.com/repos/home-assistant/core/pulls/61320
2021-12-09T07:10:06Z
2021-12-09T07:28:38Z
2021-12-09T07:28:38Z
2021-12-10T09:01:45Z
377
home-assistant/core
38,990
Typo in super() 77acc99
diff --git a/manimlib/mobject/coordinate_systems.py b/manimlib/mobject/coordinate_systems.py index 4d3fc235d5..442a36d745 100644 --- a/manimlib/mobject/coordinate_systems.py +++ b/manimlib/mobject/coordinate_systems.py @@ -284,7 +284,7 @@ class NumberPlane(Axes): } def __init__(self, **kwargs): - super.__init__(**kwargs) + super().__init__(**kwargs) self.init_background_lines() def init_background_lines(self):
Hello, I believe there was a typo in commit 77acc99. Changing this makes it possible to run the following example from Todd Zimmerman's tutorial.

```python
class SimpleField(Scene):
    CONFIG = {
        "plane_kwargs": {
            "color": RED
        },
    }

    def construct(self):
        plane = NumberPlane(**self.plane_kwargs)
        plane.add(plane.get_axis_labels())
        self.add(plane)

        points = [x * RIGHT + y * UP
                  for x in np.arange(-5, 5, 1)
                  for y in np.arange(-5, 5, 1)]

        vec_field = []
        for point in points:
            field = 0.5 * RIGHT + 0.5 * UP
            result = Vector(field).shift(point)
            vec_field.append(result)

        draw_field = VGroup(*vec_field)
        self.play(ShowCreation(draw_field))
```
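To see why the one-character change matters, a minimal illustration (plain Python, not manim code):

```python
# `super` without parentheses is the built-in type itself, not a bound
# proxy, so calling its __init__ with kwargs raises a TypeError.
class Base:
    def __init__(self, **kwargs):
        self.kwargs = kwargs

class Good(Base):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)  # OK: proxy bound to self

class Bad(Base):
    def __init__(self, **kwargs):
        super.__init__(**kwargs)    # TypeError at call time

Good(color="red")
Bad(color="red")  # raises TypeError
```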
https://api.github.com/repos/3b1b/manim/pulls/877
2020-02-09T10:45:00Z
2020-02-13T06:38:22Z
2020-02-13T06:38:22Z
2020-02-13T06:38:22Z
133
3b1b/manim
18,459
Update urllib3
diff --git a/AUTHORS.rst b/AUTHORS.rst index 31b2c2e297..62e01116eb 100644 --- a/AUTHORS.rst +++ b/AUTHORS.rst @@ -140,3 +140,4 @@ Patches and Suggestions - Matt Spitz @mattspitz - Vikram Oberoi @voberoi - Can Ibanoglu <[email protected]> @canibanoglu +- Thomas Weißschuh <[email protected]> @t-8ch diff --git a/requests/packages/urllib3/connectionpool.py b/requests/packages/urllib3/connectionpool.py index 1e5814357a..72011b5a33 100644 --- a/requests/packages/urllib3/connectionpool.py +++ b/requests/packages/urllib3/connectionpool.py @@ -461,6 +461,13 @@ def urlopen(self, method, url, body=None, headers=None, retries=3, conn = None + # Merge the proxy headers. Only do this in HTTP. We have to copy the + # headers dict so we can safely change it without those changes being + # reflected in anyone else's copy. + if self.scheme == 'http': + headers = headers.copy() + headers.update(self.proxy_headers) + try: # Request a connection from the queue conn = self._get_conn(timeout=pool_timeout) diff --git a/requests/packages/urllib3/contrib/pyopenssl.py b/requests/packages/urllib3/contrib/pyopenssl.py index 91bc2fa4f7..f78e71706e 100644 --- a/requests/packages/urllib3/contrib/pyopenssl.py +++ b/requests/packages/urllib3/contrib/pyopenssl.py @@ -29,7 +29,7 @@ import select from cStringIO import StringIO -from .. import connectionpool +from .. import connection from .. import util __all__ = ['inject_into_urllib3', 'extract_from_urllib3'] @@ -52,20 +52,20 @@ orig_util_HAS_SNI = util.HAS_SNI -orig_connectionpool_ssl_wrap_socket = connectionpool.ssl_wrap_socket +orig_connection_ssl_wrap_socket = connection.ssl_wrap_socket def inject_into_urllib3(): 'Monkey-patch urllib3 with PyOpenSSL-backed SSL-support.' - connectionpool.ssl_wrap_socket = ssl_wrap_socket + connection.ssl_wrap_socket = ssl_wrap_socket util.HAS_SNI = HAS_SNI def extract_from_urllib3(): 'Undo monkey-patching by :func:`inject_into_urllib3`.' - connectionpool.ssl_wrap_socket = orig_connectionpool_ssl_wrap_socket + connection.ssl_wrap_socket = orig_connection_ssl_wrap_socket util.HAS_SNI = orig_util_HAS_SNI diff --git a/requests/packages/urllib3/packages/ssl_match_hostname/__init__.py b/requests/packages/urllib3/packages/ssl_match_hostname/__init__.py index 2d61ac2139..3aa5b2e190 100644 --- a/requests/packages/urllib3/packages/ssl_match_hostname/__init__.py +++ b/requests/packages/urllib3/packages/ssl_match_hostname/__init__.py @@ -1,98 +1,13 @@ -"""The match_hostname() function from Python 3.2, essential when using SSL.""" - -import re - -__version__ = '3.2.2' - -class CertificateError(ValueError): - pass - -def _dnsname_match(dn, hostname, max_wildcards=1): - """Matching according to RFC 6125, section 6.4.3 - - http://tools.ietf.org/html/rfc6125#section-6.4.3 - """ - pats = [] - if not dn: - return False - - parts = dn.split(r'.') - leftmost = parts[0] - - wildcards = leftmost.count('*') - if wildcards > max_wildcards: - # Issue #17980: avoid denials of service by refusing more - # than one wildcard per fragment. A survery of established - # policy among SSL implementations showed it to be a - # reasonable choice. - raise CertificateError( - "too many wildcards in certificate DNS name: " + repr(dn)) - - # speed up common case w/o wildcards - if not wildcards: - return dn.lower() == hostname.lower() - - # RFC 6125, section 6.4.3, subitem 1. - # The client SHOULD NOT attempt to match a presented identifier in which - # the wildcard character comprises a label other than the left-most label. 
- if leftmost == '*': - # When '*' is a fragment by itself, it matches a non-empty dotless - # fragment. - pats.append('[^.]+') - elif leftmost.startswith('xn--') or hostname.startswith('xn--'): - # RFC 6125, section 6.4.3, subitem 3. - # The client SHOULD NOT attempt to match a presented identifier - # where the wildcard character is embedded within an A-label or - # U-label of an internationalized domain name. - pats.append(re.escape(leftmost)) - else: - # Otherwise, '*' matches any dotless string, e.g. www* - pats.append(re.escape(leftmost).replace(r'\*', '[^.]*')) - - # add the remaining fragments, ignore any wildcards - for frag in parts[1:]: - pats.append(re.escape(frag)) - - pat = re.compile(r'\A' + r'\.'.join(pats) + r'\Z', re.IGNORECASE) - return pat.match(hostname) - - -def match_hostname(cert, hostname): - """Verify that *cert* (in decoded format as returned by - SSLSocket.getpeercert()) matches the *hostname*. RFC 2818 and RFC 6125 - rules are followed, but IP addresses are not accepted for *hostname*. - - CertificateError is raised on failure. On success, the function - returns nothing. - """ - if not cert: - raise ValueError("empty or no certificate") - dnsnames = [] - san = cert.get('subjectAltName', ()) - for key, value in san: - if key == 'DNS': - if _dnsname_match(value, hostname): - return - dnsnames.append(value) - if not dnsnames: - # The subject is only checked when there is no dNSName entry - # in subjectAltName - for sub in cert.get('subject', ()): - for key, value in sub: - # XXX according to RFC 2818, the most specific Common Name - # must be used. - if key == 'commonName': - if _dnsname_match(value, hostname): - return - dnsnames.append(value) - if len(dnsnames) > 1: - raise CertificateError("hostname %r " - "doesn't match either of %s" - % (hostname, ', '.join(map(repr, dnsnames)))) - elif len(dnsnames) == 1: - raise CertificateError("hostname %r " - "doesn't match %r" - % (hostname, dnsnames[0])) - else: - raise CertificateError("no appropriate commonName or " - "subjectAltName fields were found") +try: + # Python 3.2+ + from ssl import CertificateError, match_hostname +except ImportError: + try: + # Backport of the function from a pypi module + from backports.ssl_match_hostname import CertificateError, match_hostname + except ImportError: + # Our vendored copy + from _implementation import CertificateError, match_hostname + +# Not needed, but documenting what we provide. +__all__ = ('CertificateError', 'match_hostname') diff --git a/requests/packages/urllib3/packages/ssl_match_hostname/_implementation.py b/requests/packages/urllib3/packages/ssl_match_hostname/_implementation.py new file mode 100644 index 0000000000..52f428733d --- /dev/null +++ b/requests/packages/urllib3/packages/ssl_match_hostname/_implementation.py @@ -0,0 +1,105 @@ +"""The match_hostname() function from Python 3.3.3, essential when using SSL.""" + +# Note: This file is under the PSF license as the code comes from the python +# stdlib. 
http://docs.python.org/3/license.html + +import re + +__version__ = '3.4.0.2' + +class CertificateError(ValueError): + pass + + +def _dnsname_match(dn, hostname, max_wildcards=1): + """Matching according to RFC 6125, section 6.4.3 + + http://tools.ietf.org/html/rfc6125#section-6.4.3 + """ + pats = [] + if not dn: + return False + + # Ported from python3-syntax: + # leftmost, *remainder = dn.split(r'.') + parts = dn.split(r'.') + leftmost = parts[0] + remainder = parts[1:] + + wildcards = leftmost.count('*') + if wildcards > max_wildcards: + # Issue #17980: avoid denials of service by refusing more + # than one wildcard per fragment. A survey of established + # policy among SSL implementations showed it to be a + # reasonable choice. + raise CertificateError( + "too many wildcards in certificate DNS name: " + repr(dn)) + + # speed up common case w/o wildcards + if not wildcards: + return dn.lower() == hostname.lower() + + # RFC 6125, section 6.4.3, subitem 1. + # The client SHOULD NOT attempt to match a presented identifier in which + # the wildcard character comprises a label other than the left-most label. + if leftmost == '*': + # When '*' is a fragment by itself, it matches a non-empty dotless + # fragment. + pats.append('[^.]+') + elif leftmost.startswith('xn--') or hostname.startswith('xn--'): + # RFC 6125, section 6.4.3, subitem 3. + # The client SHOULD NOT attempt to match a presented identifier + # where the wildcard character is embedded within an A-label or + # U-label of an internationalized domain name. + pats.append(re.escape(leftmost)) + else: + # Otherwise, '*' matches any dotless string, e.g. www* + pats.append(re.escape(leftmost).replace(r'\*', '[^.]*')) + + # add the remaining fragments, ignore any wildcards + for frag in remainder: + pats.append(re.escape(frag)) + + pat = re.compile(r'\A' + r'\.'.join(pats) + r'\Z', re.IGNORECASE) + return pat.match(hostname) + + +def match_hostname(cert, hostname): + """Verify that *cert* (in decoded format as returned by + SSLSocket.getpeercert()) matches the *hostname*. RFC 2818 and RFC 6125 + rules are followed, but IP addresses are not accepted for *hostname*. + + CertificateError is raised on failure. On success, the function + returns nothing. + """ + if not cert: + raise ValueError("empty or no certificate") + dnsnames = [] + san = cert.get('subjectAltName', ()) + for key, value in san: + if key == 'DNS': + if _dnsname_match(value, hostname): + return + dnsnames.append(value) + if not dnsnames: + # The subject is only checked when there is no dNSName entry + # in subjectAltName + for sub in cert.get('subject', ()): + for key, value in sub: + # XXX according to RFC 2818, the most specific Common Name + # must be used. 
+ if key == 'commonName': + if _dnsname_match(value, hostname): + return + dnsnames.append(value) + if len(dnsnames) > 1: + raise CertificateError("hostname %r " + "doesn't match either of %s" + % (hostname, ', '.join(map(repr, dnsnames)))) + elif len(dnsnames) == 1: + raise CertificateError("hostname %r " + "doesn't match %r" + % (hostname, dnsnames[0])) + else: + raise CertificateError("no appropriate commonName or " + "subjectAltName fields were found") diff --git a/requests/packages/urllib3/poolmanager.py b/requests/packages/urllib3/poolmanager.py index e7f8667ee6..c16519f883 100644 --- a/requests/packages/urllib3/poolmanager.py +++ b/requests/packages/urllib3/poolmanager.py @@ -245,12 +245,11 @@ def urlopen(self, method, url, redirect=True, **kw): u = parse_url(url) if u.scheme == "http": - # It's too late to set proxy headers on per-request basis for - # tunnelled HTTPS connections, should use - # constructor's proxy_headers instead. + # For proxied HTTPS requests, httplib sets the necessary headers + # on the CONNECT to the proxy. For HTTP, we'll definitely + # need to set 'Host' at the very least. kw['headers'] = self._set_proxy_headers(url, kw.get('headers', self.headers)) - kw['headers'].update(self.proxy_headers) return super(ProxyManager, self).urlopen(method, url, redirect, **kw) diff --git a/requests/packages/urllib3/response.py b/requests/packages/urllib3/response.py index 4efff5a13b..6a1fe1a77c 100644 --- a/requests/packages/urllib3/response.py +++ b/requests/packages/urllib3/response.py @@ -90,6 +90,7 @@ def __init__(self, body='', headers=None, status=0, version=0, reason=None, self._body = body if body and isinstance(body, basestring) else None self._fp = None self._original_response = original_response + self._fp_bytes_read = 0 self._pool = pool self._connection = connection @@ -129,6 +130,14 @@ def data(self): if self._fp: return self.read(cache_content=True) + def tell(self): + """ + Obtain the number of bytes pulled over the wire so far. May differ from + the amount of content returned by :meth:``HTTPResponse.read`` if bytes + are encoded on the wire (e.g, compressed). + """ + return self._fp_bytes_read + def read(self, amt=None, decode_content=None, cache_content=False): """ Similar to :meth:`httplib.HTTPResponse.read`, but with two additional @@ -183,6 +192,8 @@ def read(self, amt=None, decode_content=None, cache_content=False): self._fp.close() flush_decoder = True + self._fp_bytes_read += len(data) + try: if decode_content and self._decoder: data = self._decoder.decompress(data) diff --git a/requests/packages/urllib3/util.py b/requests/packages/urllib3/util.py index cf934d4246..46a0c48de1 100644 --- a/requests/packages/urllib3/util.py +++ b/requests/packages/urllib3/util.py @@ -426,7 +426,7 @@ def get_host(url): def make_headers(keep_alive=None, accept_encoding=None, user_agent=None, - basic_auth=None): + basic_auth=None, proxy_basic_auth=None): """ Shortcuts for generating request headers. @@ -447,6 +447,10 @@ def make_headers(keep_alive=None, accept_encoding=None, user_agent=None, Colon-separated username:password string for 'authorization: basic ...' auth header. + :param proxy_basic_auth: + Colon-separated username:password string for 'proxy-authorization: basic ...' + auth header. 
+ Example: :: >>> make_headers(keep_alive=True, user_agent="Batman/1.0") @@ -474,6 +478,10 @@ def make_headers(keep_alive=None, accept_encoding=None, user_agent=None, headers['authorization'] = 'Basic ' + \ b64encode(six.b(basic_auth)).decode('utf-8') + if proxy_basic_auth: + headers['proxy-authorization'] = 'Basic ' + \ + b64encode(six.b(proxy_basic_auth)).decode('utf-8') + return headers
... and use the opportunity to add myself to `AUTHORS.rst`
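For context, a minimal usage sketch of what the vendored helper provides — matching a certificate's declared names against a hostname per RFC 6125. This assumes a Python version that still exposes `ssl.match_hostname` (it was deprecated in 3.7 and removed in 3.12); on older interpreters the vendored `_implementation` module supplies the same two names:

```python
# Minimal usage sketch; the cert dict mirrors the decoded format
# returned by SSLSocket.getpeercert(), as in the docstring above.
from ssl import CertificateError, match_hostname

cert = {'subjectAltName': (('DNS', '*.example.com'),)}
match_hostname(cert, 'www.example.com')   # wildcard matches one label, returns None
try:
    match_hostname(cert, 'sub.deep.example.com')
except CertificateError as exc:
    print(exc)  # '*' spans only a single dotless label, so this fails
```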
https://api.github.com/repos/psf/requests/pulls/1726
2013-11-04T18:57:04Z
2013-11-04T21:55:58Z
2013-11-04T21:55:58Z
2021-09-08T23:06:15Z
3,871
psf/requests
32,884
[zattoo] Fix video information extraction (closes #17175)
diff --git a/youtube_dl/extractor/zattoo.py b/youtube_dl/extractor/zattoo.py index fb167c19855..9c9024799ad 100644 --- a/youtube_dl/extractor/zattoo.py +++ b/youtube_dl/extractor/zattoo.py @@ -93,28 +93,30 @@ def _extract_cid(self, video_id, channel_name): def _extract_cid_and_video_info(self, video_id): data = self._download_json( - '%s/zapi/program/details' % self._HOST_URL, + '%s/zapi/v2/cached/program/power_details/%s' % ( + self._HOST_URL, self._power_guide_hash), video_id, 'Downloading video information', query={ - 'program_id': video_id, - 'complete': True + 'program_ids': video_id, + 'complete': True, }) - p = data['program'] + p = data['programs'][0] cid = p['cid'] info_dict = { 'id': video_id, - 'title': p.get('title') or p['episode_title'], - 'description': p.get('description'), - 'thumbnail': p.get('image_url'), + 'title': p.get('t') or p['et'], + 'description': p.get('d'), + 'thumbnail': p.get('i_url'), 'creator': p.get('channel_name'), - 'episode': p.get('episode_title'), - 'episode_number': int_or_none(p.get('episode_number')), - 'season_number': int_or_none(p.get('season_number')), + 'episode': p.get('et'), + 'episode_number': int_or_none(p.get('e_no')), + 'season_number': int_or_none(p.get('s_no')), 'release_year': int_or_none(p.get('year')), - 'categories': try_get(p, lambda x: x['categories'], list), + 'categories': try_get(p, lambda x: x['c'], list), + 'tags': try_get(p, lambda x: x['g'], list) } return cid, info_dict
## Please follow the guide below - You will be asked some questions, please read them **carefully** and answer honestly - Put an `x` into all the boxes [ ] relevant to your *pull request* (like that [x]) - Use *Preview* tab to see how your *pull request* will actually look like --- ### Before submitting a *pull request* make sure you have: - [x] At least skimmed through [adding new extractor tutorial](https://github.com/rg3/youtube-dl#adding-support-for-a-new-site) and [youtube-dl coding conventions](https://github.com/rg3/youtube-dl#youtube-dl-coding-conventions) sections - [x] [Searched](https://github.com/rg3/youtube-dl/search?q=is%3Apr&type=Issues) the bugtracker for similar pull requests - [x] Checked the code with [flake8](https://pypi.python.org/pypi/flake8) ### In order to be accepted and merged into youtube-dl each piece of code must be in public domain or released under [Unlicense](http://unlicense.org/). Check one of the following options: - [x] I am the original author of this code and I am willing to release it under [Unlicense](http://unlicense.org/) - [ ] I am not the original author of this code but it is in public domain or released under [Unlicense](http://unlicense.org/) (provide reliable evidence) ### What is the purpose of your *pull request*? - [x] Bug fix - [ ] Improvement - [ ] New extractor - [ ] New feature --- ### Description of your *pull request* and other information Zattoo changed the way it serves a video's meta information (title, description, etc.), so the extractor stopped working (see #17175). This PR fixes the issue; a small sketch of the new compact key scheme follows below.
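A rough sketch (Python, with invented sample values; the abbreviated key names are taken from the diff above) of the translation the fixed extractor now performs on the new `power_details` response:

```python
# Sketch only: the new endpoint abbreviates field names ('t' = title,
# 'et' = episode title, 'd' = description, 'e_no'/'s_no' = episode and
# season numbers), so the extractor maps them back. Values are made up.
program = {'t': 'News', 'et': 'Evening edition', 'd': 'Daily news.', 'e_no': '12', 's_no': '3'}
info_dict = {
    'title': program.get('t') or program['et'],
    'description': program.get('d'),
    'episode': program.get('et'),
    'episode_number': int(program['e_no']) if program.get('e_no') else None,
    'season_number': int(program['s_no']) if program.get('s_no') else None,
}
print(info_dict)
```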
https://api.github.com/repos/ytdl-org/youtube-dl/pulls/17542
2018-09-11T14:43:17Z
2018-09-23T14:34:48Z
2018-09-23T14:34:48Z
2018-09-23T14:34:48Z
489
ytdl-org/youtube-dl
50,508
🌐 Add German translation for `docs/de/docs/reference/middleware.md`
diff --git a/docs/de/docs/reference/middleware.md b/docs/de/docs/reference/middleware.md new file mode 100644 index 0000000000000..d8d2d50fc911e --- /dev/null +++ b/docs/de/docs/reference/middleware.md @@ -0,0 +1,45 @@ +# Middleware + +Es gibt mehrere Middlewares, die direkt von Starlette bereitgestellt werden. + +Lesen Sie mehr darüber in der [FastAPI-Dokumentation über Middleware](../advanced/middleware.md). + +::: fastapi.middleware.cors.CORSMiddleware + +Kann von `fastapi` importiert werden: + +```python +from fastapi.middleware.cors import CORSMiddleware +``` + +::: fastapi.middleware.gzip.GZipMiddleware + +Kann von `fastapi` importiert werden: + +```python +from fastapi.middleware.gzip import GZipMiddleware +``` + +::: fastapi.middleware.httpsredirect.HTTPSRedirectMiddleware + +Kann von `fastapi` importiert werden: + +```python +from fastapi.middleware.httpsredirect import HTTPSRedirectMiddleware +``` + +::: fastapi.middleware.trustedhost.TrustedHostMiddleware + +Kann von `fastapi` importiert werden: + +```python +from fastapi.middleware.trustedhost import TrustedHostMiddleware +``` + +::: fastapi.middleware.wsgi.WSGIMiddleware + +Kann von `fastapi` importiert werden: + +```python +from fastapi.middleware.wsgi import WSGIMiddleware +```
← `reference/responses.md` (#10825) → `reference/openapi/*.md` (#10838) [German translation progress](https://github.com/tiangolo/fastapi/discussions/10582)
https://api.github.com/repos/tiangolo/fastapi/pulls/10837
2023-12-26T10:53:02Z
2024-03-30T18:15:40Z
2024-03-30T18:15:40Z
2024-04-01T01:14:33Z
353
tiangolo/fastapi
22,922
Updating Russian Translation
diff --git a/website/public/locales/ru/chat.json b/website/public/locales/ru/chat.json new file mode 100644 index 0000000000..f72d80495c --- /dev/null +++ b/website/public/locales/ru/chat.json @@ -0,0 +1,4 @@ +{ + "back_to_chat_list": "Вернуться к диалогам", + "your_chats": "Ваши диалоги" +} diff --git a/website/public/locales/ru/labelling.json b/website/public/locales/ru/labelling.json index 225774c739..2ac03379dc 100644 --- a/website/public/locales/ru/labelling.json +++ b/website/public/locales/ru/labelling.json @@ -7,7 +7,8 @@ "contains_pii": "Содержит ПД", "contains_pii.one_desc": "Содержит ПД (Персональные Данные)", "creative": "Творческое", - "creative.one_desc": "Содержит в себе и/или побуждает на творческий подход", + "creative.one_desc.line_1": "Насколько этот вопрос необычен? Ставит ли он перед Ассистентом проблему, требующую творческого мышления?", + "creative.one_desc.line_2": "Содержится ли в вопросе конкретный контекст? Со стороны Ассистента, является ли ответ прямолинейным, или в нем содержится необычный подход к проблеме?", "fails_task": "Не справляется с заданием", "fails_task.one_desc": "Не справляется с заданными инструкциями / заданием", "fails_task.question": "Плохой ли это ответ на изначальный запрос?", diff --git a/website/public/locales/ru/message.json b/website/public/locales/ru/message.json index 35585bc43f..b224608719 100644 --- a/website/public/locales/ru/message.json +++ b/website/public/locales/ru/message.json @@ -2,6 +2,7 @@ "confirm_open_link_body": "Вы действительно хотите перейти по этой ссылке?", "confirm_open_link_header": "Подтверждение перехода", "copy_message_id": "Скопировать \"message ID\"", + "copy_message_text": "Скопировать содержание сообщения", "copy_message_link": "Скопировать ссылку на сообщение", "label_action": "Классифицировать", "label_title": "Классифицировать", @@ -22,5 +23,7 @@ "submit_labels": "Отправить", "tree_stopped": "Ветка завершена — {{id}}", "view_user": "О пользователе", - "your_recent_messages": "Ваши Последние сообщения" + "your_recent_messages": "Ваши Последние сообщения", + "synthetic": "Сгенерировано ИИ", + "synthetic_explain": "Это сообщение было сгенерировано ИИ" } diff --git a/website/public/locales/ru/stats.json b/website/public/locales/ru/stats.json index ef44a3eaae..02817df1fe 100644 --- a/website/public/locales/ru/stats.json +++ b/website/public/locales/ru/stats.json @@ -1,6 +1,8 @@ { "aborted_low_grade": "Остановлено из-за низкой оценки", + "assistant": "Ассистент", "backlog_ranking": "Оцениваются (В резерве)", + "choose_a_language": "Выбрать язык", "count": "Количество", "growing": "В процессе", "halted_by_moderator": "Отклонено модератором", @@ -10,6 +12,7 @@ "message_trees_by_state": "Состояние ветвей сообщений", "message_trees_states_by_lang": "Состояние ветвей сообщений по языковому признаку", "prompt_lottery_waiting": "Запросов участвуют в голосовании", + "prompter": "Человек", "ranking": "Оцениваются", "ready_for_export": "Готовы для экспорта", "stats": "Статистика", diff --git a/website/public/locales/ru/tasks.json b/website/public/locales/ru/tasks.json index b464aadef1..d86ec6226b 100644 --- a/website/public/locales/ru/tasks.json +++ b/website/public/locales/ru/tasks.json @@ -88,5 +88,6 @@ "writing_wrong_langauge_a_b": "Язык вашего текста определён как: {{detected_lang}}, но он будет отображаться как: {{submit_lang}}.", "submitted_as": "Будет помечено как {{submit_lang}} язык", "tab_write": "Редактор (без форматирования)", - "tab_preview": "Предпросмотр (с форматированием)" + "tab_preview": "Предпросмотр 
(с форматированием)", + "not_rankable": "Все предоставленные ответы неверны и не могут быть оценены" }
Keeping up-to-date. New: ru/chat.json. Modified: ru/labelling.json, ru/message.json, ru/stats.json, ru/tasks.json.
https://api.github.com/repos/LAION-AI/Open-Assistant/pulls/2069
2023-03-14T19:25:35Z
2023-03-15T06:11:23Z
2023-03-15T06:11:23Z
2023-03-15T22:04:14Z
1,214
LAION-AI/Open-Assistant
37,480
Use g:pymode_python-defined interpreter if defined and exists, otherwise use existing defaults
diff --git a/plugin/black.vim b/plugin/black.vim index 0a26aa0eab..10f0cfe005 100644 --- a/plugin/black.vim +++ b/plugin/black.vim @@ -37,10 +37,17 @@ if !exists("g:black_skip_string_normalization") endif python3 << endpython3 +import os import sys import vim def _get_python_binary(exec_prefix): + try: + default = vim.eval("g:pymode_python").strip() + except vim.error: + default = "" + if default and os.path.exists(default): + return default if sys.platform[:3] == "win": return exec_prefix / 'python.exe' return exec_prefix / 'bin' / 'python3'
This is helpful when using custom-compiled interpreters or alternative Python interpreters in non-standard locations.
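To make the control flow concrete outside of vim, here is a standalone Python sketch of the selection logic; `configured` stands in for the value of `g:pymode_python`, and the sample path is hypothetical:

```python
import os

# Standalone sketch of the patched lookup: honor a user-configured
# interpreter when its path exists, otherwise keep the platform default.
def get_python_binary(exec_prefix, configured="", platform="linux"):
    default = configured.strip()
    if default and os.path.exists(default):  # the new g:pymode_python branch
        return default
    if platform[:3] == "win":
        return os.path.join(exec_prefix, "python.exe")
    return os.path.join(exec_prefix, "bin", "python3")

# Falls back to /usr/bin/python3 unless the configured path really exists:
print(get_python_binary("/usr", configured="/opt/custom/bin/python3"))
```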
https://api.github.com/repos/psf/black/pulls/666
2019-01-10T15:15:40Z
2019-05-07T17:28:57Z
2019-05-07T17:28:57Z
2019-05-07T17:29:06Z
175
psf/black
23,635
update_1
diff --git a/20_Day/20_python_package_manager.md b/20_Day/20_python_package_manager.md index df0d0aef..833a93b6 100644 --- a/20_Day/20_python_package_manager.md +++ b/20_Day/20_python_package_manager.md @@ -22,15 +22,15 @@ - [📘 Day 20](#%f0%9f%93%98-day-20) - [Python PIP - Python Package Manager](#python-pip---python-package-manager) - [What is PIP ?](#what-is-pip) - - [Installing pip](#installing-pip) - - [Installing packages using pip](#installing-packages-using-pip) - - [Uninstall packages](#uninstall-packages) - - [List of packages](#list-of-packages) - - [Show package](#show-package) - - [PIP freeze](#pip-freeze) + - [Installing PIP](#installing-pip) + - [Installing Packages Using PIP](#installing-packages-using-pip) + - [Uninstalling Packages](#uninstalling-packages) + - [List of Packages](#list-of-packages) + - [Show Package](#show-package) + - [PIP Freeze](#pip-freeze) - [Reading from URL](#reading-from-url) - - [Creating a package](#creating-a-package) - - [Further information about packages](#further-information-about-packages) + - [Creating a Package](#creating-a-package) + - [Further Information About Packages](#further-information-about-packages) - [Exercises: Day 20](#exercises-day-20) # 📘 Day 20 @@ -40,12 +40,12 @@ ### What is PIP ? PIP stands for Preferred installer program. We use _pip_ to install different python packages. -Package is a python module which can contain one or more modules or other packages. A module or modules which we can install to our application is a package. -In programming, we do not have to write every utility programs instead we install packages and import the package to our applications. +Package is a python module that can contain one or more modules or other packages. A module or modules that we can install to our application is a package. +In programming, we do not have to write every utility program, instead we install packages and import them to our applications. -### Installing pip +### Installing PIP -If you did not install pip, lets install pip. Go to your terminal or command prompt and copy and past this: +If you did not install pip, let us do it now. Go to your terminal or command prompt and copy and paste this: ```sh asabeneh@Asabeneh:~$ pip install pip @@ -62,13 +62,13 @@ asabeneh@Asabeneh:~$ pip --version pip 19.3.1 from /usr/local/lib/python3.7/site-packages/pip (python 3.7) ``` -As you can see, I am using pip version 19.3.1, if you see some number a bit below or above that mean you have pip installed. +As you can see, I am using pip version 19.3.1, if you see some number a bit below or above that, means you have pip installed. -Let's some of the package used in the python community for different purposes. Just to let you know that there are lots of package which are available for use with different applications. +Let's check some of the packages used in the python community for different purposes. Just to let you know that there are lots of packages available for use with different applications. ### Installing packages using pip -Let's try to install _numpy_, which is called a numeric python. It is one of the most popular package in machine learning and data science community. +Let's try to install _numpy_, called numeric python. It is one of the most popular packages in machine learning and data science community. - NumPy is the fundamental package for scientific computing with Python. 
It contains among other things: - a powerful N-dimensional array object @@ -103,7 +103,7 @@ array([3, 4, 5, 6, 7]) >>> ``` -Pandas is an open source, BSD-licensed library providing high-performance, easy-to-use data structures and data analysis tools for the Python programming language. Lets install big brother of numpy _pandas_ as we did for _numpy_. +Pandas is an open source, BSD-licensed library providing high-performance, easy-to-use data structures and data analysis tools for the Python programming language. Let's install the big brother of numpy, _pandas_: ```sh asabeneh@Asabeneh:~$ pip install pandas @@ -117,9 +117,9 @@ Type "help", "copyright", "credits" or "license" for more information. >>> import pandas ``` -This section is not about numpy nor pandas, here we are trying to learn how to install packages and how to import them. If it is needed we will talk about different packages in other sections. +This section is not about numpy nor pandas, here we are trying to learn how to install packages and how to import them. If it is needed, we will talk about different packages in other sections. -Let's import a web browser module, which can help us to open any website.You do not install this module, it is installed by default with python 3. For instance if you like to open any number of website at any time or if you like to schedule something this _webbrowser_ module can be use. +Let's import a web browser module, which can help us to open any website. We do not install this module, it is already installed by default with python 3. For instance if you like to open any number of websites at any time or if you like to schedule something, this _webbrowser_ module can be of use. ```py import webbrowser # web browser module to open websites @@ -137,7 +137,7 @@ for url in url_lists: webbrowser.open_new_tab(url) ``` -### Uninstall packages +### Uninstalling Packages If you do not like to keep the installed packages, you can remove them. @@ -145,15 +145,15 @@ If you do not like to keep the installed packages, you can remove them. pip uninstall packagename ``` -### List of packages +### List of Packages -To see the installed packages on our machine. We can use pip followed by lis. +To see the installed packages on our machine. We can use pip followed by list. ```sh pip list ``` -### Show package +### Show Package To show information about a package @@ -175,7 +175,7 @@ Requires: python-dateutil, pytz, numpy Required-by: ``` -If we want even more detail than the above, just add --verbose +If we want even more details, just add --verbose ```sh asabeneh@Asabeneh:~$ pip show --verbose pandas @@ -209,7 +209,7 @@ Entry-points: matplotlib = pandas:plotting._matplotlib ``` -### PIP freeze +### PIP Freeze Generate output suitable for a requirements file. @@ -222,24 +222,25 @@ Pygments==1.6 Sphinx==1.2.2 ``` -The pip freeze gave us the packages use installed and their version. We use with requirements.txt file for deployment. +The pip freeze gave us the packages used, installed and their version. We use it with requirements.txt file for deployment. ### Reading from URL -By now you are familiar with how to read or write on a file which is located in you local machine. Sometimes, we may like to read from a website using url or from an API. -API stands for Application Program Interface. It is a means to exchange structure data between servers primarily a json data. 
To open network, we need a package called _requests_ which allows to open network and to implement CRUD(create, read, update and delete) operation. In this section, we will cover only reading part of a CRUD. +By now you are familiar with how to read or write on a file located on you local machine. Sometimes, we would like to read from a website using url or from an API. +API stands for Application Program Interface. It is a means to exchange structured data between servers primarily as json data. To open a network connection, we need a package called _requests_ - it allows to open a network connection and to implement CRUD(create, read, update and delete) operations. In this section, we will cover only reading part of a CRUD. -Let's install _requests_ +Let's install _requests_: ```py asabeneh@Asabeneh:~$ pip install requests ``` -We will see _get_, _status_code_, _headers_, _text_ and _json_ methods from _requests_ module -_ get(): to open a network and fetch data from url and it returns a response object -_ status*code: After we fetched, we check the status(succes, error, etc) -* headers: To check the header types -\_ text: to extract the text from the fetched response object \* json: to extract json data +We will see _get_, _status_code_, _headers_, _text_ and _json_ methods in _requests_ module: + - _get()_: to open a network and fetch data from url - it returns a response object + - _status_code_: After we fetched data, we can check the status of the operation (succes, error, etc) + - _headers_: To check the header types + - _text_: to extract the text from the fetched response object + - _json_: to extract json data Let's read a txt file form this website, https://www.w3.org/TR/PNG/iso_8859-1.txt. ```py @@ -260,7 +261,7 @@ print(response.text) # gives all the text from the page {'date': 'Sun, 08 Dec 2019 18:00:31 GMT', 'last-modified': 'Fri, 07 Nov 2003 05:51:11 GMT', 'etag': '"17e9-3cb82080711c0;50c0b26855880-gzip"', 'accept-ranges': 'bytes', 'cache-control': 'max-age=31536000', 'expires': 'Mon, 07 Dec 2020 18:00:31 GMT', 'vary': 'Accept-Encoding', 'content-encoding': 'gzip', 'access-control-allow-origin': '*', 'content-length': '1616', 'content-type': 'text/plain', 'strict-transport-security': 'max-age=15552000; includeSubdomains; preload', 'content-security-policy': 'upgrade-insecure-requests'} ``` -- Lets read from an api. API stands for Application Program Interface. It is a means to exchange structure data between servers primarily a json data. An example of api:https://restcountries.eu/rest/v2/all. Let's read this API using _requests_ module. +- Let's read from an api. API stands for Application Program Interface. It is a means to exchange structure data between servers primarily a json data. An example of an api:https://restcountries.eu/rest/v2/all. Let's read this API using _requests_ module. ```py import requests @@ -326,9 +327,9 @@ print(countries[:1]) # we sliced only the first country, remove the slicing to We use _json()_ method from response object, if the we are fetching JSON data. For txt, html, xml and other file formats we can use _text_. -### Creating a package +### Creating a Package -We organize a large number of files in different folders and subfolders based on some criteria, so that we can find and manage them easily. As you know, a module can contain multiple objects, such as classes, functions, etc. A package can contain one or more relevant modules.A package is actually a folder containing one or more module files. 
Let's create a package named mypackage, using the following steps: +We organize a large number of files in different folders and subfolders based on some criteria, so that we can find and manage them easily. As you know, a module can contain multiple objects, such as classes, functions, etc. A package can contain one or more relevant modules. A package is actually a folder containing one or more module files. Let's create a package named mypackage, using the following steps: Create a new folder named mypacakge inside 30DaysOfPython folder Create an empty **init**.py file in the mypackage folder. @@ -406,10 +407,10 @@ Type "help", "copyright", "credits" or "license" for more information. >>> ``` -As you can see our package works perfect. The package folder contains a special file called **init**.py which stores the package's content. If we put **init**.py in the package folder, python start recognizes it as a package. +As you can see our package works perfectly. The package folder contains a special file called **init**.py - it stores the package's content. If we put **init**.py in the package folder, python start recognizes it as a package. The **init**.py exposes specified resources from its modules to be imported to other python files. An empty **init**.py file makes all functions available when a package is imported. The **init**.py is essential for the folder to be recognized by Python as a package. -### Further information about packages +### Further Information About Packages - Database - SQLAlchemy or SQLObject - Object oriented access to several different database systems @@ -443,10 +444,10 @@ The **init**.py exposes specified resources from its modules to be imported to o ## Exercises: Day 20 -1. Read this url and find out the 10 most frequent words.romeo_and_juliet = 'http://www.gutenberg.org/files/1112/1112.txt' -1. Read the cats api and cats_api = 'https://api.thecatapi.com/v1/breeds' and find the avarage weight of cat in metric unit. -1. Read the countries api and find out the 10 largest countries -1. UCI is one the most common place for get data set for data science and machine learning. Read the content of UCL(http://mlr.cs.umass.edu/ml/datasets.html). Without library it will be difficult, you may try it with BeautifulSoup4 +1. Read this url and find the 10 most frequent words. Romeo_and_juliet = 'http://www.gutenberg.org/files/1112/1112.txt' +2. Read the cats api and cats_api = 'https://api.thecatapi.com/v1/breeds' and find the avarage weight of a cat in metric units. +3. Read the countries api and find the 10 largest countries +4. UCI is one the most common places to get data sets for data science and machine learning. Read the content of UCL (http://mlr.cs.umass.edu/ml/datasets.html). Without additional libraries it will be difficult, so you may try it with BeautifulSoup4 🎉 CONGRATULATIONS ! 🎉
https://api.github.com/repos/Asabeneh/30-Days-Of-Python/pulls/41
2020-05-17T11:16:28Z
2020-05-17T13:27:39Z
2020-05-17T13:27:39Z
2020-05-17T13:27:39Z
3,382
Asabeneh/30-Days-Of-Python
26,846
Modified the formatting of question_gen_query
diff --git a/llama_index/llama_dataset/generator.py b/llama_index/llama_dataset/generator.py index 3d1fa4e91947d..1f424a803835d 100644 --- a/llama_index/llama_dataset/generator.py +++ b/llama_index/llama_dataset/generator.py @@ -72,11 +72,7 @@ def __init__( self.text_qa_template = text_qa_template or DEFAULT_TEXT_QA_PROMPT self.question_gen_query = ( question_gen_query - or f"You are a Teacher/Professor. Your task is to setup \ - {num_questions_per_chunk} questions for an upcoming \ - quiz/examination. The questions should be diverse in nature \ - across the document. Restrict the questions to the \ - context information provided." + or f"You are a Teacher/Professor. Your task is to setup {num_questions_per_chunk} questions for an upcoming quiz/examination. The questions should be diverse in nature across the document. Restrict the questions to the context information provided." ) self.nodes = nodes self._metadata_mode = metadata_mode
# Description This PR tries to correct the formatting issue of the default question_gen_query string. Without modification the Query Generation prompt looks like, ``` ...... Given the context information and not prior knowledge. generate only questions based on the below query. You are a Teacher/Professor. Your task is to setup 2 questions for an upcoming quiz/examination. The questions should be diverse in nature across the document. Restrict the questions to the context information provided. ``` The changes result in removing the unwanted tab spaces between the characters. ## Type of Change Please delete options that are not relevant. - [x] Bug fix (non-breaking change which fixes an issue) # How Has This Been Tested? Please describe the tests that you ran to verify your changes. Provide instructions so we can reproduce. Please also list any relevant details for your test configuration - [ ] Added new unit/integration tests - [ ] Added new notebook (that tests end-to-end) - [x] I stared at the code and made sure it makes sense # Suggested Checklist: - [x] I have performed a self-review of my own code - [ ] I have commented my code, particularly in hard-to-understand areas - [ ] I have made corresponding changes to the documentation - [ ] I have added Google Colab support for the newly added notebooks. - [ ] My changes generate no new warnings - [ ] I have added tests that prove my fix is effective or that my feature works - [ ] New and existing unit tests pass locally with my changes - [ ] I ran `make format; make lint` to appease the lint gods
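For anyone unsure why the original string rendered with runs of spaces, a minimal standalone reproduction of the f-string behavior described above: a backslash continuation keeps the next source line's indentation inside the string literal.

```python
num_questions = 2

# The continuation backslash splices the next line into the literal,
# indentation included, which is what produced the garbled prompt.
broken = f"Your task is to setup \
            {num_questions} questions for an upcoming quiz."
fixed = f"Your task is to setup {num_questions} questions for an upcoming quiz."
print(repr(broken))  # note the embedded run of spaces before '2'
print(repr(fixed))
```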
https://api.github.com/repos/run-llama/llama_index/pulls/10308
2024-01-27T08:14:39Z
2024-01-27T16:27:03Z
2024-01-27T16:27:03Z
2024-01-27T16:27:03Z
256
run-llama/llama_index
6,233
ci: Update macos runner
diff --git a/.github/workflows/tests-macos.yml b/.github/workflows/tests-macos.yml index fdb9f498008..61f1857f88c 100644 --- a/.github/workflows/tests-macos.yml +++ b/.github/workflows/tests-macos.yml @@ -3,7 +3,7 @@ on: [push, pull_request] jobs: tests: - runs-on: macos-10.15 + runs-on: macos-11 strategy: fail-fast: false matrix:
The GitHub Actions macos-10.15 runner image is now deprecated, and GitHub Actions has begun to temporarily fail jobs referencing it during brownout periods. The image will be fully unsupported by 2022-12-01, which is just about a month away. This change updates the macOS runner image to the latest generally-available version, to help reduce spurious CI failures during the brownout periods, and to stay abreast of the sunsetting of the macos-10.15 image. See also: actions/runner-images#5583
https://api.github.com/repos/scrapy/scrapy/pulls/5695
2022-10-27T01:47:13Z
2022-10-27T07:45:43Z
2022-10-27T07:45:43Z
2022-10-27T14:33:22Z
122
scrapy/scrapy
35,057
Fix in docs for error introduced in #1218
diff --git a/docs/topics/link-extractors.rst b/docs/topics/link-extractors.rst index f9b25ae6383..8253e76507b 100644 --- a/docs/topics/link-extractors.rst +++ b/docs/topics/link-extractors.rst @@ -79,7 +79,7 @@ LxmlLinkExtractor extensions that should be ignored when extracting links. If not given, it will default to the ``IGNORED_EXTENSIONS`` list defined in the - `scrapy.linkextractors`_ module. + `scrapy.linkextractors`_ package. :type deny_extensions: list :param restrict_xpaths: is an XPath (or list of XPath's) which defines
https://api.github.com/repos/scrapy/scrapy/pulls/1233
2015-05-14T23:12:15Z
2015-05-14T23:31:39Z
2015-05-14T23:31:39Z
2015-05-14T23:48:40Z
163
scrapy/scrapy
34,725
[extractor/opencast] Add ltitools interface to OpencastPlaylistIE and fix Tests
diff --git a/yt_dlp/extractor/opencast.py b/yt_dlp/extractor/opencast.py index fa46757f7b6..235ca341c48 100644 --- a/yt_dlp/extractor/opencast.py +++ b/yt_dlp/extractor/opencast.py @@ -105,10 +105,9 @@ def _parse_mediapackage(self, video): class OpencastIE(OpencastBaseIE): - _VALID_URL = r'''(?x) - https?://(?P<host>%s)/paella/ui/watch.html\?.*? - id=(?P<id>%s) - ''' % (OpencastBaseIE._INSTANCES_RE, OpencastBaseIE._UUID_RE) + _VALID_URL = rf'''(?x) + https?://(?P<host>{OpencastBaseIE._INSTANCES_RE})/paella/ui/watch\.html\? + (?:[^#]+&)?id=(?P<id>{OpencastBaseIE._UUID_RE})''' _API_BASE = 'https://%s/search/episode.json?id=%s' @@ -123,6 +122,9 @@ class OpencastIE(OpencastBaseIE): 'thumbnail': r're:^https?://.*\.jpg$', 'timestamp': 1606208400, 'upload_date': '20201124', + 'season_id': 'cf68a4a1-36b1-4a53-a6ba-61af5705a0d0', + 'series': 'Kryptographie - WiSe 15/16', + 'creator': 'Alexander May', }, } ] @@ -134,10 +136,11 @@ def _real_extract(self, url): class OpencastPlaylistIE(OpencastBaseIE): - _VALID_URL = r'''(?x) - https?://(?P<host>%s)/engage/ui/index.html\?.*? - epFrom=(?P<id>%s) - ''' % (OpencastBaseIE._INSTANCES_RE, OpencastBaseIE._UUID_RE) + _VALID_URL = rf'''(?x) + https?://(?P<host>{OpencastBaseIE._INSTANCES_RE})(?: + /engage/ui/index\.html\?(?:[^#]+&)?epFrom=| + /ltitools/index\.html\?(?:[^#]+&)?series= + )(?P<id>{OpencastBaseIE._UUID_RE})''' _API_BASE = 'https://%s/search/episode.json?sid=%s' @@ -148,15 +151,23 @@ class OpencastPlaylistIE(OpencastBaseIE): 'id': 'cf68a4a1-36b1-4a53-a6ba-61af5705a0d0', 'title': 'Kryptographie - WiSe 15/16', }, - 'playlist_mincount': 28, + 'playlist_mincount': 29, }, { - 'url': 'https://oc-video.ruhr-uni-bochum.de/engage/ui/index.html?e=1&p=1&epFrom=b1a54262-3684-403f-9731-8e77c3766f9a', + 'url': 'https://oc-video1.ruhr-uni-bochum.de/ltitools/index.html?subtool=series&series=cf68a4a1-36b1-4a53-a6ba-61af5705a0d0&lng=de', 'info_dict': { - 'id': 'b1a54262-3684-403f-9731-8e77c3766f9a', - 'title': 'inSTUDIES-Social movements and prefigurative politics in a global perspective', + 'id': 'cf68a4a1-36b1-4a53-a6ba-61af5705a0d0', + 'title': 'Kryptographie - WiSe 15/16', + }, + 'playlist_mincount': 29, + }, + { + 'url': 'https://electures.uni-muenster.de/engage/ui/index.html?e=1&p=1&epFrom=39391d10-a711-4d23-b21d-afd2ed7d758c', + 'info_dict': { + 'id': '39391d10-a711-4d23-b21d-afd2ed7d758c', + 'title': '021670 Theologische Themen bei Hans Blumenberg WiSe 2017/18', }, - 'playlist_mincount': 6, + 'playlist_mincount': 13, }, ]
### Description I'm the original author of the Opencast Extractor (bwildenhain then took care of it further so that it would be merged into yt-dlp. Thanks again for that), and I want to make an update to the extractor. Opencast now has a new "interface" to access playlists, and it is now used more often than the old one. For that reason I extended the valid URLs of the OpencastPlaylist Extractor (see the sketch after the checklist below). Also, I fixed the tests: oc-video.ruhr-uni-bochum.de seems to be down currently, so I replaced it with another example. I also added the missing keys to the first test. ### Before submitting a *pull request* make sure you have: - [x] At least skimmed through [contributing guidelines](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#developer-instructions) including [yt-dlp coding conventions](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#yt-dlp-coding-conventions) - [x] [Searched](https://github.com/yt-dlp/yt-dlp/search?q=is%3Apr&type=Issues) the bugtracker for similar pull requests - [x] Checked the code with [flake8](https://pypi.python.org/pypi/flake8) and [ran relevant tests](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#developer-instructions) ### In order to be accepted and merged into yt-dlp each piece of code must be in public domain or released under [Unlicense](http://unlicense.org/). Check all of the following options that apply: - [x] I am the original author of this code and I am willing to release it under [Unlicense](http://unlicense.org/) - [ ] I am not the original author of this code but it is in public domain or released under [Unlicense](http://unlicense.org/) (provide reliable evidence) ### What is the purpose of your *pull request*? - [x] Fix or improvement to an extractor (Make sure to add/update tests) - [ ] New extractor ([Piracy websites will not be accepted](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#is-the-website-primarily-used-for-piracy)) - [ ] Core bug fix/improvement - [ ] New feature (It is strongly [recommended to open an issue first](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#adding-new-feature-or-making-overarching-changes))
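As a rough sanity check (the instance list is simplified to a generic host match, so this is a sketch rather than the extractor's exact pattern), the extended alternation accepts both playlist URL shapes from the tests:

```python
import re

# Simplified version of the new OpencastPlaylistIE._VALID_URL alternation.
UUID_RE = r'[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}'
VALID_URL = re.compile(rf'''(?x)
    https?://(?P<host>[^/]+)(?:
        /engage/ui/index\.html\?(?:[^#]+&)?epFrom=|
        /ltitools/index\.html\?(?:[^#]+&)?series=
    )(?P<id>{UUID_RE})''')

for url in (
    'https://electures.uni-muenster.de/engage/ui/index.html?e=1&p=1&epFrom=39391d10-a711-4d23-b21d-afd2ed7d758c',
    'https://oc-video1.ruhr-uni-bochum.de/ltitools/index.html?subtool=series&series=cf68a4a1-36b1-4a53-a6ba-61af5705a0d0&lng=de',
):
    print(VALID_URL.match(url).group('id'))  # both shapes yield the series id
```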
https://api.github.com/repos/yt-dlp/yt-dlp/pulls/6371
2023-02-27T16:27:09Z
2023-03-09T16:21:40Z
2023-03-09T16:21:40Z
2023-03-09T16:21:40Z
1,081
yt-dlp/yt-dlp
7,535
add profiling to Debugging Tools section
diff --git a/README.md b/README.md index e64479727..759258359 100644 --- a/README.md +++ b/README.md @@ -844,6 +844,7 @@ A curated list of awesome Python frameworks, libraries and software. Inspired by * [pyringe](https://github.com/google/pyringe) - Debugger capable of attaching to and injecting code into Python processes. * [python-statsd](https://github.com/WoLpH/python-statsd) - Python Client for the [statsd](https://github.com/etsy/statsd/) server. * [memory_profiler](https://github.com/fabianp/memory_profiler) - Monitor Memory usage of Python code. +* [profiling](https://github.com/what-studio/profiling) - An interactive Python profiler. * [django-debug-toolbar](https://github.com/django-debug-toolbar/django-debug-toolbar) - Display various debug information about the current request/response. * [django-devserver](https://github.com/dcramer/django-devserver) - A drop-in replacement for Django's runserver.
https://api.github.com/repos/vinta/awesome-python/pulls/234
2014-10-09T08:36:26Z
2014-10-09T08:46:48Z
2014-10-09T08:46:47Z
2014-10-09T08:46:48Z
237
vinta/awesome-python
27,317
Allow unified_strdate to handle "*3rd" in date strings.
diff --git a/test/test_utils.py b/test/test_utils.py index 3920542bb43..0db37d9d88e 100644 --- a/test/test_utils.py +++ b/test/test_utils.py @@ -340,6 +340,8 @@ def test_unified_dates(self): self.assertEqual(unified_strdate('July 15th, 2013'), '20130715') self.assertEqual(unified_strdate('September 1st, 2013'), '20130901') self.assertEqual(unified_strdate('Sep 2nd, 2013'), '20130902') + self.assertEqual(unified_strdate('November 3rd, 2019'), '20191103') + self.assertEqual(unified_strdate('October 23rd, 2005'), '20051023') def test_unified_timestamps(self): self.assertEqual(unified_timestamp('December 21, 2010'), 1292889600) diff --git a/youtube_dl/utils.py b/youtube_dl/utils.py index aed988b884b..0d30075aa1d 100644 --- a/youtube_dl/utils.py +++ b/youtube_dl/utils.py @@ -1718,13 +1718,16 @@ def random_user_agent(): '%B %d %Y', '%B %dst %Y', '%B %dnd %Y', + '%B %drd %Y', '%B %dth %Y', '%b %d %Y', '%b %dst %Y', '%b %dnd %Y', + '%b %drd %Y', '%b %dth %Y', '%b %dst %Y %I:%M', '%b %dnd %Y %I:%M', + '%b %drd %Y %I:%M', '%b %dth %Y %I:%M', '%Y %m %d', '%Y-%m-%d',
## Please follow the guide below - You will be asked some questions, please read them **carefully** and answer honestly - Put an `x` into all the boxes [ ] relevant to your *pull request* (like that [x]) - Use *Preview* tab to see how your *pull request* will actually look like --- ### Before submitting a *pull request* make sure you have: - [x] At least skimmed through [adding new extractor tutorial](https://github.com/ytdl-org/youtube-dl#adding-support-for-a-new-site) and [youtube-dl coding conventions](https://github.com/ytdl-org/youtube-dl#youtube-dl-coding-conventions) sections - [x] [Searched](https://github.com/ytdl-org/youtube-dl/search?q=is%3Apr&type=Issues) the bugtracker for similar pull requests - [x] Checked the code with [flake8](https://pypi.python.org/pypi/flake8) ### In order to be accepted and merged into youtube-dl each piece of code must be in public domain or released under [Unlicense](http://unlicense.org/). Check one of the following options: - [x] I am the original author of this code and I am willing to release it under [Unlicense](http://unlicense.org/) - [ ] I am not the original author of this code but it is in public domain or released under [Unlicense](http://unlicense.org/) (provide reliable evidence) ### What is the purpose of your *pull request*? - [x] Bug fix - [ ] Improvement - [ ] New extractor - [ ] New feature --- ### Description of your *pull request* and other information unified_strdate was missing "*rd" for "3rd" and "23rd". It did have "*st", "*nd", and "*th". Fixes #23197
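The fix comes down to `strptime` format expressions: `%d` consumes the day digits and the literal `rd` then matches the ordinal suffix (if memory serves, `unified_strdate` normalizes commas to spaces before trying each expression, so the expressions themselves carry no comma). A minimal check:

```python
from datetime import datetime

# Minimal check of the parsing behavior the new '%drd' entries rely on.
for date_str in ('November 3rd 2019', 'October 23rd 2005'):
    print(datetime.strptime(date_str, '%B %drd %Y').strftime('%Y%m%d'))
# -> 20191103 and 20051023, matching the new test cases
```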
https://api.github.com/repos/ytdl-org/youtube-dl/pulls/23199
2019-11-25T00:22:11Z
2019-11-26T17:08:38Z
2019-11-26T17:08:38Z
2020-08-26T01:48:37Z
433
ytdl-org/youtube-dl
50,552
Fix documentation typo.
diff --git a/requests/adapters.py b/requests/adapters.py index 0adca690cd..77badc7fa3 100644 --- a/requests/adapters.py +++ b/requests/adapters.py @@ -289,7 +289,7 @@ def send(self, request, stream=False, timeout=None, verify=True, cert=None, prox :param stream: (optional) Whether to stream the request content. :param timeout: (optional) The timeout on the request. :param verify: (optional) Whether to verify SSL certificates. - :param vert: (optional) Any user-provided SSL certificate to be trusted. + :param cert: (optional) Any user-provided SSL certificate to be trusted. :param proxies: (optional) The proxies dictionary to apply to the request. """
Fairly sure that 'vert' should be 'cert' :)
https://api.github.com/repos/psf/requests/pulls/1722
2013-11-04T01:59:41Z
2013-11-04T02:16:22Z
2013-11-04T02:16:22Z
2021-09-08T22:01:02Z
182
psf/requests
32,420
Add MYSQL Wide byte injection
diff --git a/SQL Injection/MySQL Injection.md b/SQL Injection/MySQL Injection.md index 5d19b43350..54a352e433 100644 --- a/SQL Injection/MySQL Injection.md +++ b/SQL Injection/MySQL Injection.md @@ -22,6 +22,7 @@ * [Using SLEEP in a subselect](#using-sleep-in-a-subselect) * [Using conditional statements](#using-conditional-statements) * [MYSQL DIOS - Dump in One Shot](#mysql-dios---dump-in-one-shot) +* [MYSQL Wide byte injection](#mysql-wide-byte-injection) * [MYSQL Current queries](#mysql-current-queries) * [MYSQL Read content of a file](#mysql-read-content-of-a-file) * [MYSQL Write a shell](#mysql-write-a-shell) @@ -438,6 +439,36 @@ make_set(6,@:=0x0a,(select(1)from(information_schema.columns)where@:=make_set(51 (select(@a)from(select(@a:=0x00),(select(@a)from(information_schema.columns)where(table_schema!=0x696e666f726d6174696f6e5f736368656d61)and(@a)in(@a:=concat(@a,table_name,0x203a3a20,column_name,0x3c62723e))))a) ``` + +## MYSQL Wide byte injection + +Wide byte injection works only when mysql encoding is set to gbk, a small php example: + +```php +function check_addslashes($string) +{ + $string = preg_replace('/'. preg_quote('\\') .'/', "\\\\\\", $string); //escape any backslash + $string = preg_replace('/\'/i', '\\\'', $string); //escape single quote with a backslash + $string = preg_replace('/\"/', "\\\"", $string); //escape double quote with a backslash + + return $string; +} + +$id=check_addslashes($_GET['id']); +mysql_query("SET NAMES gbk"); +$sql="SELECT * FROM users WHERE id='$id' LIMIT 0,1"; +print_r(mysql_error()); +``` + +PHP will check quote and add backslash, like translates `'` into `\'`. + +When input: `?id=1'` --> PHP add backslash --> `SELECT * FROM users WHERE id='1\'' LIMIT 0,1` --> not working. + +But if add `%df`: `?id=1%df'` --> PHP add backslash --> `SELECT * FROM users WHERE id='1%df\'' LIMIT 0,1` --> ( `\` : `%5c`, `%df%5c` : `連` ) --> `SELECT * FROM users WHERE id='1連'' LIMIT 0,1` --> can escape `'`. + +So, it can be: `?id=1%df' and 1=1 --+` --> PHP add backslash--> `SELECT * FROM users WHERE id='1連' and 1=1 --+' LIMIT 0,1`, it can be inject. + + ## MYSQL Current queries This table can list all operations that DB is performing at the moment.
Add MYSQL Wide byte injection; it can be tested in Sqli-labs Less-32. Writeup (Chinese): https://www.cnblogs.com/zhengna/p/12660680.html See: https://github.com/Audi-1/sqli-labs/blob/master/Less-32/index.php
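For a quick standalone illustration of the byte-level trick (in Python rather than PHP): the `%df` lead byte pairs with the `0x5c` backslash that `addslashes` inserts, forming a single two-byte GBK character and leaving the quote unescaped:

```python
# What the server sees for input ?id=1%df' after addslashes:
# 0x31 0xdf 0x5c 0x27. Decoded as GBK, 0xdf 0x5c collapses into one
# CJK character, so the escaping backslash disappears and a bare
# single quote survives.
escaped = b"1\xdf\x5c\x27"
print(escaped.decode('gbk'))  # '1', one CJK character, then a bare '
```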
https://api.github.com/repos/swisskyrepo/PayloadsAllTheThings/pulls/672
2023-09-13T15:35:18Z
2023-09-14T08:25:13Z
2023-09-14T08:25:13Z
2023-09-14T08:25:13Z
715
swisskyrepo/PayloadsAllTheThings
8,385
[radiko] Fix extractor
diff --git a/yt_dlp/extractor/radiko.py b/yt_dlp/extractor/radiko.py index 651cfe63b1e..dbb74871509 100644 --- a/yt_dlp/extractor/radiko.py +++ b/yt_dlp/extractor/radiko.py @@ -1,26 +1,22 @@ -import re import base64 -import calendar -import datetime +import re +import urllib.parse from .common import InfoExtractor from ..utils import ( ExtractorError, - update_url_query, clean_html, + time_seconds, + try_call, unified_timestamp, + update_url_query, ) -from ..compat import compat_urllib_parse class RadikoBaseIE(InfoExtractor): _FULL_KEY = None def _auth_client(self): - auth_cache = self._downloader.cache.load('radiko', 'auth_data') - if auth_cache: - return auth_cache - _, auth1_handle = self._download_webpage_handle( 'https://radiko.jp/v2/api/auth1', None, 'Downloading authentication page', headers={ @@ -89,8 +85,8 @@ def _find_program(self, video_id, station, cursor): def _extract_formats(self, video_id, station, is_onair, ft, cursor, auth_token, area_id, query): m3u8_playlist_data = self._download_xml( - 'https://radiko.jp/v3/station/stream/pc_html5/%s.xml' % station, video_id, - note='Downloading m3u8 information') + f'https://radiko.jp/v3/station/stream/pc_html5/{station}.xml', video_id, + note='Downloading stream information') m3u8_urls = m3u8_playlist_data.findall('.//url') formats = [] @@ -102,7 +98,7 @@ def _extract_formats(self, video_id, station, is_onair, ft, cursor, auth_token, 'station_id': station, **query, 'l': '15', - 'lsid': '77d0678df93a1034659c14d6fc89f018', + 'lsid': '88ecea37e968c1f17d5413312d9f8003', 'type': 'b', }) if playlist_url in found: @@ -112,16 +108,17 @@ def _extract_formats(self, video_id, station, is_onair, ft, cursor, auth_token, time_to_skip = None if is_onair else cursor - ft + domain = urllib.parse.urlparse(playlist_url).netloc subformats = self._extract_m3u8_formats( playlist_url, video_id, ext='m4a', - live=True, fatal=False, m3u8_id=None, + live=True, fatal=False, m3u8_id=domain, + note=f'Downloading m3u8 information from {domain}', headers={ 'X-Radiko-AreaId': area_id, 'X-Radiko-AuthToken': auth_token, }) for sf in subformats: - domain = sf['format_id'] = compat_urllib_parse.urlparse(sf['url']).netloc - if re.match(r'^[cf]-radiko\.smartstream\.ne\.jp$', domain): + if re.fullmatch(r'[cf]-radiko\.smartstream\.ne\.jp', domain): # Prioritize live radio vs playback based on extractor sf['preference'] = 100 if is_onair else -100 if not is_onair and url_attrib['timefree'] == '1' and time_to_skip: @@ -151,31 +148,29 @@ class RadikoIE(RadikoBaseIE): def _real_extract(self, url): station, video_id = self._match_valid_url(url).groups() vid_int = unified_timestamp(video_id, False) - - auth_token, area_id = self._auth_client() - prog, station_program, ft, radio_begin, radio_end = self._find_program(video_id, station, vid_int) - title = prog.find('title').text - description = clean_html(prog.find('info').text) - station_name = station_program.find('.//name').text - - formats = self._extract_formats( - video_id=video_id, station=station, is_onair=False, - ft=ft, cursor=vid_int, auth_token=auth_token, area_id=area_id, - query={ - 'start_at': radio_begin, - 'ft': radio_begin, - 'end_at': radio_end, - 'to': radio_end, - 'seek': video_id, - }) + auth_cache = self._downloader.cache.load('radiko', 'auth_data') + for attempt in range(2): + auth_token, area_id = (not attempt and auth_cache) or self._auth_client() + formats = self._extract_formats( + video_id=video_id, station=station, is_onair=False, + ft=ft, cursor=vid_int, auth_token=auth_token, area_id=area_id, + 
query={ + 'start_at': radio_begin, + 'ft': radio_begin, + 'end_at': radio_end, + 'to': radio_end, + 'seek': video_id, + }) + if formats: + break return { 'id': video_id, - 'title': title, - 'description': description, - 'uploader': station_name, + 'title': try_call(lambda: prog.find('title').text), + 'description': clean_html(try_call(lambda: prog.find('info').text)), + 'uploader': try_call(lambda: station_program.find('.//name').text), 'uploader_id': station, 'timestamp': vid_int, 'formats': formats, @@ -205,8 +200,7 @@ def _real_extract(self, url): auth_token, area_id = self._auth_client() # get current time in JST (GMT+9:00 w/o DST) - vid_now = datetime.datetime.now(datetime.timezone(datetime.timedelta(hours=9))) - vid_now = calendar.timegm(vid_now.timetuple()) + vid_now = time_seconds(hours=9) prog, station_program, ft, _, _ = self._find_program(station, station, vid_now)
<!-- # Please follow the guide below - You will be asked some questions, please read them **carefully** and answer honestly - Put an `x` into all the boxes `[ ]` relevant to your *pull request* (like [x]) - Use *Preview* tab to see how your *pull request* will actually look like --> ### Before submitting a *pull request* make sure you have: - [x] At least skimmed through [contributing guidelines](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#developer-instructions) including [yt-dlp coding conventions](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#yt-dlp-coding-conventions) - [x] [Searched](https://github.com/yt-dlp/yt-dlp/search?q=is%3Apr&type=Issues) the bugtracker for similar pull requests - [x] Checked the code with [flake8](https://pypi.python.org/pypi/flake8) and [ran relevant tests](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#developer-instructions) ### In order to be accepted and merged into yt-dlp each piece of code must be in public domain or released under [Unlicense](http://unlicense.org/). Check one of the following options: - [x] I am the original author of this code and I am willing to release it under [Unlicense](http://unlicense.org/) - [ ] I am not the original author of this code but it is in public domain or released under [Unlicense](http://unlicense.org/) (provide reliable evidence) ### What is the purpose of your *pull request*? - [x] Fix or improvement to an extractor (Make sure to add/update tests) - [ ] New extractor ([Piracy websites will not be accepted](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#is-the-website-primarily-used-for-piracy)) - [ ] Core bug fix/improvement - [ ] New feature (It is strongly [recommended to open an issue first](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#adding-new-feature-or-making-overarching-changes)) --- ### Description of your *pull request* and other information This PR improves the debuggability of the extractor, even without a verbose log. (Of course we should always require it!) And some other small fixes. TODO - [ ] ~~Direct users to clear cache in case of no formats found (#3652)~~
https://api.github.com/repos/yt-dlp/yt-dlp/pulls/3655
2022-05-06T01:48:27Z
2022-05-07T15:47:51Z
2022-05-07T15:47:51Z
2022-05-07T15:47:51Z
1,436
yt-dlp/yt-dlp
8,031
Use super().__init__ instead of explicitly calling named super-class.
diff --git a/acme/acme/standalone.py b/acme/acme/standalone.py index cd2caa22107..5dfb271367e 100644 --- a/acme/acme/standalone.py +++ b/acme/acme/standalone.py @@ -34,10 +34,9 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: else: self.address_family = socket.AF_INET self.certs = kwargs.pop("certs", {}) - self.method = kwargs.pop( - "method", crypto_util._DEFAULT_SSL_METHOD) + self.method = kwargs.pop("method", crypto_util._DEFAULT_SSL_METHOD) self.allow_reuse_address = kwargs.pop("allow_reuse_address", True) - socketserver.TCPServer.__init__(self, *args, **kwargs) + super().__init__(*args, **kwargs) def _wrap_sock(self) -> None: self.socket = crypto_util.SSLSocket( @@ -190,7 +189,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: self.address_family = socket.AF_INET6 else: self.address_family = socket.AF_INET - BaseHTTPServer.HTTPServer.__init__(self, *args, **kwargs) + super().__init__(*args, **kwargs) class HTTP01Server(HTTPServer, ACMEServerMixin): @@ -198,8 +197,8 @@ class HTTP01Server(HTTPServer, ACMEServerMixin): def __init__(self, server_address: Tuple[str, int], resources: Set[challenges.HTTP01], ipv6: bool = False, timeout: int = 30) -> None: - HTTPServer.__init__( - self, server_address, HTTP01RequestHandler.partial_init( + super().__init__( + server_address, HTTP01RequestHandler.partial_init( simple_http_resources=resources, timeout=timeout), ipv6=ipv6) @@ -208,7 +207,7 @@ class HTTP01DualNetworkedServers(BaseDualNetworkedServers): affect the other.""" def __init__(self, *args: Any, **kwargs: Any) -> None: - BaseDualNetworkedServers.__init__(self, HTTP01Server, *args, **kwargs) + super().__init__(HTTP01Server, *args, **kwargs) class HTTP01RequestHandler(BaseHTTPServer.BaseHTTPRequestHandler): @@ -226,7 +225,7 @@ class HTTP01RequestHandler(BaseHTTPServer.BaseHTTPRequestHandler): def __init__(self, *args: Any, **kwargs: Any) -> None: self.simple_http_resources = kwargs.pop("simple_http_resources", set()) self._timeout = kwargs.pop('timeout', 30) - BaseHTTPServer.BaseHTTPRequestHandler.__init__(self, *args, **kwargs) + super().__init__(*args, **kwargs) self.server: HTTP01Server # In parent class BaseHTTPRequestHandler, 'timeout' is a class-level property but we diff --git a/acme/tests/standalone_test.py b/acme/tests/standalone_test.py index e0aa5aa2248..ad5751fcf53 100644 --- a/acme/tests/standalone_test.py +++ b/acme/tests/standalone_test.py @@ -165,7 +165,6 @@ def test_bad_alpn(self): class BaseDualNetworkedServersTest(unittest.TestCase): """Test for acme.standalone.BaseDualNetworkedServers.""" - class SingleProtocolServer(socketserver.TCPServer): """Server that only serves on a single protocol. FreeBSD has this behavior for AF_INET6.""" def __init__(self, *args, **kwargs): @@ -175,7 +174,7 @@ def __init__(self, *args, **kwargs): kwargs["bind_and_activate"] = False else: self.address_family = socket.AF_INET - socketserver.TCPServer.__init__(self, *args, **kwargs) + super().__init__(*args, **kwargs) if ipv6: # NB: On Windows, socket.IPPROTO_IPV6 constant may be missing. # We use the corresponding value (41) instead. 
@@ -202,7 +201,6 @@ def test_fail_to_bind(self, mock_bind): self.assertEqual(em.exception.errno, EADDRINUSE) - def test_ports_equal(self): from acme.standalone import BaseDualNetworkedServers servers = BaseDualNetworkedServers( diff --git a/certbot-nginx/certbot_nginx/_internal/nginxparser.py b/certbot-nginx/certbot_nginx/_internal/nginxparser.py index 2aa677c3801..03aa88db473 100644 --- a/certbot-nginx/certbot_nginx/_internal/nginxparser.py +++ b/certbot-nginx/certbot_nginx/_internal/nginxparser.py @@ -118,7 +118,7 @@ def __init__(self, list_source): # Turn self into a version of the source list that has spaces removed # and all sub-lists also UnspacedList()ed - list.__init__(self, list_source) + super().__init__(list_source) for i, entry in reversed(list(enumerate(self))): if isinstance(entry, list): sublist = UnspacedList(entry) diff --git a/certbot/certbot/_internal/error_handler.py b/certbot/certbot/_internal/error_handler.py index 0e63d02de9a..24ab46d9d19 100644 --- a/certbot/certbot/_internal/error_handler.py +++ b/certbot/certbot/_internal/error_handler.py @@ -167,6 +167,7 @@ def _call_signals(self) -> None: logger.debug("Calling signal %s", signum) os.kill(os.getpid(), signum) + class ExitHandler(ErrorHandler): """Context manager for running code that must be cleaned up. @@ -175,5 +176,5 @@ class ExitHandler(ErrorHandler): regular exit. """ def __init__(self, func: Callable[..., Any], *args: Any, **kwargs: Any) -> None: - ErrorHandler.__init__(self, func, *args, **kwargs) + super().__init__(func, *args, **kwargs) self.call_on_regular_exit = True diff --git a/certbot/tests/plugins/dns_common_test.py b/certbot/tests/plugins/dns_common_test.py index 41117f894f4..f68d36137ad 100644 --- a/certbot/tests/plugins/dns_common_test.py +++ b/certbot/tests/plugins/dns_common_test.py @@ -133,7 +133,7 @@ class _MockLoggingHandler(logging.Handler): def __init__(self, *args, **kwargs): self.reset() - logging.Handler.__init__(self, *args, **kwargs) + super().__init__(*args, **kwargs) def emit(self, record): self.messages[record.levelname.lower()].append(record.getMessage())
I think it's largely a question of style/preference. In terms of speed, `super().__init__` is actually *slower*, but it is probably more correct if you inherit from these classes (a small illustration follows below). ## Pull Request Checklist - [X] If the change being made is to a [distributed component](https://certbot.eff.org/docs/contributing.html#code-components-and-layout), edit the `master` section of `certbot/CHANGELOG.md` to include a description of the change being made. - [X] Add or update any documentation as needed to support the changes in this PR. - [X] Include your name in `AUTHORS.md` if you like.
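A minimal side-by-side of the two styles being swapped (illustrative only, not taken from the certbot codebase): both behave the same for plain single inheritance, but `super()` also follows the MRO correctly if a class later ends up in a cooperative/diamond hierarchy.

```python
class Base:
    def __init__(self, value):
        self.value = value

class Explicit(Base):
    def __init__(self, value):
        Base.__init__(self, value)  # old style: names the parent directly

class Cooperative(Base):
    def __init__(self, value):
        super().__init__(value)     # new style: dispatches along the MRO

print(Explicit(1).value, Cooperative(2).value)  # -> 1 2
```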
https://api.github.com/repos/certbot/certbot/pulls/9166
2022-01-09T20:40:51Z
2022-01-09T21:50:44Z
2022-01-09T21:50:44Z
2022-01-09T22:38:38Z
1,559
certbot/certbot
160
Add models to pull request template
diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md index 1f3555466..6c1eefb08 100644 --- a/.github/pull_request_template.md +++ b/.github/pull_request_template.md @@ -1,6 +1,6 @@ ### Describe the changes you have made: -### Reference any relevant issue (Fixes #123) +### Reference any relevant issue (Fixes #000) - [ ] I have performed a self-review of my code: @@ -8,3 +8,11 @@ - [ ] Windows - [ ] MacOS - [ ] Linux + +### AI Language Model (if applicable) +- [ ] GPT4 +- [ ] GPT3 +- [ ] Llama 7B +- [ ] Llama 13B +- [ ] Llama 34B +- [ ] Huggingface model (Please specify which one)
### Describe the changes you have made: I added AI language models to the pull request template. - [x] I have performed a self-review of my code: ### I have tested the code on the following OS: - [ ] Windows - [ ] MacOS - [x] Linux
https://api.github.com/repos/OpenInterpreter/open-interpreter/pulls/423
2023-09-18T03:24:19Z
2023-09-18T20:23:59Z
2023-09-18T20:23:59Z
2023-09-18T20:24:15Z
207
OpenInterpreter/open-interpreter
40,695
logging: log timestamps as local timezone instead of UTC
diff --git a/certbot/log.py b/certbot/log.py index f7c7b126cbe..38c06133507 100644 --- a/certbot/log.py +++ b/certbot/log.py @@ -19,7 +19,6 @@ import os import sys import tempfile -import time import traceback from acme import messages @@ -148,7 +147,6 @@ def setup_log_file_handler(config, logfile, fmt): handler.doRollover() # TODO: creates empty letsencrypt.log.1 file handler.setLevel(logging.DEBUG) handler_formatter = logging.Formatter(fmt=fmt) - handler_formatter.converter = time.gmtime # don't use localtime handler.setFormatter(handler_formatter) return handler, log_file_path diff --git a/certbot/tests/log_test.py b/certbot/tests/log_test.py index 3b0e1c5f644..549d2c5e148 100644 --- a/certbot/tests/log_test.py +++ b/certbot/tests/log_test.py @@ -156,7 +156,7 @@ def _test_success_common(self, should_rollover): handler.close() self.assertEqual(handler.level, logging.DEBUG) - self.assertEqual(handler.formatter.converter, time.gmtime) + self.assertEqual(handler.formatter.converter, time.localtime) expected_path = os.path.join(self.config.logs_dir, log_file) self.assertEqual(log_path, expected_path)
fix for #5604 -- localtime should be used [by default](https://docs.python.org/2/library/logging.html#logging.Formatter.formatTime).
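A quick standalone sketch of the stdlib behavior this relies on (the handler wiring is illustrative, not certbot's):

```python
import logging
import time

formatter = logging.Formatter(fmt="%(asctime)s %(levelname)s %(message)s")

# Before this change the file handler forced UTC with:
#     formatter.converter = time.gmtime
# With that line removed, the stdlib default applies again:
assert formatter.converter == time.localtime  # timestamps in local timezone

handler = logging.StreamHandler()
handler.setFormatter(formatter)
logging.getLogger("demo").addHandler(handler)
```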
https://api.github.com/repos/certbot/certbot/pulls/5607
2018-02-22T07:45:31Z
2018-03-21T22:41:34Z
2018-03-21T22:41:34Z
2018-03-21T22:47:07Z
320
certbot/certbot
1,275
build: One command per upload
diff --git a/.travis.yml b/.travis.yml index 26c0c15d148dde..9ff9f17065f1f4 100644 --- a/.travis.yml +++ b/.travis.yml @@ -52,13 +52,13 @@ after_failure: - dmesg | tail -n 100 after_script: - - zeus upload -t "text/xml+xunit" .artifacts/*.junit.xml - -t "text/xml+coverage" .artifacts/*.coverage.xml - -t "text/xml+coverage" .artifacts/coverage/cobertura-coverage.xml - -t "text/html+pytest" .artifacts/*.pytest.html - -t "text/plain+pycodestyle" .artifacts/*.pycodestyle.log - -t "text/xml+checkstyle" .artifacts/*.checkstyle.xml - -t "application/webpack-stats+json" .artifacts/webpack-stats.json + - zeus upload -t "text/xml+xunit" .artifacts/*junit.xml + - zeus upload -t "text/xml+coverage" .artifacts/*coverage.xml + - zeus upload -t "text/xml+coverage" .artifacts/coverage/cobertura-coverage.xml + - zeus upload -t "text/html+pytest" .artifacts/*pytest.html + - zeus upload -t "text/plain+pycodestyle" .artifacts/*pycodestyle.log + - zeus upload -t "text/xml+checkstyle" .artifacts/*checkstyle.xml + - zeus upload -t "application/webpack-stats+json" .artifacts/*webpack-stats.json # each job in the matrix inherits `env/global` and uses everything above, # but custom `services`, `before_install`, `install`, and `before_script` directives
https://api.github.com/repos/getsentry/sentry/pulls/9188
2018-07-25T13:09:03Z
2018-07-25T13:40:11Z
2018-07-25T13:40:11Z
2020-12-21T16:09:13Z
418
getsentry/sentry
44,440
Added < and > to list of questions.
diff --git a/README.md b/README.md index 432d00d0e..b56983b85 100644 --- a/README.md +++ b/README.md @@ -2513,6 +2513,13 @@ These are files directly not displayed after performing a standard ls direct lis `ls -a` </b></details> +<details> +<summary>What do > and < do in terms of input and output for programs?</summary><br><b> +They take in input (<) and output for a given file (>) using stdin and stdout. + +`myProgram < input.txt > executionOutput.txt` +</b></details> + <details> <summary>Explain what each of the following commands does and give an example on how to use it:
Added > and < for Linux, as well as the yes command (yes XYZ repeats XYZ in the console, useful for repetitive input).
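To make the redirection concrete, a minimal Python program (file names reused from the README example, program body invented) whose input and output are wired up by the shell:

```python
# myProgram.py -- run as:  python3 myProgram.py < input.txt > executionOutput.txt
import sys

for line in sys.stdin:              # stdin is fed from input.txt via `<`
    sys.stdout.write(line.upper())  # stdout is captured into executionOutput.txt via `>`
```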
https://api.github.com/repos/bregman-arie/devops-exercises/pulls/132
2021-06-30T01:07:25Z
2021-06-30T12:20:00Z
2021-06-30T12:20:00Z
2021-06-30T12:20:00Z
174
bregman-arie/devops-exercises
17,398
add RCE via Apache logs in log poisoning
diff --git a/File Inclusion/README.md b/File Inclusion/README.md index 8bb0a16d11..cc4a67fc64 100644 --- a/File Inclusion/README.md +++ b/File Inclusion/README.md @@ -345,6 +345,22 @@ In some cases you can also send the email with the `mail` command line. mail -s "<?php system($_GET['cmd']);?>" [email protected]. < /dev/null ``` +### RCE via Apache logs + +Poison the User-Agent in access logs: + +``` +$ curl http://example.org/ -A "<?php system(\$_GET['cmd']);?>" +``` + +Note: The logs will escape double quotes so use single quotes for strings in the PHP payload. + +Then request the logs via the LFI and execute your command. + +``` +$ curl http://example.org/test.php?page=/var/log/apache2/access.log&cmd=id +``` + ## LFI to RCE via PHP sessions Check if the website use PHP Session (PHPSESSID)
https://api.github.com/repos/swisskyrepo/PayloadsAllTheThings/pulls/362
2021-05-10T09:48:23Z
2021-05-10T11:13:34Z
2021-05-10T11:13:34Z
2021-05-10T12:52:57Z
246
swisskyrepo/PayloadsAllTheThings
8,847
add unittest for multiple usernames
diff --git a/sherlock/tests/test_multiple_usernames.py b/sherlock/tests/test_multiple_usernames.py new file mode 100644 index 000000000..12aac41f2 --- /dev/null +++ b/sherlock/tests/test_multiple_usernames.py @@ -0,0 +1,17 @@ +import imp +import unittest +import sys +sys.path.append('../') +import sherlock as sh + +checksymbols = [] +checksymbols = ["_", "-", "."] + +class TestMulripleUsernames(unittest.TestCase): + def test_area(self): + test_usernames = ["test{?}test" , "test{?feo" , "test"] + for name in test_usernames: + if(sh.CheckForParameter(name)): + self.assertAlmostEqual(sh.MultipleUsernames(name), ["test_test" , "test-test" , "test.test"]) + else: + self.assertAlmostEqual(name, name) \ No newline at end of file
I created a unit test for the feature I made a few days ago for multiple usernames.
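A rough standalone sketch of the expansion the test asserts — the helper names below are mine; the real functions in sherlock.py are `CheckForParameter` and `MultipleUsernames`:

```python
CHECK_SYMBOLS = ["_", "-", "."]  # same symbols the test module declares

def has_parameter(username):
    # Stand-in for CheckForParameter: only a complete "{?}" marker counts.
    return "{?}" in username

def expand_usernames(username):
    # Stand-in for MultipleUsernames: one candidate per check symbol.
    return [username.replace("{?}", symbol) for symbol in CHECK_SYMBOLS]

assert expand_usernames("test{?}test") == ["test_test", "test-test", "test.test"]
assert not has_parameter("test{?feo")  # malformed marker: no expansion
assert not has_parameter("test")       # plain username: left unchanged
```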
https://api.github.com/repos/sherlock-project/sherlock/pulls/1339
2022-05-08T11:24:34Z
2022-05-09T21:12:12Z
2022-05-09T21:12:12Z
2022-06-20T11:33:54Z
218
sherlock-project/sherlock
36,442
Fix pylint issues - broken master
diff --git a/docs/build_docs.py b/docs/build_docs.py index 120a90e7586f8..f0486ebea03d4 100755 --- a/docs/build_docs.py +++ b/docs/build_docs.py @@ -137,19 +137,17 @@ def build_docs_for_packages( all_spelling_errors: Dict[str, List[SpellingError]] = defaultdict(list) for package_no, package_name in enumerate(current_packages, start=1): print("#" * 20, f"[{package_no}/{len(current_packages)}] {package_name}", "#" * 20) - builder = AirflowDocsBuilder( - package_name=package_name, for_production=for_production, verbose=verbose - ) + builder = AirflowDocsBuilder(package_name=package_name, for_production=for_production) builder.clean_files() if not docs_only: with with_group(f"Check spelling: {package_name}"): - spelling_errors = builder.check_spelling() + spelling_errors = builder.check_spelling(verbose=verbose) if spelling_errors: all_spelling_errors[package_name].extend(spelling_errors) if not spellcheck_only: with with_group(f"Building docs: {package_name}"): - docs_errors = builder.build_sphinx_docs() + docs_errors = builder.build_sphinx_docs(verbose=verbose) if docs_errors: all_build_errors[package_name].extend(docs_errors) diff --git a/docs/exts/docs_build/docs_builder.py b/docs/exts/docs_build/docs_builder.py index 55db4190a54fa..6874f783828ec 100644 --- a/docs/exts/docs_build/docs_builder.py +++ b/docs/exts/docs_build/docs_builder.py @@ -43,10 +43,9 @@ class AirflowDocsBuilder: """Documentation builder for Airflow.""" - def __init__(self, package_name: str, for_production: bool, verbose: bool): + def __init__(self, package_name: str, for_production: bool): self.package_name = package_name self.for_production = for_production - self.verbose = verbose @property def _doctree_dir(self) -> str: @@ -100,7 +99,7 @@ def clean_files(self) -> None: os.makedirs(api_dir, exist_ok=True) os.makedirs(self._build_dir, exist_ok=True) - def check_spelling(self): + def check_spelling(self, verbose): """Checks spelling.""" spelling_errors = [] with TemporaryDirectory() as tmp_dir, NamedTemporaryFile() as output: @@ -119,7 +118,7 @@ def check_spelling(self): tmp_dir, ] print("Executing cmd: ", " ".join([shlex.quote(c) for c in build_cmd])) - if not self.verbose: + if not verbose: print("The output is hidden until an error occurs.") env = os.environ.copy() env['AIRFLOW_PACKAGE_NAME'] = self.package_name @@ -129,8 +128,8 @@ def check_spelling(self): build_cmd, cwd=self._src_dir, env=env, - stdout=output if not self.verbose else None, - stderr=output if not self.verbose else None, + stdout=output if not verbose else None, + stderr=output if not verbose else None, timeout=PROCESS_TIMEOUT, ) if completed_proc.returncode != 0: @@ -157,7 +156,7 @@ def check_spelling(self): spelling_errors.extend(parse_spelling_warnings(warning_text, self._src_dir)) return spelling_errors - def build_sphinx_docs(self) -> List[DocBuildError]: + def build_sphinx_docs(self, verbose) -> List[DocBuildError]: """Build Sphinx documentation""" build_errors = [] with NamedTemporaryFile() as tmp_file, NamedTemporaryFile() as output: @@ -177,7 +176,7 @@ def build_sphinx_docs(self) -> List[DocBuildError]: self._build_dir, # path to output directory ] print("Executing cmd: ", " ".join([shlex.quote(c) for c in build_cmd])) - if not self.verbose: + if not verbose: print("The output is hidden until an error occurs.") env = os.environ.copy() @@ -189,8 +188,8 @@ def build_sphinx_docs(self) -> List[DocBuildError]: build_cmd, cwd=self._src_dir, env=env, - stdout=output if not self.verbose else None, - stderr=output if not self.verbose else None, + 
stdout=output if not verbose else None, + stderr=output if not verbose else None, timeout=PROCESS_TIMEOUT, ) if completed_proc.returncode != 0:
``` ************* Module publish_docs docs/publish_docs.py:92:18: E1120: No value for argument 'verbose' in constructor call (no-value-for-parameter) ``` <!-- Thank you for contributing! Please make sure that your code changes are covered with tests. And in case of new features or big changes remember to adjust the documentation. Feel free to ping committers for the review! In case of existing issue, reference it using one of the following: closes: #ISSUE related: #ISSUE How to write a good git commit message: http://chris.beams.io/posts/git-commit/ --> --- **^ Add meaningful description above** Read the **[Pull Request Guidelines](https://github.com/apache/airflow/blob/master/CONTRIBUTING.rst#pull-request-guidelines)** for more information. In case of fundamental code change, Airflow Improvement Proposal ([AIP](https://cwiki.apache.org/confluence/display/AIRFLOW/Airflow+Improvements+Proposals)) is needed. In case of a new dependency, check compliance with the [ASF 3rd Party License Policy](https://www.apache.org/legal/resolved.html#category-x). In case of backwards incompatible changes please leave a note in [UPDATING.md](https://github.com/apache/airflow/blob/master/UPDATING.md).
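A minimal self-contained reproduction of that pylint message (class and argument names invented for illustration):

```python
class DocsBuilder:
    def __init__(self, package_name: str, verbose: bool):
        self.package_name = package_name
        self.verbose = verbose


# DocsBuilder(package_name="apache-airflow")  # E1120: No value for argument 'verbose'
DocsBuilder(package_name="apache-airflow", verbose=True)  # corrected call site
```

Dropping `verbose` from the constructor, as this PR does, is the other way to silence E1120: every existing one-argument call site becomes valid again.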
https://api.github.com/repos/apache/airflow/pulls/13427
2021-01-02T03:07:03Z
2021-01-02T10:52:26Z
2021-01-02T10:52:26Z
2021-01-02T10:52:27Z
1,041
apache/airflow
14,492
Added fingerprint for Hyundai Elantra 2021
diff --git a/selfdrive/car/hyundai/values.py b/selfdrive/car/hyundai/values.py index be94e264ab327d..026281a3f5a8a9 100644 --- a/selfdrive/car/hyundai/values.py +++ b/selfdrive/car/hyundai/values.py @@ -654,7 +654,10 @@ class Buttons: (Ecu.transmission, 0x7e1, None): [b'\xf1\x816U2V8051\x00\x00\xf1\x006U2V0_C2\x00\x006U2V8051\x00\x00DJF0T16NL0\t\xd2GW'], }, CAR.ELANTRA_2021: { - (Ecu.fwdRadar, 0x7d0, None): [b'\xf1\x00CN7_ SCC FHCUP 1.00 1.01 99110-AA000 '], + (Ecu.fwdRadar, 0x7d0, None): [ + b'\xf1\x00CN7_ SCC FHCUP 1.00 1.01 99110-AA000 ' + b'\xf1\x00CN7_ SCC F-CUP 1.00 1.01 99110-AA000 ', + ], (Ecu.eps, 0x7d4, None): [ b'\xf1\x87\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xf1\x00CN7 MDPS C 1.00 1.06 \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00 4CNDC106\xf1\xa01.06', b'\xf1\x8756310AA050\x00\xf1\x00CN7 MDPS C 1.00 1.06 56310AA050\x00 4CNDC106\xf1\xa01.06', @@ -668,8 +671,12 @@ class Buttons: b'\xf1\x00HT6WA280BLHT6VA640A1CCN0N20NS5\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00', b'\xf1\x00HT6WA280BLHT6VA640A1CCN0N20NS5\x00\x00\x00\x00\x00\x00\xe8\xba\xce\xfa', b'\xf1\x87CXMQFM2135005JB2E\xb9\x89\x98W\xa9y\x97h\xa9\x98\x99wxvwh\x87\177\xffx\xff\xff\xff,,\xf1\x89HT6VA640A1\xf1\x82CCN0N20NS5\x00\x00\x00\x00\x00\x00', + b'\xf1\x87CXMQFM1916035JB2\x88vvgg\x87Wuwgev\xa9\x98\x88\x98h\x99\x9f\xffh\xff\xff\xff\xa5\xee\xf1\x89HT6VA640A1\xf1\x82CCN0N20NS5\x00\x00\x00\x00\x00\x00' + ], + (Ecu.engine, 0x7e0, None): [ + b'\xf1\x82CNCWD0AMFCXCSFFA', + b'\xf1\x82CNCWD0AMFCXCSFFB', ], - (Ecu.engine, 0x7e0, None): [b'\xf1\x82CNCWD0AMFCXCSFFA'], }, CAR.ELANTRA_HEV_2021: { (Ecu.fwdCamera, 0x7c4, None) : [
This fingerprint started popping up on my 2021 Hyundai Elantra SEL (non-hybrid). <!-- Please copy and paste the relevant template --> <!--- ***** Template: Car bug fix ***** **Description** [](A description of the bug and the fix. Also link any relevant issues.) **Verification** [](Explain how you tested this bug fix.) **Route** Route: [a route with the bug fix] --> <!--- ***** Template: Bug fix ***** **Description** [](A description of the bug and the fix. Also link any relevant issues.) **Verification** [](Explain how you tested this bug fix.) --> <!--- ***** Template: Car port ***** **Checklist** - [ ] added to README - [ ] test route added to [test_routes.py](https://github.com/commaai/openpilot/blob/master/selfdrive/test/test_routes.py) - [ ] route with openpilot: - [ ] route with stock system: --> <!--- ***** Template: Refactor ***** **Description** [](A description of the refactor, including the goals it accomplishes.) **Verification** [](Explain how you tested the refactor for regressions.) -->
https://api.github.com/repos/commaai/openpilot/pulls/22256
2021-09-16T22:42:30Z
2021-09-17T23:59:58Z
2021-09-17T23:59:58Z
2021-09-17T23:59:58Z
842
commaai/openpilot
9,047
Add parameters to limit replace to certain section of the file
diff --git a/lib/ansible/modules/files/replace.py b/lib/ansible/modules/files/replace.py index 059d05d9724f3f..5bfc7274ef0cef 100644 --- a/lib/ansible/modules/files/replace.py +++ b/lib/ansible/modules/files/replace.py @@ -57,6 +57,22 @@ - The string to replace regexp matches. May contain backreferences that will get expanded with the regexp capture groups if the regexp matches. If not set, matches are removed entirely. + after: + required: false + version_added: "2.3" + description: + - If specified, the line after the replace/remove will start. Can be used + in combination with C(before). + Uses Python regular expressions; see + U(http://docs.python.org/2/library/re.html). + before: + required: false + version_added: "2.3" + description: + - If specified, the line before the replace/remove will occur. Can be used + in combination with C(after). + Uses Python regular expressions; see + U(http://docs.python.org/2/library/re.html). backup: required: false default: "no" @@ -87,6 +103,31 @@ replace: '\1new.host.name\2' backup: yes +# Replace after the expression till the end of the file +- replace: + path: /etc/hosts + regexp: '(\s+)old\.host\.name(\s+.*)?$' + replace: '\1new.host.name\2' + after: 'Start after line.*' + backup: yes + +# Replace before the expression till the begin of the file +- replace: + path: /etc/hosts + regexp: '(\s+)old\.host\.name(\s+.*)?$' + replace: '\1new.host.name\2' + before: 'Start before line.*' + backup: yes + +# Replace between the expressions +- replace: + path: /etc/hosts + regexp: '(\s+)old\.host\.name(\s+.*)?$' + replace: '\1new.host.name\2' + after: 'Start after line.*' + before: 'Start before line.*' + backup: yes + - replace: path: /home/jdoe/.ssh/known_hosts regexp: '^old\.host\.name[^\n]*\n' @@ -129,6 +170,7 @@ def write_changes(module, contents, path): if valid: module.atomic_move(tmpfile, path, unsafe_writes=module.params['unsafe_writes']) + def check_file_attrs(module, changed, message): file_args = module.load_file_common_arguments(module.params) @@ -141,12 +183,15 @@ def check_file_attrs(module, changed, message): return message, changed + def main(): module = AnsibleModule( argument_spec=dict( path=dict(required=True, aliases=['dest', 'destfile', 'name'], type='path'), regexp=dict(required=True), replace=dict(default='', type='str'), + after=dict(required=False), + before=dict(required=False), backup=dict(default=False, type='bool'), validate=dict(default=None, type='str'), ), @@ -168,8 +213,28 @@ def main(): contents = to_text(f.read(), errors='surrogate_or_strict') f.close() - mre = re.compile(params['regexp'], re.MULTILINE) - result = re.subn(mre, params['replace'], contents, 0) + pattern = '' + if params['after']: + pattern = '%s(.*)' % params['after'] + elif params['before']: + pattern = '(.*)%s' % params['before'] + elif params['after'] and params['before']: + pattern = '%s(.*?)%s' % (params['before'], params['after']) + + if pattern: + section_re = re.compile(pattern, re.DOTALL) + match = re.search(section_re, contents) + if match: + section = match.group(0) + + mre = re.compile(params['regexp'], re.MULTILINE) + result = re.subn(mre, params['replace'], section, 0) + if result[1] > 0 and section != result[0]: + result = (contents.replace(section, result[0]), result[1]) + + else: + mre = re.compile(params['regexp'], re.MULTILINE) + result = re.subn(mre, params['replace'], contents, 0) if result[1] > 0 and contents != result[0]: msg = '%s replacements made' % result[1] @@ -195,5 +260,6 @@ def main(): res_args['msg'], res_args['changed'] = 
check_file_attrs(module, changed, msg) module.exit_json(**res_args) + if __name__ == '__main__': main()
##### SUMMARY Add parameters `after` and `before` to limit the replacement/removals of lines to a certain section of the file. - The parameter `after` defines after which line the replacements/removals should start until the end of file - The parameter `before` defines before which line the replacements/removals should stop from the begin of file - Both parameters can be used together to further restrict/limit the section ##### ISSUE TYPE - Feature Pull Request ##### COMPONENT NAME `modules/files/replace.py` ##### ANSIBLE VERSION ``` ansible 2.2.1.0 config file = /etc/ansible/ansible.cfg configured module search path = Default w/o overrides ```
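Condensed into a standalone sketch (with the combined `after`+`before` case tested first so the single-pattern branches don't shadow it), the section logic is roughly:

```python
import re

def replace_in_section(contents, regexp, replace, after=None, before=None):
    """Apply re.subn only inside the slice bounded by `after`/`before`."""
    if after and before:
        pattern = '%s(.*?)%s' % (after, before)
    elif after:
        pattern = '%s(.*)' % after
    elif before:
        pattern = '(.*)%s' % before
    else:
        return re.subn(regexp, replace, contents, flags=re.MULTILINE)

    match = re.search(pattern, contents, re.DOTALL)
    if not match:
        return contents, 0
    section = match.group(0)
    new_section, count = re.subn(regexp, replace, section, flags=re.MULTILINE)
    return contents.replace(section, new_section), count
```

With `after='Start after line.*'`, for example, replacements are limited to everything following that line, matching the documented examples above.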
https://api.github.com/repos/ansible/ansible/pulls/22535
2017-03-12T01:00:15Z
2017-03-16T21:34:58Z
2017-03-16T21:34:58Z
2019-04-26T20:56:01Z
1,130
ansible/ansible
48,925
Profiles~
diff --git a/interpreter/terminal_interface/profiles.yaml b/interpreter/terminal_interface/profiles.yaml new file mode 100644 index 000000000..aff44a2f9 --- /dev/null +++ b/interpreter/terminal_interface/profiles.yaml @@ -0,0 +1,13 @@ +default: + llm.model: "gpt-4-1106-preview" + llm.context_window: 128000 + llm.max_tokens: 4096 + llm.supports_functions: True + +local: + offline: True + llm.model: "openai/x" # "openai/" tells Lit + llm.api_base: "http://localhost:1234/v1" + llm.max_tokens: 1000 + llm.context_window: 3000 + llm.api_key: "x" \ No newline at end of file diff --git a/interpreter/terminal_interface/start_terminal_interface.py b/interpreter/terminal_interface/start_terminal_interface.py index 2a24c6c47..376888100 100644 --- a/interpreter/terminal_interface/start_terminal_interface.py +++ b/interpreter/terminal_interface/start_terminal_interface.py @@ -13,6 +13,7 @@ from .utils.check_for_update import check_for_update from .utils.display_markdown_message import display_markdown_message from .utils.get_config import get_config_path +from .utils.profiles import apply_profile, get_profile_path from .validate_llm_settings import validate_llm_settings @@ -23,13 +24,14 @@ def start_terminal_interface(interpreter): arguments = [ # Profiles coming soon— after we seperate core from TUI - # { - # "name": "profile", - # "nickname": "p", - # "help_text": "profile (from your config file) to use. sets multiple settings at once", - # "type": str, - # "default": "default", - # }, + { + "name": "profile", + "nickname": "p", + "help_text": "profile (from your config file) to use. sets multiple settings at once", + "type": str, + "default": "default", + "attribute": {"object": interpreter, "attr_name": "profile"}, + }, { "name": "custom_instructions", "nickname": "ci", @@ -181,7 +183,6 @@ def start_terminal_interface(interpreter): "type": str, "attribute": {"object": interpreter, "attr_name": "config_file"}, }, - # Profiles { "name": "fast", "nickname": "f", @@ -628,6 +629,13 @@ def start_terminal_interface(interpreter): # Apply default config file interpreter = apply_config(interpreter) + if args.profile: + # We can add custom profile path, I'll leave it out for first PR + print(vars(args).get("profile")) + interpreter.profile = vars(args).get("profile") + user_profile = get_profile_path() + interpreter = apply_profile(interpreter, user_profile) + # Set attributes on interpreter for argument_name, argument_value in vars(args).items(): if argument_value != None: diff --git a/interpreter/terminal_interface/utils/get_config.py b/interpreter/terminal_interface/utils/get_config.py index 8d2807560..f2e374283 100644 --- a/interpreter/terminal_interface/utils/get_config.py +++ b/interpreter/terminal_interface/utils/get_config.py @@ -55,7 +55,7 @@ def get_config(path=user_config_path): config = None try: - with open(path, "r", encoding='utf-8') as file: + with open(path, "r", encoding="utf-8") as file: config = yaml.safe_load(file) if config is not None: return config diff --git a/interpreter/terminal_interface/utils/profiles.py b/interpreter/terminal_interface/utils/profiles.py new file mode 100644 index 000000000..17271102c --- /dev/null +++ b/interpreter/terminal_interface/utils/profiles.py @@ -0,0 +1,93 @@ +import logging +import os +import shutil + +import yaml + +from .local_storage_path import get_storage_path + +# Constants for file paths +PROFILE_FILENAME = "profiles.yaml" +USER_PROFILE_PATH = os.path.join(get_storage_path(), PROFILE_FILENAME) + + +def get_profile_path(path=USER_PROFILE_PATH): + 
""" + Retrieve the path to the profile. If the path does not exist, create a new profile. + :param path: The path or filename for the profile. + :return: The full path to the profile. + """ + # Constructing full paths for various locations + profile_dir = get_storage_path() + current_dir = os.getcwd() + + # Check if path exists, or if it's in profile or current directory + if not os.path.exists(path): + if os.path.exists(os.path.join(profile_dir, path)): + path = os.path.join(profile_dir, path) + elif os.path.exists(os.path.join(current_dir, path)): + path = os.path.join(current_dir, path) + else: + # Create directory if it doesn't exist + directory = os.path.dirname(path) + if directory and not os.path.exists(directory): + os.makedirs(directory, exist_ok=True) + else: + os.makedirs(profile_dir, exist_ok=True) + path = os.path.join(profile_dir, path) + + # Copy default profile + default_profile_path = os.path.join( + os.path.dirname(os.path.dirname(__file__)), PROFILE_FILENAME + ) + shutil.copy(default_profile_path, path) + + return path + + +def get_profile(path=USER_PROFILE_PATH): + """ + Load and return the user profile from the given path. + :param path: The path to the profile file. + :return: A dictionary containing the profile data. + """ + path = get_profile_path(path) + try: + with open(path, "r", encoding="utf-8") as file: + profile = yaml.safe_load(file) + return profile if profile else {} + except UnicodeDecodeError: + logging.warning( + "Profile file can't be read due to a Unicode decoding error. " + "Ensure it is saved in UTF-8 format. Run `interpreter --reset_profile` to reset it." + ) + except Exception as e: + logging.warning(f"An error occurred while reading the profile file: {e}.") + return {} + + +def apply_profile(self, profile_path=None): + """ + Apply the user profile settings from the specified path. + If profile_path is None, the default path is used. + The method uses self.profile to access the current profile name. + :param profile_path: The path to the profile file. + """ + if profile_path is None: + profile_path = get_profile_path() + + profile = get_profile(profile_path) + + # Retrieve the specific profile based on the current profile name + selected_profile = profile.get(self.profile, {}) + + # Apply settings from the selected profile + for key, value in selected_profile.items(): + if key.startswith("llm."): + setattr(self.llm, key[4:], value) # For 'llm.' prefixed keys + elif key.startswith("computer."): + setattr(self.computer, key[9:], value) # For 'computer.' prefixed keys + else: + setattr(self, key, value) # For other keys + + return self
### Describe the changes you have made: ### Reference any relevant issues (e.g. "Fixes #000"): ### Pre-Submission Checklist (optional but appreciated): - [ ] I have included relevant documentation updates (stored in /docs) - [x] I have read `docs/CONTRIBUTING.md` - [ ] I have read `docs/ROADMAP.md` ### OS Tests (optional but appreciated): - [x] Tested on Windows - [ ] Tested on MacOS - [ ] Tested on Linux
https://api.github.com/repos/OpenInterpreter/open-interpreter/pulls/933
2024-01-16T20:57:50Z
2024-01-18T07:20:11Z
2024-01-18T07:20:11Z
2024-01-18T07:20:12Z
1,735
OpenInterpreter/open-interpreter
40,835
[extractor/tiktok] TikTokBaseIE update api hostname
diff --git a/yt_dlp/extractor/tiktok.py b/yt_dlp/extractor/tiktok.py index 1bbf88495e6..95223f5de95 100644 --- a/yt_dlp/extractor/tiktok.py +++ b/yt_dlp/extractor/tiktok.py @@ -30,7 +30,7 @@ class TikTokBaseIE(InfoExtractor): _WORKING_APP_VERSION = None _APP_NAME = 'trill' _AID = 1180 - _API_HOSTNAME = 'api-h2.tiktokv.com' + _API_HOSTNAME = 'api16-normal-c-useast1a.tiktokv.com' _UPLOADER_URL_FORMAT = 'https://www.tiktok.com/@%s' _WEBPAGE_HOST = 'https://www.tiktok.com/' QUALITIES = ('360p', '540p', '720p', '1080p')
**IMPORTANT**: PRs without the template will be CLOSED ### Description of your *pull request* and other information </details> <!-- Explanation of your *pull request* in arbitrary form goes here. Please **make sure the description explains the purpose and effect** of your *pull request* and is worded well enough to be understood. Provide as much **context and examples** as possible --> Modified the api hostname for TikTok. Looks like TikTok only shut down or changed the previous API hostname and did not start requiring additional signatures. Cannot run tests for some reason due to a RecursionError :), so I manually ran some of the tests. Fixes #5688 <details open><summary>Template</summary> <!-- OPEN is intentional --> <!-- # PLEASE FOLLOW THE GUIDE BELOW - You will be asked some questions, please read them **carefully** and answer honestly - Put an `x` into all the boxes `[ ]` relevant to your *pull request* (like [x]) - Use *Preview* tab to see how your *pull request* will actually look like --> ### Before submitting a *pull request* make sure you have: - [x] At least skimmed through [contributing guidelines](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#developer-instructions) including [yt-dlp coding conventions](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#yt-dlp-coding-conventions) - [x] [Searched](https://github.com/yt-dlp/yt-dlp/search?q=is%3Apr&type=Issues) the bugtracker for similar pull requests - [x] Checked the code with [flake8](https://pypi.python.org/pypi/flake8) and [ran relevant tests](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#developer-instructions) ### In order to be accepted and merged into yt-dlp each piece of code must be in public domain or released under [Unlicense](http://unlicense.org/). Check one of the following options: - [x] I am the original author of this code and I am willing to release it under [Unlicense](http://unlicense.org/) - [ ] I am not the original author of this code but it is in public domain or released under [Unlicense](http://unlicense.org/) (provide reliable evidence) ### What is the purpose of your *pull request*? - [x] Fix or improvement to an extractor (Make sure to add/update tests) - [ ] New extractor ([Piracy websites will not be accepted](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#is-the-website-primarily-used-for-piracy)) - [ ] Core bug fix/improvement - [ ] New feature (It is strongly [recommended to open an issue first](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#adding-new-feature-or-making-overarching-changes))
https://api.github.com/repos/yt-dlp/yt-dlp/pulls/5690
2022-12-02T08:33:02Z
2022-12-02T09:38:00Z
2022-12-02T09:38:00Z
2022-12-02T10:40:08Z
218
yt-dlp/yt-dlp
7,587
[MRG+1] split data using _safe_split in _permutation_test_score to fix error…
diff --git a/sklearn/cross_validation.py b/sklearn/cross_validation.py index a4a1e3d65c7ca..03c74b88f5f28 100644 --- a/sklearn/cross_validation.py +++ b/sklearn/cross_validation.py @@ -1756,8 +1756,10 @@ def _permutation_test_score(estimator, X, y, cv, scorer): """Auxiliary function for permutation_test_score""" avg_score = [] for train, test in cv: - estimator.fit(X[train], y[train]) - avg_score.append(scorer(estimator, X[test], y[test])) + X_train, y_train = _safe_split(estimator, X, y, train) + X_test, y_test = _safe_split(estimator, X, y, test, train) + estimator.fit(X_train, y_train) + avg_score.append(scorer(estimator, X_test, y_test)) return np.mean(avg_score) @@ -1770,7 +1772,7 @@ def _shuffle(y, labels, random_state): for label in np.unique(labels): this_mask = (labels == label) ind[this_mask] = random_state.permutation(ind[this_mask]) - return y[ind] + return safe_indexing(y, ind) def check_cv(cv, X=None, y=None, classifier=False): diff --git a/sklearn/model_selection/_validation.py b/sklearn/model_selection/_validation.py index 88c3922f99363..91f60366f8717 100644 --- a/sklearn/model_selection/_validation.py +++ b/sklearn/model_selection/_validation.py @@ -622,8 +622,10 @@ def _permutation_test_score(estimator, X, y, groups, cv, scorer): """Auxiliary function for permutation_test_score""" avg_score = [] for train, test in cv.split(X, y, groups): - estimator.fit(X[train], y[train]) - avg_score.append(scorer(estimator, X[test], y[test])) + X_train, y_train = _safe_split(estimator, X, y, train) + X_test, y_test = _safe_split(estimator, X, y, test, train) + estimator.fit(X_train, y_train) + avg_score.append(scorer(estimator, X_test, y_test)) return np.mean(avg_score) @@ -636,7 +638,7 @@ def _shuffle(y, groups, random_state): for group in np.unique(groups): this_mask = (groups == group) indices[this_mask] = random_state.permutation(indices[this_mask]) - return y[indices] + return safe_indexing(y, indices) def learning_curve(estimator, X, y, groups=None, diff --git a/sklearn/model_selection/tests/test_validation.py b/sklearn/model_selection/tests/test_validation.py index 830a079a0fc6d..d1f83b469d6c8 100644 --- a/sklearn/model_selection/tests/test_validation.py +++ b/sklearn/model_selection/tests/test_validation.py @@ -966,3 +966,22 @@ def test_score_memmap(): break except WindowsError: sleep(1.) + + +def test_permutation_test_score_pandas(): + # check permutation_test_score doesn't destroy pandas dataframe + types = [(MockDataFrame, MockDataFrame)] + try: + from pandas import Series, DataFrame + types.append((Series, DataFrame)) + except ImportError: + pass + for TargetType, InputFeatureType in types: + # X dataframe, y series + iris = load_iris() + X, y = iris.data, iris.target + X_df, y_ser = InputFeatureType(X), TargetType(y) + check_df = lambda x: isinstance(x, InputFeatureType) + check_series = lambda x: isinstance(x, TargetType) + clf = CheckingClassifier(check_X=check_df, check_y=check_series) + permutation_test_score(clf, X_df, y_ser)
… when using Pandas DataFrame/Series. Related to issue #5696
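For context, a small illustration (not from the patch) of the failure mode being fixed — plain `X[train]` indexing breaks once X is a DataFrame:

```python
import numpy as np
import pandas as pd

X_df = pd.DataFrame(np.arange(12).reshape(4, 3), columns=list("abc"))
train = np.array([0, 2])

# X_df[train] raises KeyError: DataFrame.__getitem__ selects *columns* by
# label, not rows by position, so the cross-validation helpers must go
# through _safe_split / safe_indexing instead of X[train] / y[ind].
X_train = X_df.iloc[train]  # positional row selection, as safe_indexing does
```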
https://api.github.com/repos/scikit-learn/scikit-learn/pulls/5697
2015-11-02T23:57:53Z
2016-12-29T01:46:53Z
2016-12-29T01:46:53Z
2016-12-29T08:26:12Z
904
scikit-learn/scikit-learn
46,754
Update README.rst
diff --git a/README.rst b/README.rst index 8a59d6d3ba..2c68568ebc 100644 --- a/README.rst +++ b/README.rst @@ -895,7 +895,7 @@ Colors and Formatting --------------------- Syntax highlighting is applied to HTTP headers and bodies (where it makes -sense). You can choose your prefered color scheme via the ``--style`` option +sense). You can choose your preferred color scheme via the ``--style`` option if you don't like the default one (see ``$ http --help`` for the possible values).
Fix a simple typo
https://api.github.com/repos/httpie/cli/pulls/387
2015-10-07T17:18:06Z
2015-10-07T17:41:24Z
2015-10-07T17:41:24Z
2015-10-07T17:41:35Z
141
httpie/cli
34,144
My solution to the problem
diff --git a/Project Euler/Problem 01/sol4.py b/Project Euler/Problem 01/sol4.py new file mode 100644 index 000000000000..0f5dc370b441 --- /dev/null +++ b/Project Euler/Problem 01/sol4.py @@ -0,0 +1,32 @@ +def mulitples(limit): + xmulti = [] + zmulti = [] + z = 3 + x = 5 + temp = 1 + while True: + result = z * temp + if (result < limit): + zmulti.append(result) + temp += 1 + continue + else: + temp = 1 + break + while True: + result = x * temp + if (result < limit): + xmulti.append(result) + temp += 1 + continue + else: + temp = 1 + break + return (sum(zmulti) + sum(xmulti)) + + + + + + +print (mulitples(100)) diff --git a/Project Euler/Problem 02/sol2.py b/Project Euler/Problem 02/sol2.py new file mode 100644 index 000000000000..f0502a389707 --- /dev/null +++ b/Project Euler/Problem 02/sol2.py @@ -0,0 +1,13 @@ +def fib(n): + ls = [] + a,b = 0,1 + n += 1 + for i in range(n): + if (b % 2 == 0): + ls.append(b) + else: + pass + a,b = b, a+b + print (sum(ls)) + return None +fib(10)
Looks kind of bad compared to the other ones, huh.
https://api.github.com/repos/TheAlgorithms/Python/pulls/244
2018-01-21T08:28:22Z
2018-01-22T01:46:12Z
2018-01-22T01:46:12Z
2018-01-22T01:46:12Z
428
TheAlgorithms/Python
30,171
Add project: Upgini
diff --git a/README.md b/README.md index 17d3eddd..9f4c6bba 100644 --- a/README.md +++ b/README.md @@ -1220,6 +1220,7 @@ be * [Shapash](https://github.com/MAIF/shapash) : Shapash is a Python library that provides several types of visualization that display explicit labels that everyone can understand. * [Eurybia](https://github.com/MAIF/eurybia): Eurybia monitors data and model drift over time and securizes model deployment with data validation. * [Colossal-AI](https://github.com/hpcaitech/ColossalAI): An open-source deep learning system for large-scale model training and inference with high efficiency and low cost. +* [Upgini](https://github.com/upgini/river): Free automated data & feature enrichment library for machine learning - automatically searches through thousands of ready-to-use features from public and community shared data sources and enriches your training dataset with only the accuracy improving features. <a name="python-data-analysis--data-visualization"></a> #### Data Analysis / Data Visualization
Hi! I added the Upgini library. It's a Python library for automated data & feature enrichment for machine learning: it automatically searches through thousands of ready-to-use features from public and community-shared data sources and enriches your training dataset with only the accuracy-improving features. License: BSD-3. All the best, Roma
https://api.github.com/repos/josephmisiti/awesome-machine-learning/pulls/866
2022-06-27T18:44:42Z
2022-06-29T20:04:24Z
2022-06-29T20:04:24Z
2022-06-29T20:04:24Z
254
josephmisiti/awesome-machine-learning
52,486
changing describe_parameters call to use paginator
diff --git a/lib/ansible/modules/cloud/amazon/aws_ssm_parameter_store.py b/lib/ansible/modules/cloud/amazon/aws_ssm_parameter_store.py index f075456a6635df..7265dfda79c9d3 100644 --- a/lib/ansible/modules/cloud/amazon/aws_ssm_parameter_store.py +++ b/lib/ansible/modules/cloud/amazon/aws_ssm_parameter_store.py @@ -185,7 +185,10 @@ def create_update_parameter(client, module): # Description field not available from get_parameter function so get it from describe_parameters describe_existing_parameter = None try: - describe_existing_parameter = client.describe_parameters(Filters=[{"Key": "Name", "Values": [args['Name']]}]) + describe_existing_parameter_paginator = client.get_paginator('describe_parameters') + describe_existing_parameter = describe_existing_parameter_paginator.paginate( + Filters=[{"Key": "Name", "Values": [args['Name']]}]).build_full_result() + except ClientError as e: module.fail_json_aws(e, msg="getting description value")
##### SUMMARY updated create_update_parameter() to use a paginator for the describe_parameter method when updating an existing parameter. I ran into an issue where the parameter I needed to update was not in the first result set of items returned by: client.get_parameter(Name=args['Name'], WithDecryption=True) See ADDITIONAL INFORMATION for details ##### ISSUE TYPE - Bugfix Pull Request ##### COMPONENT NAME aws_ssm_parameter_store ##### ANSIBLE VERSION <!--- Paste verbatim output from "ansible --version" between quotes --> ``` ansible 2.6.4 config file = /etc/ansible/ansible.cfg configured module search path = [u'/root/.ansible/plugins/modules', u'/usr/share/ansible/plugins/modules'] ansible python module location = /usr/lib/python2.7/site-packages/ansible executable location = /bin/ansible python version = 2.7.5 (default, Aug 4 2017, 00:39:18) [GCC 4.8.5 20150623 (Red Hat 4.8.5-16)] ``` ##### ADDITIONAL INFORMATION I ran into an issue where the parameter I needed to update was not in the first result set of items returned by: client.get_parameter(Name=args['Name'], WithDecryption=True) I got this error when running my playbook: ``` Result: <127.0.0.1> ESTABLISH LOCAL CONNECTION FOR USER: root <127.0.0.1> EXEC /bin/sh -c 'echo ~root && sleep 0' <127.0.0.1> EXEC /bin/sh -c '( umask 77 && mkdir -p "` echo /root/.ansible/tmp/ansible-tmp-1536863695.84-147938161401239 `" && echo ansible-tmp-1536863695.84-147938161401239="` echo /root/.ansible/tmp/ansible-tmp-1536863695.84-147938161401239 `" ) && sleep 0' Using module file /usr/lib/python2.7/site-packages/ansible/modules/cloud/amazon/aws_ssm_parameter_store.py <127.0.0.1> PUT /root/.ansible/tmp/ansible-local-30457dmHRd8/tmp_9kYoS TO /root/.ansible/tmp/ansible-tmp-1536863695.84-147938161401239/aws_ssm_parameter_store.py <127.0.0.1> EXEC /bin/sh -c 'chmod u+x /root/.ansible/tmp/ansible-tmp-1536863695.84-147938161401239/ /root/.ansible/tmp/ansible-tmp-1536863695.84-147938161401239/aws_ssm_parameter_store.py && sleep 0' <127.0.0.1> EXEC /bin/sh -c '/usr/bin/python2 /root/.ansible/tmp/ansible-tmp-1536863695.84-147938161401239/aws_ssm_parameter_store.py && sleep 0' <127.0.0.1> EXEC /bin/sh -c 'rm -f -r /root/.ansible/tmp/ansible-tmp-1536863695.84-147938161401239/ > /dev/null 2>&1 && sleep 0' The full traceback is: Traceback (most recent call last): File "/tmp/ansible_0pStDP/ansible_module_aws_ssm_parameter_store.py", line 253, in <module> main() File "/tmp/ansible_0pStDP/ansible_module_aws_ssm_parameter_store.py", line 248, in main (changed, response) = invocations[state](client, module) File "/tmp/ansible_0pStDP/ansible_module_aws_ssm_parameter_store.py", line 192, in create_update_parameter if describe_existing_parameter['Parameters'][0]['Description'] != args['Description']: IndexError: list index out of range fatal: [localhost]: FAILED! 
=> { "changed": false, "module_stderr": "Traceback (most recent call last):\n File \"/tmp/ansible_0pStDP/ansible_module_aws_ssm_parameter_store.py\", line 253, in <module>\n main()\n File \"/tmp/ansible_0pStDP/ansible_module_aws_ssm_parameter_store.py\", line 248, in main\n (changed, response) = invocations[state](client, module)\n File \"/tmp/ansible_0pStDP/ansible_module_aws_ssm_parameter_store.py\", line 192, in create_update_parameter\n if describe_existing_parameter['Parameters'][0]['Description'] != args['Description']:\nIndexError: list index out of range\n", "module_stdout": "", "msg": "MODULE FAILURE", "rc": 1 } ``` The call to client.get_parameter(Name=args['Name'], WithDecryption=True) was actually returning this: ``` {u'NextToken': u'AAEAAYo5rAFED3zqXPRdM2barhRoPEy7XKiKyQseIg/NfpRoAAAAAFuai7PyhJdo0bspyIm/QCuI/KQWErCGt+O7i4bwVHyJXsX+Z0jYMdddl9TpkJwG+vxArVfXLlu+0rfdy14DbLbK8audCg4cJiThDTRdFhQLCHWptaOTIiblxoJnJfd5KT8yfXsFig/dVTcx9T8oYOFXKF6SdlJ4hlvDmzA/cdGUxTuLGqCXAj0OBmDXsH3jVRm57qEVqkWeTbXzWpZNJvQ6rsAK+UQQKJG4UlgNu3j/0iIlWjgRWr4fd3LaNjTmadFPXUW6sWw8o+tLR0j324YmVCg0s/rSo+2iXnSdh0GgPOAZfmVaBUvH2VkmYe+7yESYlUsYc7Bgzhd+BhE0Cj5h5atle5ipSiJa2/mdtwBJigg7lQiQg2tOfsCvzf9gyvpdqrNvdND37xktB6dOxu+tpUeaJDZ+joqGqdEWLTKE1jfA1ajLCjKXiP/gkUMNrZnXXx/++nmL3ohaWXdV0OfXDgzR2YxQSLTFCPhTm6TcPlK5ibhFYeaC8IX+TtsRTxJD8LMDB5lhbxwXNJO0gLm7LkSFsrTKbaiJhELgChQZBx20zX7t8XCm8pn/Zq23L7wigvnl4Ce0dIKMCqiNk7Vqls2U+90QnHK+X08ulPOS2tF4nzJ0MlTaLyYypWDlr0/Ktn/rp+kCHl1vrYOiwRJuqt8t2zrK+J5faOkleVV0Fx3g5Z4XPrpvYmt8ZlNPGE2zXQjk5insxzLFt+C7vj14xcvql4O2iAK5duFoTQE/L4/ZCw==', 'ResponseMetadata': {'RetryAttempts': 0, 'HTTPStatusCode': 200, 'RequestId': 'XXXXXXX', 'HTTPHeaders': {'x-amzn-requestid': 'XXXXXXX', 'date': 'Thu, 13 Sep 2018 16:09:22 GMT', 'content-length': '840', 'content-type': 'application/x-amz-json-1.1'}}, u'Parameters': []}) ```
https://api.github.com/repos/ansible/ansible/pulls/45632
2018-09-13T19:25:35Z
2018-09-14T19:17:16Z
2018-09-14T19:17:16Z
2019-07-22T16:39:09Z
237
ansible/ansible
49,022
[XHamster] test case fix
diff --git a/youtube_dl/extractor/xhamster.py b/youtube_dl/extractor/xhamster.py index 68652a22fc7..d1bc992fd95 100644 --- a/youtube_dl/extractor/xhamster.py +++ b/youtube_dl/extractor/xhamster.py @@ -39,7 +39,7 @@ class XHamsterIE(InfoExtractor): 'uploader': 'Ruseful2011', 'duration': 893, 'age_limit': 18, - 'categories': ['Fake Hub', 'Amateur', 'MILFs', 'POV', 'Boss', 'Office', 'Oral', 'Reality', 'Sexy'], + 'categories': ['Fake Hub', 'Amateur', 'MILFs', 'POV', 'Beauti', 'Beauties', 'Beautiful', 'Boss', 'Office', 'Oral', 'Reality', 'Sexy', 'Taking'], }, }, { 'url': 'http://xhamster.com/movies/2221348/britney_spears_sexy_booty.html?hd',
### Before submitting a *pull request* make sure you have: - [x] At least skimmed through [adding new extractor tutorial](https://github.com/rg3/youtube-dl#adding-support-for-a-new-site) and [youtube-dl coding conventions](https://github.com/rg3/youtube-dl#youtube-dl-coding-conventions) sections - [x] [Searched](https://github.com/rg3/youtube-dl/search?q=is%3Apr&type=Issues) the bugtracker for similar pull requests - [x] Checked the code with [flake8](https://pypi.python.org/pypi/flake8) ### In order to be accepted and merged into youtube-dl each piece of code must be in public domain or released under [Unlicense](http://unlicense.org/). Check one of the following options: - [ ] I am the original author of this code and I am willing to release it under [Unlicense](http://unlicense.org/) - [x] I am not the original author of this code but it is in public domain or released under [Unlicense](http://unlicense.org/) (provide reliable evidence) ### What is the purpose of your *pull request*? - [x] Bug fix - [ ] Improvement - [ ] New extractor - [ ] New feature --- ### Description of your *pull request* and other information Fixed/Updated XHamster test case. Updated the expected category array
https://api.github.com/repos/ytdl-org/youtube-dl/pulls/15656
2018-02-20T11:22:32Z
2018-02-20T15:18:50Z
2018-02-20T15:18:50Z
2018-02-20T20:20:28Z
240
ytdl-org/youtube-dl
50,340
Bump peter-evans/create-or-update-comment from 3.0.2 to 3.1.0
diff --git a/.github/workflows/diff_shades_comment.yml b/.github/workflows/diff_shades_comment.yml index b86bd93410e..49fd376d85e 100644 --- a/.github/workflows/diff_shades_comment.yml +++ b/.github/workflows/diff_shades_comment.yml @@ -41,7 +41,7 @@ jobs: - name: Create or update PR comment if: steps.metadata.outputs.needs-comment == 'true' - uses: peter-evans/create-or-update-comment@c6c9a1a66007646a28c153e2a8580a5bad27bcfa + uses: peter-evans/create-or-update-comment@23ff15729ef2fc348714a3bb66d2f655ca9066f2 with: comment-id: ${{ steps.find-comment.outputs.comment-id }} issue-number: ${{ steps.metadata.outputs.pr-number }}
Bumps [peter-evans/create-or-update-comment](https://github.com/peter-evans/create-or-update-comment) from 3.0.2 to 3.1.0. <details> <summary>Release notes</summary> <p><em>Sourced from <a href="https://github.com/peter-evans/create-or-update-comment/releases">peter-evans/create-or-update-comment's releases</a>.</em></p> <blockquote> <h2>Create or Update Comment v3.1.0</h2> <h2>What's Changed</h2> <ul> <li>Add truncate warning to body of comment by <a href="https://github.com/ethanmdavidson"><code>@​ethanmdavidson</code></a> and <a href="https://github.com/peter-evans"><code>@​peter-evans</code></a> in <a href="https://redirect.github.com/peter-evans/create-or-update-comment/pull/272">peter-evans/create-or-update-comment#272</a></li> <li>46 dependency updates by <a href="https://github.com/dependabot"><code>@​dependabot</code></a></li> </ul> <p><strong>Full Changelog</strong>: <a href="https://github.com/peter-evans/create-or-update-comment/compare/v3.0.2...v3.1.0">https://github.com/peter-evans/create-or-update-comment/compare/v3.0.2...v3.1.0</a></p> </blockquote> </details> <details> <summary>Commits</summary> <ul> <li><a href="https://github.com/peter-evans/create-or-update-comment/commit/23ff15729ef2fc348714a3bb66d2f655ca9066f2"><code>23ff157</code></a> Add truncate warning to body of comment (<a href="https://redirect.github.com/peter-evans/create-or-update-comment/issues/272">#272</a>)</li> <li><a href="https://github.com/peter-evans/create-or-update-comment/commit/d85800fae53099456d49b13f853b32f2109df604"><code>d85800f</code></a> build(deps-dev): bump <code>@​babel/traverse</code> from 7.21.3 to 7.23.2 (<a href="https://redirect.github.com/peter-evans/create-or-update-comment/issues/270">#270</a>)</li> <li><a href="https://github.com/peter-evans/create-or-update-comment/commit/ebb1ca676819f745fbf2a6780f6ad44966192876"><code>ebb1ca6</code></a> build(deps-dev): bump <code>@​types/node</code> from 18.18.4 to 18.18.5 (<a href="https://redirect.github.com/peter-evans/create-or-update-comment/issues/269">#269</a>)</li> <li><a href="https://github.com/peter-evans/create-or-update-comment/commit/ac8e6509d7545ebc2e5e7c35eaa12195c2f77adc"><code>ac8e650</code></a> build(deps-dev): bump eslint-plugin-prettier from 5.0.0 to 5.0.1 (<a href="https://redirect.github.com/peter-evans/create-or-update-comment/issues/267">#267</a>)</li> <li><a href="https://github.com/peter-evans/create-or-update-comment/commit/8fbd66cf812a450fb63f15370cf4ee6ee32943e7"><code>8fbd66c</code></a> build(deps-dev): bump <code>@​types/node</code> from 18.18.3 to 18.18.4 (<a href="https://redirect.github.com/peter-evans/create-or-update-comment/issues/266">#266</a>)</li> <li><a href="https://github.com/peter-evans/create-or-update-comment/commit/8cbbe4467976cf643055508ca597827250b42212"><code>8cbbe44</code></a> build(deps-dev): bump eslint from 8.50.0 to 8.51.0 (<a href="https://redirect.github.com/peter-evans/create-or-update-comment/issues/265">#265</a>)</li> <li><a href="https://github.com/peter-evans/create-or-update-comment/commit/e3645dd16d792dc1461bba740dab47338596a26a"><code>e3645dd</code></a> build(deps): bump chuhlomin/render-template from 1.7 to 1.8 (<a href="https://redirect.github.com/peter-evans/create-or-update-comment/issues/263">#263</a>)</li> <li><a href="https://github.com/peter-evans/create-or-update-comment/commit/23ecd34cbd99b240d449add8c024c9cffd109832"><code>23ecd34</code></a> build(deps-dev): bump eslint-plugin-jest from 27.4.0 to 27.4.2 (<a 
href="https://redirect.github.com/peter-evans/create-or-update-comment/issues/262">#262</a>)</li> <li><a href="https://github.com/peter-evans/create-or-update-comment/commit/c3f58b10816cb33bd0570424df8ff08f77cf0bc0"><code>c3f58b1</code></a> build(deps-dev): bump <code>@​types/node</code> from 18.18.0 to 18.18.3 (<a href="https://redirect.github.com/peter-evans/create-or-update-comment/issues/261">#261</a>)</li> <li><a href="https://github.com/peter-evans/create-or-update-comment/commit/ddff993e3c91296d410ace8836568b0e4aeada34"><code>ddff993</code></a> build(deps-dev): bump eslint-plugin-github from 4.10.0 to 4.10.1 (<a href="https://redirect.github.com/peter-evans/create-or-update-comment/issues/260">#260</a>)</li> <li>Additional commits viewable in <a href="https://github.com/peter-evans/create-or-update-comment/compare/c6c9a1a66007646a28c153e2a8580a5bad27bcfa...23ff15729ef2fc348714a3bb66d2f655ca9066f2">compare view</a></li> </ul> </details> <br /> [![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=peter-evans/create-or-update-comment&package-manager=github_actions&previous-version=3.0.2&new-version=3.1.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) --- <details> <summary>Dependabot commands and options</summary> <br /> You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot merge` will merge this PR after your CI passes on it - `@dependabot squash and merge` will squash and merge this PR after your CI passes on it - `@dependabot cancel merge` will cancel a previously requested merge and block automerging - `@dependabot reopen` will reopen this PR if it is closed - `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually - `@dependabot show <dependency name> ignore conditions` will show all of the ignore conditions of the specified dependency - `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself) </details>
https://api.github.com/repos/psf/black/pulls/3966
2023-10-23T06:14:08Z
2023-10-23T14:42:50Z
2023-10-23T14:42:50Z
2023-10-23T14:42:51Z
207
psf/black
23,638
Should redirect API docs for mongo to a notebook
diff --git a/docs/conf.py b/docs/conf.py index 6bd587d2acba7..97cc30b66d54b 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -115,6 +115,7 @@ "core_modules/supporting_modules/callbacks/root": "/en/stable/module_guides/observability/callbacks/root.html", "core_modules/supporting_modules/evaluation/root": "/en/stable/module_guides/evaluating/root.html", "core_modules/supporting_modules/cost_analysis/root": "/en/stable/understanding/evaluating/cost_analysis/root.html", + "api/llama_index.vector_stores.MongoDBAtlasVectorSearch": "/en/stable/examples/vector_stores/MongoDBAtlasVectorSearch.html", } gtagjs_ids = [
# Description Please include a summary of the change and which issue is fixed. Please also include relevant motivation and context. List any dependencies that are required for this change. Fixes # (issue) ## Type of Change Please delete options that are not relevant. - [ ] Bug fix (non-breaking change which fixes an issue) - [ ] New feature (non-breaking change which adds functionality) - [ ] Breaking change (fix or feature that would cause existing functionality to not work as expected) - [ ] This change requires a documentation update # How Has This Been Tested? Please describe the tests that you ran to verify your changes. Provide instructions so we can reproduce. Please also list any relevant details for your test configuration - [ ] Added new unit/integration tests - [ ] Added new notebook (that tests end-to-end) - [ ] I stared at the code and made sure it makes sense # Suggested Checklist: - [ ] I have performed a self-review of my own code - [ ] I have commented my code, particularly in hard-to-understand areas - [ ] I have made corresponding changes to the documentation - [ ] I have added Google Colab support for the newly added notebooks. - [ ] My changes generate no new warnings - [ ] I have added tests that prove my fix is effective or that my feature works - [ ] New and existing unit tests pass locally with my changes - [ ] I ran `make format; make lint` to appease the lint gods
https://api.github.com/repos/run-llama/llama_index/pulls/11053
2024-02-20T22:04:50Z
2024-02-20T23:45:30Z
2024-02-20T23:45:30Z
2024-02-20T23:45:30Z
177
run-llama/llama_index
5,964
Fix CartPole being made instead of Pendulum
diff --git a/gym/envs/classic_control/pendulum.py b/gym/envs/classic_control/pendulum.py index 1c65ddbd98a..d979b242084 100644 --- a/gym/envs/classic_control/pendulum.py +++ b/gym/envs/classic_control/pendulum.py @@ -64,7 +64,7 @@ class PendulumEnv(gym.Env): `g=10.0`. ``` - gym.make('CartPole-v1', g=9.81) + gym.make('Pendulum-v1', g=9.81) ``` ## Version History
https://api.github.com/repos/openai/gym/pulls/2607
2022-02-11T01:51:03Z
2022-02-11T15:01:28Z
2022-02-11T15:01:28Z
2022-02-11T15:01:29Z
147
openai/gym
5,451
Added txpostgres, py2neo, telephus, txredis
diff --git a/README.md b/README.md index dd0f87834..ed6a944a1 100644 --- a/README.md +++ b/README.md @@ -291,11 +291,15 @@ long, literate-programming-style documentation generator. * [mysql-connector-python](https://pypi.python.org/pypi/mysql-connector-python) - A pure Python MySQL driver from Oracle (in case you don't want or can't install system MySQL library) * [oursql](https://pythonhosted.org/oursql/) - A better MySQL connector for Python with support for native prepared statements and BLOBs. * [psycopg2](http://initd.org/psycopg/) - The most popular PostgreSQL adapter for the Python. + * [txpostgres](http://txpostgres.readthedocs.org/) - Twisted based asynchronous driver for PostgreSQL. * NoSQL Databases * [cassandra-python-driver](https://github.com/datastax/python-driver) - Python driver for Cassandra by Datastax. * [pycassa](https://github.com/pycassa/pycassa) - Python Thrift driver for Cassandra. * [PyMongo](http://docs.mongodb.org/ecosystem/drivers/python/) - The official Python client for MongoDB. * [redis-py](https://github.com/andymccurdy/redis-py) - The Redis Python Client. + * [py2neo](http://book.py2neo.org/) - Python wrapper client for Neo4j's restful interface. + * [telephus](https://github.com/driftx/Telephus) - Twisted based client for Cassandra. + * [txRedis](https://github.com/deldotdr/txRedis) - Twisted based client for Redis. ## ORM
https://api.github.com/repos/vinta/awesome-python/pulls/128
2014-07-10T13:55:24Z
2014-07-12T13:06:42Z
2014-07-12T13:06:42Z
2014-07-12T13:06:42Z
391
vinta/awesome-python
26,962
LogCameraInfo: remove unused variables
diff --git a/selfdrive/loggerd/loggerd.h b/selfdrive/loggerd/loggerd.h index 170f37049d624e..0101a91a5e0198 100644 --- a/selfdrive/loggerd/loggerd.h +++ b/selfdrive/loggerd/loggerd.h @@ -44,8 +44,6 @@ const int SEGMENT_LENGTH = LOGGERD_TEST ? atoi(getenv("LOGGERD_SEGMENT_LENGTH")) struct LogCameraInfo { CameraType type; const char *filename; - const char *frame_packet_name; - const char *encode_idx_name; VisionStreamType stream_type; int frame_width, frame_height; int fps; @@ -63,7 +61,6 @@ const LogCameraInfo cameras_logged[] = { .type = RoadCam, .stream_type = VISION_STREAM_ROAD, .filename = "fcamera.hevc", - .frame_packet_name = "roadCameraState", .fps = MAIN_FPS, .bitrate = MAIN_BITRATE, .is_h265 = true, @@ -77,7 +74,6 @@ const LogCameraInfo cameras_logged[] = { .type = DriverCam, .stream_type = VISION_STREAM_DRIVER, .filename = "dcamera.hevc", - .frame_packet_name = "driverCameraState", .fps = MAIN_FPS, // on EONs, more compressed this way .bitrate = DCAM_BITRATE, .is_h265 = true, @@ -91,7 +87,6 @@ const LogCameraInfo cameras_logged[] = { .type = WideRoadCam, .stream_type = VISION_STREAM_WIDE_ROAD, .filename = "ecamera.hevc", - .frame_packet_name = "wideRoadCameraState", .fps = MAIN_FPS, .bitrate = MAIN_BITRATE, .is_h265 = true,
<!-- Please copy and paste the relevant template --> <!--- ***** Template: Car bug fix ***** **Description** [](A description of the bug and the fix. Also link any relevant issues.) **Verification** [](Explain how you tested this bug fix.) **Route** Route: [a route with the bug fix] --> <!--- ***** Template: Bug fix ***** **Description** [](A description of the bug and the fix. Also link any relevant issues.) **Verification** [](Explain how you tested this bug fix.) --> <!--- ***** Template: Car port ***** **Checklist** - [ ] added to README - [ ] test route added to [test_routes.py](https://github.com/commaai/openpilot/blob/master/selfdrive/test/test_routes.py) - [ ] route with openpilot: - [ ] route with stock system: --> <!--- ***** Template: Refactor ***** **Description** [](A description of the refactor, including the goals it accomplishes.) **Verification** [](Explain how you tested the refactor for regressions.) -->
https://api.github.com/repos/commaai/openpilot/pulls/23142
2021-12-06T13:07:54Z
2021-12-06T13:25:28Z
2021-12-06T13:25:28Z
2021-12-06T13:25:56Z
413
commaai/openpilot
9,138
[twitch:vod] Support links to VoDs from the schedule tab
diff --git a/yt_dlp/extractor/twitch.py b/yt_dlp/extractor/twitch.py
index 9b333f6f675..3b2d2c57d6a 100644
--- a/yt_dlp/extractor/twitch.py
+++ b/yt_dlp/extractor/twitch.py
@@ -194,7 +194,8 @@ class TwitchVodIE(TwitchBaseIE):
                     https?://
                         (?:
                             (?:(?:www|go|m)\.)?twitch\.tv/(?:[^/]+/v(?:ideo)?|videos)/|
-                            player\.twitch\.tv/\?.*?\bvideo=v?
+                            player\.twitch\.tv/\?.*?\bvideo=v?|
+                            www\.twitch\.tv/[^/]+/schedule\?vodID=
                         )
                         (?P<id>\d+)
                     '''
@@ -363,6 +364,9 @@ class TwitchVodIE(TwitchBaseIE):
             'skip_download': True
         },
         'expected_warnings': ['Unable to download JSON metadata: HTTP Error 403: Forbidden']
-    }]
+    }, {
+        'url': 'https://www.twitch.tv/tangotek/schedule?vodID=1822395420',
+        'only_matching': True,
+    }]
 
     def _download_info(self, item_id):
Add support for Twitch URLs of the following format:

```
https://www.twitch.tv/<CHANNEL>/schedule?vodID=<ID>
```

These URLs appear when selecting a past broadcast in the schedule tab and clicking "Copy Link" in the pop-up.

![image](https://github.com/yt-dlp/yt-dlp/assets/320854/f0e3c83d-8035-4af3-aad6-08662e96470d)

<details open><summary>Template</summary>

### Before submitting a *pull request* make sure you have:
- [x] At least skimmed through [contributing guidelines](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#developer-instructions) including [yt-dlp coding conventions](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#yt-dlp-coding-conventions)
- [x] [Searched](https://github.com/yt-dlp/yt-dlp/search?q=is%3Apr&type=Issues) the bugtracker for similar pull requests
- [x] Checked the code with [flake8](https://pypi.python.org/pypi/flake8) and [ran relevant tests](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#developer-instructions)

### In order to be accepted and merged into yt-dlp each piece of code must be in public domain or released under [Unlicense](http://unlicense.org/). Check all of the following options that apply:
- [x] I am the original author of this code and I am willing to release it under [Unlicense](http://unlicense.org/)
- [ ] I am not the original author of this code but it is in public domain or released under [Unlicense](http://unlicense.org/) (provide reliable evidence)

### What is the purpose of your *pull request*?
- [x] Fix or improvement to an extractor (Make sure to add/update tests)
- [ ] New extractor ([Piracy websites will not be accepted](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#is-the-website-primarily-used-for-piracy))
- [ ] Core bug fix/improvement
- [ ] New feature (It is strongly [recommended to open an issue first](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#adding-new-feature-or-making-overarching-changes))

</details>
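For illustration, a condensed sketch of the extended alternation; the real extractor compiles a larger verbose `_VALID_URL` pattern inside `TwitchVodIE`, so this standalone regex is an approximation, not yt-dlp's actual code:

```python
import re

# Condensed form of the _VALID_URL alternation, with the new schedule branch last.
VOD_URL = re.compile(
    r'https?://(?:'
    r'(?:(?:www|go|m)\.)?twitch\.tv/(?:[^/]+/v(?:ideo)?|videos)/|'
    r'player\.twitch\.tv/\?.*?\bvideo=v?|'
    r'www\.twitch\.tv/[^/]+/schedule\?vodID='
    r')(?P<id>\d+)')

for url in ('https://www.twitch.tv/videos/1822395420',
            'https://www.twitch.tv/tangotek/schedule?vodID=1822395420'):
    match = VOD_URL.match(url)
    print(url, '->', match.group('id') if match else None)  # both yield 1822395420
```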
https://api.github.com/repos/yt-dlp/yt-dlp/pulls/7071
2023-05-18T11:23:21Z
2023-05-29T14:30:20Z
2023-05-29T14:30:20Z
2023-05-30T03:04:28Z
298
yt-dlp/yt-dlp
7,544
Revert "replaced PenalizedLoss function with keras alternative"
diff --git a/lib/model/losses.py b/lib/model/losses.py
index 439096fb4b..66b6873310 100644
--- a/lib/model/losses.py
+++ b/lib/model/losses.py
@@ -167,17 +167,47 @@ def extract_image_patches(self, x, ksizes, ssizes, padding='same',
         patches = K.permute_dimensions(patches, [0, 1, 2, 4, 5, 3])
         return patches
 
-
 # <<< START: from Dfaker >>> #
-def PenalizedLoss(mask, loss_func, mask_prop=1.0):
+class PenalizedLoss():  # pylint: disable=too-few-public-methods
     """ Penalized Loss
         from: https://github.com/dfaker/df
     """
-    mask_as_k_inv_prop = 1 - mask_prop
-    mask = mask * mask_prop + mask_as_k_inv_prop
-    mask = K.repeat_elements(mask, 3, axis=3)
-    def inner_loss(y_true, y_pred):
-        return loss_func(y_true * mask, y_pred * mask)
-    return inner_loss
+    def __init__(self, mask, loss_func, mask_prop=1.0):
+        self.mask = mask
+        self.loss_func = loss_func
+        self.mask_prop = mask_prop
+        self.mask_as_k_inv_prop = 1-mask_prop
+
+    def __call__(self, y_true, y_pred):
+        # pylint: disable=invalid-name
+        tro, tgo, tbo = tf.split(y_true, 3, 3)
+        pro, pgo, pbo = tf.split(y_pred, 3, 3)
+
+        tr = tro
+        tg = tgo
+        tb = tbo
+
+        pr = pro
+        pg = pgo
+        pb = pbo
+        m = self.mask
+
+        m = m * self.mask_prop
+        m += self.mask_as_k_inv_prop
+        tr *= m
+        tg *= m
+        tb *= m
+
+        pr *= m
+        pg *= m
+        pb *= m
+
+        y = tf.concat([tr, tg, tb], 3)
+        p = tf.concat([pr, pg, pb], 3)
+
+        # yo = tf.stack([tro,tgo,tbo],3)
+        # po = tf.stack([pro,pgo,pbo],3)
+
+        return self.loss_func(y, p)
 
 # <<< END: from Dfaker >>> #
Reverts deepfakes/faceswap#665

This implementation of Keras Penalized Loss leads to the following tensorflow errors:

```
2019-03-30 23:22:04.305508: E tensorflow/core/grappler/optimizers/dependency_optimizer.cc:666] Iteration = 0, topological sort failed with message: The graph couldn't be sorted in topological order.
2019-03-30 23:22:04.325095: E tensorflow/core/grappler/optimizers/dependency_optimizer.cc:666] Iteration = 1, topological sort failed with message: The graph couldn't be sorted in topological order.
2019-03-30 23:22:04.485177: E tensorflow/core/grappler/optimizers/dependency_optimizer.cc:666] Iteration = 0, topological sort failed with message: The graph couldn't be sorted in topological order.
2019-03-30 23:22:04.499192: E tensorflow/core/grappler/optimizers/dependency_optimizer.cc:666] Iteration = 1, topological sort failed with message: The graph couldn't be sorted in topological order.
```
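For context, both the reverted function and the restored class compute the same mask-weighted loss. A minimal sketch of that shared idea, assuming NHWC tensors and a single-channel mask (an illustrative reduction, not the project's code):

```python
import tensorflow as tf

def masked_loss(mask, loss_func, mask_prop=1.0):
    # Blend the mask toward 1.0 by (1 - mask_prop), apply it to both tensors,
    # then defer to the wrapped loss function.
    def inner(y_true, y_pred):
        m = mask * mask_prop + (1.0 - mask_prop)
        m = tf.tile(m, [1, 1, 1, 3])  # single-channel mask -> RGB channels
        return loss_func(y_true * m, y_pred * m)
    return inner
```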
https://api.github.com/repos/deepfakes/faceswap/pulls/687
2019-03-31T18:28:48Z
2019-03-31T18:29:42Z
2019-03-31T18:29:42Z
2019-04-03T18:17:00Z
576
deepfakes/faceswap
18,595
Add Ambrosia
diff --git a/README.md b/README.md
index 6d3df9eb..99931942 100644
--- a/README.md
+++ b/README.md
@@ -1764,6 +1764,7 @@ be
 * [MLEM](https://github.com/iterative/mlem) - Version and deploy your ML models following GitOps principles
 * [DockerDL](https://github.com/matifali/dockerdl) - Ready to use deeplearning docker images.
 * [Aqueduct](https://github.com/aqueducthq/aqueduct) - Aqueduct enables you to easily define, run, and manage AI & ML tasks on any cloud infrastructure.
+* [Ambrosia](https://github.com/reactorsh/ambrosia) - Ambrosia helps you clean up your LLM datasets using _other_ LLMs.
 
 <a name="books"></a>
 ## Books
Add Ambrosia, a tool for cleaning text datasets.
https://api.github.com/repos/josephmisiti/awesome-machine-learning/pulls/942
2023-05-30T02:56:00Z
2023-05-30T12:59:13Z
2023-05-30T12:59:13Z
2023-05-30T12:59:13Z
198
josephmisiti/awesome-machine-learning
51,736
[launcher] validate passwordless-ssh works when using hostfile launching
diff --git a/deepspeed/launcher/runner.py b/deepspeed/launcher/runner.py
index 2b717d71db75..551d24e0e1d1 100755
--- a/deepspeed/launcher/runner.py
+++ b/deepspeed/launcher/runner.py
@@ -351,7 +351,22 @@ def main(args=None):
                                                   args.exclude)
 
     env = os.environ.copy()
+    # validate that passwordless-ssh is working properly with this hostfile
+    if multi_node_exec:
+        first_host = list(active_resources.keys())[0]
+        try:
+            subprocess.check_call(
+                f'ssh -o PasswordAuthentication=no {first_host} hostname',
+                stderr=subprocess.DEVNULL,
+                stdout=subprocess.DEVNULL,
+                shell=True)
+        except subprocess.CalledProcessError:
+            raise RuntimeError(
+                f"Using hostfile at {args.hostfile} but host={first_host} was not reachable via ssh. If you are running with a single node please remove {args.hostfile} or setup passwordless ssh."
+            )
+
     if not args.master_addr:
+        assert multi_node_exec
         first_host = list(active_resources.keys())[0]
         hostname_cmd = [f"ssh {first_host} hostname -I"]
         result = subprocess.check_output(hostname_cmd, shell=True)
Validate that passwordless ssh is working properly if the user is launching with an active hostfile. This also prevents a confusing password prompt when ssh isn't set up properly.

/cc @awan-10
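Pulled out into a standalone sketch (hypothetical helper name; the PR inlines this logic in `main()`), the probe amounts to:

```python
import subprocess

def check_passwordless_ssh(host):
    # Probe the first hostfile entry with password auth disabled so a broken
    # setup fails fast instead of hanging on an interactive password prompt.
    try:
        subprocess.check_call(
            f"ssh -o PasswordAuthentication=no {host} hostname",
            stdout=subprocess.DEVNULL,
            stderr=subprocess.DEVNULL,
            shell=True)
    except subprocess.CalledProcessError:
        raise RuntimeError(f"host={host} was not reachable via passwordless ssh")
```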
https://api.github.com/repos/microsoft/DeepSpeed/pulls/1832
2022-03-14T21:10:15Z
2022-03-14T21:51:46Z
2022-03-14T21:51:46Z
2022-03-14T21:51:46Z
296
microsoft/DeepSpeed
10,366
[idefics] fix vision's `hidden_act`
diff --git a/src/transformers/models/idefics/configuration_idefics.py b/src/transformers/models/idefics/configuration_idefics.py
index 0d3fa7a589c31..12d710d726dc0 100644
--- a/src/transformers/models/idefics/configuration_idefics.py
+++ b/src/transformers/models/idefics/configuration_idefics.py
@@ -57,7 +57,7 @@ class IdeficsVisionConfig(PretrainedConfig):
             Number of attention heads for each attention layer in the Transformer encoder.
         image_num_channels (`int`, *optional*, defaults to `3`):
             Number of image channels.
-        hidden_act (`str` or `function`, *optional*, defaults to `"quick_gelu"`):
+        hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"selu"` and `"gelu_new"` ``"quick_gelu"` are supported.
         layer_norm_eps (`float`, *optional*, defaults to 1e-5):
@@ -86,7 +86,7 @@ def __init__(
         num_hidden_layers=32,
         num_attention_heads=16,
         num_channels=3,
-        hidden_act="quick_gelu",
+        hidden_act="gelu",
         layer_norm_eps=1e-5,
         attention_dropout=0.0,
         initializer_range=0.02,
Thanks to @rwightman's discovery, this PR fixes the vision config's `hidden_act` default to `gelu`. It looks like we messed things up when splitting the original config into 3 groups during the porting and inherited `clip`'s default config, whereas the model used during training was using `gelu`, as can be seen here: https://huggingface.co/laion/CLIP-ViT-H-14-laion2B-s32B-b79K/blob/main/config.json

Thank you, @rwightman
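An illustrative check (not from the PR) of why the default matters: `gelu` and `quick_gelu` are different functions, so a converted checkpoint silently computes slightly different activations under the wrong default. The `x * sigmoid(1.702 * x)` form below is the usual quick_gelu approximation:

```python
import torch

x = torch.linspace(-3, 3, 7)
gelu = torch.nn.functional.gelu(x)
quick_gelu = x * torch.sigmoid(1.702 * x)  # common quick_gelu approximation
print((gelu - quick_gelu).abs().max())     # non-zero: the two activations differ
```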
https://api.github.com/repos/huggingface/transformers/pulls/25787
2023-08-27T21:56:01Z
2023-08-28T14:37:37Z
2023-08-28T14:37:37Z
2023-08-28T14:38:08Z
341
huggingface/transformers
12,695
Update ML-DL Projects 2022
diff --git a/ML-DL Projects 2022 b/ML-DL Projects 2022
index fa241595..a4683b24 100644
--- a/ML-DL Projects 2022
+++ b/ML-DL Projects 2022
@@ -1,6 +1,7 @@
 Heart_Disease_Preditor
 Boston/California/Banglore_House_Price_Prectictor
 Car_Price/Sales_Predictor
+Cat Image Classifier
 CreditCard_Fraud/Scam_Predictor
 Customer_Segmentation
 Diabetes_Predictor
Hi there,

I've added the Cat Image Classifier project to the ML-DL Projects 2022 list in this repository. The project includes a logistic regression model and a deep neural network model that can recognize cats in images, along with instructions for building and training the models.

GitHub link: https://github.com/muhammadanas0716/Machine-Learning-101/tree/main/Projects/Cat%20Image%20Classifier

Please let me know if you have any feedback or questions about the project. Thank you!
https://api.github.com/repos/josephmisiti/awesome-machine-learning/pulls/926
2023-04-04T17:40:28Z
2023-04-12T00:39:49Z
2023-04-12T00:39:49Z
2023-04-12T00:39:50Z
123
josephmisiti/awesome-machine-learning
52,471
Added SQL injection for bypassing logins.
diff --git a/blns.txt b/blns.txt
index 70ff0d5..4a53cf6 100644
--- a/blns.txt
+++ b/blns.txt
@@ -474,6 +474,7 @@ perl -e 'print "<IMG SRC=java\0script:alert(\"XSS\")>";' > out
 
 1;DROP TABLE users
 1'; DROP TABLE users--
+' OR 1 == 1;
 
 #	Server Code Injection
 #
or other true/false checks that aren't sanitized.
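A hedged illustration (not part of the PR) of the failure mode this string probes. SQLite accepts `==` as equality; the `--` comment below is my own variant of a dialect-specific terminator, added so the payload parses cleanly:

```python
import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE users (name TEXT, password TEXT)")
conn.execute("INSERT INTO users VALUES ('alice', 'secret')")

payload = "' OR 1 == 1 --"  # hypothetical variant of the added naughty string
unsafe = f"SELECT name FROM users WHERE password = '{payload}'"
print(conn.execute(unsafe).fetchall())  # [('alice',)] -- tautology bypasses the check

safe = "SELECT name FROM users WHERE password = ?"
print(conn.execute(safe, (payload,)).fetchall())  # [] -- payload treated as plain data
```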
https://api.github.com/repos/minimaxir/big-list-of-naughty-strings/pulls/54
2015-08-19T02:18:44Z
2015-08-19T03:36:40Z
2015-08-19T03:36:40Z
2015-08-19T03:36:40Z
105
minimaxir/big-list-of-naughty-strings
4,902
Remove ref counting dependencies on ray.get()
diff --git a/python/ray/_raylet.pyx b/python/ray/_raylet.pyx
index 8e9f6541d070e..a1f18591283f5 100644
--- a/python/ray/_raylet.pyx
+++ b/python/ray/_raylet.pyx
@@ -139,6 +139,20 @@ else:
 if PY3:
     from ray.async_compat import sync_to_async
 
+
+def set_internal_config(dict options):
+    cdef:
+        unordered_map[c_string, c_string] c_options
+
+    if options is None:
+        return
+
+    for key, value in options.items():
+        c_options[str(key).encode("ascii")] = str(value).encode("ascii")
+
+    RayConfig.instance().initialize(c_options)
+
+
 cdef int check_status(const CRayStatus& status) nogil except -1:
     if status.ok():
         return 0
diff --git a/python/ray/includes/ray_config.pxd b/python/ray/includes/ray_config.pxd
index fa412f527e67e..2d61c9820c811 100644
--- a/python/ray/includes/ray_config.pxd
+++ b/python/ray/includes/ray_config.pxd
@@ -86,4 +86,4 @@
 
         uint32_t maximum_gcs_deletion_batch_size() const
 
-        void initialize(const unordered_map[c_string, int] &config_map)
+        void initialize(const unordered_map[c_string, c_string] &config_map)
diff --git a/python/ray/services.py b/python/ray/services.py
index 7e55775459e35..3ed49f870a4e5 100644
--- a/python/ray/services.py
+++ b/python/ray/services.py
@@ -1200,10 +1200,12 @@ def start_raylet(redis_address,
         "--object-store-name={} "
         "--raylet-name={} "
         "--redis-address={} "
+        "--config-list={} "
         "--temp-dir={}".format(
             sys.executable, worker_path, node_ip_address, node_manager_port,
             plasma_store_name,
-            raylet_name, redis_address, temp_dir))
+            raylet_name, redis_address, config_str,
+            temp_dir))
     if redis_password:
         start_worker_command += " --redis-password {}".format(redis_password)
 
diff --git a/python/ray/worker.py b/python/ray/worker.py
index 693944cf8b51e..80d4844f024a5 100644
--- a/python/ray/worker.py
+++ b/python/ray/worker.py
@@ -795,7 +795,9 @@ def init(address=None,
         log_to_driver=log_to_driver,
         worker=global_worker,
         driver_object_store_memory=driver_object_store_memory,
-        job_id=job_id)
+        job_id=job_id,
+        internal_config=json.loads(_internal_config)
+        if _internal_config else {})
     for hook in _post_init_hooks:
         hook()
 
@@ -1064,7 +1066,8 @@ def connect(node,
             log_to_driver=False,
             worker=global_worker,
             driver_object_store_memory=None,
-            job_id=None):
+            job_id=None,
+            internal_config=None):
     """Connect this worker to the raylet, to Plasma, and to Redis.
 
     Args:
@@ -1077,6 +1080,8 @@
         driver_object_store_memory: Limit the amount of memory the driver
             can use in the object store when creating objects.
         job_id: The ID of job. If it's None, then we will generate one.
+        internal_config: Dictionary of (str,str) containing internal config
+            options to override the defaults.
     """
     # Do some basic checking to make sure we didn't call ray.init twice.
     error_message = "Perhaps you called ray.init twice by accident?"
@@ -1087,6 +1092,8 @@
     if not faulthandler.is_enabled():
         faulthandler.enable(all_threads=False)
 
+    ray._raylet.set_internal_config(internal_config)
+
     if mode is not LOCAL_MODE:
         # Create a Redis client to primary.
         # The Redis client can safely be shared between threads. However,
diff --git a/python/ray/workers/default_worker.py b/python/ray/workers/default_worker.py
index 2f178a3ad08f3..93f5f22b0a1fa 100644
--- a/python/ray/workers/default_worker.py
+++ b/python/ray/workers/default_worker.py
@@ -3,6 +3,7 @@
 from __future__ import print_function
 
 import argparse
+import json
 
 import ray
 import ray.actor
@@ -55,6 +56,12 @@
     type=str,
    default=ray_constants.LOGGER_FORMAT,
     help=ray_constants.LOGGER_FORMAT_HELP)
+parser.add_argument(
+    "--config-list",
+    required=False,
+    type=str,
+    default=None,
+    help="Override internal config options for the worker process.")
 parser.add_argument(
     "--temp-dir",
     required=False,
@@ -77,6 +84,15 @@
 
     ray.utils.setup_logger(args.logging_level, args.logging_format)
 
+    internal_config = {}
+    if args.config_list is not None:
+        config_list = args.config_list.split(",")
+        if len(config_list) > 1:
+            i = 0
+            while i < len(config_list):
+                internal_config[config_list[i]] = config_list[i + 1]
+                i += 2
+
     ray_params = RayParams(
         node_ip_address=args.node_ip_address,
         node_manager_port=args.node_manager_port,
@@ -86,7 +102,9 @@
         raylet_socket_name=args.raylet_name,
         temp_dir=args.temp_dir,
         load_code_from_local=args.load_code_from_local,
-        use_pickle=args.use_pickle)
+        use_pickle=args.use_pickle,
+        _internal_config=json.dumps(internal_config),
+    )
 
     node = ray.node.Node(
         ray_params,
@@ -95,5 +113,6 @@
         spawn_reaper=False,
         connect_only=True)
     ray.worker._global_node = node
-    ray.worker.connect(node, mode=ray.WORKER_MODE)
+    ray.worker.connect(
+        node, mode=ray.WORKER_MODE, internal_config=internal_config)
     ray.worker.global_worker.main_loop()
diff --git a/src/ray/common/ray_config.h b/src/ray/common/ray_config.h
index 544868552e2eb..0f5685ae34084 100644
--- a/src/ray/common/ray_config.h
+++ b/src/ray/common/ray_config.h
@@ -43,21 +43,15 @@ class RayConfig {
   }
 
   void initialize(const std::unordered_map<std::string, std::string> &config_map) {
-    RAY_CHECK(!initialized_);
     for (auto const &pair : config_map) {
       // We use a big chain of if else statements because C++ doesn't allow
       // switch statements on strings.
 #include "ray_config_def.h"
       RAY_LOG(FATAL) << "Received unexpected config parameter " << pair.first;
     }
-    initialized_ = true;
   }
 
   /// ---------------------------------------------------------------------
 #undef RAY_CONFIG
-
-  /// Whether the initialization of the instance has been called before.
-  /// The RayConfig instance can only (and must) be initialized once.
-  bool initialized_ = false;
 };
 
 // clang-format on
diff --git a/src/ray/core_worker/core_worker.cc b/src/ray/core_worker/core_worker.cc
index 7b43ee81c7963..2e78112e1db1b 100644
--- a/src/ray/core_worker/core_worker.cc
+++ b/src/ray/core_worker/core_worker.cc
@@ -1,11 +1,11 @@
+#include "ray/core_worker/core_worker.h"
+
 #include <cstdlib>
 
 #include "boost/fiber/all.hpp"
-
 #include "ray/common/ray_config.h"
 #include "ray/common/task/task_util.h"
 #include "ray/core_worker/context.h"
-#include "ray/core_worker/core_worker.h"
 #include "ray/core_worker/transport/direct_actor_transport.h"
 #include "ray/core_worker/transport/raylet_transport.h"
 
@@ -269,14 +269,16 @@ void CoreWorker::SetCurrentTaskId(const TaskID &task_id) {
 }
 
 void CoreWorker::ReportActiveObjectIDs() {
-  std::unordered_set<ObjectID> active_object_ids =
-      reference_counter_->GetAllInScopeObjectIDs();
-  RAY_LOG(DEBUG) << "Sending " << active_object_ids.size() << " object IDs to raylet.";
-  auto max_active = RayConfig::instance().raylet_max_active_object_ids();
-  if (max_active && active_object_ids.size() > max_active) {
-    RAY_LOG(INFO) << active_object_ids.size() << " object IDs are currently in scope.";
+  std::unordered_set<ObjectID> active_object_ids;
+  size_t max_active = RayConfig::instance().raylet_max_active_object_ids();
+  if (max_active > 0) {
+    active_object_ids = reference_counter_->GetAllInScopeObjectIDs();
+    if (active_object_ids.size() > max_active) {
+      RAY_LOG(INFO) << active_object_ids.size() << " object IDs are currently in scope.";
+    }
   }
+  RAY_LOG(DEBUG) << "Sending " << active_object_ids.size() << " object IDs to raylet.";
 
   if (!local_raylet_client_->ReportActiveObjectIDs(active_object_ids).ok()) {
     RAY_LOG(ERROR) << "Raylet connection failed. Shutting down.";
     Shutdown();
@@ -434,6 +436,12 @@ Status CoreWorker::Get(const std::vector<ObjectID> &ids, const int64_t timeout_m
         // object.
         will_throw_exception = true;
       }
+      // If we got the result for this plasma ObjectID, the task that created it must
+      // have finished. Therefore, we can safely remove its reference counting
+      // dependencies.
+      if (!ids[i].IsDirectCallType()) {
+        RemoveObjectIDDependencies(ids[i]);
+      }
     } else {
       missing_result = true;
     }
diff --git a/src/ray/core_worker/core_worker.h b/src/ray/core_worker/core_worker.h
index 16c024fe1e466..f4c196ee512e5 100644
--- a/src/ray/core_worker/core_worker.h
+++ b/src/ray/core_worker/core_worker.h
@@ -2,7 +2,6 @@
 #define RAY_CORE_WORKER_CORE_WORKER_H
 
 #include "absl/container/flat_hash_map.h"
-
 #include "ray/common/buffer.h"
 #include "ray/core_worker/actor_handle.h"
 #include "ray/core_worker/common.h"
@@ -469,6 +468,17 @@
                          std::vector<std::shared_ptr<RayObject>> *args,
                          std::vector<ObjectID> *arg_reference_ids);
 
+  /// Remove reference counting dependencies of this object ID.
+  ///
+  /// \param[in] object_id The object whose dependencies should be removed.
+  void RemoveObjectIDDependencies(const ObjectID &object_id) {
+    std::vector<ObjectID> deleted;
+    reference_counter_->RemoveDependencies(object_id, &deleted);
+    if (ref_counting_enabled_) {
+      memory_store_->Delete(deleted);
+    }
+  }
+
   /// Type of this worker (i.e., DRIVER or WORKER).
   const WorkerType worker_type_;
 
diff --git a/src/ray/core_worker/reference_count.cc b/src/ray/core_worker/reference_count.cc
index 50c29400a1b61..f15c7d6a6c9ff 100644
--- a/src/ray/core_worker/reference_count.cc
+++ b/src/ray/core_worker/reference_count.cc
@@ -52,6 +52,23 @@ void ReferenceCounter::RemoveLocalReference(const ObjectID &object_id,
   RemoveReferenceRecursive(object_id, deleted);
 }
 
+void ReferenceCounter::RemoveDependencies(const ObjectID &object_id,
+                                          std::vector<ObjectID> *deleted) {
+  absl::MutexLock lock(&mutex_);
+  auto entry = object_id_refs_.find(object_id);
+  if (entry == object_id_refs_.end()) {
+    RAY_LOG(WARNING) << "Tried to remove dependencies for nonexistent object ID: "
+                     << object_id;
+    return;
+  }
+  if (entry->second.dependencies) {
+    for (const ObjectID &pending_task_object_id : *entry->second.dependencies) {
+      RemoveReferenceRecursive(pending_task_object_id, deleted);
+    }
+    entry->second.dependencies = nullptr;
+  }
+}
+
 void ReferenceCounter::RemoveReferenceRecursive(const ObjectID &object_id,
                                                 std::vector<ObjectID> *deleted) {
   auto entry = object_id_refs_.find(object_id);
diff --git a/src/ray/core_worker/reference_count.h b/src/ray/core_worker/reference_count.h
index d4d0b13a763d5..60000a5818c9d 100644
--- a/src/ray/core_worker/reference_count.h
+++ b/src/ray/core_worker/reference_count.h
@@ -4,7 +4,6 @@
 #include "absl/base/thread_annotations.h"
 #include "absl/container/flat_hash_map.h"
 #include "absl/synchronization/mutex.h"
-
 #include "ray/common/id.h"
 #include "ray/protobuf/common.pb.h"
 #include "ray/util/logging.h"
@@ -30,11 +29,19 @@ class ReferenceCounter {
   /// zero, it will be erased from the map and the reference count for all of its
   /// dependencies will be decreased be one.
   ///
-  /// \param[in] object_id The object to to decrement the count for.
+  /// \param[in] object_id The object to decrement the count for.
   /// \param[out] deleted List to store objects that hit zero ref count.
   void RemoveLocalReference(const ObjectID &object_id, std::vector<ObjectID> *deleted)
       LOCKS_EXCLUDED(mutex_);
 
+  /// Remove any references to dependencies that this object may have. This does *not*
+  /// decrease the object's own reference count.
+  ///
+  /// \param[in] object_id The object whose dependencies should be removed.
+  /// \param[out] deleted List to store objects that hit zero ref count.
+  void RemoveDependencies(const ObjectID &object_id, std::vector<ObjectID> *deleted)
+      LOCKS_EXCLUDED(mutex_);
+
   /// Add an object that we own. The object may depend on other objects.
   /// Dependencies for each ObjectID must be set at most once. The direct
   /// reference count for the ObjectID is set to zero and the reference count
diff --git a/src/ray/core_worker/reference_count_test.cc b/src/ray/core_worker/reference_count_test.cc
index 288cfdc62b100..384768d2d9298 100644
--- a/src/ray/core_worker/reference_count_test.cc
+++ b/src/ray/core_worker/reference_count_test.cc
@@ -1,8 +1,9 @@
+#include "ray/core_worker/reference_count.h"
+
 #include <vector>
 
 #include "gtest/gtest.h"
 #include "ray/common/ray_object.h"
-#include "ray/core_worker/reference_count.h"
 #include "ray/core_worker/store_provider/memory_store/memory_store.h"
 
 namespace ray {
@@ -141,6 +142,49 @@ TEST_F(ReferenceCountTest, TestRecursiveDependencies) {
   ASSERT_EQ(out.size(), 4);
 }
 
+TEST_F(ReferenceCountTest, TestRemoveDependenciesOnly) {
+  std::vector<ObjectID> out;
+  ObjectID id1 = ObjectID::FromRandom();
+  ObjectID id2 = ObjectID::FromRandom();
+  ObjectID id3 = ObjectID::FromRandom();
+  ObjectID id4 = ObjectID::FromRandom();
+
+  std::shared_ptr<std::vector<ObjectID>> deps2 =
+      std::make_shared<std::vector<ObjectID>>();
+  deps2->push_back(id3);
+  deps2->push_back(id4);
+  rc->AddOwnedObject(id2, TaskID::Nil(), rpc::Address(), deps2);
+
+  std::shared_ptr<std::vector<ObjectID>> deps1 =
+      std::make_shared<std::vector<ObjectID>>();
+  deps1->push_back(id2);
+  rc->AddOwnedObject(id1, TaskID::Nil(), rpc::Address(), deps1);
+
+  rc->AddLocalReference(id1);
+  rc->AddLocalReference(id2);
+  rc->AddLocalReference(id4);
+  ASSERT_EQ(rc->NumObjectIDsInScope(), 4);
+
+  rc->RemoveDependencies(id2, &out);
+  ASSERT_EQ(rc->NumObjectIDsInScope(), 3);
+  ASSERT_EQ(out.size(), 1);
+  rc->RemoveDependencies(id1, &out);
+  ASSERT_EQ(rc->NumObjectIDsInScope(), 3);
+  ASSERT_EQ(out.size(), 1);
+
+  rc->RemoveLocalReference(id1, &out);
+  ASSERT_EQ(rc->NumObjectIDsInScope(), 2);
+  ASSERT_EQ(out.size(), 2);
+
+  rc->RemoveLocalReference(id2, &out);
+  ASSERT_EQ(rc->NumObjectIDsInScope(), 1);
+  ASSERT_EQ(out.size(), 3);
+
+  rc->RemoveLocalReference(id4, &out);
+  ASSERT_EQ(rc->NumObjectIDsInScope(), 0);
+  ASSERT_EQ(out.size(), 4);
+}
+
 // Tests that we can get the owner address correctly for objects that we own,
 // objects that we borrowed via a serialized object ID, and objects whose
 // origin we do not know.
## Why are these changes needed?

Long chains of task dependencies can cause the reference counter to grow indefinitely, as revealed by `test_many_tasks`.

## Related issue number

## Checks

- [x] I've run `scripts/format.sh` to lint the changes in this PR.
- [x] I've included any doc changes needed for https://ray.readthedocs.io/en/latest/.
- [ ] I've made sure the tests are passing. Note that there might be a few flaky tests, see the recent failure rates at https://ray-travis-tracker.herokuapp.com/.
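A toy Python model (not Ray's C++ implementation) of the idea: once `ray.get()` has returned a plasma object's value, the task that created it has finished, so the references that object held on its arguments can be dropped, which keeps long task chains from pinning every upstream ID in scope:

```python
refs = {}  # object_id -> {"count": int, "deps": list of object_ids}

def add_owned(oid, deps=()):
    refs[oid] = {"count": 0, "deps": list(deps)}
    for dep in deps:
        refs[dep]["count"] += 1

def remove_ref(oid, deleted):
    entry = refs[oid]
    entry["count"] -= 1
    if entry["count"] <= 0:
        deleted.append(oid)
        for dep in entry["deps"]:
            remove_ref(dep, deleted)
        del refs[oid]

def remove_dependencies(oid, deleted):
    # Mirrors ReferenceCounter::RemoveDependencies: drop references this object
    # holds on its pending-task arguments without touching its own count.
    for dep in refs[oid]["deps"]:
        remove_ref(dep, deleted)
    refs[oid]["deps"] = []

add_owned("a")
add_owned("b", deps=["a"])     # b was produced by a task that consumed a
out = []
remove_dependencies("b", out)  # as if ray.get(b) completed
print(out)                     # ['a'] -- the upstream ID is released
```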
https://api.github.com/repos/ray-project/ray/pulls/6412
2019-12-10T05:12:34Z
2019-12-11T02:11:35Z
2019-12-11T02:11:35Z
2019-12-11T05:54:09Z
3,924
ray-project/ray
19,370