title | diff | body | url | created_at | closed_at | merged_at | updated_at | diff_len | repo_name | __index_level_0__
|---|---|---|---|---|---|---|---|---|---|---|
TST: Fix broken test cases where Timedelta/Timestamp raise
|
diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py
index 08176af2b326d..5801384bf8db9 100644
--- a/pandas/core/dtypes/cast.py
+++ b/pandas/core/dtypes/cast.py
@@ -350,14 +350,21 @@ def maybe_promote(dtype, fill_value=np.nan):
# returns tuple of (dtype, fill_value)
if issubclass(dtype.type, np.datetime64):
- fill_value = tslibs.Timestamp(fill_value).to_datetime64()
+ try:
+ fill_value = tslibs.Timestamp(fill_value).to_datetime64()
+ except (TypeError, ValueError):
+ dtype = np.dtype(np.object_)
elif issubclass(dtype.type, np.timedelta64):
- fv = tslibs.Timedelta(fill_value)
- if fv is NaT:
- # NaT has no `to_timedelta6` method
- fill_value = np.timedelta64("NaT", "ns")
+ try:
+ fv = tslibs.Timedelta(fill_value)
+ except ValueError:
+ dtype = np.dtype(np.object_)
else:
- fill_value = fv.to_timedelta64()
+ if fv is NaT:
+ # NaT has no `to_timedelta64` method
+ fill_value = np.timedelta64("NaT", "ns")
+ else:
+ fill_value = fv.to_timedelta64()
elif is_datetime64tz_dtype(dtype):
if isna(fill_value):
fill_value = NaT
diff --git a/pandas/tests/dtypes/cast/test_promote.py b/pandas/tests/dtypes/cast/test_promote.py
index cf7a168074e9e..1b7de9b20f42f 100644
--- a/pandas/tests/dtypes/cast/test_promote.py
+++ b/pandas/tests/dtypes/cast/test_promote.py
@@ -272,10 +272,6 @@ def test_maybe_promote_any_with_bool(any_numpy_dtype_reduced, box):
pytest.xfail("falsely upcasts to object")
if boxed and dtype not in (str, object) and box_dtype is None:
pytest.xfail("falsely upcasts to object")
- if not boxed and dtype.kind == "M":
- pytest.xfail("raises error")
- if not boxed and dtype.kind == "m":
- pytest.xfail("raises error")
# filling anything but bool with bool casts to object
expected_dtype = np.dtype(object) if dtype != bool else dtype
@@ -348,8 +344,6 @@ def test_maybe_promote_any_with_datetime64(
or (box_dtype is None and is_datetime64_dtype(type(fill_value)))
):
pytest.xfail("mix of lack of upcasting, resp. wrong missing value")
- if not boxed and is_timedelta64_dtype(dtype):
- pytest.xfail("raises error")
# special case for box_dtype
box_dtype = np.dtype(datetime64_dtype) if box_dtype == "dt_dtype" else box_dtype
@@ -490,9 +484,7 @@ def test_maybe_promote_any_numpy_dtype_with_datetimetz(
fill_dtype = DatetimeTZDtype(tz=tz_aware_fixture)
boxed, box_dtype = box # read from parametrized fixture
- if dtype.kind == "m" and not boxed:
- pytest.xfail("raises error")
- elif dtype.kind == "M" and not boxed:
+ if dtype.kind == "M" and not boxed:
pytest.xfail("Comes back as M8 instead of object")
fill_value = pd.Series([fill_value], dtype=fill_dtype)[0]
@@ -549,8 +541,6 @@ def test_maybe_promote_any_with_timedelta64(
else:
if boxed and box_dtype is None and is_timedelta64_dtype(type(fill_value)):
pytest.xfail("does not upcast correctly")
- if not boxed and is_datetime64_dtype(dtype):
- pytest.xfail("raises error")
# special case for box_dtype
box_dtype = np.dtype(timedelta64_dtype) if box_dtype == "td_dtype" else box_dtype
@@ -622,9 +612,6 @@ def test_maybe_promote_any_with_string(any_numpy_dtype_reduced, string_dtype, bo
fill_dtype = np.dtype(string_dtype)
boxed, box_dtype = box # read from parametrized fixture
- if is_datetime_or_timedelta_dtype(dtype) and box_dtype != object:
- pytest.xfail("does not upcast or raises")
-
# create array of given dtype
fill_value = "abc"
@@ -678,9 +665,6 @@ def test_maybe_promote_any_with_object(any_numpy_dtype_reduced, object_dtype, bo
dtype = np.dtype(any_numpy_dtype_reduced)
boxed, box_dtype = box # read from parametrized fixture
- if not boxed and is_datetime_or_timedelta_dtype(dtype):
- pytest.xfail("raises error")
-
# create array of object dtype from a scalar value (i.e. passing
# dtypes.common.is_scalar), which can however not be cast to int/float etc.
fill_value = pd.DateOffset(1)
|
Orthogonal to #28725, but will need to be rebased after that is merged.
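A minimal standalone sketch of the fallback pattern the diff introduces (`np.datetime64` stands in here for the pandas-internal `tslibs.Timestamp`; this is not pandas' actual code path):

```python
import numpy as np

def promote_dt64(dtype, fill_value):
    # Mirror of the patched branch: if the fill value cannot be
    # interpreted as a datetime, upcast the dtype to object instead
    # of letting the TypeError/ValueError escape (pre-patch behavior).
    try:
        fill_value = np.datetime64(fill_value, "ns")
    except (TypeError, ValueError):
        dtype = np.dtype(np.object_)
    return dtype, fill_value

print(promote_dt64(np.dtype("M8[ns]"), "2019-10-01"))  # stays datetime64
print(promote_dt64(np.dtype("M8[ns]"), "not-a-date"))  # falls back to object
```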
|
https://api.github.com/repos/pandas-dev/pandas/pulls/28729
|
2019-10-01T15:30:13Z
|
2019-10-02T11:55:36Z
|
2019-10-02T11:55:36Z
|
2019-10-02T13:27:34Z
| 1,149
|
pandas-dev/pandas
| 44,916
|
Add deprecation warning re unset namespace in k8s hook
|
diff --git a/airflow/providers/cncf/kubernetes/CHANGELOG.rst b/airflow/providers/cncf/kubernetes/CHANGELOG.rst
index 6a0c5edb2962d..b379dc45be43f 100644
--- a/airflow/providers/cncf/kubernetes/CHANGELOG.rst
+++ b/airflow/providers/cncf/kubernetes/CHANGELOG.rst
@@ -39,6 +39,13 @@ Features
* KubernetsPodOperator argument ``namespace`` is now optional. If not supplied via KPO param or pod template file or full pod spec, then we'll check the airflow conn,
then if in a k8s pod, try to infer the namespace from the container, then finally will use the ``default`` namespace.
+Deprecations
+~~~~~~~~~~~~
+
+* In ``KubernetesHook.get_namespace``, if a connection is defined but a namespace isn't set, we currently return 'default'; this behavior is deprecated. In the next release, we'll return ``None``.
+
+
+
4.4.0
.....
diff --git a/airflow/providers/cncf/kubernetes/hooks/kubernetes.py b/airflow/providers/cncf/kubernetes/hooks/kubernetes.py
index b22cf58085004..27064e6b5abe3 100644
--- a/airflow/providers/cncf/kubernetes/hooks/kubernetes.py
+++ b/airflow/providers/cncf/kubernetes/hooks/kubernetes.py
@@ -72,6 +72,8 @@ class KubernetesHook(BaseHook):
conn_type = "kubernetes"
hook_name = "Kubernetes Cluster Connection"
+ DEFAULT_NAMESPACE = "default"
+
@staticmethod
def get_connection_form_widgets() -> dict[str, Any]:
"""Returns connection widgets to add to connection form"""
@@ -268,8 +270,7 @@ def create_custom_object(
:param namespace: kubernetes namespace
"""
api = client.CustomObjectsApi(self.api_client)
- if namespace is None:
- namespace = self.get_namespace()
+ namespace = namespace or self._get_namespace() or self.DEFAULT_NAMESPACE
if isinstance(body, str):
body_dict = _load_body_to_dict(body)
else:
@@ -308,8 +309,7 @@ def get_custom_object(
:param namespace: kubernetes namespace
"""
api = client.CustomObjectsApi(self.api_client)
- if namespace is None:
- namespace = self.get_namespace()
+ namespace = namespace or self._get_namespace() or self.DEFAULT_NAMESPACE
try:
response = api.get_namespaced_custom_object(
group=group, version=version, namespace=namespace, plural=plural, name=name
@@ -319,9 +319,32 @@ def get_custom_object(
raise AirflowException(f"Exception when calling -> get_custom_object: {e}\n")
def get_namespace(self) -> str | None:
- """Returns the namespace that defined in the connection"""
+ """
+ Returns the namespace defined in the connection or 'default'.
+
+ TODO: in provider version 6.0, return None when namespace not defined in connection
+ """
+ namespace = self._get_namespace()
+ if self.conn_id and not namespace:
+ warnings.warn(
+ "Airflow connection defined but namespace is not set; returning 'default'. In "
+ "cncf.kubernetes provider version 6.0 we will return None when namespace is "
+ "not defined in the connection so that it's clear whether user intends 'default' or "
+ "whether namespace is unset (which is required in order to apply precedence logic in "
+ "KubernetesPodOperator).",
+ DeprecationWarning,
+ )
+ return "default"
+ return namespace
+
+ def _get_namespace(self) -> str | None:
+ """
+ Returns the namespace that defined in the connection
+
+ TODO: in provider version 6.0, get rid of this method and make it the behavior of get_namespace.
+ """
if self.conn_id:
- return self._get_field("namespace") or "default"
+ return self._get_field("namespace")
return None
def get_pod_log_stream(
@@ -344,7 +367,7 @@ def get_pod_log_stream(
self.core_v1_client.read_namespaced_pod_log,
name=pod_name,
container=container,
- namespace=namespace if namespace else self.get_namespace(),
+ namespace=namespace or self._get_namespace() or self.DEFAULT_NAMESPACE,
),
)
@@ -365,7 +388,7 @@ def get_pod_logs(
name=pod_name,
container=container,
_preload_content=False,
- namespace=namespace if namespace else self.get_namespace(),
+ namespace=namespace or self._get_namespace() or self.DEFAULT_NAMESPACE,
)
diff --git a/tests/providers/cncf/kubernetes/hooks/test_kubernetes.py b/tests/providers/cncf/kubernetes/hooks/test_kubernetes.py
index c8bbef106fc80..e15648a5e9816 100644
--- a/tests/providers/cncf/kubernetes/hooks/test_kubernetes.py
+++ b/tests/providers/cncf/kubernetes/hooks/test_kubernetes.py
@@ -32,11 +32,16 @@
from airflow.providers.cncf.kubernetes.hooks.kubernetes import KubernetesHook
from airflow.utils import db
from tests.test_utils.db import clear_db_connections
+from tests.test_utils.providers import get_provider_min_airflow_version
KUBE_CONFIG_PATH = os.getenv("KUBECONFIG", "~/.kube/config")
HOOK_MODULE = "airflow.providers.cncf.kubernetes.hooks.kubernetes"
+class DeprecationRemovalRequired(AirflowException):
+ ...
+
+
class TestKubernetesHook:
@classmethod
def setup_class(cls) -> None:
@@ -304,6 +309,12 @@ def test_default_kube_config_connection(self, mock_kube_config_merger, mock_kube
def test_get_namespace(self, conn_id, expected):
hook = KubernetesHook(conn_id=conn_id)
assert hook.get_namespace() == expected
+ if get_provider_min_airflow_version("apache-airflow-providers-cncf-kubernetes") >= (6, 0):
+ raise DeprecationRemovalRequired(
+ "You must update get_namespace so that if namespace not set "
+ "in the connection, then None is returned. To do so, remove get_namespace "
+ "and rename _get_namespace to get_namespace."
+ )
@patch("kubernetes.config.kube_config.KubeConfigLoader")
@patch("kubernetes.config.kube_config.KubeConfigMerger")
|
Currently, when a k8s connection is defined but the namespace is not set, we return 'default'. This is not good behavior because e.g. in KPO we aren't able to tell whether the user explicitly set the namespace or whether it was just inserted as a fallback -- something that's important for applying precedence.
So we deprecate the return of 'default'. In the next major release, we'll just return None.
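The resulting precedence chain, as a tiny standalone sketch (`resolve_namespace` is a hypothetical helper written for illustration, not part of the provider):

```python
from __future__ import annotations

DEFAULT_NAMESPACE = "default"

def resolve_namespace(explicit: str | None, conn_namespace: str | None) -> str:
    # An explicitly passed namespace wins, then the one from the Airflow
    # connection; only fall back to "default" when both are unset. After
    # the deprecation, an unset connection namespace surfaces as None
    # here instead of masquerading as "default".
    return explicit or conn_namespace or DEFAULT_NAMESPACE

assert resolve_namespace("team-a", "prod") == "team-a"
assert resolve_namespace(None, "prod") == "prod"
assert resolve_namespace(None, None) == "default"
```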
|
https://api.github.com/repos/apache/airflow/pulls/27202
|
2022-10-22T19:46:48Z
|
2022-10-26T22:10:05Z
|
2022-10-26T22:10:05Z
|
2022-10-26T22:10:06Z
| 1,434
|
apache/airflow
| 14,916
|
[AIRFLOW-3834] Remove dagbag from /log
|
diff --git a/airflow/www/views.py b/airflow/www/views.py
index fb77aa9483047..2750c5005dfe4 100644
--- a/airflow/www/views.py
+++ b/airflow/www/views.py
@@ -600,7 +600,7 @@ def log(self, session=None):
execution_date = request.args.get('execution_date')
dttm = pendulum.parse(execution_date)
form = DateTimeForm(data={'execution_date': dttm})
- dag = dagbag.get_dag(dag_id)
+ dag_model = DagModel.get_dagmodel(dag_id)
ti = session.query(models.TaskInstance).filter(
models.TaskInstance.dag_id == dag_id,
@@ -617,8 +617,8 @@ def log(self, session=None):
root = request.args.get('root', '')
return self.render(
'airflow/ti_log.html',
- logs=logs, dag=dag, title="Log by attempts",
- dag_id=dag.dag_id, task_id=task_id,
+ logs=logs, dag=dag_model, title="Log by attempts",
+ dag_id=dag_id, task_id=task_id,
execution_date=execution_date, form=form,
root=root)
diff --git a/tests/www/test_views.py b/tests/www/test_views.py
index e73b67e6517e6..065a563d8fd2e 100644
--- a/tests/www/test_views.py
+++ b/tests/www/test_views.py
@@ -624,6 +624,7 @@ def setUp(self):
from airflow.www.views import dagbag
dag = DAG(self.DAG_ID, start_date=self.DEFAULT_DATE)
+ dag.sync_to_db()
task = DummyOperator(task_id=self.TASK_ID, dag=dag)
dagbag.bag_dag(dag, parent_dag=dag, root_dag=dag)
with create_session() as session:
|
Make sure you have checked _all_ steps below.
### Jira
- [x] My PR addresses the following [Airflow Jira](https://issues.apache.org/jira/browse/AIRFLOW/) issues and references them in the PR title.
- https://issues.apache.org/jira/browse/AIRFLOW-3834
### Description
Remove DagBag from the /log endpoint
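The gist of the swap, as illustrative stubs (bodies are placeholders; `DagModel.get_dagmodel` is the real method the diff switches to):

```python
class DagBag:
    def get_dag(self, dag_id):
        """Old path: parses and executes DAG files on every request."""

class DagModel:
    @classmethod
    def get_dagmodel(cls, dag_id):
        """New path: a single ORM query against the metadata database."""
```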
### Tests
- [x] My PR adds the following unit tests __OR__ does not need testing for this extremely good reason:
### Commits
- [x] My commits all reference Jira issues in their subject lines, and I have squashed multiple commits if they address the same issue. In addition, my commits follow the guidelines from "[How to write a good git commit message](http://chris.beams.io/posts/git-commit/)":
1. Subject is separated from body by a blank line
1. Subject is limited to 50 characters (not including Jira issue reference)
1. Subject does not end with a period
1. Subject uses the imperative mood ("add", not "adding")
1. Body wraps at 72 characters
1. Body explains "what" and "why", not "how"
### Documentation
- [x] In case of new functionality, my PR adds documentation that describes how to use it.
- When adding new operators/hooks/sensors, the autoclass documentation generation needs to be added.
### Code Quality
- [x] Passes `flake8`
|
https://api.github.com/repos/apache/airflow/pulls/4841
|
2019-03-05T16:25:08Z
|
2019-03-07T15:06:05Z
|
2019-03-07T15:06:05Z
|
2019-03-07T15:06:05Z
| 428
|
apache/airflow
| 14,034
|
Reorganize MoE news and tutorials.
|
diff --git a/README.md b/README.md
index ed55a2c6e7cf..7bc63d942800 100755
--- a/README.md
+++ b/README.md
@@ -13,16 +13,16 @@ Remove until pypi issue is resolved: https://status.python.org/incidents/2jj696s
[](https://pepy.tech/project/deepspeed)
-->
## Latest News
-* [2021/12/09] [DeepSpeed-MoE for NLG: Reducing the training cost of language models by 5 times](https://www.deepspeed.ai/news/2021/12/09/deepspeed-moe-nlg.html)
- * [2021/08/18] [DeepSpeed powers 8x larger MoE model training with high performance](https://www.microsoft.com/en-us/research/blog/deepspeed-powers-8x-larger-moe-model-training-with-high-performance/)
- * [Mixture of Experts (MoE) tutorial](https://www.deepspeed.ai/tutorials/mixture-of-experts/).
+* [2022/1/19] [DeepSpeed: Advancing MoE inference and training to power next-generation AI scale](https://www.microsoft.com/en-us/research/blog/deepspeed-advancing-moe-inference-and-training-to-power-next-generation-ai-scale/)
* [Mixture of Experts (MoE) for NLG tutorial](https://www.deepspeed.ai/tutorials/mixture-of-experts-nlg/).
+ * [Mixture of Experts (MoE) Inference tutorial](https://www.deepspeed.ai/tutorials/moe-inference-tutorial).
* [2021/11/15] [Autotuning: Automatically discover the optimal DeepSpeed configuration that delivers good training speed](https://www.deepspeed.ai/news/2021/11/15/autotuning.html)
* [2021/10/11] [Using DeepSpeed and Megatron to Train Megatron-Turing NLG 530B, the World’s Largest and Most Powerful Generative Language Model](https://www.microsoft.com/en-us/research/blog/using-deepspeed-and-megatron-to-train-megatron-turing-nlg-530b-the-worlds-largest-and-most-powerful-generative-language-model/)
* Read more on how to [train large models with DeepSpeed](https://www.deepspeed.ai/tutorials/large-models-w-deepspeed/)
+* [2021/08/18] [DeepSpeed powers 8x larger MoE model training with high performance](https://www.microsoft.com/en-us/research/blog/deepspeed-powers-8x-larger-moe-model-training-with-high-performance/)
* [2021/08/16] [Curriculum learning: a regularization method for stable and 3.3x faster GPT-2 pre-training with 8x/4x larger batch size/learning rate](https://www.deepspeed.ai/tutorials/curriculum-learning/)
-### DeepSpeed is hiring, [come join us!](https://careers.microsoft.com/us/en/search-results?keywords=deepspeed%20open%20source)
+### DeepSpeed is hiring, [come join us!](https://careers.microsoft.com/us/en/search-results?keywords=http:%2F%2Fdeepspeed.ai)
---
[DeepSpeed](https://www.deepspeed.ai/) is a deep learning optimization
diff --git a/docs/_config.yml b/docs/_config.yml
index 31d29afb3f7b..87cc953255fe 100644
--- a/docs/_config.yml
+++ b/docs/_config.yml
@@ -91,4 +91,4 @@ breadcrumbs: true
press_release_v3: https://www.microsoft.com/en-us/research/blog/deepspeed-extreme-scale-model-training-for-everyone/
press_release_v5: https://www.microsoft.com/en-us/research/blog/deepspeed-powers-8x-larger-moe-model-training-with-high-performance/
-press_release_v6: https://www.microsoft.com
+press_release_v6: https://www.microsoft.com/en-us/research/blog/deepspeed-advancing-moe-inference-and-training-to-power-next-generation-ai-scale/
diff --git a/docs/_posts/2022-01-19-moe-inference.md b/docs/_posts/2022-01-19-moe-inference.md
new file mode 100644
index 000000000000..88046f72c6fa
--- /dev/null
+++ b/docs/_posts/2022-01-19-moe-inference.md
@@ -0,0 +1,9 @@
+---
+layout: single
+title: "DeepSpeed: Advancing MoE inference and training to power next-generation AI scale"
+excerpt: ""
+categories: news
+link: https://www.microsoft.com/en-us/research/blog/deepspeed-advancing-moe-inference-and-training-to-power-next-generation-ai-scale/
+new_post: true
+date: 2022-01-19 00:00:00
+---
diff --git a/docs/index.md b/docs/index.md
index 244cdf203882..0c1f6e8ca2ef 100755
--- a/docs/index.md
+++ b/docs/index.md
@@ -6,7 +6,7 @@ toc_label: "Contents"
<b> DeepSpeed+Megatron trained the world's most powerful language model: [MT-530B](https://www.microsoft.com/en-us/research/blog/using-deepspeed-and-megatron-to-train-megatron-turing-nlg-530b-the-worlds-largest-and-most-powerful-generative-language-model/) <b>
-<b> DeepSpeed is hiring, [come join us!](https://careers.microsoft.com/us/en/search-results?keywords=deepspeed%20open%20source) </b>
+<b> DeepSpeed is hiring, [come join us!](https://careers.microsoft.com/us/en/search-results?keywords=http:%2F%2Fdeepspeed.ai) </b>
DeepSpeed is a deep learning optimization library that makes distributed training easy,
efficient, and effective.
@@ -32,7 +32,9 @@ initiative to enable next-generation AI capabilities at scale, where you can fin
information [here](https://innovation.microsoft.com/en-us/exploring-ai-at-scale).
# What's New?
-* [2021/12/09] [DeepSpeed-MoE for NLG: Reducing the training cost of language models by 5 times](https://www.deepspeed.ai/news/2021/12/09/deepspeed-moe-nlg.html)
+* [2022/1/19] [DeepSpeed: Advancing MoE inference and training to power next-generation AI scale](https://www.microsoft.com/en-us/research/blog/deepspeed-advancing-moe-inference-and-training-to-power-next-generation-ai-scale/)
+ * [Mixture of Experts (MoE) for NLG tutorial](https://www.deepspeed.ai/tutorials/mixture-of-experts-nlg/).
+ * [Mixture of Experts (MoE) Inference tutorial](https://www.deepspeed.ai/tutorials/moe-inference-tutorial).
* [2021/11/15] [Autotuning: Automatically discover the optimal DeepSpeed configuration that delivers good training speed](https://www.deepspeed.ai/news/2021/11/15/autotuning.html)
* [2021/10/11] [Using DeepSpeed and Megatron to Train Megatron-Turing NLG 530B, the World’s Largest and Most Powerful Generative Language Model](https://www.microsoft.com/en-us/research/blog/using-deepspeed-and-megatron-to-train-megatron-turing-nlg-530b-the-worlds-largest-and-most-powerful-generative-language-model/)
* Read more on how to [train large models with DeepSpeed](https://www.deepspeed.ai/tutorials/large-models-w-deepspeed/)
|
https://api.github.com/repos/microsoft/DeepSpeed/pulls/1708
|
2022-01-19T05:20:05Z
|
2022-01-19T17:38:56Z
|
2022-01-19T17:38:56Z
|
2022-01-19T17:39:00Z
| 1,740
|
microsoft/DeepSpeed
| 10,262
|
|
Update SimpleStopWatch.py
|
diff --git a/README.md b/README.md
index fbbc086a17..8b1d4c4698 100644
--- a/README.md
+++ b/README.md
@@ -60,5 +60,9 @@ In the scripts the comments and other documents are lined up correctly when they
- [Google_News.py](https://github.com/geekcomputers/Python/blob/master/Google_News.py) - Uses BeautifulSoup to provide Latest News Headline along with news link.
- [cricket_live_score](https://github.com/geekcomputers/Python/blob/master/Cricket_score.py) - Uses BeautifulSoup to provide live cricket score.
+
- [youtube.py](https://github.com/geekcomputers/Python/blob/master/youtube.py) - Takes a song name as input and fetches the YouTube url of the best matching song and plays it.
-- [site_health.py](https://github.com/geekcomputers/Python/blob/master/youtube.py) - This script is very useful for when you just to do a health check on a remote server.
+
+- [site_health.py](https://github.com/geekcomputers/Python/blob/master/site_health.py) - This script is very useful for when you just to do a health check on a remote server.
+
+- [SimpleStopWatch.py](https://github.com/geekcomputers/Python/blob/master/SimpleStopWatch.py) - Simple Stop Watch implementation using Python's time module.
diff --git a/SimpleStopWatch.py b/SimpleStopWatch.py
index 4d6791ff6e..dd63d7767c 100644
--- a/SimpleStopWatch.py
+++ b/SimpleStopWatch.py
@@ -9,6 +9,9 @@
input() # For ENTER. Use raw_input() if you are running python 2.x instead of input()
starttime = time.time()
print('Started')
+ while True:
+ print('Time Elapsed: ', round(time.time() - starttime, 0), 'secs', end="\r")
+ time.sleep(1)
except KeyboardInterrupt:
print('Stopped')
endtime = time.time()
|
- Add a live time counter display to stopwatch (updates every second; see the sketch after this list)
<img width="464" alt="preview" src="https://user-images.githubusercontent.com/5627160/47959077-43b00b80-e001-11e8-9ac5-ae355ca5e36e.png">
- Update README.md
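Roughly the full script after this change, reconstructed around the diff hunk (the prompt text and the final total-time lines are assumptions; only the hunk and its context are from the PR):

```python
import time

try:
    input('Press ENTER to start the stopwatch')  # raw_input() on Python 2
    starttime = time.time()
    print('Started')
    while True:
        # '\r' rewinds the cursor so the counter overwrites itself in place
        print('Time Elapsed: ', round(time.time() - starttime, 0), 'secs', end="\r")
        time.sleep(1)
except KeyboardInterrupt:
    print('Stopped')
    endtime = time.time()
    print('Total Time:', round(endtime - starttime, 2), 'secs')  # assumed epilogue
```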
|
https://api.github.com/repos/geekcomputers/Python/pulls/428
|
2018-11-04T01:45:01Z
|
2018-11-04T21:58:19Z
|
2018-11-04T21:58:19Z
|
2018-11-04T21:58:23Z
| 460
|
geekcomputers/Python
| 31,020
|
yum: cover "Nothing to do" not only for groups
|
diff --git a/lib/ansible/modules/packaging/os/yum.py b/lib/ansible/modules/packaging/os/yum.py
index e924a3bdc02cb1..2e4b89d53d2ce6 100644
--- a/lib/ansible/modules/packaging/os/yum.py
+++ b/lib/ansible/modules/packaging/os/yum.py
@@ -688,11 +688,8 @@ def exec_install(module, items, action, pkgs, res, yum_basecmd):
res['msg'] += err
res['changed'] = True
- # special case for groups
- for spec in items:
- if spec.startswith('@'):
- if ('Nothing to do' in out and rc == 0) or ('does not have any packages to install' in err):
- res['changed'] = False
+ if ('Nothing to do' in out and rc == 0) or ('does not have any packages to install' in err):
+ res['changed'] = False
if rc != 0:
res['changed'] = False
|
##### SUMMARY
<!--- Describe the change, including rationale and design decisions -->
<!---
If you are fixing an existing issue, please include "Fixes #nnn" in your
commit message and your description; but you should still explain what
the change does.
-->
##### ISSUE TYPE
<!--- Pick one below and delete the rest: -->
- Bugfix Pull Request
##### COMPONENT NAME
yum
##### ANSIBLE VERSION
<!--- Paste verbatim output from “ansible --version” between quotes below -->
```
devel
```
##### ADDITIONAL INFORMATION
<!---
Include additional information to help people understand the change here.
For bugs that don't have a linked bug report, a step-by-step reproduction
of the problem is helpful.
-->
<!--- Paste verbatim command output below, e.g. before and after your change -->
|
https://api.github.com/repos/ansible/ansible/pulls/28283
|
2017-08-16T15:03:03Z
|
2017-08-16T15:24:52Z
|
2017-08-16T15:24:52Z
|
2019-04-26T22:17:45Z
| 240
|
ansible/ansible
| 49,520
|
Correct login request example
|
diff --git a/docs/topics/request-response.rst b/docs/topics/request-response.rst
index 69e4b738b9b..62be0b29b96 100644
--- a/docs/topics/request-response.rst
+++ b/docs/topics/request-response.rst
@@ -367,7 +367,7 @@ method for this job. Here's an example spider which uses it::
def after_login(self, response):
# check login succeed before going on
if "authentication failed" in response.body:
- self.log("Login failed", level=log.ERROR)
+ self.log("Login failed", level=scrapy.log.ERROR)
return
# continue scraping with authenticated session...
|
In the example, `log` is not imported and is not found in the global namespace.
For branches more recent than 0.24, the request-response documentation has been adapted to the newer logging methods, so this patch is not needed there.
|
https://api.github.com/repos/scrapy/scrapy/pulls/1208
|
2015-05-05T08:46:59Z
|
2015-05-05T11:46:19Z
|
2015-05-05T11:46:19Z
|
2015-05-05T11:46:19Z
| 147
|
scrapy/scrapy
| 34,521
|
[Apache v2] Implement ApacheConfigurator get_virtual_hosts()
|
diff --git a/certbot-apache/certbot_apache/apache_util.py b/certbot-apache/certbot_apache/apache_util.py
index 70febc9495b..085ccddc8fa 100644
--- a/certbot-apache/certbot_apache/apache_util.py
+++ b/certbot-apache/certbot_apache/apache_util.py
@@ -1,5 +1,6 @@
""" Utility functions for certbot-apache plugin """
import binascii
+import fnmatch
import logging
import re
import subprocess
@@ -114,6 +115,22 @@ def unique_id():
return binascii.hexlify(os.urandom(16)).decode("utf-8")
+def included_in_paths(filepath, paths):
+ """
+ Returns true if the filepath is included in the list of paths
+ that may contain full paths or wildcard paths that need to be
+ expanded.
+
+ :param str filepath: Filepath to check
+ :params list paths: List of paths to check against
+
+ :returns: True if included
+ :rtype: bool
+ """
+
+ return any([fnmatch.fnmatch(filepath, path) for path in paths])
+
+
def parse_defines(apachectl):
"""
Gets Defines from httpd process and returns a dictionary of
diff --git a/certbot-apache/certbot_apache/assertions.py b/certbot-apache/certbot_apache/assertions.py
index c7a61f44679..1a5ce20965d 100644
--- a/certbot-apache/certbot_apache/assertions.py
+++ b/certbot-apache/certbot_apache/assertions.py
@@ -60,6 +60,8 @@ def assertEqualDirective(first, second):
def isPass(value): # pragma: no cover
"""Checks if the value is set to PASS"""
+ if isinstance(value, bool):
+ return True
return PASS in value
def isPassDirective(block):
@@ -105,6 +107,26 @@ def assertEqualSimple(first, second):
if not isPass(first) and not isPass(second):
assert first == second
+def isEqualVirtualHost(first, second):
+ """
+ Checks that two VirtualHost objects are similar. There are some built
+ in differences with the implementations: VirtualHost created by ParserNode
+ implementation doesn't have "path" defined, as it was used for Augeas path
+ and that cannot obviously be used in the future. Similarly the legacy
+ version lacks "node" variable, that has a reference to the BlockNode for the
+ VirtualHost.
+ """
+ return (
+ first.name == second.name and
+ first.aliases == second.aliases and
+ first.filep == second.filep and
+ first.addrs == second.addrs and
+ first.ssl == second.ssl and
+ first.enabled == second.enabled and
+ first.modmacro == second.modmacro and
+ first.ancestor == second.ancestor
+ )
+
def assertEqualPathsList(first, second): # pragma: no cover
"""
Checks that the two lists of file paths match. This assertion allows for wildcard
diff --git a/certbot-apache/certbot_apache/augeasparser.py b/certbot-apache/certbot_apache/augeasparser.py
index d2771c9d2df..1c6ce6675f9 100644
--- a/certbot-apache/certbot_apache/augeasparser.py
+++ b/certbot-apache/certbot_apache/augeasparser.py
@@ -115,7 +115,8 @@ def find_ancestors(self, name):
while True:
# Get the path of ancestor node
parent = parent.rpartition("/")[0]
- if not parent:
+ # Root of the tree
+ if not parent or parent == "/files":
break
anc = self._create_blocknode(parent)
if anc.name.lower() == name.lower():
@@ -134,7 +135,13 @@ def _create_blocknode(self, path):
name = self._aug_get_name(path)
metadata = {"augeasparser": self.parser, "augeaspath": path}
+ # Check if the file was included from the root config or initial state
+ enabled = self.parser.parsed_in_original(
+ apache_util.get_file_path(path)
+ )
+
return AugeasBlockNode(name=name,
+ enabled=enabled,
ancestor=assertions.PASS,
filepath=apache_util.get_file_path(path),
metadata=metadata)
@@ -265,10 +272,15 @@ def add_child_block(self, name, parameters=None, position=None): # pragma: no c
# Create the new block
self.parser.aug.insert(insertpath, name, before)
+ # Check if the file was included from the root config or initial state
+ enabled = self.parser.parsed_in_original(
+ apache_util.get_file_path(realpath)
+ )
# Parameters will be set at the initialization of the new object
new_block = AugeasBlockNode(name=name,
parameters=parameters,
+ enabled=enabled,
ancestor=assertions.PASS,
filepath=apache_util.get_file_path(realpath),
metadata=new_metadata)
@@ -291,9 +303,14 @@ def add_child_directive(self, name, parameters=None, position=None): # pragma:
self.parser.aug.insert(insertpath, "directive", before)
# Set the directive key
self.parser.aug.set(realpath, name)
+ # Check if the file was included from the root config or initial state
+ enabled = self.parser.parsed_in_original(
+ apache_util.get_file_path(realpath)
+ )
new_dir = AugeasDirectiveNode(name=name,
parameters=parameters,
+ enabled=enabled,
ancestor=assertions.PASS,
filepath=apache_util.get_file_path(realpath),
metadata=new_metadata)
@@ -394,8 +411,14 @@ def parsed_paths(self):
:returns: list of file paths of files that have been parsed
"""
- parsed_paths = self.parser.aug.match("/augeas/load/Httpd/incl")
- return [self.parser.aug.get(path) for path in parsed_paths]
+ res_paths = []
+
+ paths = self.parser.existing_paths
+ for directory in paths:
+ for filename in paths[directory]:
+ res_paths.append(os.path.join(directory, filename))
+
+ return res_paths
def _create_commentnode(self, path):
"""Helper function to create a CommentNode from Augeas path"""
@@ -416,10 +439,13 @@ def _create_directivenode(self, path):
name = self.parser.get_arg(path)
metadata = {"augeasparser": self.parser, "augeaspath": path}
- # Because of the dynamic nature, and the fact that we're not populating
- # the complete ParserNode tree, we use the search parent as ancestor
+ # Check if the file was included from the root config or initial state
+ enabled = self.parser.parsed_in_original(
+ apache_util.get_file_path(path)
+ )
return AugeasDirectiveNode(name=name,
ancestor=assertions.PASS,
+ enabled=enabled,
filepath=apache_util.get_file_path(path),
metadata=metadata)
diff --git a/certbot-apache/certbot_apache/configurator.py b/certbot-apache/certbot_apache/configurator.py
index 9a9dec7a863..d4466cc53dc 100644
--- a/certbot-apache/certbot_apache/configurator.py
+++ b/certbot-apache/certbot_apache/configurator.py
@@ -202,7 +202,11 @@ def __init__(self, *args, **kwargs):
self._autohsts = {} # type: Dict[str, Dict[str, Union[int, float]]]
# Reverter save notes
self.save_notes = ""
-
+ # Should we use ParserNode implementation instead of the old behavior
+ self.USE_PARSERNODE = False
+ # Saves the list of file paths that were parsed initially, and
+ # not added to parser tree by self.conf("vhost-root") for example.
+ self.parsed_paths = [] # type: List[str]
# These will be set in the prepare function
self._prepared = False
self.parser = None
@@ -261,6 +265,7 @@ def prepare(self):
"augeaspath": self.parser.get_root_augpath(),
"ac_ast": None}
self.parser_root = self.get_parsernode_root(pn_meta)
+ self.parsed_paths = self.parser_root.parsed_paths()
# Check for errors in parsing files with Augeas
self.parser.check_parsing_errors("httpd.aug")
@@ -897,6 +902,29 @@ def _create_vhost(self, path):
return vhost
def get_virtual_hosts(self):
+ """
+ Temporary wrapper for legacy and ParserNode version for
+ get_virtual_hosts. This should be replaced with the ParserNode
+ implementation when ready.
+ """
+
+ v1_vhosts = self.get_virtual_hosts_v1()
+ v2_vhosts = self.get_virtual_hosts_v2()
+
+ for v1_vh in v1_vhosts:
+ found = False
+ for v2_vh in v2_vhosts:
+ if assertions.isEqualVirtualHost(v1_vh, v2_vh):
+ found = True
+ break
+ if not found:
+ raise AssertionError("Equivalent for {} was not found".format(v1_vh.path))
+
+ if self.USE_PARSERNODE:
+ return v2_vhosts
+ return v1_vhosts
+
+ def get_virtual_hosts_v1(self):
"""Returns list of virtual hosts found in the Apache configuration.
:returns: List of :class:`~certbot_apache.obj.VirtualHost`
@@ -949,6 +977,79 @@ def get_virtual_hosts(self):
vhs.append(new_vhost)
return vhs
+ def get_virtual_hosts_v2(self):
+ """Returns list of virtual hosts found in the Apache configuration using
+ ParserNode interface.
+ :returns: List of :class:`~certbot_apache.obj.VirtualHost`
+ objects found in configuration
+ :rtype: list
+ """
+
+ vhs = []
+ vhosts = self.parser_root.find_blocks("VirtualHost", exclude=False)
+ for vhblock in vhosts:
+ vhs.append(self._create_vhost_v2(vhblock))
+ return vhs
+
+ def _create_vhost_v2(self, node):
+ """Used by get_virtual_hosts_v2 to create vhost objects using ParserNode
+ interfaces.
+ :param interfaces.BlockNode node: The BlockNode object of VirtualHost block
+ :returns: newly created vhost
+ :rtype: :class:`~certbot_apache.obj.VirtualHost`
+ """
+ addrs = set()
+ for param in node.parameters:
+ addrs.add(obj.Addr.fromstring(param))
+
+ is_ssl = False
+ sslengine = node.find_directives("SSLEngine")
+ if sslengine:
+ for directive in sslengine:
+ if directive.parameters[0].lower() == "on":
+ is_ssl = True
+ break
+
+ # "SSLEngine on" might be set outside of <VirtualHost>
+ # Treat vhosts with port 443 as ssl vhosts
+ for addr in addrs:
+ if addr.get_port() == "443":
+ is_ssl = True
+
+ enabled = apache_util.included_in_paths(node.filepath, self.parsed_paths)
+
+ macro = False
+ # Check if the VirtualHost is contained in a mod_macro block
+ if node.find_ancestors("Macro"):
+ macro = True
+ vhost = obj.VirtualHost(
+ node.filepath, None, addrs, is_ssl, enabled, modmacro=macro, node=node
+ )
+ self._populate_vhost_names_v2(vhost)
+ return vhost
+
+ def _populate_vhost_names_v2(self, vhost):
+ """Helper function that populates the VirtualHost names.
+ :param host: In progress vhost whose names will be added
+ :type host: :class:`~certbot_apache.obj.VirtualHost`
+ """
+
+ servername_match = vhost.node.find_directives("ServerName",
+ exclude=False)
+ serveralias_match = vhost.node.find_directives("ServerAlias",
+ exclude=False)
+
+ servername = None
+ if servername_match:
+ servername = servername_match[-1].parameters[-1]
+
+ if not vhost.modmacro:
+ for alias in serveralias_match:
+ for serveralias in alias.parameters:
+ vhost.aliases.add(serveralias)
+ vhost.name = servername
+
+
def is_name_vhost(self, target_addr):
"""Returns if vhost is a name based vhost
diff --git a/certbot-apache/certbot_apache/obj.py b/certbot-apache/certbot_apache/obj.py
index 22abc85cdef..939251802ce 100644
--- a/certbot-apache/certbot_apache/obj.py
+++ b/certbot-apache/certbot_apache/obj.py
@@ -124,7 +124,7 @@ class VirtualHost(object): # pylint: disable=too-few-public-methods
strip_name = re.compile(r"^(?:.+://)?([^ :$]*)")
def __init__(self, filep, path, addrs, ssl, enabled, name=None,
- aliases=None, modmacro=False, ancestor=None):
+ aliases=None, modmacro=False, ancestor=None, node=None):
# pylint: disable=too-many-arguments
"""Initialize a VH."""
@@ -137,6 +137,7 @@ def __init__(self, filep, path, addrs, ssl, enabled, name=None,
self.enabled = enabled
self.modmacro = modmacro
self.ancestor = ancestor
+ self.node = node
def get_names(self):
"""Return a set of all names."""
diff --git a/certbot-apache/certbot_apache/tests/parsernode_configurator_test.py b/certbot-apache/certbot_apache/tests/parsernode_configurator_test.py
new file mode 100644
index 00000000000..97f07d3d299
--- /dev/null
+++ b/certbot-apache/certbot_apache/tests/parsernode_configurator_test.py
@@ -0,0 +1,36 @@
+"""Tests for ApacheConfigurator for AugeasParserNode classes"""
+import unittest
+
+import mock
+
+from certbot_apache.tests import util
+
+
+class ConfiguratorParserNodeTest(util.ApacheTest): # pylint: disable=too-many-public-methods
+ """Test AugeasParserNode using available test configurations"""
+
+ def setUp(self): # pylint: disable=arguments-differ
+ super(ConfiguratorParserNodeTest, self).setUp()
+
+ self.config = util.get_apache_configurator(
+ self.config_path, self.vhost_path, self.config_dir, self.work_dir)
+ self.vh_truth = util.get_vh_truth(
+ self.temp_dir, "debian_apache_2_4/multiple_vhosts")
+
+ def test_parsernode_get_vhosts(self):
+ self.config.USE_PARSERNODE = True
+ vhosts = self.config.get_virtual_hosts()
+ # Legacy get_virtual_hosts() do not set the node
+ self.assertTrue(vhosts[0].node is not None)
+
+ def test_parsernode_get_vhosts_mismatch(self):
+ vhosts = self.config.get_virtual_hosts_v2()
+ # One of the returned VirtualHost objects differs
+ vhosts[0].name = "IdidntExpectThat"
+ self.config.get_virtual_hosts_v2 = mock.MagicMock(return_value=vhosts)
+ with self.assertRaises(AssertionError):
+ _ = self.config.get_virtual_hosts()
+
+
+if __name__ == "__main__":
+ unittest.main() # pragma: no cover
|
This replaces #7398 as there are quite a few changes (mostly to other parts than the actual ApacheConfigurator logic though), so I guessed it'd probably be best to start from scratch. The diff looks a bit messy as this is built on top of #7561 and #7562 but it'll clear up when those are taken care of.
This PR also implements runtime assertion against the legacy implementation for VirtualHost object creation to ensure that the returned lists match where it matters. The objects created by the old implementation have Augeas paths, and the ones created by the new one have ParserNode references, but those obviously aren't matched.
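For reference, the vhost `enabled` check reduces to the new `fnmatch` helper added to `apache_util`; here it is standalone, with made-up paths:

```python
import fnmatch

def included_in_paths(filepath, paths):
    # paths may hold literal file paths or wildcard patterns, e.g. the
    # targets of an Apache "IncludeOptional sites-enabled/*" line.
    return any(fnmatch.fnmatch(filepath, path) for path in paths)

parsed = ["/etc/apache2/apache2.conf", "/etc/apache2/sites-enabled/*"]
assert included_in_paths("/etc/apache2/sites-enabled/example.conf", parsed)
assert not included_in_paths("/etc/apache2/conf-available/ssl.conf", parsed)
```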
|
https://api.github.com/repos/certbot/certbot/pulls/7564
|
2019-11-15T21:45:10Z
|
2019-12-19T08:51:42Z
|
2019-12-19T08:51:42Z
|
2019-12-19T08:51:42Z
| 3,651
|
certbot/certbot
| 3,114
|
DOC: Fixing EX01 - Added more examples
|
diff --git a/ci/code_checks.sh b/ci/code_checks.sh
index 28fec987efd1a..304b4616355db 100755
--- a/ci/code_checks.sh
+++ b/ci/code_checks.sh
@@ -263,16 +263,6 @@ if [[ -z "$CHECK" || "$CHECK" == "docstrings" ]]; then
pandas.core.window.ewm.ExponentialMovingWindow.cov \
pandas.api.indexers.BaseIndexer \
pandas.api.indexers.VariableOffsetWindowIndexer \
- pandas.core.groupby.DataFrameGroupBy.__iter__ \
- pandas.core.groupby.SeriesGroupBy.__iter__ \
- pandas.core.groupby.DataFrameGroupBy.groups \
- pandas.core.groupby.SeriesGroupBy.groups \
- pandas.core.groupby.DataFrameGroupBy.indices \
- pandas.core.groupby.SeriesGroupBy.indices \
- pandas.core.groupby.DataFrameGroupBy.get_group \
- pandas.core.groupby.SeriesGroupBy.get_group \
- pandas.core.groupby.DataFrameGroupBy.all \
- pandas.core.groupby.DataFrameGroupBy.any \
pandas.core.groupby.DataFrameGroupBy.count \
pandas.core.groupby.DataFrameGroupBy.cummax \
pandas.core.groupby.DataFrameGroupBy.cummin \
@@ -293,8 +283,6 @@ if [[ -z "$CHECK" || "$CHECK" == "docstrings" ]]; then
pandas.core.groupby.DataFrameGroupBy.std \
pandas.core.groupby.DataFrameGroupBy.sum \
pandas.core.groupby.DataFrameGroupBy.var \
- pandas.core.groupby.SeriesGroupBy.all \
- pandas.core.groupby.SeriesGroupBy.any \
pandas.core.groupby.SeriesGroupBy.count \
pandas.core.groupby.SeriesGroupBy.cummax \
pandas.core.groupby.SeriesGroupBy.cummin \
diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py
index 6ea5fc437f5a2..5d15be19f34f7 100644
--- a/pandas/core/groupby/groupby.py
+++ b/pandas/core/groupby/groupby.py
@@ -720,6 +720,33 @@ def __repr__(self) -> str:
def groups(self) -> dict[Hashable, np.ndarray]:
"""
Dict {group name -> group labels}.
+
+ Examples
+ --------
+
+ For SeriesGroupBy:
+
+ >>> lst = ['a', 'a', 'b']
+ >>> ser = pd.Series([1, 2, 3], index=lst)
+ >>> ser
+ a 1
+ a 2
+ b 3
+ dtype: int64
+ >>> ser.groupby(level=0).groups
+ {'a': ['a', 'a'], 'b': ['b']}
+
+ For DataFrameGroupBy:
+
+ >>> data = [[1, 2, 3], [1, 5, 6], [7, 8, 9]]
+ >>> df = pd.DataFrame(data, columns=["a", "b", "c"])
+ >>> df
+ a b c
+ 0 1 2 3
+ 1 1 5 6
+ 2 7 8 9
+ >>> df.groupby(by=["a"]).groups
+ {1: [0, 1], 7: [2]}
"""
return self.grouper.groups
@@ -733,6 +760,34 @@ def ngroups(self) -> int:
def indices(self) -> dict[Hashable, npt.NDArray[np.intp]]:
"""
Dict {group name -> group indices}.
+
+ Examples
+ --------
+
+ For SeriesGroupBy:
+
+ >>> lst = ['a', 'a', 'b']
+ >>> ser = pd.Series([1, 2, 3], index=lst)
+ >>> ser
+ a 1
+ a 2
+ b 3
+ dtype: int64
+ >>> ser.groupby(level=0).indices
+ {'a': array([0, 1]), 'b': array([2])}
+
+ For DataFrameGroupBy:
+
+ >>> data = [[1, 2, 3], [1, 5, 6], [7, 8, 9]]
+ >>> df = pd.DataFrame(data, columns=["a", "b", "c"],
+ ... index=["owl", "toucan", "eagle"])
+ >>> df
+ a b c
+ owl 1 2 3
+ toucan 1 5 6
+ eagle 7 8 9
+ >>> df.groupby(by=["a"]).indices
+ {1: array([0, 1]), 7: array([2])}
"""
return self.grouper.indices
@@ -867,6 +922,38 @@ def get_group(self, name, obj=None) -> DataFrame | Series:
Returns
-------
same type as obj
+
+ Examples
+ --------
+
+ For SeriesGroupBy:
+
+ >>> lst = ['a', 'a', 'b']
+ >>> ser = pd.Series([1, 2, 3], index=lst)
+ >>> ser
+ a 1
+ a 2
+ b 3
+ dtype: int64
+ >>> ser.groupby(level=0).get_group("a")
+ a 1
+ a 2
+ dtype: int64
+
+ For DataFrameGroupBy:
+
+ >>> data = [[1, 2, 3], [1, 5, 6], [7, 8, 9]]
+ >>> df = pd.DataFrame(data, columns=["a", "b", "c"],
+ ... index=["owl", "toucan", "eagle"])
+ >>> df
+ a b c
+ owl 1 2 3
+ toucan 1 5 6
+ eagle 7 8 9
+ >>> df.groupby(by=["a"]).get_group(1)
+ a b c
+ owl 1 2 3
+ toucan 1 5 6
"""
if obj is None:
obj = self._selected_obj
@@ -886,6 +973,47 @@ def __iter__(self) -> Iterator[tuple[Hashable, NDFrameT]]:
-------
Generator yielding sequence of (name, subsetted object)
for each group
+
+ Examples
+ --------
+
+ For SeriesGroupBy:
+
+ >>> lst = ['a', 'a', 'b']
+ >>> ser = pd.Series([1, 2, 3], index=lst)
+ >>> ser
+ a 1
+ a 2
+ b 3
+ dtype: int64
+ >>> for x, y in ser.groupby(level=0):
+ ... print(f'{x}\\n{y}\\n')
+ a
+ a 1
+ a 2
+ dtype: int64
+ b
+ b 3
+ dtype: int64
+
+ For DataFrameGroupBy:
+
+ >>> data = [[1, 2, 3], [1, 5, 6], [7, 8, 9]]
+ >>> df = pd.DataFrame(data, columns=["a", "b", "c"])
+ >>> df
+ a b c
+ 0 1 2 3
+ 1 1 5 6
+ 2 7 8 9
+ >>> for x, y in df.groupby(by=["a"]):
+ ... print(f'{x}\\n{y}\\n')
+ (1,)
+ a b c
+ 0 1 2 3
+ 1 1 5 6
+ (7,)
+ a b c
+ 2 7 8 9
"""
keys = self.keys
level = self.level
@@ -1787,7 +1915,7 @@ def _obj_1d_constructor(self) -> Callable:
@final
@Substitution(name="groupby")
- @Appender(_common_see_also)
+ @Substitution(see_also=_common_see_also)
def any(self, skipna: bool = True):
"""
Return True if any value in the group is truthful, else False.
@@ -1802,6 +1930,38 @@ def any(self, skipna: bool = True):
Series or DataFrame
DataFrame or Series of boolean values, where a value is True if any element
is True within its respective group, False otherwise.
+ %(see_also)s
+ Examples
+ --------
+ For SeriesGroupBy:
+
+ >>> lst = ['a', 'a', 'b']
+ >>> ser = pd.Series([1, 2, 0], index=lst)
+ >>> ser
+ a 1
+ a 2
+ b 0
+ dtype: int64
+ >>> ser.groupby(level=0).any()
+ a True
+ b False
+ dtype: bool
+
+ For DataFrameGroupBy:
+
+ >>> data = [[1, 0, 3], [1, 0, 6], [7, 1, 9]]
+ >>> df = pd.DataFrame(data, columns=["a", "b", "c"],
+ ... index=["ostrich", "penguin", "parrot"])
+ >>> df
+ a b c
+ ostrich 1 0 3
+ penguin 1 0 6
+ parrot 7 1 9
+ >>> df.groupby(by=["a"]).any()
+ b c
+ a
+ 1 False True
+ 7 True True
"""
return self._cython_agg_general(
"any",
@@ -1811,7 +1971,7 @@ def any(self, skipna: bool = True):
@final
@Substitution(name="groupby")
- @Appender(_common_see_also)
+ @Substitution(see_also=_common_see_also)
def all(self, skipna: bool = True):
"""
Return True if all values in the group are truthful, else False.
@@ -1826,6 +1986,39 @@ def all(self, skipna: bool = True):
Series or DataFrame
DataFrame or Series of boolean values, where a value is True if all elements
are True within its respective group, False otherwise.
+ %(see_also)s
+ Examples
+ --------
+
+ For SeriesGroupBy:
+
+ >>> lst = ['a', 'a', 'b']
+ >>> ser = pd.Series([1, 2, 0], index=lst)
+ >>> ser
+ a 1
+ a 2
+ b 0
+ dtype: int64
+ >>> ser.groupby(level=0).all()
+ a True
+ b False
+ dtype: bool
+
+ For DataFrameGroupBy:
+
+ >>> data = [[1, 0, 3], [1, 5, 6], [7, 8, 9]]
+ >>> df = pd.DataFrame(data, columns=["a", "b", "c"],
+ ... index=["ostrich", "penguin", "parrot"])
+ >>> df
+ a b c
+ ostrich 1 0 3
+ penguin 1 5 6
+ parrot 7 8 9
+ >>> df.groupby(by=["a"]).all()
+ b c
+ a
+ 1 False True
+ 7 True True
"""
return self._cython_agg_general(
"all",
|
- [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
Towards https://github.com/pandas-dev/pandas/issues/37875
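The `@Appender` to `@Substitution` swap is what lets the shared See Also block land before each new Examples section instead of being appended after it. A simplified stand-in for the mechanism (the real decorators live in `pandas.util._decorators`; `substitution` below is illustrative):

```python
_common_see_also = """
See Also
--------
Series.groupby
DataFrame.groupby
"""

def substitution(**kwargs):
    # Fill %(name)s placeholders inside the docstring in place, rather
    # than appending text at the end the way an Appender-style decorator does.
    def decorate(func):
        func.__doc__ = func.__doc__ % kwargs
        return func
    return decorate

@substitution(see_also=_common_see_also)
def group_any(skipna=True):
    """Return True if any value in the group is truthful.
    %(see_also)s
    Examples
    --------
    (doctests go here)
    """

print(group_any.__doc__)
```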
|
https://api.github.com/repos/pandas-dev/pandas/pulls/53540
|
2023-06-06T17:08:56Z
|
2023-06-07T15:36:23Z
|
2023-06-07T15:36:23Z
|
2023-06-07T17:38:10Z
| 2,762
|
pandas-dev/pandas
| 45,758
|
text-ify config warning message
|
diff --git a/lib/ansible/config/manager.py b/lib/ansible/config/manager.py
index cbce2048ffb0d0..de146062e34048 100644
--- a/lib/ansible/config/manager.py
+++ b/lib/ansible/config/manager.py
@@ -393,7 +393,7 @@ def _loop_entries(self, container, entry_list):
try:
temp_value = container.get(name, None)
except UnicodeEncodeError:
- self.WARNINGS.add('value for config entry {0} contains invalid characters, ignoring...'.format(to_native(name)))
+ self.WARNINGS.add(u'value for config entry {0} contains invalid characters, ignoring...'.format(to_text(name)))
continue
if temp_value is not None: # only set if env var is defined
value = temp_value
|
##### SUMMARY
ensure that config warning messages are always wide strings
##### ISSUE TYPE
- Bugfix Pull Request
##### COMPONENT NAME
<!--- Write the short name of the module, plugin, task or feature below -->
##### ADDITIONAL INFORMATION
<!--- Include additional information to help people understand the change here -->
<!--- A step-by-step reproduction of the problem is helpful if there is no related issue -->
<!--- Paste verbatim command output below, e.g. before and after your change -->
```paste below
```
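The type guarantee being relied on (illustrative; `to_text` from `ansible.module_utils._text` always returns a text/unicode string on both Python 2 and 3, while `to_native` returns the interpreter's native `str`, i.e. bytes on Python 2):

```python
# -*- coding: utf-8 -*-
name = u'caf\xe9'

# patched form: text template + text value -> always a wide (unicode)
# string, even when the config entry name contains non-ASCII characters
warning = u'value for config entry {0} contains invalid characters, ignoring...'.format(name)
assert isinstance(warning, type(u''))
```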
|
https://api.github.com/repos/ansible/ansible/pulls/63349
|
2019-10-10T17:24:05Z
|
2019-10-10T18:31:54Z
|
2019-10-10T18:31:54Z
|
2019-11-13T20:00:41Z
| 182
|
ansible/ansible
| 49,264
|
fix(alert): Save less tags when recording saveAlertRule transaction
|
diff --git a/static/app/views/settings/incidentRules/ruleForm/index.tsx b/static/app/views/settings/incidentRules/ruleForm/index.tsx
index 20259d6a32f6c..9177b471da913 100644
--- a/static/app/views/settings/incidentRules/ruleForm/index.tsx
+++ b/static/app/views/settings/incidentRules/ruleForm/index.tsx
@@ -448,9 +448,11 @@ class RuleFormContainer extends AsyncComponent<Props, State> {
transaction.setTag('operation', !rule.id ? 'create' : 'edit');
for (const trigger of sanitizedTriggers) {
for (const action of trigger.actions) {
- transaction.setTag(action.type, true);
- if (action.integrationId) {
- transaction.setTag(`integrationId:${action.integrationId}`, true);
+ if (action.type === 'slack') {
+ transaction.setTag(action.type, true);
+ if (action.integrationId) {
+ transaction.setTag(`integrationId:${action.integrationId}`, true);
+ }
}
}
}
diff --git a/static/app/views/settings/projectAlerts/issueRuleEditor/index.tsx b/static/app/views/settings/projectAlerts/issueRuleEditor/index.tsx
index a527500f9d99a..500a793412512 100644
--- a/static/app/views/settings/projectAlerts/issueRuleEditor/index.tsx
+++ b/static/app/views/settings/projectAlerts/issueRuleEditor/index.tsx
@@ -269,7 +269,7 @@ class IssueRuleEditor extends AsyncView<Props, State> {
// Grab the last part of something like 'sentry.mail.actions.NotifyEmailAction'
const splitActionId = action.id.split('.');
const actionName = splitActionId[splitActionId.length - 1];
- if (actionName) {
+ if (actionName === 'SlackNotifyServiceAction') {
transaction.setTag(actionName, true);
}
}
|
Previously this transaction saved all actions that the alert rule was being saved with, but that is too cluttered, and most of those tags are not important for performance monitoring. It seems the most important tag to capture is whether the user is saving a Slack rule.
This PR removes all other action/integration tags except the Slack one.
|
https://api.github.com/repos/getsentry/sentry/pulls/25642
|
2021-04-26T23:21:05Z
|
2021-04-27T17:43:04Z
|
2021-04-27T17:43:04Z
|
2024-03-05T19:38:17Z
| 420
|
getsentry/sentry
| 43,996
|
TST Ignore Kmeans test failures on MacOS (0.20.X)
|
diff --git a/sklearn/cluster/tests/test_k_means.py b/sklearn/cluster/tests/test_k_means.py
index cec0fa2897546..a467b80538447 100644
--- a/sklearn/cluster/tests/test_k_means.py
+++ b/sklearn/cluster/tests/test_k_means.py
@@ -336,6 +336,10 @@ def test_k_means_fit_predict(algo, dtype, constructor, seed, max_iter, tol):
# There's a very small chance of failure with elkan on unstructured dataset
# because predict method uses fast euclidean distances computation which
# may cause small numerical instabilities.
+ if sys.platform == "darwin":
+ pytest.xfail(
+ "Known failures on MacOS, See "
+ "https://github.com/scikit-learn/scikit-learn/issues/12644")
if not (algo == 'elkan' and constructor is sp.csr_matrix):
rng = np.random.RandomState(seed)
|
See https://github.com/scikit-learn/scikit-learn/issues/12644#issuecomment-440949725
Tag #12644 from 0.20.1 to 0.21
For 0.20.X branch.
|
https://api.github.com/repos/scikit-learn/scikit-learn/pulls/12651
|
2018-11-22T15:13:58Z
|
2018-11-22T20:21:54Z
|
2018-11-22T20:21:54Z
|
2018-11-23T04:13:53Z
| 215
|
scikit-learn/scikit-learn
| 45,891
|
New rule: sl -> ls
|
diff --git a/README.md b/README.md
index 6d584ed22..ec680efb5 100644
--- a/README.md
+++ b/README.md
@@ -163,6 +163,7 @@ using matched rule and run it. Rules enabled by default:
* `mkdir_p` – adds `-p` when you trying to create directory without parent;
* `no_command` – fixes wrong console commands, for example `vom/vim`;
* `python_command` – prepends `python` when you trying to run not executable/without `./` python script;
+* `sl_ls` – changes `sl` to `ls`;
* `rm_dir` – adds `-rf` when you trying to remove directory;
* `ssh_known_hosts` – removes host from `known_hosts` on warning;
* `sudo` – prepends `sudo` to previous command if it failed because of permissions;
diff --git a/tests/rules/test_sl_ls.py b/tests/rules/test_sl_ls.py
new file mode 100644
index 000000000..fd83b04d0
--- /dev/null
+++ b/tests/rules/test_sl_ls.py
@@ -0,0 +1,12 @@
+
+from thefuck.types import Command
+from thefuck.rules.sl_ls import match, get_new_command
+
+
+def test_match():
+ assert match(Command('sl', '', ''), None)
+ assert not match(Command('ls', '', ''), None)
+
+
+def test_get_new_command():
+ assert get_new_command(Command('sl', '', ''), None) == 'ls'
diff --git a/thefuck/rules/sl_ls.py b/thefuck/rules/sl_ls.py
new file mode 100644
index 000000000..0b3d017ab
--- /dev/null
+++ b/thefuck/rules/sl_ls.py
@@ -0,0 +1,14 @@
+"""
+This happens way too often
+
+When typing really fast cause I'm a 1337 H4X0R,
+I often fuck up 'ls' and type 'sl'. No more!
+"""
+
+
+def match(command, settings):
+ return command.script == 'sl'
+
+
+def get_new_command(command, settings):
+ return 'ls'
|
When typing quickly, I make this mistake all the time. This addition allows you to unfuck sl, replacing it with ls.
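How the pieces fit at runtime, as a self-contained sketch (`Command` is stubbed with a namedtuple; the real class lives in `thefuck.types`):

```python
from collections import namedtuple

Command = namedtuple('Command', ['script', 'stdout', 'stderr'])

def match(command, settings):
    return command.script == 'sl'

def get_new_command(command, settings):
    return 'ls'

cmd = Command('sl', '', '')
if match(cmd, None):
    print(get_new_command(cmd, None))  # -> ls
```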
|
https://api.github.com/repos/nvbn/thefuck/pulls/106
|
2015-04-23T05:15:33Z
|
2015-04-23T13:01:57Z
|
2015-04-23T13:01:57Z
|
2015-04-23T13:01:57Z
| 508
|
nvbn/thefuck
| 30,924
|
DeepSpeed examples refresh
|
diff --git a/.gitmodules b/.gitmodules
deleted file mode 100644
index 37adb6f39e5c..000000000000
--- a/.gitmodules
+++ /dev/null
@@ -1,4 +0,0 @@
-[submodule "DeepSpeedExamples"]
- path = DeepSpeedExamples
- url = https://github.com/microsoft/DeepSpeedExamples
- branch = master
diff --git a/DeepSpeedExamples b/DeepSpeedExamples
deleted file mode 160000
index 36212dd59cb3..000000000000
--- a/DeepSpeedExamples
+++ /dev/null
@@ -1 +0,0 @@
-Subproject commit 36212dd59cb3eb342c39bc8965aaba04d5491933
diff --git a/examples/README.md b/examples/README.md
new file mode 100644
index 000000000000..c61ee047df9a
--- /dev/null
+++ b/examples/README.md
@@ -0,0 +1,9 @@
+# DeepSpeed Examples
+
+If you are looking for examples using DeepSpeed please see the following resources:
+
+1. [DeepSpeedExamples](https://github.com/microsoft/DeepSpeedExamples)
+2. [Megatron-DeepSpeed](https://github.com/microsoft/Megatron-DeepSpeed)
+3. [DeepSpeed + AzureML](https://github.com/Azure/azureml-examples/tree/main/python-sdk/workflows/train/deepspeed)
+4. [DeepSpeed + Hugging Face Transformers Integration](https://huggingface.co/docs/transformers/main_classes/deepspeed)
+5. [DeepSpeed + PyTorch Lightning](https://pytorch-lightning.readthedocs.io/en/latest/api/pytorch_lightning.utilities.deepspeed.html)
diff --git a/setup.py b/setup.py
index 3d484f8edc88..532d0dd2976a 100755
--- a/setup.py
+++ b/setup.py
@@ -281,10 +281,18 @@ def create_dir_symlink(src, dest):
},
install_requires=install_requires,
extras_require=extras_require,
- packages=find_packages(exclude=["docker",
- "third_party",
- "csrc",
- "op_builder"]),
+ packages=find_packages(exclude=[
+ "azure",
+ "csrc",
+ "docker",
+ "docs",
+ "examples",
+ "op_builder",
+ "release",
+ "requirements",
+ "scripts",
+ "tests"
+ ]),
include_package_data=True,
scripts=[
'bin/deepspeed',
|
Removes the DeepSpeedExamples submodule, adds an `examples` folder that points people to several sources of DeepSpeed examples. Also updates python setup to exclude all non-relevant folders.
|
https://api.github.com/repos/microsoft/DeepSpeed/pulls/2021
|
2022-06-16T00:39:01Z
|
2022-06-16T01:46:31Z
|
2022-06-16T01:46:31Z
|
2022-06-16T01:46:33Z
| 576
|
microsoft/DeepSpeed
| 10,129
|
Better DNS error handling
|
diff --git a/CHANGELOG.md b/CHANGELOG.md
index b5778b4a78..c02c9c07e4 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -15,6 +15,7 @@ This project adheres to [Semantic Versioning](https://semver.org/).
- Added support for basic JSON types on `--form`/`--multipart` when using JSON only operators (`:=`/`:=@`). ([#1212](https://github.com/httpie/httpie/issues/1212))
- Added support for automatically enabling `--stream` when `Content-Type` is `text/event-stream`. ([#376](https://github.com/httpie/httpie/issues/376))
- Added new `pie-dark`/`pie-light` (and `pie`) styles that match with [HTTPie for Web and Desktop](https://httpie.io/product). ([#1237](https://github.com/httpie/httpie/issues/1237))
+- Added support for better error handling on DNS failures. ([#1248](https://github.com/httpie/httpie/issues/1248))
- Broken plugins will no longer crash the whole application. ([#1204](https://github.com/httpie/httpie/issues/1204))
- Fixed auto addition of XML declaration to every formatted XML response. ([#1156](https://github.com/httpie/httpie/issues/1156))
- Fixed highlighting when `Content-Type` specifies `charset`. ([#1242](https://github.com/httpie/httpie/issues/1242))
diff --git a/httpie/core.py b/httpie/core.py
index 48d21bc4f0..bc03686b14 100644
--- a/httpie/core.py
+++ b/httpie/core.py
@@ -2,6 +2,7 @@
import os
import platform
import sys
+import socket
from typing import List, Optional, Tuple, Union, Callable
import requests
@@ -21,6 +22,7 @@
from .output.writer import write_message, write_stream, MESSAGE_SEPARATOR_BYTES
from .plugins.registry import plugin_manager
from .status import ExitStatus, http_status_to_exit_status
+from .utils import unwrap_context
# noinspection PyDefaultArgument
@@ -41,6 +43,21 @@ def raw_main(
include_debug_info = '--debug' in args
include_traceback = include_debug_info or '--traceback' in args
+ def handle_generic_error(e, annotation=None):
+ msg = str(e)
+ if hasattr(e, 'request'):
+ request = e.request
+ if hasattr(request, 'url'):
+ msg = (
+ f'{msg} while doing a {request.method}'
+ f' request to URL: {request.url}'
+ )
+ if annotation:
+ msg += annotation
+ env.log_error(f'{type(e).__name__}: {msg}')
+ if include_traceback:
+ raise
+
if include_debug_info:
print_debug_info(env)
if args == ['--debug']:
@@ -90,19 +107,23 @@ def raw_main(
f'Too many redirects'
f' (--max-redirects={parsed_args.max_redirects}).'
)
+ except requests.exceptions.ConnectionError as exc:
+ annotation = None
+ original_exc = unwrap_context(exc)
+ if isinstance(original_exc, socket.gaierror):
+ if original_exc.errno == socket.EAI_AGAIN:
+ annotation = '\nCouldn\'t connect to a DNS server. Perhaps check your connection and try again.'
+ elif original_exc.errno == socket.EAI_NONAME:
+ annotation = '\nCouldn\'t resolve the given hostname. Perhaps check it and try again.'
+ propagated_exc = original_exc
+ else:
+ propagated_exc = exc
+
+ handle_generic_error(propagated_exc, annotation=annotation)
+ exit_status = ExitStatus.ERROR
except Exception as e:
# TODO: Further distinction between expected and unexpected errors.
- msg = str(e)
- if hasattr(e, 'request'):
- request = e.request
- if hasattr(request, 'url'):
- msg = (
- f'{msg} while doing a {request.method}'
- f' request to URL: {request.url}'
- )
- env.log_error(f'{type(e).__name__}: {msg}')
- if include_traceback:
- raise
+ handle_generic_error(e)
exit_status = ExitStatus.ERROR
return exit_status
diff --git a/httpie/utils.py b/httpie/utils.py
index 8669de8caf..fa19fa7cde 100644
--- a/httpie/utils.py
+++ b/httpie/utils.py
@@ -229,3 +229,11 @@ def split(iterable: Iterable[T], key: Callable[[T], bool]) -> Tuple[List[T], Lis
else:
right.append(item)
return left, right
+
+
+def unwrap_context(exc: Exception) -> Optional[Exception]:
+ context = exc.__context__
+ if isinstance(context, Exception):
+ return unwrap_context(context)
+ else:
+ return exc
diff --git a/tests/test_errors.py b/tests/test_errors.py
index 5a1a0f2476..fca48fff15 100644
--- a/tests/test_errors.py
+++ b/tests/test_errors.py
@@ -1,3 +1,5 @@
+import pytest
+import socket
from unittest import mock
from pytest import raises
from requests import Request
@@ -31,6 +33,21 @@ def test_error_traceback(program):
http('--traceback', 'www.google.com')
+@mock.patch('httpie.core.program')
+@pytest.mark.parametrize("error_code, expected_message", [
+ (socket.EAI_AGAIN, "check your connection"),
+ (socket.EAI_NONAME, "check the URL"),
+])
+def test_error_custom_dns(program, error_code, expected_message):
+ exc = ConnectionError('Connection aborted')
+ exc.__context__ = socket.gaierror(error_code, "<test>")
+ program.side_effect = exc
+
+ r = http('www.google.com', tolerate_error_exit_status=True)
+ assert r.exit_status == ExitStatus.ERROR
+ assert expected_message in r.stderr
+
+
def test_max_headers_limit(httpbin_both):
with raises(ConnectionError) as e:
http('--max-headers=1', httpbin_both + '/get')
|
E.g.
```
$ http non.existent.url
http: error: gaierror: [Errno -2] Name or service not known
Couldn't resolve the given hostname. Perhaps check the URL and try again.
```
or
```
http: error: gaierror: [Errno -3] Temporary failure in name resolution
Couldn't connect to a DNS server. Perhaps check your connection and try again.
```
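For reference, a minimal standalone sketch of the context-unwrapping idea (the function mirrors `unwrap_context` from the diff; the triggering scenario is contrived for illustration):
```python
import socket

def unwrap_context(exc: Exception) -> Exception:
    # Walk the implicit exception chain down to the root cause.
    context = exc.__context__
    if isinstance(context, Exception):
        return unwrap_context(context)
    return exc

try:
    try:
        raise socket.gaierror(socket.EAI_NONAME, "<test>")
    except socket.gaierror:
        raise ConnectionError("Connection aborted")  # gaierror becomes __context__
except ConnectionError as exc:
    root = unwrap_context(exc)
    assert isinstance(root, socket.gaierror)
    assert root.errno == socket.EAI_NONAME
```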
Fixes #1248
|
https://api.github.com/repos/httpie/cli/pulls/1249
|
2021-12-22T15:30:40Z
|
2021-12-23T19:35:30Z
|
2021-12-23T19:35:30Z
|
2021-12-23T19:35:30Z
| 1,405
|
httpie/cli
| 34,103
|
Install primer.json with black
|
diff --git a/CHANGES.md b/CHANGES.md
index d114c5136ff..37b5da6a40d 100644
--- a/CHANGES.md
+++ b/CHANGES.md
@@ -1,5 +1,11 @@
## Change Log
+### Unreleased
+
+#### _Packaging_
+
+- Install `primer.json` (used by `black-primer` by default) with black. (#2154)
+
### 21.4b1
#### _Black_
diff --git a/setup.py b/setup.py
index 0928c63afc2..f1792a46fe8 100644
--- a/setup.py
+++ b/setup.py
@@ -64,7 +64,11 @@ def get_long_description() -> str:
ext_modules=ext_modules,
packages=["blackd", "black", "blib2to3", "blib2to3.pgen2", "black_primer"],
package_dir={"": "src"},
- package_data={"blib2to3": ["*.txt"], "black": ["py.typed"]},
+ package_data={
+ "blib2to3": ["*.txt"],
+ "black": ["py.typed"],
+ "black_primer": ["primer.json"],
+ },
python_requires=">=3.6.2",
zip_safe=False,
install_requires=[
|
Fixes https://github.com/psf/black/issues/2153
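For context, a minimal sketch of why installing the file matters: once `primer.json` ships inside the `black_primer` package it can be read without hardcoding paths, e.g. via `importlib.resources` (Python 3.7+; the actual consumer code in `black-primer` may load it differently):
```python
import json
from importlib import resources

# primer.json is now installed as package data alongside the code,
# so it can be located through the import system rather than the repo.
with resources.open_text("black_primer", "primer.json") as f:
    primer_config = json.load(f)
```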
|
https://api.github.com/repos/psf/black/pulls/2154
|
2021-04-27T15:36:22Z
|
2021-04-27T15:58:39Z
|
2021-04-27T15:58:39Z
|
2021-04-27T15:58:40Z
| 303
|
psf/black
| 24,547
|
bpo-18748: test_io: silence destructor errors
|
diff --git a/Lib/test/test_io.py b/Lib/test/test_io.py
index 811a446f92be31..5406a2891bb251 100644
--- a/Lib/test/test_io.py
+++ b/Lib/test/test_io.py
@@ -991,6 +991,9 @@ def flush(self):
# This would cause an assertion failure.
self.assertRaises(OSError, f.close)
+ # Silence destructor error
+ R.flush = lambda self: None
+
class CIOTest(IOTest):
@@ -1167,6 +1170,10 @@ def bad_close():
self.assertEqual(err.exception.__context__.args, ('flush',))
self.assertFalse(b.closed)
+ # Silence destructor error
+ raw.close = lambda: None
+ b.flush = lambda: None
+
def test_nonnormalized_close_error_on_close(self):
# Issue #21677
raw = self.MockRawIO()
@@ -1184,6 +1191,10 @@ def bad_close():
self.assertIn('non_existing_flush', str(err.exception.__context__))
self.assertFalse(b.closed)
+ # Silence destructor error
+ b.flush = lambda: None
+ raw.close = lambda: None
+
def test_multi_close(self):
raw = self.MockRawIO()
b = self.tp(raw)
@@ -2039,6 +2050,9 @@ def reader_close():
self.assertFalse(reader.closed)
self.assertTrue(writer.closed)
+ # Silence destructor error
+ reader.close = lambda: None
+
def test_writer_close_error_on_close(self):
def writer_close():
writer_non_existing
@@ -2053,6 +2067,9 @@ def writer_close():
self.assertTrue(reader.closed)
self.assertFalse(writer.closed)
+ # Silence destructor error
+ writer.close = lambda: None
+
def test_reader_writer_close_error_on_close(self):
def reader_close():
reader_non_existing
@@ -2072,6 +2089,10 @@ def writer_close():
self.assertFalse(reader.closed)
self.assertFalse(writer.closed)
+ # Silence destructor error
+ reader.close = lambda: None
+ writer.close = lambda: None
+
def test_isatty(self):
class SelectableIsAtty(MockRawIO):
def __init__(self, isatty):
@@ -3270,6 +3291,10 @@ def bad_close():
self.assertEqual(err.exception.__context__.args, ('flush',))
self.assertFalse(txt.closed)
+ # Silence destructor error
+ buffer.close = lambda: None
+ txt.flush = lambda: None
+
def test_nonnormalized_close_error_on_close(self):
# Issue #21677
buffer = self.BytesIO(self.testdata)
@@ -3287,6 +3312,10 @@ def bad_close():
self.assertIn('non_existing_flush', str(err.exception.__context__))
self.assertFalse(txt.closed)
+ # Silence destructor error
+ buffer.close = lambda: None
+ txt.flush = lambda: None
+
def test_multi_close(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
txt.close()
|
<!-- issue-number: [bpo-18748](https://bugs.python.org/issue18748) -->
https://bugs.python.org/issue18748
<!-- /issue-number -->
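A toy model of the pattern applied throughout the patch (not the real `io` classes): once a `close()` that raises has been exercised by the test, stubbing it out keeps the destructor from raising again during garbage collection:
```python
class Flaky:
    def close(self):
        raise OSError("flush")

    def __del__(self):
        # Mirrors IOBase.__del__, which calls self.close().
        self.close()

obj = Flaky()
try:
    obj.close()           # the behavior under test
except OSError:
    pass
obj.close = lambda: None  # silence destructor error
del obj                   # __del__ now finds the no-op stub
```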
|
https://api.github.com/repos/python/cpython/pulls/12805
|
2019-04-12T15:09:59Z
|
2019-04-12T19:58:25Z
|
2019-04-12T19:58:25Z
|
2023-08-31T07:08:30Z
| 693
|
python/cpython
| 4,124
|
Changed order of tests for travis
|
diff --git a/.travis.yml b/.travis.yml
index 38686725ece..5b2a1c9fd7c 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -3,18 +3,18 @@ dist: trusty
language: python
matrix:
include:
- - python: 3.4
- env: KERAS_BACKEND=theano
- - python: 3.4
- env: KERAS_BACKEND=tensorflow
- python: 2.7
- env: KERAS_BACKEND=theano
- - python: 2.7
- env: KERAS_BACKEND=tensorflow
+ env: KERAS_BACKEND=theano TEST_MODE=PEP8
- python: 2.7
env: KERAS_BACKEND=theano TEST_MODE=INTEGRATION_TESTS
- python: 2.7
- env: KERAS_BACKEND=theano TEST_MODE=PEP8
+ env: KERAS_BACKEND=tensorflow
+ - python: 3.4
+ env: KERAS_BACKEND=tensorflow
+ - python: 2.7
+ env: KERAS_BACKEND=theano
+ - python: 3.4
+ env: KERAS_BACKEND=theano
install:
# code below is taken from http://conda.pydata.org/docs/travis.html
# We do this conditionally because it saves us some downloading if the
|
Shortest tests run first.
If they fail, Travis will show the error sooner.
This should give pull requesters faster feedback.
|
https://api.github.com/repos/keras-team/keras/pulls/4643
|
2016-12-08T10:59:26Z
|
2016-12-08T12:09:43Z
|
2016-12-08T12:09:43Z
|
2016-12-12T00:45:30Z
| 347
|
keras-team/keras
| 47,064
|
bpo-33883: Mention mypy, pyre, pytype and PyAnnotate in FAQ
|
diff --git a/Doc/faq/programming.rst b/Doc/faq/programming.rst
index 53f3b7f528c065..fd720c1a304b0f 100644
--- a/Doc/faq/programming.rst
+++ b/Doc/faq/programming.rst
@@ -71,6 +71,11 @@ length, whether variable names are well-formed according to your coding
standard, whether declared interfaces are fully implemented, and more.
https://docs.pylint.org/ provides a full list of Pylint's features.
+Static type checkers such as `Mypy <http://mypy-lang.org/>`_,
+`Pyre <https://pyre-check.org/>`_, and
+`Pytype <https://github.com/google/pytype>`_ can check type hints in Python
+source code.
+
How can I create a stand-alone binary from a Python script?
-----------------------------------------------------------
|
<!-- issue-number: bpo-33883 -->
https://bugs.python.org/issue33883
<!-- /issue-number -->
|
https://api.github.com/repos/python/cpython/pulls/7760
|
2018-06-17T00:53:43Z
|
2018-09-11T05:12:42Z
|
2018-09-11T05:12:42Z
|
2018-09-24T19:55:12Z
| 203
|
python/cpython
| 3,832
|
Pass reference to func, as well as args, when pushing frame.
|
diff --git a/Include/internal/pycore_frame.h b/Include/internal/pycore_frame.h
index 85b9cf0f77bcb0..1ad156290a55e9 100644
--- a/Include/internal/pycore_frame.h
+++ b/Include/internal/pycore_frame.h
@@ -87,12 +87,12 @@ static inline void _PyFrame_StackPush(InterpreterFrame *f, PyObject *value) {
void _PyFrame_Copy(InterpreterFrame *src, InterpreterFrame *dest);
+/* Consumes reference to func */
static inline void
_PyFrame_InitializeSpecials(
InterpreterFrame *frame, PyFunctionObject *func,
PyObject *locals, int nlocalsplus)
{
- Py_INCREF(func);
frame->f_func = func;
frame->f_code = (PyCodeObject *)Py_NewRef(func->func_code);
frame->f_builtins = func->func_builtins;
@@ -166,9 +166,6 @@ _PyFrame_FastToLocalsWithError(InterpreterFrame *frame);
void
_PyFrame_LocalsToFast(InterpreterFrame *frame, int clear);
-InterpreterFrame *_PyThreadState_PushFrame(
- PyThreadState *tstate, PyFunctionObject *func, PyObject *locals);
-
extern InterpreterFrame *
_PyThreadState_BumpFramePointerSlow(PyThreadState *tstate, size_t size);
@@ -189,6 +186,7 @@ _PyThreadState_BumpFramePointer(PyThreadState *tstate, size_t size)
void _PyThreadState_PopFrame(PyThreadState *tstate, InterpreterFrame *frame);
+/* Consumes reference to func */
InterpreterFrame *
_PyFrame_Push(PyThreadState *tstate, PyFunctionObject *func);
diff --git a/Objects/frameobject.c b/Objects/frameobject.c
index 15da1325d1480f..78f3894111bc3c 100644
--- a/Objects/frameobject.c
+++ b/Objects/frameobject.c
@@ -784,6 +784,8 @@ _Py_IDENTIFIER(__builtins__);
static void
init_frame(InterpreterFrame *frame, PyFunctionObject *func, PyObject *locals)
{
+ /* _PyFrame_InitializeSpecials consumes reference to func */
+ Py_INCREF(func);
PyCodeObject *code = (PyCodeObject *)func->func_code;
_PyFrame_InitializeSpecials(frame, func, locals, code->co_nlocalsplus);
for (Py_ssize_t i = 0; i < code->co_nlocalsplus; i++) {
diff --git a/Python/ceval.c b/Python/ceval.c
index 70748e8911f9fe..59c977781d446c 100644
--- a/Python/ceval.c
+++ b/Python/ceval.c
@@ -2243,6 +2243,7 @@ _PyEval_EvalFrameDefault(PyThreadState *tstate, InterpreterFrame *frame, int thr
goto error;
}
CALL_STAT_INC(frames_pushed);
+ Py_INCREF(getitem);
_PyFrame_InitializeSpecials(new_frame, getitem,
NULL, code->co_nlocalsplus);
STACK_SHRINK(2);
@@ -4585,7 +4586,6 @@ _PyEval_EvalFrameDefault(PyThreadState *tstate, InterpreterFrame *frame, int thr
STACK_SHRINK(call_shape.postcall_shrink);
// The frame has stolen all the arguments from the stack,
// so there is no need to clean them up.
- Py_DECREF(function);
if (new_frame == NULL) {
goto error;
}
@@ -4670,7 +4670,6 @@ _PyEval_EvalFrameDefault(PyThreadState *tstate, InterpreterFrame *frame, int thr
new_frame->localsplus[i] = NULL;
}
STACK_SHRINK(call_shape.postcall_shrink);
- Py_DECREF(func);
_PyFrame_SetStackPointer(frame, stack_pointer);
new_frame->previous = frame;
frame = cframe.current_frame = new_frame;
@@ -4707,7 +4706,6 @@ _PyEval_EvalFrameDefault(PyThreadState *tstate, InterpreterFrame *frame, int thr
new_frame->localsplus[i] = NULL;
}
STACK_SHRINK(call_shape.postcall_shrink);
- Py_DECREF(func);
_PyFrame_SetStackPointer(frame, stack_pointer);
new_frame->previous = frame;
frame = cframe.current_frame = new_frame;
@@ -6072,7 +6070,7 @@ initialize_locals(PyThreadState *tstate, PyFunctionObject *func,
return -1;
}
-/* Consumes all the references to the args */
+/* Consumes references to func and all the args */
static InterpreterFrame *
_PyEvalFramePushAndInit(PyThreadState *tstate, PyFunctionObject *func,
PyObject *locals, PyObject* const* args,
@@ -6126,7 +6124,9 @@ _PyEval_Vector(PyThreadState *tstate, PyFunctionObject *func,
PyObject* const* args, size_t argcount,
PyObject *kwnames)
{
- /* _PyEvalFramePushAndInit consumes all the references to its arguments */
+ /* _PyEvalFramePushAndInit consumes the references
+ * to func and all its arguments */
+ Py_INCREF(func);
for (size_t i = 0; i < argcount; i++) {
Py_INCREF(args[i]);
}
diff --git a/Python/frame.c b/Python/frame.c
index ca7c5f9c94e07b..76697cfa083138 100644
--- a/Python/frame.c
+++ b/Python/frame.c
@@ -109,6 +109,7 @@ _PyFrame_Clear(InterpreterFrame *frame)
Py_DECREF(frame->f_code);
}
+/* Consumes reference to func */
InterpreterFrame *
_PyFrame_Push(PyThreadState *tstate, PyFunctionObject *func)
{
@@ -117,6 +118,7 @@ _PyFrame_Push(PyThreadState *tstate, PyFunctionObject *func)
CALL_STAT_INC(frames_pushed);
InterpreterFrame *new_frame = _PyThreadState_BumpFramePointer(tstate, size);
if (new_frame == NULL) {
+ Py_DECREF(func);
return NULL;
}
_PyFrame_InitializeSpecials(new_frame, func, NULL, code->co_nlocalsplus);
diff --git a/Python/pystate.c b/Python/pystate.c
index 77467944e2afba..a85460c15103d1 100644
--- a/Python/pystate.c
+++ b/Python/pystate.c
@@ -2212,26 +2212,6 @@ _PyThreadState_BumpFramePointerSlow(PyThreadState *tstate, size_t size)
return (InterpreterFrame *)base;
}
-
-InterpreterFrame *
-_PyThreadState_PushFrame(PyThreadState *tstate, PyFunctionObject *func, PyObject *locals)
-{
- PyCodeObject *code = (PyCodeObject *)func->func_code;
- int nlocalsplus = code->co_nlocalsplus;
- size_t size = nlocalsplus + code->co_stacksize +
- FRAME_SPECIALS_SIZE;
- CALL_STAT_INC(frames_pushed);
- InterpreterFrame *frame = _PyThreadState_BumpFramePointer(tstate, size);
- if (frame == NULL) {
- return NULL;
- }
- _PyFrame_InitializeSpecials(frame, func, locals, nlocalsplus);
- for (int i=0; i < nlocalsplus; i++) {
- frame->localsplus[i] = NULL;
- }
- return frame;
-}
-
void
_PyThreadState_PopFrame(PyThreadState *tstate, InterpreterFrame * frame)
{
|
Minor efficiency improvement. When pushing a frame we transfer the references to the args to the frame. This does the same for the `func`.
Also removes unused function `_PyThreadState_PushFrame`.
This is all internal.
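A toy model of the new ownership convention (plain Python counters, not CPython internals): the caller performs the extra `Py_INCREF`, and the frame-pushing callee consumes that reference, dropping it on failure paths:
```python
class Obj:
    def __init__(self):
        self.refcnt = 1

def frame_push(func, fail=False):
    # The callee consumes the reference it was handed.
    if fail:
        func.refcnt -= 1    # mirrors the added Py_DECREF(func) on error
        return None
    return ("frame", func)  # the frame keeps ownership of the reference

func = Obj()
func.refcnt += 1            # caller mirrors the added Py_INCREF(func)
frame = frame_push(func)
assert func.refcnt == 2     # one ref for the caller, one owned by the frame
```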
|
https://api.github.com/repos/python/cpython/pulls/31100
|
2022-02-03T12:05:36Z
|
2022-02-03T18:36:29Z
|
2022-02-03T18:36:29Z
|
2022-02-03T18:36:37Z
| 1,728
|
python/cpython
| 4,412
|
Update CI to TF 2.16 RC0
|
diff --git a/requirements-jax-cuda.txt b/requirements-jax-cuda.txt
index 7998d6734cc..1fb6eeb0515 100644
--- a/requirements-jax-cuda.txt
+++ b/requirements-jax-cuda.txt
@@ -1,5 +1,5 @@
# Tensorflow cpu-only version (needed for testing).
-tf-nightly-cpu==2.16.0.dev20240101 # Pin a working nightly until rc0.
+tensorflow-cpu==2.16.0rc0 # Pin to rc until TF 2.16 release
# Torch cpu-only version (needed for testing).
--extra-index-url https://download.pytorch.org/whl/cpu
diff --git a/requirements-tensorflow-cuda.txt b/requirements-tensorflow-cuda.txt
index 06390f77046..488ffde5e46 100644
--- a/requirements-tensorflow-cuda.txt
+++ b/requirements-tensorflow-cuda.txt
@@ -1,5 +1,5 @@
# Tensorflow with cuda support.
-tf-nightly[and-cuda]==2.16.0.dev20240101 # Pin a working nightly until rc0.
+tensorflow[and-cuda]==2.16.0rc0 # Pin to rc until TF 2.16 release
# Torch cpu-only version (needed for testing).
--extra-index-url https://download.pytorch.org/whl/cpu
diff --git a/requirements-torch-cuda.txt b/requirements-torch-cuda.txt
index 90b8a3693cb..aa742eee6af 100644
--- a/requirements-torch-cuda.txt
+++ b/requirements-torch-cuda.txt
@@ -1,5 +1,5 @@
# Tensorflow cpu-only version (needed for testing).
-tf-nightly-cpu==2.16.0.dev20240101 # Pin a working nightly until rc0.
+tensorflow-cpu==2.16.0rc0 # Pin to rc until TF 2.16 release
# Torch with cuda support.
--extra-index-url https://download.pytorch.org/whl/cu121
diff --git a/requirements.txt b/requirements.txt
index aca523e9976..4f65e854f33 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,5 +1,5 @@
# Tensorflow.
-tf-nightly-cpu==2.16.0.dev20240101 # Pin a working nightly until rc0.
+tensorflow-cpu==2.16.0rc0 # Pin to rc until TF 2.16 release
# Torch.
--extra-index-url https://download.pytorch.org/whl/cpu
|
Now that RC0 is released, pin to TF 2.16 RC0 instead of the nightly builds.
The Spectral Norm test is failing on master as well for the TF GPU backend.
|
https://api.github.com/repos/keras-team/keras/pulls/19222
|
2024-02-23T21:51:28Z
|
2024-02-23T22:37:28Z
|
2024-02-23T22:37:28Z
|
2024-02-23T22:37:32Z
| 601
|
keras-team/keras
| 47,322
|
[fix] fix typo s/get_defualt_parser /get_default_parser
|
diff --git a/examples/language/grok-1/inference.py b/examples/language/grok-1/inference.py
index faef7ae9d7ca..58ba3872f856 100644
--- a/examples/language/grok-1/inference.py
+++ b/examples/language/grok-1/inference.py
@@ -2,10 +2,10 @@
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
-from utils import get_defualt_parser, inference, print_output
+from utils import get_default_parser, inference, print_output
if __name__ == "__main__":
- parser = get_defualt_parser()
+ parser = get_default_parser()
args = parser.parse_args()
start = time.time()
torch.set_default_dtype(torch.bfloat16)
diff --git a/examples/language/grok-1/inference_tp.py b/examples/language/grok-1/inference_tp.py
index cf05880dc21d..e10c4929cdbf 100644
--- a/examples/language/grok-1/inference_tp.py
+++ b/examples/language/grok-1/inference_tp.py
@@ -3,7 +3,7 @@
import torch
from grok1_policy import Grok1ForCausalLMPolicy
from transformers import AutoModelForCausalLM, AutoTokenizer
-from utils import get_defualt_parser, inference, print_output
+from utils import get_default_parser, inference, print_output
import colossalai
from colossalai.booster import Booster
@@ -13,7 +13,7 @@
from colossalai.utils import get_current_device
if __name__ == "__main__":
- parser = get_defualt_parser()
+ parser = get_default_parser()
args = parser.parse_args()
start = time.time()
colossalai.launch_from_torch({})
diff --git a/examples/language/grok-1/utils.py b/examples/language/grok-1/utils.py
index 7663127a5515..29c86e411db1 100644
--- a/examples/language/grok-1/utils.py
+++ b/examples/language/grok-1/utils.py
@@ -33,7 +33,7 @@ def inference(model, tokenizer, text, **generate_kwargs):
return outputs[0].tolist()
-def get_defualt_parser():
+def get_default_parser():
parser = argparse.ArgumentParser()
parser.add_argument("--pretrained", type=str, default="hpcaitech/grok-1")
parser.add_argument("--tokenizer", type=str, default="tokenizer.model")
|
## 📌 Checklist before creating the PR
- [ ] I have created an issue for this PR for traceability
- [ ] The title follows the standard format: `[doc/gemini/tensor/...]: A concise description`
- [ ] I have added relevant tags if possible for us to better distinguish different PRs
## 🚨 Issue number
Not involved
## 📝 What does this PR do?
fix typo s/get_defualt_parser /get_default_parser
detail info
modified: examples/language/grok-1/inference.py
modified: examples/language/grok-1/inference_tp.py
modified: examples/language/grok-1/utils.py
## 💥 Checklist before requesting a review
- [ ] I have linked my PR to an issue ([instruction](https://docs.github.com/en/issues/tracking-your-work-with-issues/linking-a-pull-request-to-an-issue))
- [ ] My issue clearly describes the problem/feature/proposal, with diagrams/charts/table/code if possible
- [ ] I have performed a self-review of my code
- [ ] I have added thorough tests.
- [ ] I have added docstrings for all the functions/methods I implemented
## ⭐️ Do you enjoy contributing to Colossal-AI?
- [x] 🌝 Yes, I do.
- [ ] 🌚 No, I don't.
Tell us more if you don't enjoy contributing to Colossal-AI.
|
https://api.github.com/repos/hpcaitech/ColossalAI/pulls/5548
|
2024-04-03T03:51:27Z
|
2024-04-07T11:04:58Z
|
2024-04-07T11:04:58Z
|
2024-04-07T11:49:00Z
| 540
|
hpcaitech/ColossalAI
| 11,249
|
Run tests on Travis CI
|
diff --git a/.travis.yml b/.travis.yml
index 3eb6ce63..f64a78ee 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -4,6 +4,14 @@ language:
- generic
before_install:
- - docker-compose up -d
- - docker ps
+ - docker-compose build
- docker images
+ - docker-compose -f docker-compose.yml -f docker-compose.debug.yml up -d
+ - docker-compose ps
+
+script:
+ - sleep 3
+ - curl http://localhost:8002
+ - docker-compose logs --no-color
+ - docker logs chtsh
+ - CHEATSH_TEST_STANDALONE=NO bash tests/run-tests.sh
diff --git a/Dockerfile b/Dockerfile
index e78be409..e41aa0a5 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -15,5 +15,5 @@ RUN mkdir -p /root/.cheat.sh/log/ \
# installing server dependencies
RUN apk add --update --no-cache py3-jinja2 py3-flask bash gawk
-ENTRYPOINT ["python3"]
-CMD ["bin/srv.py"]
+ENTRYPOINT ["python3", "-u", "bin/srv.py"]
+CMD [""]
diff --git a/README.md b/README.md
index 98d4c6ef..86284cc2 100644
--- a/README.md
+++ b/README.md
@@ -447,8 +447,16 @@ scoop install cht
### Docker
-Currently, the easiest way to get a self-hosted instance running is by using the docker-compose.yml file provided in the extra/docker folder.
-This pulls down the latest image with baked in cheatsheets and starts the app and a Redis instance to back it, making the service available on port 8002 of the local host. This is currently an early implementation and should probably not be used for anything outside of internal/dev/personal use right now.
+Currently, the easiest way to get a self-hosted instance running is by using
+the `docker-compose.yml` file.
+
+ docker-compose up
+
+This builds and runs the image with baked in cheatsheets and starts the app
+and a Redis instance to back it, making the service available at
+http://localhost:8002 This is currently an early implementation and should
+probably not be used for anything outside of internal/dev/personal use right
+now.
## Editors integration
diff --git a/bin/clean_cache.py b/bin/clean_cache.py
index 1b4fe630..bc19aa84 100644
--- a/bin/clean_cache.py
+++ b/bin/clean_cache.py
@@ -1,6 +1,6 @@
import sys
import redis
-REDIS = redis.StrictRedis(host='localhost', port=6379, db=0)
+REDIS = redis.Redis(host='localhost', port=6379, db=0)
for key in sys.argv[1:]:
REDIS.delete(key)
diff --git a/docker-compose.debug.yml b/docker-compose.debug.yml
new file mode 100644
index 00000000..56c5e9c9
--- /dev/null
+++ b/docker-compose.debug.yml
@@ -0,0 +1,8 @@
+# Compose override to add --debug option to bin/srv.py
+# call to print tracebacks on errors to stdout.
+#
+# See https://docs.docker.com/compose/extends/
+version: '2'
+services:
+ app:
+ command: "--debug"
diff --git a/docker-compose.yml b/docker-compose.yml
index 589033c6..9e5f58f2 100644
--- a/docker-compose.yml
+++ b/docker-compose.yml
@@ -1,10 +1,13 @@
version: '2'
services:
app:
- build:
- context: .
+ build: .
+ image: cheat.sh
+ container_name: chtsh
depends_on:
- redis
+ environment:
+ - CHEATSH_CACHE_REDIS_HOST=redis
ports:
- "8002:8002"
redis:
diff --git a/lib/cache.py b/lib/cache.py
index 5f552763..8e2b1263 100644
--- a/lib/cache.py
+++ b/lib/cache.py
@@ -19,7 +19,7 @@
_REDIS = None
if CONFIG['cache.type'] == 'redis':
import redis
- _REDIS = redis.StrictRedis(
+ _REDIS = redis.Redis(
host=CONFIG['cache.redis.host'],
port=CONFIG['cache.redis.port'],
db=CONFIG['cache.redis.db'])
diff --git a/tests/run-tests.sh b/tests/run-tests.sh
index 7b50fdf0..9f92e8e1 100644
--- a/tests/run-tests.sh
+++ b/tests/run-tests.sh
@@ -2,28 +2,37 @@
# 1) start server:
# without caching:
-# REDIS_HOST=None CHEATSH_PORT=50000 python bin/srv.py
+# CHEATSH_CACHE_TYPE=none CHEATSH_PORT=50000 python bin/srv.py
# (recommended)
# with caching:
-# REDIS_PREFIX=TEST1 CHEATSH_PORT=50000 python bin/srv.py
+# CHEATSH_REDIS_PREFIX=TEST1 CHEATSH_PORT=50000 python bin/srv.py
# (for complex search queries + to test caching)
# 2) configure CHTSH_URL
# 3) run the script
+# work from script's dir
+cd "$(dirname "$0")" || exit
+
+# detect Python - if not set in env, try default virtualenv
PYTHON="${PYTHON:-../ve/bin/python}"
-"$PYTHON" --version 2>&1 | grep -q 'Python 2' && python_version=2 || python_version=3
+# if no virtualenv, try the current python3 binary
+if ! command -v "$PYTHON" &> /dev/null; then
+ PYTHON=$(command -v python3)
+fi
+python_version="$($PYTHON -c 'import sys; print(sys.version_info[0])')"
+echo "Using PYTHON $python_version: $PYTHON"
skip_online="${CHEATSH_TEST_SKIP_ONLINE:-NO}"
test_standalone="${CHEATSH_TEST_STANDALONE:-YES}"
show_details="${CHEATSH_TEST_SHOW_DETAILS:-YES}"
update_tests_results="${CHEATSH_UPDATE_TESTS_RESULTS:-NO}"
+CHTSH_URL="${CHTSH_URL:-http://localhost:8002}"
TMP=$(mktemp /tmp/cht.sh.tests-XXXXXXXXXXXXXX)
TMP2=$(mktemp /tmp/cht.sh.tests-XXXXXXXXXXXXXX)
TMP3=$(mktemp /tmp/cht.sh.tests-XXXXXXXXXXXXXX)
trap 'rm -rf $TMP $TMP2 $TMP3' EXIT
-export CHTSH_URL=http://cht.sh:50000
CHTSH_SCRIPT=$(dirname "$(dirname "$(readlink -f "$0")")")/share/cht.sh.txt
export PYTHONIOENCODING=UTF-8
@@ -40,6 +49,7 @@ failed=0
while read -r number test_line; do
+ echo -e "\e[34mRunning $number: \e[36m$test_line\e[0m"
if [ "$skip_online" = YES ]; then
if [[ $test_line = *\[online\]* ]]; then
echo "$number is [online]; skipping"
@@ -48,10 +58,12 @@ while read -r number test_line; do
fi
if [[ "$python_version" = 2 ]] && [[ $test_line = *\[python3\]* ]]; then
+ echo "$number is for Python 3; skipping"
continue
fi
if [[ "$python_version" = 3 ]] && [[ $test_line = *\[python2\]* ]]; then
+ echo "$number is for Python 2; skipping"
continue
fi
@@ -60,20 +72,25 @@ while read -r number test_line; do
if [ "$test_standalone" = YES ]; then
test_line="${test_line//cht.sh /}"
- "${PYTHON}" ../lib/standalone.py "$test_line" > "$TMP" 2> /dev/null
+ [[ $show_details == YES ]] && echo "${PYTHON} ../lib/standalone.py $test_line"
+ "${PYTHON}" ../lib/standalone.py "$test_line" > "$TMP"
elif [[ $test_line = "cht.sh "* ]]; then
test_line="${test_line//cht.sh /}"
+ [[ $show_details == YES ]] && echo "bash $CHTSH_SCRIPT $test_line"
eval "bash $CHTSH_SCRIPT $test_line" > "$TMP"
else
+ [[ $show_details == YES ]] && echo "curl -s $CHTSH_URL/$test_line"
eval "curl -s $CHTSH_URL/$test_line" > "$TMP"
fi
- if ! diff results/"$number" "$TMP" > "$TMP2"; then
+ if ! diff -u3 --color=always results/"$number" "$TMP" > "$TMP2"; then
if [[ $CHEATSH_UPDATE_TESTS_RESULTS = NO ]]; then
if [ "$show_details" = YES ]; then
- echo "$ CHEATSH_CACHE_TYPE=none python ../lib/standalone.py $test_line"
cat "$TMP2"
fi
+ if grep -q "Internal Server Error" "$TMP2"; then
+ [[ $TRAVIS == true ]] && docker logs chtsh
+ fi
echo "FAILED: [$number] $test_line"
else
cat "$TMP" > results/"$number"
|
Travis status from my fork https://travis-ci.com/github/abitrolly/cheat.sh/builds
---
They don't really work well.

|
https://api.github.com/repos/chubin/cheat.sh/pulls/224
|
2020-07-29T17:05:17Z
|
2020-08-06T04:31:21Z
|
2020-08-06T04:31:21Z
|
2020-10-13T05:10:11Z
| 2,167
|
chubin/cheat.sh
| 15,259
|
Correctly return get_unit_size
|
diff --git a/manimlib/mobject/number_line.py b/manimlib/mobject/number_line.py
index 40577194a3..f8a7c17fa4 100644
--- a/manimlib/mobject/number_line.py
+++ b/manimlib/mobject/number_line.py
@@ -131,7 +131,7 @@ def p2n(self, point):
return self.point_to_number(point)
def get_unit_size(self):
- return (self.x_max - self.x_min) / self.get_length()
+ return self.get_length() / (self.x_max - self.x_min)
def default_numbers_to_display(self):
if self.numbers_to_show is not None:
|
A very simple fix for `NumberLine.get_unit_size()` in `manimlib/mobject/number_line.py`
Closes https://github.com/3b1b/manim/issues/997
> In `manimlib/mobject/number_line.py` the method `get_unit_size()` is defined thus
>
> ```python
> def get_unit_size(self):
> return (self.x_max - self.x_min) / self.get_length()
> ```
>
> However, the dividend should be flipped with the divisor:
>
> ```python
> def get_unit_size(self):
> return self.get_length() / (self.x_max - self.x_min)
> ```
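A quick numeric sanity check of the corrected formula, with made-up values: a number line spanning x in [0, 10] drawn with an on-screen length of 5 should have 0.5 screen units per number-line unit:
```python
x_min, x_max, length = 0, 10, 5
unit_size = length / (x_max - x_min)
assert unit_size == 0.5  # the old formula returned the inverse, 2.0
```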
|
https://api.github.com/repos/3b1b/manim/pulls/1064
|
2020-05-13T16:20:45Z
|
2020-09-30T15:21:57Z
|
2020-09-30T15:21:57Z
|
2020-09-30T15:21:57Z
| 157
|
3b1b/manim
| 18,079
|
Update SimPy Link
|
diff --git a/README.md b/README.md
index c93f2c393..fe6c218ec 100644
--- a/README.md
+++ b/README.md
@@ -1024,7 +1024,7 @@ Inspired by [awesome-php](https://github.com/ziadoz/awesome-php).
* [QuTiP](http://qutip.org/) - Quantum Toolbox in Python.
* [RDKit](http://www.rdkit.org/) - Cheminformatics and Machine Learning Software.
* [SciPy](https://www.scipy.org/) - A Python-based ecosystem of open-source software for mathematics, science, and engineering.
-* [SimPy](https://bitbucket.org/simpy/simpy) - A process-based discrete-event simulation framework.
+* [SimPy](https://gitlab.com/team-simpy/simpy) - A process-based discrete-event simulation framework.
* [statsmodels](https://github.com/statsmodels/statsmodels) - Statistical modeling and econometrics in Python.
* [SymPy](https://github.com/sympy/sympy) - A Python library for symbolic mathematics.
* [Zipline](https://github.com/quantopian/zipline) - A Pythonic algorithmic trading library.
|
The previous link was returning 404. Updated with the current working URL.
|
https://api.github.com/repos/vinta/awesome-python/pulls/1602
|
2020-08-26T17:38:23Z
|
2020-08-27T16:45:48Z
|
2020-08-27T16:45:48Z
|
2020-08-27T17:24:12Z
| 271
|
vinta/awesome-python
| 27,252
|
changed VAE sampling variance to 1
|
diff --git a/examples/variational_autoencoder.py b/examples/variational_autoencoder.py
index be8c51f8890..69b846aab25 100644
--- a/examples/variational_autoencoder.py
+++ b/examples/variational_autoencoder.py
@@ -16,7 +16,7 @@
latent_dim = 2
intermediate_dim = 256
nb_epoch = 50
-epsilon_std = 0.01
+epsilon_std = 1.0
x = Input(batch_shape=(batch_size, original_dim))
h = Dense(intermediate_dim, activation='relu')(x)
diff --git a/examples/variational_autoencoder_deconv.py b/examples/variational_autoencoder_deconv.py
index 25821eca0d8..1b28a12ef87 100644
--- a/examples/variational_autoencoder_deconv.py
+++ b/examples/variational_autoencoder_deconv.py
@@ -27,7 +27,7 @@
original_img_size = (img_rows, img_cols, img_chns)
latent_dim = 2
intermediate_dim = 128
-epsilon_std = 0.01
+epsilon_std = 1.0
nb_epoch = 5
x = Input(batch_shape=(batch_size,) + original_img_size)
|
In discussion with Amar Shah @amarshah.
The loss/negative lower bound is now correct with this fix.
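For context, a sketch of the reparameterization step this constant feeds into (NumPy stand-in with dummy encoder outputs, shapes illustrative): with `epsilon_std = 1.0` the injected noise matches the unit-variance prior assumed by the KL term, which is why the old `0.01` made the lower bound wrong:
```python
import numpy as np

epsilon_std = 1.0
z_mean = np.zeros((1, 2))      # dummy encoder outputs
z_log_var = np.zeros((1, 2))
epsilon = np.random.normal(0.0, epsilon_std, size=z_mean.shape)
z = z_mean + np.exp(z_log_var / 2) * epsilon  # z ~ N(z_mean, exp(z_log_var))
```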
|
https://api.github.com/repos/keras-team/keras/pulls/4211
|
2016-10-27T12:26:47Z
|
2016-11-02T22:58:32Z
|
2016-11-02T22:58:32Z
|
2016-11-02T22:58:32Z
| 275
|
keras-team/keras
| 47,342
|
Pass binary data via APIGateway - Integration test
|
diff --git a/tests/integration/test_api_gateway.py b/tests/integration/test_api_gateway.py
index ed47bdb7b9f88..d6316b889d906 100644
--- a/tests/integration/test_api_gateway.py
+++ b/tests/integration/test_api_gateway.py
@@ -450,6 +450,11 @@ def _test_api_gateway_lambda_proxy_integration(
result = requests.post(url, data=json.dumps({"return_raw_body": body_msg}))
self.assertEqual(body_msg, to_str(result.content))
+ # send message with binary data
+ binary_msg = b"\xff \xaa \x11"
+ result = requests.post(url, data=binary_msg)
+ self.assertEqual("/yCqIBE=", json.loads(to_str(result.content))["body"])
+
def test_api_gateway_lambda_proxy_integration_any_method(self):
self._test_api_gateway_lambda_proxy_integration_any_method(
self.TEST_LAMBDA_PROXY_BACKEND_ANY_METHOD,
|
This PR extends the API Gateway integration test to cover passing binary data via API Gateway.
It is a follow-up to PR #4249.
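The expected string in the new assertion is simply the base64 encoding that the Lambda proxy integration applies to binary request bodies; a quick check:
```python
import base64

assert base64.b64encode(b"\xff \xaa \x11").decode() == "/yCqIBE="
```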
|
https://api.github.com/repos/localstack/localstack/pulls/4272
|
2021-07-07T22:14:39Z
|
2021-12-29T20:45:41Z
|
2021-12-29T20:45:41Z
|
2022-01-11T16:01:54Z
| 203
|
localstack/localstack
| 28,869
|
Resonant Frequency & Electrical Impedance
|
diff --git a/electronics/electrical_impedance.py b/electronics/electrical_impedance.py
new file mode 100644
index 000000000000..44041ff790b6
--- /dev/null
+++ b/electronics/electrical_impedance.py
@@ -0,0 +1,46 @@
+"""Electrical impedance is the measure of the opposition that a
+circuit presents to a current when a voltage is applied.
+Impedance extends the concept of resistance to alternating current (AC) circuits.
+Source: https://en.wikipedia.org/wiki/Electrical_impedance
+"""
+
+from __future__ import annotations
+
+from math import pow, sqrt
+
+
+def electrical_impedance(
+ resistance: float, reactance: float, impedance: float
+) -> dict[str, float]:
+ """
+ Apply Electrical Impedance formula, on any two given electrical values,
+ which can be resistance, reactance, and impedance, and then in a Python dict
+ return name/value pair of the zero value.
+
+ >>> electrical_impedance(3,4,0)
+ {'impedance': 5.0}
+ >>> electrical_impedance(0,4,5)
+ {'resistance': 3.0}
+ >>> electrical_impedance(3,0,5)
+ {'reactance': 4.0}
+ >>> electrical_impedance(3,4,5)
+ Traceback (most recent call last):
+ ...
+ ValueError: One and only one argument must be 0
+ """
+ if (resistance, reactance, impedance).count(0) != 1:
+ raise ValueError("One and only one argument must be 0")
+ if resistance == 0:
+ return {"resistance": sqrt(pow(impedance, 2) - pow(reactance, 2))}
+ elif reactance == 0:
+ return {"reactance": sqrt(pow(impedance, 2) - pow(resistance, 2))}
+ elif impedance == 0:
+ return {"impedance": sqrt(pow(resistance, 2) + pow(reactance, 2))}
+ else:
+ raise ValueError("Exactly one argument must be 0")
+
+
+if __name__ == "__main__":
+ import doctest
+
+ doctest.testmod()
diff --git a/electronics/resonant_frequency.py b/electronics/resonant_frequency.py
new file mode 100644
index 000000000000..4f95043b600a
--- /dev/null
+++ b/electronics/resonant_frequency.py
@@ -0,0 +1,50 @@
+# https://en.wikipedia.org/wiki/LC_circuit
+
+"""An LC circuit, also called a resonant circuit, tank circuit, or tuned circuit,
+is an electric circuit consisting of an inductor, represented by the letter L,
+and a capacitor, represented by the letter C, connected together.
+The circuit can act as an electrical resonator, an electrical analogue of a
+tuning fork, storing energy oscillating at the circuit's resonant frequency.
+Source: https://en.wikipedia.org/wiki/LC_circuit
+"""
+
+from __future__ import annotations
+
+from math import pi, sqrt
+
+
+def resonant_frequency(inductance: float, capacitance: float) -> tuple:
+ """
+    This function can calculate the resonant frequency of an LC circuit
+    for the given values of inductance and capacitance.
+
+ Examples are given below:
+ >>> resonant_frequency(inductance=10, capacitance=5)
+ ('Resonant frequency', 0.022507907903927652)
+ >>> resonant_frequency(inductance=0, capacitance=5)
+ Traceback (most recent call last):
+ ...
+ ValueError: Inductance cannot be 0 or negative
+ >>> resonant_frequency(inductance=10, capacitance=0)
+ Traceback (most recent call last):
+ ...
+ ValueError: Capacitance cannot be 0 or negative
+ """
+
+ if inductance <= 0:
+ raise ValueError("Inductance cannot be 0 or negative")
+
+ elif capacitance <= 0:
+ raise ValueError("Capacitance cannot be 0 or negative")
+
+ else:
+ return (
+ "Resonant frequency",
+ float(1 / (2 * pi * (sqrt(inductance * capacitance)))),
+ )
+
+
+if __name__ == "__main__":
+ import doctest
+
+ doctest.testmod()
|
### Describe your change:
* [x] Add an algorithm?
* [ ] Fix a bug or typo in an existing algorithm?
* [ ] Documentation change?
### Checklist:
* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [x] All new Python files are placed inside an existing directory.
* [x] All filenames are in all lowercase characters with no spaces or dashes.
* [x] All functions and variable names follow Python naming conventions.
* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.
* [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
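As a quick sanity check of the two formulas introduced above, evaluated with the same values the doctests use:
```python
from math import pi, sqrt

assert sqrt(3 ** 2 + 4 ** 2) == 5.0           # impedance for R=3, X=4
f = 1 / (2 * pi * sqrt(10 * 5))               # resonant frequency for L=10, C=5
assert abs(f - 0.022507907903927652) < 1e-15
```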
|
https://api.github.com/repos/TheAlgorithms/Python/pulls/6983
|
2022-10-11T08:35:46Z
|
2022-10-29T15:25:27Z
|
2022-10-29T15:25:27Z
|
2022-10-29T15:26:40Z
| 1,041
|
TheAlgorithms/Python
| 29,519
|
Create one_cycle() function
|
diff --git a/train.py b/train.py
index 14d7ac8fc02..d0f3bdcc1ab 100644
--- a/train.py
+++ b/train.py
@@ -28,7 +28,7 @@
from utils.datasets import create_dataloader
from utils.general import labels_to_class_weights, increment_path, labels_to_image_weights, init_seeds, \
fitness, strip_optimizer, get_latest_run, check_dataset, check_file, check_git_status, check_img_size, \
- print_mutation, set_logging
+ print_mutation, set_logging, one_cycle
from utils.google_utils import attempt_download
from utils.loss import compute_loss
from utils.plots import plot_images, plot_labels, plot_results, plot_evolution
@@ -126,12 +126,12 @@ def train(hyp, opt, device, tb_writer=None, wandb=None):
# Scheduler https://arxiv.org/pdf/1812.01187.pdf
# https://pytorch.org/docs/stable/_modules/torch/optim/lr_scheduler.html#OneCycleLR
- lf = lambda x: ((1 + math.cos(x * math.pi / epochs)) / 2) * (1 - hyp['lrf']) + hyp['lrf'] # cosine
+ lf = one_cycle(1, hyp['lrf'], epochs) # cosine 1->hyp['lrf']
scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf)
# plot_lr_scheduler(optimizer, scheduler, epochs)
# Logging
- if wandb and wandb.run is None:
+ if rank in [-1, 0] and wandb and wandb.run is None:
opt.hyp = hyp # add hyperparameters
wandb_run = wandb.init(config=opt, resume="allow",
project='YOLOv5' if opt.project == 'runs/train' else Path(opt.project).stem,
diff --git a/utils/general.py b/utils/general.py
index 12249059c8f..fceefb0a4a9 100755
--- a/utils/general.py
+++ b/utils/general.py
@@ -102,6 +102,11 @@ def clean_str(s):
return re.sub(pattern="[|@#!¡·$€%&()=?¿^*;:,¨´><+]", repl="_", string=s)
+def one_cycle(y1=0.0, y2=1.0, steps=100):
+ # lambda function for sinusoidal ramp from y1 to y2
+ return lambda x: ((1 - math.cos(x * math.pi / steps)) / 2) * (y2 - y1) + y1
+
+
def labels_to_class_weights(labels, nc=80):
# Get class weights (inverse frequency) from training labels
if labels[0] is None: # no labels loaded
diff --git a/utils/plots.py b/utils/plots.py
index 3a4dccdc34c..6b4e30147b7 100644
--- a/utils/plots.py
+++ b/utils/plots.py
@@ -190,6 +190,7 @@ def plot_lr_scheduler(optimizer, scheduler, epochs=300, save_dir=''):
plt.xlim(0, epochs)
plt.ylim(0)
plt.savefig(Path(save_dir) / 'LR.png', dpi=200)
+ plt.close()
def plot_test_txt(): # from utils.plots import *; plot_test()
|
## 🛠️ PR Summary
<sub>Made with ❤️ by [Ultralytics Actions](https://github.com/ultralytics/actions)<sub>
### 🌟 Summary
Improved learning rate scheduling and logging during training in YOLOv5.
### 📊 Key Changes
- Refactored the learning rate scheduling by using a new `one_cycle` method.
- Adjusted the condition for initializing Weights & Biases (wandb) logging to ensure it only happens on the main process.
- Added a function closure for a sinusoidal learning rate scheduler within `general.py`.
- Included a line to close the matplotlib plot in `plots.py` to prevent memory leakage.
### 🎯 Purpose & Impact
- The new `one_cycle` function helps create smoother learning rate changes, potentially leading to better training performance. 🚀
- Wandb logging now correctly initiates only on the main process, preventing unnecessary initialization and ensuring cleaner logs. 📊
- Closing the matplotlib plot after saving helps with resource optimization, especially when running multiple training sessions. 🖼️
Overall, these changes aim to enhance the model training process and improve resource management for users of the YOLOv5 object detection system.
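An endpoint check of the new `one_cycle` closure (the `lrf` value below is made up): it ramps cosine-style from `y1` at step 0 to `y2` at the final step:
```python
import math

def one_cycle(y1=0.0, y2=1.0, steps=100):
    return lambda x: ((1 - math.cos(x * math.pi / steps)) / 2) * (y2 - y1) + y1

lf = one_cycle(1, 0.2, 300)  # mirrors lf = one_cycle(1, hyp['lrf'], epochs)
assert lf(0) == 1.0
assert abs(lf(300) - 0.2) < 1e-12
```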
|
https://api.github.com/repos/ultralytics/yolov5/pulls/1836
|
2021-01-04T23:45:11Z
|
2021-01-04T23:49:08Z
|
2021-01-04T23:49:08Z
|
2024-01-19T19:57:06Z
| 751
|
ultralytics/yolov5
| 25,111
|
Pyplot should invoke function for coordinates
|
diff --git a/lib/streamlit/elements/pyplot.py b/lib/streamlit/elements/pyplot.py
index 343e8993e26f..6703007396fd 100644
--- a/lib/streamlit/elements/pyplot.py
+++ b/lib/streamlit/elements/pyplot.py
@@ -89,7 +89,7 @@ def pyplot(dg, fig=None, clear_figure=None, **kwargs):
dg.exception(PyplotGlobalUseWarning()) # type: ignore
image_list_proto = ImageListProto()
- marshall(dg._get_coordinates, image_list_proto, fig, clear_figure, **kwargs) # type: ignore
+ marshall(dg._get_coordinates(), image_list_proto, fig, clear_figure, **kwargs) # type: ignore
return dg._enqueue("imgs", image_list_proto) # type: ignore
|
Originally, the Pyplot mixin passed the function used to retrieve the coordinates rather than the coordinates themselves. This caused a problem for the MediaFileManager when deleting files, since every image appeared to be replaced at the "same coordinate".
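The one-character bug in miniature (hypothetical class, for illustration only): passing the bound method hands downstream code a callable rather than the coordinates it returns:
```python
class DeltaGenerator:
    def _get_coordinates(self):
        return (1, 2)

dg = DeltaGenerator()
broken = dg._get_coordinates   # a method object: truthy, but not coordinates
fixed = dg._get_coordinates()  # the actual coordinates
assert callable(broken) and fixed == (1, 2)
```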
|
https://api.github.com/repos/streamlit/streamlit/pulls/2158
|
2020-10-12T17:44:22Z
|
2020-10-12T18:15:54Z
|
2020-10-12T18:15:54Z
|
2021-07-24T00:36:47Z
| 188
|
streamlit/streamlit
| 21,992
|
Added Georgian translation
|
diff --git a/README.md b/README.md
index 2c85d793741..3ac67dbca9b 100644
--- a/README.md
+++ b/README.md
@@ -57,6 +57,7 @@ Translations
* [Croatian](https://github.com/sqlmapproject/sqlmap/blob/master/doc/translations/README-hr-HR.md)
* [Dutch](https://github.com/sqlmapproject/sqlmap/blob/master/doc/translations/README-nl-NL.md)
* [French](https://github.com/sqlmapproject/sqlmap/blob/master/doc/translations/README-fr-FR.md)
+* [Georgian](https://github.com/sqlmapproject/sqlmap/blob/master/doc/translations/README-ka-GE.md)
* [German](https://github.com/sqlmapproject/sqlmap/blob/master/doc/translations/README-de-GER.md)
* [Greek](https://github.com/sqlmapproject/sqlmap/blob/master/doc/translations/README-gr-GR.md)
* [Indonesian](https://github.com/sqlmapproject/sqlmap/blob/master/doc/translations/README-id-ID.md)
diff --git a/doc/translations/README-ka-GE.md b/doc/translations/README-ka-GE.md
new file mode 100644
index 00000000000..83c2fc6e78f
--- /dev/null
+++ b/doc/translations/README-ka-GE.md
@@ -0,0 +1,49 @@
+# sqlmap 
+
+[](https://github.com/sqlmapproject/sqlmap/actions/workflows/tests.yml) [](https://www.python.org/) [](https://raw.githubusercontent.com/sqlmapproject/sqlmap/master/LICENSE) [](https://twitter.com/sqlmap)
+
+sqlmap არის შეღწევადობის ტესტირებისათვის განკუთვილი ინსტრუმენტი, რომლის კოდიც ღიად არის ხელმისაწვდომი. ინსტრუმენტი ახდენს SQL-ინექციის სისუსტეების აღმოჩენისა, გამოყენების და მონაცემთა ბაზათა სერვერების დაუფლების პროცესების ავტომატიზაციას. იგი აღჭურვილია მძლავრი აღმომჩენი მექანიძმით, შეღწევადობის პროფესიონალი ტესტერისათვის შესაფერისი ბევრი ფუნქციით და სკრიპტების ფართო სპექტრით, რომლებიც შეიძლება გამოყენებულ იქნეს მრავალი მიზნით, მათ შორის: მონაცემთა ბაზიდან მონაცემების შეგროვებისათვის, ძირითად საფაილო სისტემაზე წვდომისათვის და out-of-band კავშირების გზით ოპერაციულ სისტემაში ბრძანებათა შესრულებისათვის.
+
+ეკრანის ანაბეჭდები
+----
+
+
+
+შეგიძლიათ ესტუმროთ [ეკრანის ანაბეჭდთა კოლექციას](https://github.com/sqlmapproject/sqlmap/wiki/Screenshots), სადაც დემონსტრირებულია ინსტრუმენტის ზოგიერთი ფუნქცია.
+
+ინსტალაცია
+----
+
+თქვენ შეგიძლიათ უახლესი tar-არქივის ჩამოტვირთვა [აქ](https://github.com/sqlmapproject/sqlmap/tarball/master) დაწკაპუნებით, ან უახლესი zip-არქივის ჩამოტვირთვა [აქ](https://github.com/sqlmapproject/sqlmap/zipball/master) დაწკაპუნებით.
+
+ასევე შეგიძლიათ (და სასურველია) sqlmap-ის ჩამოტვირთვა [Git](https://github.com/sqlmapproject/sqlmap)-საცავის (repository) კლონირებით:
+
+ git clone --depth 1 https://github.com/sqlmapproject/sqlmap.git sqlmap-dev
+
+sqlmap ნებისმიერ პლატფორმაზე მუშაობს [Python](https://www.python.org/download/)-ის **2.6**, **2.7** და **3.x** ვერსიებთან.
+
+გამოყენება
+----
+
+ძირითადი ვარიანტებისა და პარამეტრების ჩამონათვალის მისაღებად გამოიყენეთ ბრძანება:
+
+ python sqlmap.py -h
+
+ვარიანტებისა და პარამეტრების სრული ჩამონათვალის მისაღებად გამოიყენეთ ბრძანება:
+
+ python sqlmap.py -hh
+
+გამოყენების მარტივი მაგალითი შეგიძლიათ იხილოთ [აქ](https://asciinema.org/a/46601). sqlmap-ის შესაძლებლობათა მიმოხილვის, მხარდაჭერილი ფუნქციონალისა და ყველა ვარიანტის აღწერების მისაღებად გამოყენების მაგალითებთან ერთად, გირჩევთ, იხილოთ [მომხმარებლის სახელმძღვანელო](https://github.com/sqlmapproject/sqlmap/wiki/Usage).
+
+ბმულები
+----
+
+* საწყისი გვერდი: https://sqlmap.org
+* ჩამოტვირთვა: [.tar.gz](https://github.com/sqlmapproject/sqlmap/tarball/master) ან [.zip](https://github.com/sqlmapproject/sqlmap/zipball/master)
+* RSS არხი: https://github.com/sqlmapproject/sqlmap/commits/master.atom
+* პრობლემებისათვის თვალყურის დევნება: https://github.com/sqlmapproject/sqlmap/issues
+* მომხმარებლის სახელმძღვანელო: https://github.com/sqlmapproject/sqlmap/wiki
+* ხშირად დასმული კითხვები (ხდკ): https://github.com/sqlmapproject/sqlmap/wiki/FAQ
+* Twitter: [@sqlmap](https://twitter.com/sqlmap)
+* დემონსტრაციები: [https://www.youtube.com/user/inquisb/videos](https://www.youtube.com/user/inquisb/videos)
+* ეკრანის ანაბეჭდები: https://github.com/sqlmapproject/sqlmap/wiki/Screenshots
|
I have added a Georgian translation for the README.
I have also added the Georgian language to the language list.
|
https://api.github.com/repos/sqlmapproject/sqlmap/pulls/5095
|
2022-05-09T16:28:08Z
|
2022-05-13T16:13:02Z
|
2022-05-13T16:13:02Z
|
2022-05-31T13:00:59Z
| 3,746
|
sqlmapproject/sqlmap
| 14,952
|
Upgrade snap to be based on core20
|
diff --git a/snap/local/build_and_install.sh b/snap/local/build_and_install.sh
index e51e6770c68..7e76fcdc1a5 100755
--- a/snap/local/build_and_install.sh
+++ b/snap/local/build_and_install.sh
@@ -21,7 +21,8 @@ source "${DIR}/common.sh"
RegisterQemuHandlers
ResolveArch "${SNAP_ARCH}"
-tools/strip_hashes.py letsencrypt-auto-source/pieces/dependency-requirements.txt > snap-constraints.txt
+tools/strip_hashes.py letsencrypt-auto-source/pieces/dependency-requirements.txt \
+ | grep -v python-augeas > snap-constraints.txt
pushd "${DIR}/packages"
"${CERTBOT_DIR}/tools/simple_http_server.py" 8080 >/dev/null 2>&1 &
diff --git a/snap/local/compile_native_wheels.sh b/snap/local/compile_native_wheels.sh
index df909ba483a..cabbf99e773 100755
--- a/snap/local/compile_native_wheels.sh
+++ b/snap/local/compile_native_wheels.sh
@@ -14,7 +14,8 @@ source "${DIR}/common.sh"
RegisterQemuHandlers
-tools/strip_hashes.py letsencrypt-auto-source/pieces/dependency-requirements.txt > "${DIR}/snap-constraints.txt"
+tools/strip_hashes.py letsencrypt-auto-source/pieces/dependency-requirements.txt \
+ | grep -v python-augeas > "${DIR}/snap-constraints.txt"
for SNAP_ARCH in ${TARGET_ARCHS}; do
ResolveArch "${SNAP_ARCH}"
DownloadQemuStatic "${QEMU_ARCH}" "${DIR}"
@@ -24,7 +25,7 @@ for SNAP_ARCH in ${TARGET_ARCHS}; do
-v "${DIR}/qemu-${QEMU_ARCH}-static:/usr/bin/qemu-${QEMU_ARCH}-static" \
-v "${DIR}:/workspace" \
-w "/workspace" \
- "${DOCKER_ARCH}/ubuntu:18.04" \
+ "${DOCKER_ARCH}/ubuntu:20.04" \
sh -c "\
apt-get update \
&& DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends python3 python3-venv python3-dev libffi-dev libssl-dev gcc \
diff --git a/snap/local/packages/cffi/cffi-1.14.0-cp36-cp36m-linux_aarch64.whl b/snap/local/packages/cffi/cffi-1.14.0-cp36-cp36m-linux_aarch64.whl
deleted file mode 100644
index 36dfa9adede..00000000000
Binary files a/snap/local/packages/cffi/cffi-1.14.0-cp36-cp36m-linux_aarch64.whl and /dev/null differ
diff --git a/snap/local/packages/cffi/cffi-1.14.0-cp36-cp36m-linux_armv7l.whl b/snap/local/packages/cffi/cffi-1.14.0-cp36-cp36m-linux_armv7l.whl
deleted file mode 100644
index 26f107cc9d4..00000000000
Binary files a/snap/local/packages/cffi/cffi-1.14.0-cp36-cp36m-linux_armv7l.whl and /dev/null differ
diff --git a/snap/local/packages/cffi/cffi-1.14.0-cp38-cp38-linux_aarch64.whl b/snap/local/packages/cffi/cffi-1.14.0-cp38-cp38-linux_aarch64.whl
new file mode 100644
index 00000000000..aded6d5956a
Binary files /dev/null and b/snap/local/packages/cffi/cffi-1.14.0-cp38-cp38-linux_aarch64.whl differ
diff --git a/snap/local/packages/cffi/cffi-1.14.0-cp38-cp38-linux_armv7l.whl b/snap/local/packages/cffi/cffi-1.14.0-cp38-cp38-linux_armv7l.whl
new file mode 100644
index 00000000000..4f6d0ab7d83
Binary files /dev/null and b/snap/local/packages/cffi/cffi-1.14.0-cp38-cp38-linux_armv7l.whl differ
diff --git a/snap/local/packages/cryptography/cryptography-2.8-cp36-cp36m-linux_aarch64.whl b/snap/local/packages/cryptography/cryptography-2.8-cp36-cp36m-linux_aarch64.whl
deleted file mode 100644
index 3a969945a85..00000000000
Binary files a/snap/local/packages/cryptography/cryptography-2.8-cp36-cp36m-linux_aarch64.whl and /dev/null differ
diff --git a/snap/local/packages/cryptography/cryptography-2.8-cp36-cp36m-linux_armv7l.whl b/snap/local/packages/cryptography/cryptography-2.8-cp36-cp36m-linux_armv7l.whl
deleted file mode 100644
index ea49f5dab58..00000000000
Binary files a/snap/local/packages/cryptography/cryptography-2.8-cp36-cp36m-linux_armv7l.whl and /dev/null differ
diff --git a/snap/local/packages/cryptography/cryptography-2.8-cp38-cp38-linux_aarch64.whl b/snap/local/packages/cryptography/cryptography-2.8-cp38-cp38-linux_aarch64.whl
new file mode 100644
index 00000000000..e0392fcd404
Binary files /dev/null and b/snap/local/packages/cryptography/cryptography-2.8-cp38-cp38-linux_aarch64.whl differ
diff --git a/snap/local/packages/cryptography/cryptography-2.8-cp38-cp38-linux_armv7l.whl b/snap/local/packages/cryptography/cryptography-2.8-cp38-cp38-linux_armv7l.whl
new file mode 100644
index 00000000000..38ee1eada56
Binary files /dev/null and b/snap/local/packages/cryptography/cryptography-2.8-cp38-cp38-linux_armv7l.whl differ
diff --git a/snap/snapcraft.yaml b/snap/snapcraft.yaml
index 88833bce200..515a6f688e2 100644
--- a/snap/snapcraft.yaml
+++ b/snap/snapcraft.yaml
@@ -15,7 +15,7 @@ description: |
- Help you revoke the certificate if that ever becomes necessary.
confinement: classic
grade: devel
-base: core18
+base: core20
adopt-info: certbot
apps:
@@ -26,7 +26,7 @@ apps:
AUGEAS_LENS_LIB: "$SNAP/usr/share/augeas/lenses/dist"
LD_LIBRARY_PATH: "$SNAP/usr/lib/x86_64-linux-gnu/:$LD_LIBRARY_PATH"
renew:
- command: certbot -q renew
+ command: bin/certbot -q renew
daemon: oneshot
environment:
PATH: "$SNAP/bin:$SNAP/usr/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games"
@@ -35,58 +35,42 @@ apps:
# Run approximately twice a day with randomization
timer: 00:00~24:00/2
+
parts:
- python-augeas:
- plugin: python
- source: git://github.com/basak/python-augeas
- source-branch: snap
- python-version: python3
- build-packages: [libaugeas-dev]
- acme:
- plugin: python
- source: .
- source-subdir: acme
- constraints: [$SNAPCRAFT_PART_SRC/snap-constraints.txt]
- python-version: python3
- # To build cryptography and cffi if needed
- build-packages: [libffi-dev, libssl-dev]
certbot:
plugin: python
source: .
- source-subdir: certbot
constraints: [$SNAPCRAFT_PART_SRC/snap-constraints.txt]
- python-version: python3
- after: [acme]
- override-pull: |
- snapcraftctl pull
- snapcraftctl set-version `cd $SNAPCRAFT_PART_SRC && git describe|sed s/^v//`
- # Workaround for lack of site-packages leading to empty sitecustomize.py
- stage:
- - -usr/lib/python3.6/sitecustomize.py
- certbot-apache:
- plugin: python
- source: .
- source-subdir: certbot-apache
- constraints: [$SNAPCRAFT_PART_SRC/snap-constraints.txt]
- python-version: python3
- after: [python-augeas, certbot]
- stage-packages: [libaugeas0]
+ python-packages:
+ - git+https://github.com/basak/python-augeas.git@snap
+ - ./acme
+ - ./certbot
+ - ./certbot-apache
+ - ./certbot-nginx
stage:
+ - -usr/lib/python3.8/sitecustomize.py # maybe unnecessary
# Prefer cffi
- - -lib/python3.6/site-packages/augeas.py
- certbot-nginx:
- plugin: python
- source: .
- source-subdir: certbot-nginx
- constraints: [$SNAPCRAFT_PART_SRC/snap-constraints.txt]
- python-version: python3
- # This is the last step, compile pycache now as there should be no conflicts.
- override-prime: |
- snapcraftctl prime
- ./usr/bin/python3 -m compileall -q .
- # After certbot-apache to not rebuild duplicates (essentially sharing what was already staged,
- # like zope)
- after: [certbot-apache]
+ - -lib/python3.8/site-packages/augeas.py
+ stage-packages:
+ - libaugeas0
+ # added to stage python:
+ - libpython3-stdlib
+ - libpython3.8-stdlib
+ - libpython3.8-minimal
+ - python3-pip
+ - python3-setuptools
+ - python3-wheel
+ - python3-venv
+ - python3-minimal
+ - python3-distutils
+ - python3-pkg-resources
+ - python3.8-minimal
+ # To build cryptography and cffi if needed
+ build-packages: [libffi-dev, libssl-dev, git, libaugeas-dev, python3-dev]
+ override-pull: |
+ snapcraftctl pull
+ snapcraftctl set-version `cd $SNAPCRAFT_PART_SRC/certbot && git describe|sed s/^v//`
+
wrappers:
plugin: dump
source: .
|
A squashed version of https://github.com/certbot/certbot/pull/8078. The diff between the two branches is identical.
#8078 LGTM otherwise.
This PR makes several changes to be built on top of the core20 base snap.
The main changes are to `snapcraft.yaml`. With the Snapcraft 4.0/core20 base, the python plugin is a thin wrapper that basically creates a `venv` and installs the packages from source. The trouble with this is that it byte-compiles `__pycache__` caches that conflict between the different parts. So to solve that, we put everything in a single part. Other changes include:
- We use classic confinement, so we need to specify a bunch of python packages to `stage-packages`, as mentioned [here](https://forum.snapcraft.io/t/trouble-bundling-python-with-classic-confinement-in-core20-4-0-4/18234/2).
- The certbot executable now lives in `bin`, so the renew daemon runs `bin/certbot`.
- Since `python-augeas` is now being pulled into the single part, remove the pinning from constraints so we can use the latest version directly from GitHub.
- Precompile our `cryptography` and `cffi` wheels to be based on Python 3.8.
Separately, we had to upgrade the snapcraft docker image to be based on focal, due to the thin wrapper situation. This was accomplished [here](https://github.com/adferrand/snapcraft/pull/1).
|
https://api.github.com/repos/certbot/certbot/pulls/8086
|
2020-06-17T23:59:29Z
|
2020-06-18T00:20:47Z
|
2020-06-18T00:20:47Z
|
2020-06-18T00:22:24Z
| 2,495
|
certbot/certbot
| 1,420
|
P.5 example read int array size mismatch
|
diff --git a/CppCoreGuidelines.md b/CppCoreGuidelines.md
index 0694513fe..e31741913 100644
--- a/CppCoreGuidelines.md
+++ b/CppCoreGuidelines.md
@@ -697,7 +697,7 @@ Or better still just use the type system and replace `Int` with `int32_t`.
void read(int* p, int n); // read max n integers into *p
int a[100];
- read(a, 1000); // bad
+ read(a, 1000); // bad, off the end
better
|
It appears as though the maximum read count (1000) is larger than the int array size (100). Was this intentional, to show the kind of bug such non-compile-time-checked code invites? If so, I think it would be clearer to call the overrun out explicitly in the comment.
|
https://api.github.com/repos/isocpp/CppCoreGuidelines/pulls/1161
|
2018-03-10T22:31:50Z
|
2018-03-11T08:13:56Z
|
2018-03-11T08:13:56Z
|
2018-03-11T08:13:56Z
| 139
|
isocpp/CppCoreGuidelines
| 15,295
|
Add debugging option for drawing landmarks on face extraction.
|
diff --git a/scripts/extract.py b/scripts/extract.py
index e59bfad0d0..972806b37a 100644
--- a/scripts/extract.py
+++ b/scripts/extract.py
@@ -37,6 +37,12 @@ def add_optional_arguments(self, parser):
type=int,
default=1,
help="Number of processes to use.")
+
+ parser.add_argument('-dl', '--debug-landmarks',
+ action="store_true",
+ dest="debug_landmarks",
+ default=False,
+ help="Draw landmarks for debug.")
return parser
def process(self):
@@ -73,7 +79,12 @@ def handleImage(self, image, filename):
rvals = []
for idx, face in faces:
count = idx
-
+
+ # Draws landmarks for debug
+ if self.arguments.debug_landmarks:
+ for (x, y) in face.landmarksAsXY():
+ cv2.circle(image, (x, y), 2, (0, 0, 255), -1)
+
resized_image = self.extractor.extract(image, face, 256)
output_file = get_folder(self.output_dir) / Path(filename).stem
cv2.imwrite(str(output_file) + str(idx) + Path(filename).suffix, resized_image)
|
https://api.github.com/repos/deepfakes/faceswap/pulls/199
|
2018-02-15T11:13:48Z
|
2018-03-03T11:02:51Z
|
2018-03-03T11:02:51Z
|
2018-03-07T09:16:01Z
| 290
|
deepfakes/faceswap
| 18,621
|
|
Docs: fixes
|
diff --git a/docs/cli.rst b/docs/cli.rst
index 171352ebe1..477802eb42 100644
--- a/docs/cli.rst
+++ b/docs/cli.rst
@@ -23,7 +23,7 @@ the same.
The way this script works is by providing access to all the commands on
your Flask application's :attr:`Flask.cli` instance as well as some
built-in commands that are always there. Flask extensions can also
-register more commands there if they so desire.
+register more commands there if they desire so.
For the ``flask`` script to work, an application needs to be discovered.
The two most common ways are either an environment variable
@@ -106,8 +106,8 @@ Factory Functions
In case you are using factory functions to create your application (see
:ref:`app-factories`) you will discover that the ``flask`` command cannot
work with them directly. Flask won't be able to figure out how to
-instanciate your application properly by itself. Because of this reason
-the recommendation is to create a separate file that instanciates
+instantiate your application properly by itself. Because of this reason
+the recommendation is to create a separate file that instantiates
applications. This is by far not the only way to make this work. Another
is the :ref:`custom-scripts` support.
@@ -115,8 +115,7 @@ For instance if you have a factory function that creates an application
from a filename you could make a separate file that creates such an
application from an environment variable.
-For instance this could be a file named ``autoapp.py`` with these
-contents::
+This could be a file named ``autoapp.py`` with these contents::
import os
from yourapplication import create_app
@@ -162,8 +161,8 @@ We won't go into detail now about the differences but if you are curious
you can have a look at the :ref:`script-info-object` section to learn all
about it.
-To explain all of this here an example ``manage.py`` script that manages a
-hypothetical wiki application. We will go through the details
+To explain all of this, here is an example ``manage.py`` script that
+manages a hypothetical wiki application. We will go through the details
afterwards::
import click
@@ -185,12 +184,12 @@ afterwards::
That's a lot of code for not much, so let's go through all parts step by
step.
-1. At first we import regular ``click`` as well as the click extensions
+1. First we import the ``click`` library as well as the click extensions
from the ``flask.cli`` package. Primarily we are here interested
in the :class:`~flask.cli.FlaskGroup` click group and the
:func:`~flask.cli.script_info_option` decorator.
2. The next thing we do is defining a function that is invoked with the
- script info object (:ref:`script-info-object`) from flask and it's
+ script info object (:ref:`script-info-object`) from Flask and its
purpose is to fully import and create the application. This can
either directly import an application object or create it (see
:ref:`app-factories`).
@@ -200,9 +199,9 @@ step.
will come back to this later.
3. Next step is to create a :class:`FlaskGroup`. In this case we just
make an empty function with a help doc string that just does nothing
- and then pass the ``create_wiki_app`` function as factory function.
+ and then pass the ``create_wiki_app`` function as a factory function.
- Whenever click now needs to operate on a flask application it will
+ Whenever click now needs to operate on a Flask application it will
call that function with the script info and ask for it to be created.
4. In step 2 you could see that the config is passed to the actual
creation function. This config comes from the :func:`script_info_option`
@@ -223,11 +222,11 @@ application unless it has to. The reason for this is added flexibility.
This way an application can provide custom commands, but even in the
absence of an application the ``flask`` script is still operational on a
-basic level. In addition to that does it mean that the individual
-commands have the option to not create an instance of the Flask
-application unless required. This is very useful as it allows the server
-command for instance, the load the application on first request instead of
-immediately to give a better debug experience.
+basic level. In addition to that it means that the individual commands
+have the option to avoid creating an instance of the Flask application
+unless required. This is very useful as it allows the server commands for
+instance to load the application on a first request instead of
+immediately, therefore giving a better debug experience.
All of this is provided through the :class:`flask.cli.ScriptInfo` object
and some helper utilities around. The basic way it operates is that when
@@ -239,4 +238,4 @@ the :func:`flask.cli.script_info_option` decorator was added.
One Flask actually needs the individual Flask application it will invoke
the :meth:`flask.cli.ScriptInfo.load_app` method. This happens when the
server starts, when the shell is launched or when the script looks for an
-application provided click command.
+application-provided click command.
|
https://api.github.com/repos/pallets/flask/pulls/1050
|
2014-05-07T20:37:39Z
|
2014-05-09T13:50:11Z
|
2014-05-09T13:50:11Z
|
2020-11-14T03:20:08Z
| 1,220
|
pallets/flask
| 20,601
|
|
Update using_black_with_other_tools.md to ensure flake8 configuration examples are consistent
|
diff --git a/docs/guides/using_black_with_other_tools.md b/docs/guides/using_black_with_other_tools.md
index 22c641a7420..e642a1aef33 100644
--- a/docs/guides/using_black_with_other_tools.md
+++ b/docs/guides/using_black_with_other_tools.md
@@ -145,7 +145,7 @@ There are a few deviations that cause incompatibilities with _Black_.
```
max-line-length = 88
-extend-ignore = E203
+extend-ignore = E203, E704
```
#### Why those options above?
@@ -184,7 +184,7 @@ extend-ignore = E203, E704
```ini
[flake8]
max-line-length = 88
-extend-ignore = E203
+extend-ignore = E203, E704
```
</details>
@@ -195,7 +195,7 @@ extend-ignore = E203
```ini
[flake8]
max-line-length = 88
-extend-ignore = E203
+extend-ignore = E203, E704
```
</details>
|
### Description
On the documentation page "Using Black with other tools", the `.flake8` example included `E7041` in the ignore list, but none of the other examples include this ignore and there is no mention of it above. Removing `E7041` from the example.
### Checklist - did you ...
<!-- If any of the following items aren't relevant for your contribution
please still tick them so we know you've gone through the checklist.
All user-facing changes should get an entry. Otherwise, signal to us
this should get the magical label to silence the CHANGELOG entry check.
Tests are required for bugfixes and new features. Documentation changes
are necessary for formatting and most enhancement changes. -->
- [x] Add an entry in `CHANGES.md` if necessary?
- [x] Add / update tests if necessary?
- [x] Add new / update outdated documentation?
<!-- Just as a reminder, everyone in all psf/black spaces including PRs
must follow the PSF Code of Conduct (link below).
Finally, once again thanks for your time and effort. If you have any
feedback in regards to your experience contributing here, please
let us know!
Helpful links:
PSF COC: https://www.python.org/psf/conduct/
Contributing docs: https://black.readthedocs.io/en/latest/contributing/index.html
Chat on Python Discord: https://discord.gg/RtVdv86PrH -->
|
https://api.github.com/repos/psf/black/pulls/4157
|
2024-01-18T14:55:28Z
|
2024-01-19T23:54:33Z
|
2024-01-19T23:54:33Z
|
2024-01-22T14:13:43Z
| 243
|
psf/black
| 24,586
|
There is a typo in face_recognition/example/facerec_ipcamera_knn.py
|
diff --git a/examples/facerec_ipcamera_knn.py b/examples/facerec_ipcamera_knn.py
index ae9223416..55623ed8f 100644
--- a/examples/facerec_ipcamera_knn.py
+++ b/examples/facerec_ipcamera_knn.py
@@ -209,6 +209,6 @@ def show_prediction_labels_on_image(frame, predictions):
frame = show_prediction_labels_on_image(frame, predictions)
cv2.imshow('camera', frame)
if ord('q') == cv2.waitKey(10):
- cap1.release()
+ cap.release()
cv2.destroyAllWindows()
exit(0)
|
The third-to-last line (line no. 212) is written as `cap1.release()`. It should be `cap.release()` instead:
cap1.release() → cap.release()
|
https://api.github.com/repos/ageitgey/face_recognition/pulls/1279
|
2021-02-07T14:31:15Z
|
2022-06-10T09:03:37Z
|
2022-06-10T09:03:37Z
|
2022-06-10T09:03:37Z
| 143
|
ageitgey/face_recognition
| 22,562
|
Introducing the cd-cs feature
|
diff --git a/README.md b/README.md
index 532df6be5..7a18ab3dd 100644
--- a/README.md
+++ b/README.md
@@ -179,6 +179,7 @@ following rules are enabled by default:
* `cargo_no_command` – fixes wrongs commands like `cargo buid`;
* `cat_dir` – replaces `cat` with `ls` when you try to `cat` a directory;
* `cd_correction` – spellchecks and correct failed cd commands;
+* `cd_cs` – changes `cs` to `cd`;
* `cd_mkdir` – creates directories before cd'ing into them;
* `cd_parent` – changes `cd..` to `cd ..`;
* `chmod_x` – add execution bit;
diff --git a/tests/rules/test_cd_cs.py b/tests/rules/test_cd_cs.py
new file mode 100644
index 000000000..204c651d9
--- /dev/null
+++ b/tests/rules/test_cd_cs.py
@@ -0,0 +1,11 @@
+from thefuck.rules.cd_cs import match, get_new_command
+from thefuck.types import Command
+
+
+def test_match():
+ assert match(Command('cs', 'cs: command not found'))
+ assert match(Command('cs /etc/', 'cs: command not found'))
+
+
+def test_get_new_command():
+ assert get_new_command(Command('cs /etc/', 'cs: command not found')) == 'cd /etc/'
diff --git a/thefuck/rules/cd_cs.py b/thefuck/rules/cd_cs.py
new file mode 100644
index 000000000..f95415d97
--- /dev/null
+++ b/thefuck/rules/cd_cs.py
@@ -0,0 +1,21 @@
+# -*- encoding: utf-8 -*-
+
+# Redirects cs to cd when there is a typo
+# Due to the proximity of the keys - d and s - this seems like a common typo
+# ~ > cs /etc/
+# cs: command not found
+# ~ > fuck
+# cd /etc/ [enter/↑/↓/ctrl+c]
+# /etc >
+
+
+def match(command):
+ if command.script_parts[0] == 'cs':
+ return True
+
+
+def get_new_command(command):
+ return 'cd' + ''.join(command.script[2:])
+
+
+priority = 900
|
I found in my usage that, due to the proximity of the 's' and 'd' keys, I would commonly type cs when I meant to type cd.
I am hoping to address that with this update to the rules.
|
https://api.github.com/repos/nvbn/thefuck/pulls/1167
|
2021-02-14T01:17:05Z
|
2021-03-22T19:55:45Z
|
2021-03-22T19:55:45Z
|
2021-03-22T19:56:08Z
| 551
|
nvbn/thefuck
| 30,638
|
don't run more than 61 workers on Windows
|
diff --git a/README.md b/README.md
index afc9e8e808..28c4fbe0d8 100644
--- a/README.md
+++ b/README.md
@@ -986,6 +986,8 @@ More details can be found in [CONTRIBUTING](CONTRIBUTING.md).
### 19.5b0
+* don't crash when run on a Windows machine with more than 61 cores (#838)
+
* remove unnecessary parentheses around `yield` expressions (#834)
* add parentheses around long tuples in unpacking assignments (#832)
diff --git a/black.py b/black.py
index e7dce5bf2a..18d60c02d6 100644
--- a/black.py
+++ b/black.py
@@ -440,22 +440,10 @@ def main(
report=report,
)
else:
- loop = asyncio.get_event_loop()
- executor = ProcessPoolExecutor(max_workers=os.cpu_count())
- try:
- loop.run_until_complete(
- schedule_formatting(
- sources=sources,
- fast=fast,
- write_back=write_back,
- mode=mode,
- report=report,
- loop=loop,
- executor=executor,
- )
- )
- finally:
- shutdown(loop)
+ reformat_many(
+ sources=sources, fast=fast, write_back=write_back, mode=mode, report=report
+ )
+
if verbose or not quiet:
bang = "💥 💔 💥" if report.return_code else "✨ 🍰 ✨"
out(f"All done! {bang}")
@@ -497,6 +485,36 @@ def reformat_one(
report.failed(src, str(exc))
+def reformat_many(
+ sources: Set[Path],
+ fast: bool,
+ write_back: WriteBack,
+ mode: FileMode,
+ report: "Report",
+) -> None:
+ """Reformat multiple files using a ProcessPoolExecutor."""
+ loop = asyncio.get_event_loop()
+ worker_count = os.cpu_count()
+ if sys.platform == "win32":
+ # Work around https://bugs.python.org/issue26903
+ worker_count = min(worker_count, 61)
+ executor = ProcessPoolExecutor(max_workers=worker_count)
+ try:
+ loop.run_until_complete(
+ schedule_formatting(
+ sources=sources,
+ fast=fast,
+ write_back=write_back,
+ mode=mode,
+ report=report,
+ loop=loop,
+ executor=executor,
+ )
+ )
+ finally:
+ shutdown(loop)
+
+
async def schedule_formatting(
sources: Set[Path],
fast: bool,
|
Fixes #564
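For context, a minimal sketch of the workaround the diff applies — the 61 cap and the `sys.platform` check come straight from the patch above; treating `os.cpu_count()` as possibly `None` is an extra defensive assumption:
```python
import os
import sys
from concurrent.futures import ProcessPoolExecutor

# Windows' WaitForMultipleObjects can wait on at most 61 process handles,
# so ProcessPoolExecutor crashes with more workers (bpo-26903).
worker_count = os.cpu_count() or 1
if sys.platform == "win32":
    worker_count = min(worker_count, 61)
executor = ProcessPoolExecutor(max_workers=worker_count)
```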
|
https://api.github.com/repos/psf/black/pulls/838
|
2019-05-07T15:49:41Z
|
2019-05-07T17:11:21Z
|
2019-05-07T17:11:21Z
|
2019-06-25T23:36:12Z
| 611
|
psf/black
| 23,753
|
Add Supervisor to DevOps Tools section
|
diff --git a/README.md b/README.md
index b6ed6add6..b2e8c525e 100644
--- a/README.md
+++ b/README.md
@@ -986,6 +986,7 @@ A curated list of awesome Python frameworks, libraries and software. Inspired by
* [fig](http://www.fig.sh/) - Fast, isolated development environments using [Docker](https://www.docker.com/).
* [hgapi](http://bitbucket.org/haard/hgapi) - Pure-Python API for Mercurial.
* [gitapi](http://bitbucket.org/haard/gitapi) - Pure-Python API for git.
+* [supervisor](https://github.com/Supervisor/supervisor) - Supervisor process control system for UNIX.
## Job Scheduler
|
It's a bit strange that Supervisor was not previously added to this awesome list. I saw `honcho` in the DevOps section, so I added Supervisor here.
|
https://api.github.com/repos/vinta/awesome-python/pulls/312
|
2015-03-03T04:04:19Z
|
2015-03-04T05:26:15Z
|
2015-03-04T05:26:15Z
|
2015-03-04T06:32:51Z
| 178
|
vinta/awesome-python
| 26,888
|
Include added/deleted TXT record name in RFC 2136 debug log
|
diff --git a/certbot-dns-rfc2136/certbot_dns_rfc2136/_internal/dns_rfc2136.py b/certbot-dns-rfc2136/certbot_dns_rfc2136/_internal/dns_rfc2136.py
index ee71c9681d7..cb4d5addb09 100644
--- a/certbot-dns-rfc2136/certbot_dns_rfc2136/_internal/dns_rfc2136.py
+++ b/certbot-dns-rfc2136/certbot_dns_rfc2136/_internal/dns_rfc2136.py
@@ -129,7 +129,7 @@ def add_txt_record(self, record_name, record_content, record_ttl):
rcode = response.rcode()
if rcode == dns.rcode.NOERROR:
- logger.debug('Successfully added TXT record')
+ logger.debug('Successfully added TXT record %s', record_name)
else:
raise errors.PluginError('Received response from server: {0}'
.format(dns.rcode.to_text(rcode)))
@@ -164,7 +164,7 @@ def del_txt_record(self, record_name, record_content):
rcode = response.rcode()
if rcode == dns.rcode.NOERROR:
- logger.debug('Successfully deleted TXT record')
+ logger.debug('Successfully deleted TXT record %s', record_name)
else:
raise errors.PluginError('Received response from server: {0}'
.format(dns.rcode.to_text(rcode)))
|
## Pull Request Checklist
- [ ] If the change being made is to a [distributed component](https://certbot.eff.org/docs/contributing.html#code-components-and-layout), edit the `master` section of `certbot/CHANGELOG.md` to include a description of the change being made.
- [ ] Include your name in `AUTHORS.md` if you like.
|
https://api.github.com/repos/certbot/certbot/pulls/7696
|
2020-01-17T13:58:11Z
|
2020-01-17T14:42:11Z
|
2020-01-17T14:42:11Z
|
2020-01-18T09:39:04Z
| 332
|
certbot/certbot
| 3,060
|
Add PlayStore
|
diff --git a/data.json b/data.json
index 816996fa5..7d2287f88 100644
--- a/data.json
+++ b/data.json
@@ -867,6 +867,13 @@
"username_claimed": "blue",
"username_unclaimed": "noonewouldeverusethis7"
},
+ "PlayStore": {
+ "errorType": "status_code",
+ "url": "https://play.google.com/store/apps/developer?id={}",
+ "urlMain": "https://play.google.com/store",
+ "username_claimed": "Facebook",
+ "username_unclaimed": "noonewouldeverusethis7"
+ },
"Plug.DJ": {
"errorType": "status_code",
"rank": 32278,
@@ -1373,4 +1380,4 @@
"username_claimed": "blue",
"username_unclaimed": "noonewouldeverusethis7"
}
-}
\ No newline at end of file
+}
|
Please note: Google PlayStore dev ids are case-sensitive. So, for example, if `Facebook` is registered and you search for `facebook`, it'll return `404`.
There is no way around this as of now.
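As an illustration of the `status_code` check this entry relies on — a hedged sketch, not Sherlock's actual code path; the URL template is the one from the `data.json` entry above:
```python
import requests

def playstore_dev_exists(username: str) -> bool:
    # Mirrors errorType "status_code": a claimed developer id returns 200,
    # an unclaimed (or wrongly-cased) one returns 404.
    url = f"https://play.google.com/store/apps/developer?id={username}"
    return requests.get(url, timeout=10).status_code == 200

# playstore_dev_exists("Facebook")  -> True (claimed)
# playstore_dev_exists("facebook")  -> False, since ids are case-sensitive
```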
|
https://api.github.com/repos/sherlock-project/sherlock/pulls/282
|
2019-07-22T23:50:35Z
|
2019-07-23T06:55:16Z
|
2019-07-23T06:55:16Z
|
2019-07-23T08:42:28Z
| 232
|
sherlock-project/sherlock
| 36,523
|
Fix example `SECRET_KEY` generation code in documentation
|
diff --git a/docs/config.rst b/docs/config.rst
index 2f387b4600..ad73710987 100644
--- a/docs/config.rst
+++ b/docs/config.rst
@@ -180,7 +180,7 @@ The following configuration values are used internally by Flask:
application. It should be a long random ``bytes`` or ``str``. For
example, copy the output of this to your config::
- $ python -c 'import secrets; print(secrets.token_hex()))'
+ $ python -c 'import secrets; print(secrets.token_hex())'
'192b9bdd22ab9ed4d12e236c78afcb9a393ec15f71bbf5dc987d54727823bcbf'
**Do not reveal the secret key when posting questions or committing code.**
|
Removes the extra `)` in:
```python
print(secrets.token_hex()))
^
```
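For completeness, the corrected one-liner runs as-is; a minimal sketch of the same thing in a Python file:
```python
import secrets

# Equivalent of the fixed shell one-liner: a random hex token
# suitable for use as Flask's SECRET_KEY.
print(secrets.token_hex())
```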
|
https://api.github.com/repos/pallets/flask/pulls/4304
|
2021-10-18T08:39:53Z
|
2021-10-18T08:41:39Z
|
2021-10-18T08:41:39Z
|
2021-11-02T00:03:26Z
| 188
|
pallets/flask
| 20,595
|
FIX: acx.fetch_ohlcv handled the since parameter incorrectly
|
diff --git a/js/acx.js b/js/acx.js
index 8a1e825f3797..83ae62ec001a 100644
--- a/js/acx.js
+++ b/js/acx.js
@@ -303,7 +303,7 @@ module.exports = class acx extends Exchange {
'limit': limit,
};
if (since !== undefined) {
- request['timestamp'] = since;
+ request['timestamp'] = parseInt (since / 1000);
}
const response = await this.publicGetK (this.extend (request, params));
return this.parseOHLCVs (response, market, timeframe, since, limit);
|
@kroitor I see a pattern there. Is there a reason for this? Was the parameter in seconds before?
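A minimal sketch of the unit conversion the patch applies — assuming, as the diff suggests, that ACX's `/k` endpoint expects seconds while ccxt passes `since` in milliseconds:
```python
def to_acx_timestamp(since_ms: int) -> int:
    # ccxt uses millisecond timestamps throughout; the exchange wants seconds.
    return since_ms // 1000

assert to_acx_timestamp(1561566228000) == 1561566228
```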
|
https://api.github.com/repos/ccxt/ccxt/pulls/5379
|
2019-06-26T16:23:48Z
|
2019-06-26T23:31:04Z
|
2019-06-26T23:31:04Z
|
2019-08-31T16:52:01Z
| 146
|
ccxt/ccxt
| 13,381
|
BUG: replace with inplace not respecting cow
|
diff --git a/doc/source/whatsnew/v2.0.0.rst b/doc/source/whatsnew/v2.0.0.rst
index 9fd9faf057a8a..ad4dc9edffefd 100644
--- a/doc/source/whatsnew/v2.0.0.rst
+++ b/doc/source/whatsnew/v2.0.0.rst
@@ -247,6 +247,9 @@ Copy-on-Write improvements
can never update the original Series or DataFrame. Therefore, an informative
error is raised to the user instead of silently doing nothing (:issue:`49467`)
+- :meth:`DataFrame.replace` will now respect the Copy-on-Write mechanism
+ when ``inplace=True``.
+
Copy-on-Write can be enabled through one of
.. code-block:: python
diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py
index e66011acb978b..ce48e6b85c430 100644
--- a/pandas/core/internals/blocks.py
+++ b/pandas/core/internals/blocks.py
@@ -665,6 +665,7 @@ def replace_list(
dest_list: Sequence[Any],
inplace: bool = False,
regex: bool = False,
+ using_cow: bool = False,
) -> list[Block]:
"""
See BlockManager.replace_list docstring.
@@ -674,7 +675,11 @@ def replace_list(
if isinstance(values, Categorical):
# TODO: avoid special-casing
# GH49404
- blk = self if inplace else self.copy()
+ if using_cow and inplace:
+ # TODO(CoW): Optimize
+ blk = self.copy()
+ else:
+ blk = self if inplace else self.copy()
values = cast(Categorical, blk.values)
values._replace(to_replace=src_list, value=dest_list, inplace=True)
return [blk]
@@ -703,7 +708,11 @@ def replace_list(
masks = [extract_bool_array(x) for x in masks]
- rb = [self if inplace else self.copy()]
+ if using_cow and inplace:
+ # TODO(CoW): Optimize
+ rb = [self.copy()]
+ else:
+ rb = [self if inplace else self.copy()]
for i, (src, dest) in enumerate(pairs):
convert = i == src_len # only convert once at the end
new_rb: list[Block] = []
diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py
index 517e6d7e48275..4973c0827245f 100644
--- a/pandas/core/internals/managers.py
+++ b/pandas/core/internals/managers.py
@@ -473,6 +473,7 @@ def replace_list(
dest_list=dest_list,
inplace=inplace,
regex=regex,
+ using_cow=using_copy_on_write(),
)
bm._consolidate_inplace()
return bm
diff --git a/pandas/tests/copy_view/test_replace.py b/pandas/tests/copy_view/test_replace.py
new file mode 100644
index 0000000000000..a1347d8e12950
--- /dev/null
+++ b/pandas/tests/copy_view/test_replace.py
@@ -0,0 +1,38 @@
+import numpy as np
+
+from pandas import (
+ Categorical,
+ DataFrame,
+)
+import pandas._testing as tm
+from pandas.tests.copy_view.util import get_array
+
+
+def test_replace_categorical_inplace_reference(using_copy_on_write):
+ df = DataFrame({"a": Categorical([1, 2, 3])})
+ df_orig = df.copy()
+ arr_a = get_array(df, "a")
+ view = df[:] # noqa
+ df.replace(to_replace=[1], value=2, inplace=True)
+
+ if using_copy_on_write:
+ assert not np.shares_memory(get_array(df, "a").codes, arr_a.codes)
+ assert df._mgr._has_no_reference(0)
+ assert view._mgr._has_no_reference(0)
+ tm.assert_frame_equal(view, df_orig)
+ else:
+ assert np.shares_memory(get_array(df, "a").codes, arr_a.codes)
+
+
+def test_replace_inplace_reference(using_copy_on_write):
+ df = DataFrame({"a": [1.5, 2, 3]})
+ arr_a = get_array(df, "a")
+ view = df[:] # noqa
+ df.replace(to_replace=[1.5], value=15.5, inplace=True)
+
+ if using_copy_on_write:
+ assert not np.shares_memory(get_array(df, "a"), arr_a)
+ assert df._mgr._has_no_reference(0)
+ assert view._mgr._has_no_reference(0)
+ else:
+ assert np.shares_memory(get_array(df, "a"), arr_a)
|
- [x] closes #51277
- [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [x] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [x] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
This is a short-term change that we should get in for the rc to avoid updating multiple objects. Will optimize after the other replace PR is merged.
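A short usage sketch of the behavior the new tests pin down (the option name assumes pandas 2.0's `mode.copy_on_write` flag):
```python
import pandas as pd

pd.set_option("mode.copy_on_write", True)

df = pd.DataFrame({"a": [1.5, 2, 3]})
view = df[:]  # a second reference to the same block

df.replace(to_replace=[1.5], value=15.5, inplace=True)

# With CoW, the inplace replace copies the block instead of mutating it,
# so the view still sees the original data.
print(view)  # column "a" keeps 1.5
print(df)    # column "a" now holds 15.5
```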
|
https://api.github.com/repos/pandas-dev/pandas/pulls/51278
|
2023-02-09T21:18:28Z
|
2023-02-10T10:47:35Z
|
2023-02-10T10:47:35Z
|
2023-02-10T10:47:39Z
| 1,122
|
pandas-dev/pandas
| 45,543
|
Improve type hint in homeassistant trigger
|
diff --git a/homeassistant/components/homeassistant/trigger.py b/homeassistant/components/homeassistant/trigger.py
index 588b6713007dcf..3160af580794ba 100644
--- a/homeassistant/components/homeassistant/trigger.py
+++ b/homeassistant/components/homeassistant/trigger.py
@@ -1,16 +1,17 @@
"""Home Assistant trigger dispatcher."""
import importlib
-from homeassistant.components.device_automation.trigger import (
- DeviceAutomationTriggerProtocol,
-)
from homeassistant.const import CONF_PLATFORM
from homeassistant.core import CALLBACK_TYPE, HomeAssistant
-from homeassistant.helpers.trigger import TriggerActionType, TriggerInfo
+from homeassistant.helpers.trigger import (
+ TriggerActionType,
+ TriggerInfo,
+ TriggerProtocol,
+)
from homeassistant.helpers.typing import ConfigType
-def _get_trigger_platform(config: ConfigType) -> DeviceAutomationTriggerProtocol:
+def _get_trigger_platform(config: ConfigType) -> TriggerProtocol:
return importlib.import_module(f"..triggers.{config[CONF_PLATFORM]}", __name__)
@@ -20,7 +21,7 @@ async def async_validate_trigger_config(
"""Validate config."""
platform = _get_trigger_platform(config)
if hasattr(platform, "async_validate_trigger_config"):
- return await getattr(platform, "async_validate_trigger_config")(hass, config)
+ return await platform.async_validate_trigger_config(hass, config)
return platform.TRIGGER_SCHEMA(config)
|
## Proposed change
<!--
Describe the big picture of your changes here to communicate to the
maintainers why we should accept this pull request. If it fixes a bug
or resolves a feature request, be sure to link to that issue in the
additional information section.
-->
As follow-up to #88511
## Type of change
<!--
What type of change does your PR introduce to Home Assistant?
NOTE: Please, check only 1! box!
If your PR requires multiple boxes to be checked, you'll most likely need to
split it into multiple PRs. This makes things easier and faster to code review.
-->
- [ ] Dependency upgrade
- [ ] Bugfix (non-breaking change which fixes an issue)
- [ ] New integration (thank you!)
- [ ] New feature (which adds functionality to an existing integration)
- [ ] Deprecation (breaking change to happen in the future)
- [ ] Breaking change (fix/feature causing existing functionality to break)
- [x] Code quality improvements to existing code or addition of tests
## Additional information
<!--
Details are important, and help maintainers processing your PR.
Please be sure to fill out additional details, if applicable.
-->
- This PR fixes or closes issue: fixes #
- This PR is related to issue:
- Link to documentation pull request:
## Checklist
<!--
Put an `x` in the boxes that apply. You can also fill these out after
creating the PR. If you're unsure about any of them, don't hesitate to ask.
We're here to help! This is simply a reminder of what we are going to look
for before merging your code.
-->
- [ ] The code change is tested and works locally.
- [ ] Local tests pass. **Your PR cannot be merged unless tests pass**
- [ ] There is no commented out code in this PR.
- [ ] I have followed the [development checklist][dev-checklist]
- [ ] The code has been formatted using Black (`black --fast homeassistant tests`)
- [ ] Tests have been added to verify that the new code works.
If user exposed functionality or configuration variables are added/changed:
- [ ] Documentation added/updated for [www.home-assistant.io][docs-repository]
If the code communicates with devices, web services, or third-party tools:
- [ ] The [manifest file][manifest-docs] has all fields filled out correctly.
Updated and included derived files by running: `python3 -m script.hassfest`.
- [ ] New or updated dependencies have been added to `requirements_all.txt`.
Updated by running `python3 -m script.gen_requirements_all`.
- [ ] For the updated dependencies - a link to the changelog, or at minimum a diff between library versions is added to the PR description.
- [ ] Untested files have been added to `.coveragerc`.
<!--
This project is very active and we have a high turnover of pull requests.
Unfortunately, the number of incoming pull requests is higher than what our
reviewers can review and merge so there is a long backlog of pull requests
waiting for review. You can help here!
By reviewing another pull request, you will help raise the code quality of
that pull request and the final review will be faster. This way the general
pace of pull request reviews will go up and your wait time will go down.
When picking a pull request to review, try to choose one that hasn't yet
been reviewed.
Thanks for helping out!
-->
To help with the load of incoming pull requests:
- [ ] I have reviewed two other [open pull requests][prs] in this repository.
[prs]: https://github.com/home-assistant/core/pulls?q=is%3Aopen+is%3Apr+-author%3A%40me+-draft%3Atrue+-label%3Awaiting-for-upstream+sort%3Acreated-desc+review%3Anone+-status%3Afailure
<!--
Thank you for contributing <3
Below, some useful links you could explore:
-->
[dev-checklist]: https://developers.home-assistant.io/docs/en/development_checklist.html
[manifest-docs]: https://developers.home-assistant.io/docs/en/creating_integration_manifest.html
[quality-scale]: https://developers.home-assistant.io/docs/en/next/integration_quality_scale_index.html
[docs-repository]: https://github.com/home-assistant/home-assistant.io
|
https://api.github.com/repos/home-assistant/core/pulls/88596
|
2023-02-22T11:07:40Z
|
2023-02-22T12:56:31Z
|
2023-02-22T12:56:31Z
|
2023-02-23T13:07:13Z
| 309
|
home-assistant/core
| 39,280
|
Small Travis cleanups
|
diff --git a/.travis.yml b/.travis.yml
index 48b9b43cbfd..55f18338d9d 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -10,9 +10,6 @@ before_install:
before_script:
- 'if [ $TRAVIS_OS_NAME = osx ] ; then ulimit -n 1024 ; fi'
-# using separate envs with different TOXENVs creates 4x1 Travis build
-# matrix, which allows us to clearly distinguish which component under
-# test has failed
matrix:
include:
- python: "2.7"
@@ -22,23 +19,14 @@ matrix:
- python: "2.7"
env: TOXENV=py27-oldest BOULDER_INTEGRATION=1
sudo: required
- after_failure:
- - sudo cat /var/log/mysql/error.log
- - ps aux | grep mysql
services: docker
- python: "2.6"
env: TOXENV=py26 BOULDER_INTEGRATION=1
sudo: required
- after_failure:
- - sudo cat /var/log/mysql/error.log
- - ps aux | grep mysql
services: docker
- python: "2.7"
env: TOXENV=py27_install BOULDER_INTEGRATION=1
sudo: required
- after_failure:
- - sudo cat /var/log/mysql/error.log
- - ps aux | grep mysql
services: docker
- sudo: required
env: TOXENV=apache_compat
@@ -81,30 +69,18 @@ matrix:
- python: "3.3"
env: TOXENV=py33 BOULDER_INTEGRATION=1
sudo: required
- after_failure:
- - sudo cat /var/log/mysql/error.log
- - ps aux | grep mysql
services: docker
- python: "3.4"
env: TOXENV=py34 BOULDER_INTEGRATION=1
sudo: required
- after_failure:
- - sudo cat /var/log/mysql/error.log
- - ps aux | grep mysql
services: docker
- python: "3.5"
env: TOXENV=py35 BOULDER_INTEGRATION=1
sudo: required
- after_failure:
- - sudo cat /var/log/mysql/error.log
- - ps aux | grep mysql
services: docker
- python: "3.6"
env: TOXENV=py36 BOULDER_INTEGRATION=1
sudo: required
- after_failure:
- - sudo cat /var/log/mysql/error.log
- - ps aux | grep mysql
services: docker
- python: "2.7"
env: TOXENV=nginxroundtrip
@@ -130,17 +106,6 @@ branches:
sudo: false
addons:
- # Custom /etc/hosts required for simple verification of http-01
- # and tls-sni-01, and for certbot_test_nginx
- hosts:
- - le.wtf
- - le1.wtf
- - le2.wtf
- - le3.wtf
- - nginx.wtf
- - boulder
- - boulder-mysql
- - boulder-rabbitmq
apt:
sources:
- augeas
|
- Remove obsolete comment about the build matrix.
- Remove after_failure debugging code for MariaDB failures: it no longer works and is no longer needed.
- Remove the list of host overrides. This is now taken care of inside Boulder's test configs, by using a fake DNS server.
|
https://api.github.com/repos/certbot/certbot/pulls/5273
|
2017-11-29T00:11:03Z
|
2017-11-29T02:22:01Z
|
2017-11-29T02:22:01Z
|
2017-11-29T02:22:04Z
| 782
|
certbot/certbot
| 80
|
Avoid src being marked as optional in help
|
diff --git a/src/black/__init__.py b/src/black/__init__.py
index 8e2123d50cc..51384fb08da 100644
--- a/src/black/__init__.py
+++ b/src/black/__init__.py
@@ -328,6 +328,7 @@ def validate_regex(
exists=True, file_okay=True, dir_okay=True, readable=True, allow_dash=True
),
is_eager=True,
+ metavar="SRC ...",
)
@click.option(
"--config",
|
https://github.com/psf/black/issues/704#issuecomment-490251352
I *hope* this is trivial enough that it doesn't need a changelog entry (mostly because I forgot about it and am too lazy to add one).
|
https://api.github.com/repos/psf/black/pulls/2356
|
2021-07-03T02:13:56Z
|
2021-07-09T01:46:32Z
|
2021-07-09T01:46:32Z
|
2021-07-09T01:46:35Z
| 115
|
psf/black
| 24,392
|
[version] bump version to 0.8.34
|
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 2c32af3f7a7b6..6cfd6126909a6 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,16 +1,24 @@
# ChangeLog
-## Unreleased
+## [0.8.34] - 2023-09-26
### New Features
- Added `Konko` LLM support (#7775)
- Add before/after context sentence (#7821)
+- EverlyAI integration with LlamaIndex through OpenAI library (#7820)
+- add Arize Phoenix tracer to global handlers (#7835)
### Bug Fixes / Nits
- Normalize scores returned from ElasticSearch vector store (#7792)
- Fixed `refresh_ref_docs()` bug with order of operations (#7664)
- Delay postgresql connection for `PGVectorStore` until actually needed (#7793)
- Fix KeyError in delete method of `SimpleVectorStore` related to metadata filters (#7829)
+- Fix KeyError in delete method of `SimpleVectorStore` related to metadata filters (#7831)
+- Addressing PyYAML import error (#7784)
+- ElasticsearchStore: Update User-Agent + Add example docker compose (#7832)
+- `StorageContext.persist` supporting `Path` (#7783)
+- Update ollama.py (#7839)
+- fix bug for self._session_pool (#7834)
## [0.8.33] - 2023-09-25
diff --git a/llama_index/VERSION b/llama_index/VERSION
index f90154748a9ef..b326a53c94fc2 100644
--- a/llama_index/VERSION
+++ b/llama_index/VERSION
@@ -1 +1 @@
-0.8.33
+0.8.34
|
https://api.github.com/repos/run-llama/llama_index/pulls/7841
|
2023-09-26T23:29:16Z
|
2023-09-26T23:40:41Z
|
2023-09-26T23:40:41Z
|
2023-09-26T23:40:41Z
| 405
|
run-llama/llama_index
| 6,724
|
|
Remove deprecated os_server_actions alias
|
diff --git a/lib/ansible/modules/cloud/openstack/_os_server_actions.py b/lib/ansible/modules/cloud/openstack/_os_server_actions.py
deleted file mode 120000
index fbfc7fba62a247..00000000000000
--- a/lib/ansible/modules/cloud/openstack/_os_server_actions.py
+++ /dev/null
@@ -1 +0,0 @@
-os_server_action.py
\ No newline at end of file
diff --git a/lib/ansible/modules/cloud/openstack/_os_server_actions.py b/lib/ansible/modules/cloud/openstack/_os_server_actions.py
new file mode 100644
index 00000000000000..3668554e9c7ace
--- /dev/null
+++ b/lib/ansible/modules/cloud/openstack/_os_server_actions.py
@@ -0,0 +1,19 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2018, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['removed'],
+ 'supported_by': 'community'}
+
+
+from ansible.module_utils.common.removed import removed_module
+
+
+if __name__ == '__main__':
+ removed_module(removed_in='2.8')
diff --git a/lib/ansible/modules/cloud/openstack/os_server_action.py b/lib/ansible/modules/cloud/openstack/os_server_action.py
index fc1a4147630f65..25004bd851e567 100644
--- a/lib/ansible/modules/cloud/openstack/os_server_action.py
+++ b/lib/ansible/modules/cloud/openstack/os_server_action.py
@@ -130,9 +130,6 @@ def main():
required_if=[('action', 'rebuild', ['image'])],
**module_kwargs)
- if module._name == 'os_server_actions':
- module.deprecate("The 'os_server_actions' module is being renamed 'os_server_action'", version=2.8)
-
action = module.params['action']
wait = module.params['wait']
timeout = module.params['timeout']
diff --git a/test/sanity/ansible-doc/skip.txt b/test/sanity/ansible-doc/skip.txt
index 1074959e085754..1c1e7e7fb5f7d6 100644
--- a/test/sanity/ansible-doc/skip.txt
+++ b/test/sanity/ansible-doc/skip.txt
@@ -18,3 +18,4 @@ cs_nic
ec2_remote_facts
netscaler
win_msi
+os_server_actions
|
##### SUMMARY
Remove deprecated os_server_actions alias. Fixes #44991
##### ISSUE TYPE
- Bugfix Pull Request
##### COMPONENT NAME
lib/ansible/modules/cloud/openstack/_os_server_actions.py
lib/ansible/modules/cloud/openstack/os_server_action.py
##### ANSIBLE VERSION
<!--- Paste verbatim output from "ansible --version" between quotes -->
```paste below
2.8
```
##### ADDITIONAL INFORMATION
<!--- Include additional information to help people understand the change here -->
<!--- A step-by-step reproduction of the problem is helpful if there is no related issue -->
<!--- Paste verbatim command output below, e.g. before and after your change -->
```paste below
```
|
https://api.github.com/repos/ansible/ansible/pulls/47208
|
2018-10-17T15:54:00Z
|
2018-10-18T06:44:44Z
|
2018-10-18T06:44:44Z
|
2019-07-22T16:59:53Z
| 608
|
ansible/ansible
| 49,078
|
Add inference backend support for data opt out
|
diff --git a/inference/server/alembic/versions/2023_04_24_2130-401eef162771_add_chat_data_opt_out_field.py b/inference/server/alembic/versions/2023_04_24_2130-401eef162771_add_chat_data_opt_out_field.py
new file mode 100644
index 0000000000..df7f4a41f1
--- /dev/null
+++ b/inference/server/alembic/versions/2023_04_24_2130-401eef162771_add_chat_data_opt_out_field.py
@@ -0,0 +1,27 @@
+"""Add chat data opt out field
+
+Revision ID: 401eef162771
+Revises: b66fd8f9da1f
+Create Date: 2023-04-24 21:30:19.947411
+
+"""
+import sqlalchemy as sa
+from alembic import op
+
+# revision identifiers, used by Alembic.
+revision = "401eef162771"
+down_revision = "b66fd8f9da1f"
+branch_labels = None
+depends_on = None
+
+
+def upgrade() -> None:
+ # ### commands auto generated by Alembic - please adjust! ###
+ op.add_column("chat", sa.Column("allow_data_use", sa.Boolean(), server_default=sa.text("true"), nullable=False))
+ # ### end Alembic commands ###
+
+
+def downgrade() -> None:
+ # ### commands auto generated by Alembic - please adjust! ###
+ op.drop_column("chat", "allow_data_use")
+ # ### end Alembic commands ###
diff --git a/inference/server/oasst_inference_server/models/chat.py b/inference/server/oasst_inference_server/models/chat.py
index 490f16965a..9c565cb8e0 100644
--- a/inference/server/oasst_inference_server/models/chat.py
+++ b/inference/server/oasst_inference_server/models/chat.py
@@ -76,6 +76,8 @@ class DbChat(SQLModel, table=True):
hidden: bool = Field(False, sa_column=sa.Column(sa.Boolean, nullable=False, server_default=sa.false()))
+ allow_data_use: bool = Field(True, sa_column=sa.Column(sa.Boolean, nullable=False, server_default=sa.true()))
+
def to_list_read(self) -> chat_schema.ChatListRead:
return chat_schema.ChatListRead(
id=self.id,
diff --git a/inference/server/oasst_inference_server/routes/chats.py b/inference/server/oasst_inference_server/routes/chats.py
index f195e784c6..06136c1fc3 100644
--- a/inference/server/oasst_inference_server/routes/chats.py
+++ b/inference/server/oasst_inference_server/routes/chats.py
@@ -307,6 +307,7 @@ async def handle_update_chat(
chat_id=chat_id,
title=request.title,
hidden=request.hidden,
+ allow_data_use=request.allow_data_use,
)
except Exception:
logger.exception("Error when updating chat")
diff --git a/inference/server/oasst_inference_server/schemas/chat.py b/inference/server/oasst_inference_server/schemas/chat.py
index 64ba2b94b5..653e677e89 100644
--- a/inference/server/oasst_inference_server/schemas/chat.py
+++ b/inference/server/oasst_inference_server/schemas/chat.py
@@ -89,3 +89,4 @@ def __init__(self, message: inference.MessageRead):
class ChatUpdateRequest(pydantic.BaseModel):
title: pydantic.constr(max_length=100) | None = None
hidden: bool | None = None
+ allow_data_use: bool | None = None
diff --git a/inference/server/oasst_inference_server/user_chat_repository.py b/inference/server/oasst_inference_server/user_chat_repository.py
index fe5475457a..a760602fc3 100644
--- a/inference/server/oasst_inference_server/user_chat_repository.py
+++ b/inference/server/oasst_inference_server/user_chat_repository.py
@@ -275,6 +275,7 @@ async def update_chat(
chat_id: str,
title: str | None = None,
hidden: bool | None = None,
+ allow_data_use: bool | None = None,
) -> None:
logger.info(f"Updating chat {chat_id=}: {title=} {hidden=}")
chat = await self.get_chat_by_id(chat_id=chat_id, include_messages=False)
@@ -287,4 +288,8 @@ async def update_chat(
logger.info(f"Setting chat {chat_id=} to {'hidden' if hidden else 'visible'}")
chat.hidden = hidden
+ if allow_data_use is not None:
+ logger.info(f"Updating allow_data_use of chat {chat_id=}: {allow_data_use=}")
+ chat.allow_data_use = allow_data_use
+
await self.session.commit()
|
Currently this doesn't change any behavior, since we don't yet have a mechanism for exporting chats.
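To sketch how a future export job could honor the flag — a hypothetical query, assuming the `DbChat` model shown in the diff; nothing like this exists in the PR itself:
```python
from sqlmodel import select

from oasst_inference_server.models.chat import DbChat

# Hypothetical: restrict any future chat export to chats whose users
# left allow_data_use at its default of true.
exportable_chats_stmt = select(DbChat).where(DbChat.allow_data_use)
```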
|
https://api.github.com/repos/LAION-AI/Open-Assistant/pulls/2820
|
2023-04-21T21:22:30Z
|
2023-04-29T10:38:23Z
|
2023-04-29T10:38:23Z
|
2023-04-29T16:36:03Z
| 1,121
|
LAION-AI/Open-Assistant
| 37,013
|
Add Wayland support
|
diff --git a/share/cht.sh.txt b/share/cht.sh.txt
index c49ca7b7..23335093 100755
--- a/share/cht.sh.txt
+++ b/share/cht.sh.txt
@@ -24,8 +24,8 @@
# count words in text counter
# group elements list
-__CHTSH_VERSION=0.0.2
-__CHTSH_DATETIME="2021-04-23 09:30:30 +0200"
+__CHTSH_VERSION=0.0.3
+__CHTSH_DATETIME="2021-04-25 09:30:30 +0200"
# cht.sh configuration loading
#
@@ -514,7 +514,11 @@ else
fi
if [ "$is_macos" != yes ]; then
- command -v xsel >/dev/null || echo 'DEPENDENCY: please install "xsel" for "copy"' >&2
+ if [ "$XDG_SESSION_TYPE" = wayland ]; then
+ command -v wl-copy >/dev/null || echo 'DEPENDENCY: please install "wl-copy" for "copy"' >&2
+ else
+ command -v xsel >/dev/null || echo 'DEPENDENCY: please install "xsel" for "copy"' >&2
+ fi
fi
command -v rlwrap >/dev/null || { echo 'DEPENDENCY: install "rlwrap" to use cht.sh in the shell mode' >&2; exit 1; }
@@ -562,7 +566,11 @@ cmd_copy() {
else
curl -s "${CHTSH_URL}"/"$(get_query_options "$query"?T)" > "$TMP1"
if [ "$is_macos" != yes ]; then
- xsel -bi < "$TMP1"
+ if [ "$XDG_SESSION_TYPE" = wayland ]; then
+ wl-copy < "$TMP1"
+ else
+ xsel -bi < "$TMP1"
+ fi
else
pbcopy < "$TMP1"
fi
@@ -578,7 +586,11 @@ cmd_ccopy() {
else
curl -s "${CHTSH_URL}"/"$(get_query_options "$query"?TQ)" > "$TMP1"
if [ "$is_macos" != yes ]; then
- xsel -bi < "$TMP1"
+ if [ "$XDG_SESSION_TYPE" = wayland ]; then
+ wl-copy < "$TMP1"
+ else
+ xsel -bi < "$TMP1"
+ fi
else
pbcopy < "$TMP1"
fi
@@ -674,7 +686,11 @@ cmd_stealth() {
if [ "$is_macos" = yes ]; then
past=$(pbpaste)
else
- past=$(xsel -o)
+ if [ "$XDG_SESSION_TYPE" = wayland ]; then
+ past=$(wl-paste -p)
+ else
+ past=$(xsel -o)
+ fi
fi
printf "\033[0;31mstealth:\033[0m you are in the stealth mode; select any text in any window for a query\n"
printf "\033[0;31mstealth:\033[0m selections longer than $STEALTH_MAX_SELECTION_LENGTH words are ignored\n"
@@ -686,7 +702,11 @@ cmd_stealth() {
if [ "$is_macos" = yes ]; then
current=$(pbpaste)
else
- current=$(xsel -o)
+ if [ "$XDG_SESSION_TYPE" = wayland ]; then
+ current=$(wl-paste -p)
+ else
+ current=$(xsel -o)
+ fi
fi
if [ "$past" != "$current" ]; then
past=$current
diff --git a/tests/results/8 b/tests/results/8
index c49ca7b7..23335093 100644
--- a/tests/results/8
+++ b/tests/results/8
@@ -24,8 +24,8 @@
# count words in text counter
# group elements list
-__CHTSH_VERSION=0.0.2
-__CHTSH_DATETIME="2021-04-23 09:30:30 +0200"
+__CHTSH_VERSION=0.0.3
+__CHTSH_DATETIME="2021-04-25 09:30:30 +0200"
# cht.sh configuration loading
#
@@ -514,7 +514,11 @@ else
fi
if [ "$is_macos" != yes ]; then
- command -v xsel >/dev/null || echo 'DEPENDENCY: please install "xsel" for "copy"' >&2
+ if [ "$XDG_SESSION_TYPE" = wayland ]; then
+ command -v wl-copy >/dev/null || echo 'DEPENDENCY: please install "wl-copy" for "copy"' >&2
+ else
+ command -v xsel >/dev/null || echo 'DEPENDENCY: please install "xsel" for "copy"' >&2
+ fi
fi
command -v rlwrap >/dev/null || { echo 'DEPENDENCY: install "rlwrap" to use cht.sh in the shell mode' >&2; exit 1; }
@@ -562,7 +566,11 @@ cmd_copy() {
else
curl -s "${CHTSH_URL}"/"$(get_query_options "$query"?T)" > "$TMP1"
if [ "$is_macos" != yes ]; then
- xsel -bi < "$TMP1"
+ if [ "$XDG_SESSION_TYPE" = wayland ]; then
+ wl-copy < "$TMP1"
+ else
+ xsel -bi < "$TMP1"
+ fi
else
pbcopy < "$TMP1"
fi
@@ -578,7 +586,11 @@ cmd_ccopy() {
else
curl -s "${CHTSH_URL}"/"$(get_query_options "$query"?TQ)" > "$TMP1"
if [ "$is_macos" != yes ]; then
- xsel -bi < "$TMP1"
+ if [ "$XDG_SESSION_TYPE" = wayland ]; then
+ wl-copy < "$TMP1"
+ else
+ xsel -bi < "$TMP1"
+ fi
else
pbcopy < "$TMP1"
fi
@@ -674,7 +686,11 @@ cmd_stealth() {
if [ "$is_macos" = yes ]; then
past=$(pbpaste)
else
- past=$(xsel -o)
+ if [ "$XDG_SESSION_TYPE" = wayland ]; then
+ past=$(wl-paste -p)
+ else
+ past=$(xsel -o)
+ fi
fi
printf "\033[0;31mstealth:\033[0m you are in the stealth mode; select any text in any window for a query\n"
printf "\033[0;31mstealth:\033[0m selections longer than $STEALTH_MAX_SELECTION_LENGTH words are ignored\n"
@@ -686,7 +702,11 @@ cmd_stealth() {
if [ "$is_macos" = yes ]; then
current=$(pbpaste)
else
- current=$(xsel -o)
+ if [ "$XDG_SESSION_TYPE" = wayland ]; then
+ current=$(wl-paste -p)
+ else
+ current=$(xsel -o)
+ fi
fi
if [ "$past" != "$current" ]; then
past=$current
|
This adds support for Wayland (#271) using `wl-clipboard`.
Basically, if `$XDG_SESSION_TYPE` is `wayland`, the corresponding `wl-(copy|paste)` command is called instead of `xsel`.
Stealth mode, `c` and `cc` are supported, and every `xsel` call has a matching `wl` command to complement it.
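The dispatch logic reduces to one environment check; a hedged Python rendering of the shell pattern above (the real script of course stays pure POSIX sh):
```python
import os
import subprocess

def copy_to_clipboard(text: str) -> None:
    # Same branch the patch adds in shell: Wayland sessions get wl-copy,
    # everything else keeps xsel.
    if os.environ.get("XDG_SESSION_TYPE") == "wayland":
        cmd = ["wl-copy"]
    else:
        cmd = ["xsel", "-bi"]
    subprocess.run(cmd, input=text.encode(), check=True)
```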
|
https://api.github.com/repos/chubin/cheat.sh/pulls/293
|
2021-04-24T15:41:37Z
|
2021-04-25T06:45:05Z
|
2021-04-25T06:45:05Z
|
2021-04-25T06:45:42Z
| 1,697
|
chubin/cheat.sh
| 15,123
|
Fixes to main path problem
|
diff --git a/gpt_engineer/applications/cli/main.py b/gpt_engineer/applications/cli/main.py
index f0c10235bd..f03c90fb2d 100644
--- a/gpt_engineer/applications/cli/main.py
+++ b/gpt_engineer/applications/cli/main.py
@@ -28,11 +28,9 @@
import logging
import os
-from importlib.util import find_spec
from pathlib import Path
import openai
-import toml
import typer
from dotenv import load_dotenv
@@ -49,16 +47,6 @@
from gpt_engineer.core.preprompts_holder import PrepromptsHolder
from gpt_engineer.tools.custom_steps import clarified_gen, lite_gen, self_heal
-# Load the names of the optional dependencies from the pyprojecct file and determine whether
-# they can be imported
-with open("pyproject.toml", "r") as file:
- pyproject = toml.load(file)
-
-dependency_group = pyproject["tool"]["poetry"]["group"]["experimental"]["dependencies"]
-optional_deps_importable = all(
- [find_spec(dep_name.replace("-", "_")) is not None for dep_name in dependency_group]
-)
-
app = typer.Typer() # creates a CLI app
@@ -110,11 +98,6 @@ def main(
"-i",
help="Improve files_dict from existing project.",
),
- improve_all_mode: bool = typer.Option(
- False,
- "--improve-all-experimental",
- help="Improve files_dict from existing project, without manually choosing which files to improve, using vector store (EXPERIMENTAL).",
- ),
lite_mode: bool = typer.Option(
False,
"--lite",
@@ -156,9 +139,6 @@ def main(
"""
logging.basicConfig(level=logging.DEBUG if verbose else logging.INFO)
#
- if improve_all_mode and not improve_mode:
- print("Vector improve mode implies improve mode, setting improve_mode=True")
- improve_mode = True
if improve_mode:
assert not (
@@ -192,18 +172,7 @@ def main(
else:
execution_fn = execute_entrypoint
- if improve_all_mode and optional_deps_importable:
- from gpt_engineer.tools.experimental.experimental_steps import (
- improve_automatic_file_selection,
- )
-
- improve_fn = improve_automatic_file_selection
- elif improve_all_mode:
- raise ImportError(
- "The experimental improve_all_mode is selected, but the optional dependencies to use it are not installed. Please run 'poetry install --with experimental'"
- )
- else:
- improve_fn = improve
+ improve_fn = improve
preprompts_path = get_preprompts_path(use_custom_preprompts, Path(project_path))
preprompts_holder = PrepromptsHolder(preprompts_path)
@@ -221,10 +190,7 @@ def main(
store = FileStore(project_path)
if improve_mode:
- if improve_all_mode:
- files_dict = store.download()
- else:
- files_dict = ask_for_files(project_path)
+ files_dict = ask_for_files(project_path)
files_dict = agent.improve(files_dict, prompt)
else:
files_dict = agent.init(prompt)
diff --git a/pyproject.toml b/pyproject.toml
index 0046ceaf31..1df123c49a 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
[tool.poetry]
name = "gpt-engineer"
-version = "0.2.2"
+version = "0.2.3"
description = "Specify what you want it to build, the AI asks for clarification, and then builds it."
authors = ["Anton Osika <[email protected]>"]
license = "MIT"
|
https://api.github.com/repos/gpt-engineer-org/gpt-engineer/pulls/899
|
2023-12-13T13:48:35Z
|
2023-12-13T13:49:33Z
|
2023-12-13T13:49:33Z
|
2023-12-13T13:49:40Z
| 850
|
gpt-engineer-org/gpt-engineer
| 33,130
|
|
[inference] overlap comm and compute in Linear1D_Row when stream_chunk_num > 1
|
diff --git a/colossalai/nn/layer/parallel_1d/layers.py b/colossalai/nn/layer/parallel_1d/layers.py
index 1976da95adb4..b64488a123d7 100644
--- a/colossalai/nn/layer/parallel_1d/layers.py
+++ b/colossalai/nn/layer/parallel_1d/layers.py
@@ -706,13 +706,22 @@ def forward(self, input_: Tensor) -> Tensor:
input_ = split_forward_gather_backward(input_, ParallelMode.PARALLEL_1D, dim=-1)
if self.stream_chunk_num > 1:
- output_parallel_list = [None for i in range(self.stream_chunk_num)]
- for i in range(self.stream_chunk_num):
- output_parallel_list[i] = F.linear(input_, self.weight_list[i])
- output_parallel_list[i] = reduce_input(output_parallel_list[i], ParallelMode.PARALLEL_1D)
- output = torch.cat(output_parallel_list, dim=-1)
+ if self.training:
+ raise RuntimeError("use stream_chunk_num=1 in Linear1D_Row for training!")
+ with torch.no_grad():
+ output_parallel_list = [None for i in range(self.stream_chunk_num)]
+ handle_list = []
+ for i in range(self.stream_chunk_num):
+ output_parallel_list[i] = F.linear(input_, self.weight_list[i])
+ handle = torch.distributed.all_reduce(output_parallel_list[i],
+ group=gpc.get_group(ParallelMode.PARALLEL_1D),
+ async_op=True)
+ handle_list.append(handle)
+ # output_parallel_list[i] = reduce_input(output_parallel_list[i], ParallelMode.PARALLEL_1D)
+ for handle in handle_list:
+ handle.wait()
+ output = torch.cat(output_parallel_list, dim=-1)
else:
- print(input_.shape, self.weight.shape)
output_parallel = F.linear(input_, self.weight)
# output_parallel = linear_with_async_comm(input_, self.weight, None, ParallelMode.PARALLEL_1D, False)
output = reduce_input(output_parallel, ParallelMode.PARALLEL_1D)
diff --git a/tests/test_layers/test_1d/checks_1d/check_layer_1d.py b/tests/test_layers/test_1d/checks_1d/check_layer_1d.py
index 7d77391ea621..668b8a334800 100644
--- a/tests/test_layers/test_1d/checks_1d/check_layer_1d.py
+++ b/tests/test_layers/test_1d/checks_1d/check_layer_1d.py
@@ -514,8 +514,9 @@ def check_linear_row_stream_inference():
i = gpc.get_local_rank(ParallelMode.PARALLEL_1D)
- assert HIDDEN_SIZE % 2 == 0
- layer = Linear1D_Row(OUTPUT_SIZE, INPUT_SIZE, stream_chunk_num=2)
+ stream_chunk_num = 4
+ assert HIDDEN_SIZE % stream_chunk_num == 0
+ layer = Linear1D_Row(OUTPUT_SIZE, INPUT_SIZE, stream_chunk_num=stream_chunk_num)
A_shape = (BATCH_SIZE, SEQ_LENGTH, OUTPUT_SIZE)
A_master = torch.randn(A_shape, dtype=dtype, device=device)
@@ -537,6 +538,8 @@ def check_linear_row_stream_inference():
layer.weight = Parameter(W)
layer.bias = Parameter(B)
layer.chunk_weight()
+ layer.eval()
+
out = layer(A)
A_master = A_master.clone()
|
https://api.github.com/repos/hpcaitech/ColossalAI/pulls/1876
|
2022-11-10T09:09:23Z
|
2022-11-10T09:36:42Z
|
2022-11-10T09:36:42Z
|
2022-11-10T09:36:46Z
| 795
|
hpcaitech/ColossalAI
| 11,256
|
|
version bump
|
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 297ab1235..63d6f5e54 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -6,7 +6,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
-## [13.3.2] - Unreleased
+## [13.3.2] - 2023-02-04
### Fixed
@@ -1925,6 +1925,7 @@ Major version bump for a breaking change to `Text.stylize signature`, which corr
- First official release, API still to be stabilized
+[13.3.2]: https://github.com/textualize/rich/compare/v13.3.1...v13.3.2
[13.3.1]: https://github.com/textualize/rich/compare/v13.3.0...v13.3.1
[13.3.0]: https://github.com/textualize/rich/compare/v13.2.0...v13.3.0
[13.2.0]: https://github.com/textualize/rich/compare/v13.1.0...v13.2.0
|
Version 13.3.2
|
https://api.github.com/repos/Textualize/rich/pulls/2853
|
2023-03-04T16:23:01Z
|
2023-03-04T16:28:38Z
|
2023-03-04T16:28:38Z
|
2023-03-04T16:28:39Z
| 312
|
Textualize/rich
| 48,360
|
Bump some versions to avoid downgrading them
|
diff --git a/requirements_versions.txt b/requirements_versions.txt
index df8c6861bf8..0a276b0b4b8 100644
--- a/requirements_versions.txt
+++ b/requirements_versions.txt
@@ -5,12 +5,12 @@ basicsr==1.4.2
gfpgan==1.3.8
gradio==3.29.0
numpy==1.23.5
-Pillow==9.4.0
+Pillow==9.5.0
realesrgan==0.3.0
torch
omegaconf==2.2.3
pytorch_lightning==1.9.4
-scikit-image==0.19.2
+scikit-image==0.20.0
timm==0.6.7
piexif==1.1.3
einops==0.4.1
|
**Describe what this pull request is trying to achieve.**
While working on #10291, I noticed some packages were getting downgraded after a higher version had already been installed by other deps. Might as well avoid that.
Images seem to generate & save fine on my machine.
**Environment this was tested in**
- OS: Windows
- Browser: Chrome
- Graphics card: GTX 1070, 8 GB
|
https://api.github.com/repos/AUTOMATIC1111/stable-diffusion-webui/pulls/10292
|
2023-05-11T20:22:40Z
|
2023-05-14T04:59:29Z
|
2023-05-14T04:59:28Z
|
2023-06-13T09:30:53Z
| 197
|
AUTOMATIC1111/stable-diffusion-webui
| 40,463
|
Adding two resources
|
diff --git a/README.md b/README.md
index ba032593..b4af0bc8 100644
--- a/README.md
+++ b/README.md
@@ -285,6 +285,7 @@ For a list of free machine learning books available for download, go [here](http
* [chartjs](http://www.chartjs.org/)
* [dimple](http://dimplejs.org/)
* [amCharts](http://www.amcharts.com/)
+* [D3xter](https://github.com/NathanEpstein/D3xter) - Straight forward plotting built on D3
* [statkit](https://github.com/rigtorp/statkit) - Statistics kit for JavaScript
* [science.js](https://github.com/jasondavies/science.js/) - Scientific and statistical computing in JavaScript.
@@ -303,6 +304,7 @@ For a list of free machine learning books available for download, go [here](http
* [Brain](https://github.com/harthur/brain) - Neural networks in JavaScript
* [Bayesian-Bandit](https://github.com/omphalos/bayesian-bandit.js) - Bayesian bandit implementation for Node and the browser.
* [Synaptic](https://github.com/cazala/synaptic) - Architecture-free neural network library for node.js and the browser
+* [kNear](https://github.com/NathanEpstein/kNear) - JavaScript implementation of the k nearest neighbors algorithm for supervised learning
<a name="julia" />
## Julia
|
Adding two libraries to the list:
1) D3xter - provides simple functions for making standard plots built on D3. Optional configuration means plots can be rendered with as little as one line (added to javascript -> Data Analysis / Data Visualization).
2) kNear - javascript implementation of the k nearest neighbors algorithm for supervised machine learning (added to javascript -> General-Purpose Machine Learning).
|
https://api.github.com/repos/josephmisiti/awesome-machine-learning/pulls/94
|
2014-11-21T17:59:59Z
|
2014-11-21T18:09:48Z
|
2014-11-21T18:09:48Z
|
2014-11-22T02:27:49Z
| 336
|
josephmisiti/awesome-machine-learning
| 51,964
|
Update changelog for 0.34.1. (#7021)
|
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 82eac94cb9e..44a4bdf6733 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -2,6 +2,19 @@
Certbot adheres to [Semantic Versioning](https://semver.org/).
+## 0.34.1 - master
+
+### Fixed
+
+* certbot-auto no longer prints a blank line when there are no permissions
+ problems.
+
+Despite us having broken lockstep, we are continuing to release new versions of
+all Certbot components during releases for the time being, however, the only
+changes in this release were to certbot-auto.
+
+More details about these changes can be found on our GitHub repo.
+
## 0.34.0 - 2019-05-01
### Changed
|
(cherry picked from commit 4bf6eb2091e3190282b0e2c6540186e64bf4d846)
`master` in the changelog below will be changed to the date by the release script at https://github.com/certbot/certbot/blob/b19d4801c9dea2898402c5b388da4bd10b103d01/tools/_release.sh#L69
|
https://api.github.com/repos/certbot/certbot/pulls/7023
|
2019-05-02T22:01:36Z
|
2019-05-02T22:28:28Z
|
2019-05-02T22:28:28Z
|
2019-05-02T22:28:31Z
| 198
|
certbot/certbot
| 2,385
|
[TrueID] Add extractor
|
diff --git a/yt_dlp/extractor/extractors.py b/yt_dlp/extractor/extractors.py
index 0741a728f11..572c327515c 100644
--- a/yt_dlp/extractor/extractors.py
+++ b/yt_dlp/extractor/extractors.py
@@ -1527,6 +1527,7 @@
TrovoChannelVodIE,
TrovoChannelClipIE,
)
+from .trueid import TrueIDIE
from .trunews import TruNewsIE
from .trutv import TruTVIE
from .tube8 import Tube8IE
diff --git a/yt_dlp/extractor/trueid.py b/yt_dlp/extractor/trueid.py
new file mode 100644
index 00000000000..fc98303abd7
--- /dev/null
+++ b/yt_dlp/extractor/trueid.py
@@ -0,0 +1,139 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+from .common import InfoExtractor
+from ..compat import compat_HTTPError
+from ..utils import (
+ determine_ext,
+ ExtractorError,
+ int_or_none,
+ parse_age_limit,
+ traverse_obj,
+ unified_timestamp,
+ url_or_none
+)
+
+
+class TrueIDIE(InfoExtractor):
+ _VALID_URL = r'https?://(?P<domain>vn\.trueid\.net|trueid\.(?:id|ph))/(?:movie|series/[^/]+)/(?P<id>[^/?#&]+)'
+ _TESTS = [{
+ 'url': 'https://trueid.id/movie/XYNlDOZZJzL6/pengabdi-setan/',
+ 'md5': '2552c7535125885901f1a2a4bcf32ca3',
+ 'info_dict': {
+ 'id': 'XYNlDOZZJzL6',
+ 'ext': 'mp4',
+ 'title': 'Pengabdi Setan',
+ 'display_id': 'pengabdi-setan',
+ 'description': 'md5:b0b41df08601e85e5291496c9bbe52cd',
+ 'timestamp': 1600243511,
+ 'categories': ['Film Indonesia', 'Horror', 'Mystery'],
+ 'release_timestamp': 1593536400,
+ 'release_year': 1982,
+ 'cast': list,
+ 'thumbnail': 'https://cms.dmpcdn.com/movie/2020/09/18/8b6e35c0-f97f-11ea-81fe-c52fc9dd314f_original.png',
+ 'upload_date': '20200916',
+ 'release_date': '20200630',
+ },
+ 'expected_warnings': ['Video is geo restricted.']
+ }, {
+ 'url': 'https://trueid.id/series/zZOBVPb62EwR/qXY73rwyl7oj/one-piece-ep-1/',
+ 'md5': '1c6d976049bc3c89a8a25aed2c3fb081',
+ 'info_dict': {
+ 'id': 'qXY73rwyl7oj',
+ 'ext': 'mp4',
+ 'title': 'One Piece Ep. 1',
+ 'display_id': 'one-piece-ep-1',
+ 'description': 'md5:13226d603bd03c4150a1cf5758e842ea',
+ 'timestamp': 1610421085,
+ 'categories': ['Animation & Cartoon', 'Kids & Family', 'Adventure'],
+ 'release_timestamp': 1612112400,
+ 'release_year': 1999,
+ 'age_limit': 7,
+ 'cast': ['Kounosuke Uda', 'Junji Shimizu'],
+ 'thumbnail': 'https://cms.dmpcdn.com/movie/2021/01/13/f84e9e70-5562-11eb-9fe2-dd6c2099a468_original.png',
+ 'upload_date': '20210112',
+ 'release_date': '20210131',
+ },
+ 'expected_warnings': ['Video is geo restricted.']
+ }, {
+ 'url': 'https://vn.trueid.net/series/7DNPM7Bpa9wv/pwLgEQ4Xbda2/haikyu-vua-bong-chuyen-phan-1/',
+ 'info_dict': {
+ 'id': 'pwLgEQ4Xbda2',
+ 'ext': 'mp4',
+ 'title': 'Haikyu!!: Vua Bóng Chuyền Phần 1 - Tập 1',
+ 'display_id': 'haikyu-vua-bong-chuyen-phan-1-tap-1',
+ 'description': 'md5:0374dd44d247799169449ee30cca963a',
+ 'timestamp': 1629270901,
+ 'categories': ['Anime', 'Phim Hài', 'Phim Học Đường', 'Phim Thể Thao', 'Shounen'],
+ 'release_timestamp': 1629270720,
+ 'release_year': 2014,
+ 'age_limit': 13,
+ 'thumbnail': 'https://cms.dmpcdn.com/movie/2021/09/28/b6e7ec00-2039-11ec-8436-974544e5841f_webp_original.jpg',
+ 'upload_date': '20210818',
+ 'release_date': '20210818',
+ },
+ 'expected_warnings': ['Video is geo restricted.']
+ }, {
+ 'url': 'https://trueid.ph/series/l8rvvAw7Jwv8/l8rvvAw7Jwv8/naruto-trailer/',
+ 'only_matching': True,
+ }]
+ _CUSTOM_RATINGS = {
+ 'PG': 7,
+ }
+
+ def _real_extract(self, url):
+ domain, video_id = self._match_valid_url(url).group('domain', 'id')
+ webpage = self._download_webpage(url, video_id)
+ initial_data = traverse_obj(
+ self._search_nextjs_data(webpage, video_id, fatal=False), ('props', 'pageProps', 'initialContentData'), default={})
+
+ try:
+ stream_data = self._download_json(
+ f'https://{domain}/cmsPostProxy/contents/video/{video_id}/streamer?os=android', video_id, data=b'')['data']
+ except ExtractorError as e:
+ if not isinstance(e.cause, compat_HTTPError):
+ raise e
+ errmsg = self._parse_json(e.cause.read().decode(), video_id)['meta']['message']
+ if 'country' in errmsg:
+ self.raise_geo_restricted(
+ errmsg, [initial_data['display_country']] if initial_data.get('display_country') else None, True)
+ else:
+ self.raise_no_formats(errmsg, video_id=video_id)
+
+ if stream_data:
+ stream_url = stream_data['stream']['stream_url']
+ stream_ext = determine_ext(stream_url)
+ if stream_ext == 'm3u8':
+ formats, subs = self._extract_m3u8_formats_and_subtitles(stream_url, video_id, 'mp4')
+ elif stream_ext == 'mpd':
+ formats, subs = self._extract_mpd_formats_and_subtitles(stream_url, video_id)
+ else:
+ formats = [{'url': stream_url}]
+
+ thumbnails = [
+ {'id': thumb_key, 'url': thumb_url}
+ for thumb_key, thumb_url in (initial_data.get('thumb_list') or {}).items()
+ if url_or_none(thumb_url)]
+
+ return {
+ 'id': video_id,
+ 'title': initial_data.get('title') or self._html_search_regex(
+ [r'Nonton (?P<name>.+) Gratis',
+ r'Xem (?P<name>.+) Miễn phí',
+ r'Watch (?P<name>.+) Free'], webpage, 'title', group='name'),
+ 'display_id': initial_data.get('slug_title'),
+ 'description': initial_data.get('synopsis'),
+ 'timestamp': unified_timestamp(initial_data.get('create_date')),
+        # 'duration': int_or_none(initial_data.get('duration'), invscale=60), # duration field must at least be accurate to the second
+ 'categories': traverse_obj(initial_data, ('article_category_details', ..., 'name')),
+ 'release_timestamp': unified_timestamp(initial_data.get('publish_date')),
+ 'release_year': int_or_none(initial_data.get('release_year')),
+ 'formats': formats,
+ 'subtitles': subs,
+ 'thumbnails': thumbnails,
+ 'age_limit': self._CUSTOM_RATINGS.get(initial_data.get('rate')) or parse_age_limit(initial_data.get('rate')),
+ 'cast': traverse_obj(initial_data, (('actor', 'director'), ...)),
+ 'view_count': int_or_none(initial_data.get('count_views')),
+ 'like_count': int_or_none(initial_data.get('count_likes')),
+ 'average_rating': int_or_none(initial_data.get('count_ratings')),
+ }
|
## Please follow the guide below
- You will be asked some questions, please read them **carefully** and answer honestly
- Put an `x` into all the boxes [ ] relevant to your *pull request* (like that [x])
- Use *Preview* tab to see how your *pull request* will actually look like
---
### Before submitting a *pull request* make sure you have:
- [x] At least skimmed through [contributing guidelines](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#developer-instructions) including [yt-dlp coding conventions](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#yt-dlp-coding-conventions)
- [x] [Searched](https://github.com/yt-dlp/yt-dlp/search?q=is%3Apr&type=Issues) the bugtracker for similar pull requests
- [x] Checked the code with [flake8](https://pypi.python.org/pypi/flake8)
### In order to be accepted and merged into yt-dlp each piece of code must be in public domain or released under [Unlicense](http://unlicense.org/). Check one of the following options:
- [x] I am the original author of this code and I am willing to release it under [Unlicense](http://unlicense.org/)
- [ ] I am not the original author of this code but it is in public domain or released under [Unlicense](http://unlicense.org/) (provide reliable evidence)
### What is the purpose of your *pull request*?
- [ ] Bug fix
- [ ] Improvement
- [x] New extractor
- [ ] New feature
---
### Description of your *pull request* and other information
TODO:
- [x] Support non-Indonesian domains (trueid.ph (?), vn.trueid.net, ~~movie.trueid.net~~)
- [ ] Series extractor? (Delegated for another PR)
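For reference, a minimal way to exercise the new extractor through yt-dlp's Python API (a sketch, not part of this PR; the URL is one of the test URLs above and geo restrictions may apply):
```python
from yt_dlp import YoutubeDL

# Any URL matching TrueIDIE._VALID_URL is routed to the new extractor.
with YoutubeDL({"skip_download": True}) as ydl:
    info = ydl.extract_info("https://trueid.id/movie/XYNlDOZZJzL6/pengabdi-setan/")
    print(info.get("title"))  # e.g. "Pengabdi Setan"
```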
|
https://api.github.com/repos/yt-dlp/yt-dlp/pulls/1847
|
2021-12-01T04:19:07Z
|
2021-12-04T19:23:06Z
|
2021-12-04T19:23:06Z
|
2021-12-04T19:23:06Z
| 2,098
|
yt-dlp/yt-dlp
| 8,207
|
Fix computation of receptive field size for 'channels_last'
|
diff --git a/keras/initializers.py b/keras/initializers.py
index 326c5c5146e..b60d37c4237 100644
--- a/keras/initializers.py
+++ b/keras/initializers.py
@@ -458,7 +458,7 @@ def _compute_fans(shape, data_format='channels_last'):
fan_in = shape[1] * receptive_field_size
fan_out = shape[0] * receptive_field_size
elif data_format == 'channels_last':
- receptive_field_size = np.prod(shape[:2])
+ receptive_field_size = np.prod(shape[:-2])
fan_in = shape[-2] * receptive_field_size
fan_out = shape[-1] * receptive_field_size
else:
|
For the 'channels_last' case, the receptive_field_size needs to be computed as the product of all dimensions except the last two (input_depth, depth). The old version computes it incorrectly for Conv1D and Conv3D.
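For reference, a minimal self-contained sketch of the corrected computation (not the actual keras source; names follow the hunk above):
```python
import numpy as np

def compute_fans_channels_last(shape):
    # For a 'channels_last' kernel shape (*spatial_dims, input_depth, depth),
    # the receptive field is the product of the spatial dims only, i.e.
    # everything except the last two entries. The old shape[:2] was only
    # correct for Conv2D, where the first two dims happen to be spatial.
    receptive_field_size = np.prod(shape[:-2])
    fan_in = shape[-2] * receptive_field_size
    fan_out = shape[-1] * receptive_field_size
    return fan_in, fan_out

# Conv1D kernel (kernel_size=3, input_depth=16, depth=32): receptive field is 3.
print(compute_fans_channels_last((3, 16, 32)))        # (48, 96)
# Conv3D kernel (3, 3, 3, 16, 32): receptive field is 27.
print(compute_fans_channels_last((3, 3, 3, 16, 32)))  # (432, 864)
```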
|
https://api.github.com/repos/keras-team/keras/pulls/7200
|
2017-06-30T22:33:01Z
|
2017-06-30T23:36:23Z
|
2017-06-30T23:36:23Z
|
2017-07-05T14:44:40Z
| 173
|
keras-team/keras
| 47,035
|
re-add mkdirs call before starting localstack docker container
|
diff --git a/localstack/utils/bootstrap.py b/localstack/utils/bootstrap.py
index 9ba0450707588..51685ccd7609c 100644
--- a/localstack/utils/bootstrap.py
+++ b/localstack/utils/bootstrap.py
@@ -510,6 +510,8 @@ def prepare_docker_start():
if DOCKER_CLIENT.is_container_running(container_name):
raise ContainerExists('LocalStack container named "%s" is already running' % container_name)
+ config.dirs.mkdirs()
+
def configure_container(container: LocalstackContainer):
"""
|
Looks like something is still not quite right with the directories and the way they are initialized depending on the environment.
The log files from the CLI tests in master suggest that some directories are again not created properly.
```
[Errno 2] No such file or directory: \'/var/lib/localstack/tmp/localstack_main_container.log\'\n
```
It's not clear to me why the file is `/var/lib/localstack/tmp/localstack_main_container.log` - it should not be `/var/lib/localstack` when starting the CLI. I need to investigate further and clean this up, but I'm adding this band-aid for now.
Log output from the test (very easy to read :grimacing:):
```
FAILED tests/bootstrap/test_cli.py::TestCliContainerLifecycle::test_start_cli_within_container - localstack.utils.container_utils.container_client.ContainerException: ('Docker process returned with errorcode 1', b'\n __ _______ __ __\n / / ____ _________ _/ / ___// /_____ ______/ /__\n / / / __ \\/ ___/ __ `/ /\\__ \\/ __/ __ `/ ___/ //_/\n / /___/ /_/ / /__/ /_/ / /___/ / /_/ /_/ / /__/ ,<\n /_____/\\____/\\___/\\__,_/_//____/\\__/\\__,_/\\___/_/|_|\n\n \xf0\x9f\x92\xbb LocalStack CLI 2.0.0.dev\n\n[20:56:54] starting LocalStack in Docker mode \xf0\x9f\x90\xb3 localstack.py:142\n preparing environment bootstrap.py:629\n configuring container bootstrap.py:637\n', b'Traceback (most recent call last):\n File "/opt/code/localstack/bin/localstack", line 23, in <module>\n main()\n File "/opt/code/localstack/bin/localstack", line 19, in main\n main.main()\n File "/opt/code/localstack/localstack/cli/main.py", line 17, in main\n cli()\n File "/opt/code/localstack/localstack/cli/plugin.py", line 15, in __call__\n self.group(*args, **kwargs)\n File "/opt/code/localstack/.venv/lib/python3.10/site-packages/click/core.py", line 1130, in __call__\n return self.main(*args, **kwargs)\n File "/opt/code/localstack/.venv/lib/python3.10/site-packages/click/core.py", line 1055, in main\n rv = self.invoke(ctx)\n File "/opt/code/localstack/.venv/lib/python3.10/site-packages/click/core.py", line 1657, in invoke\n return _process_result(sub_ctx.command.invoke(sub_ctx))\n File "/opt/code/localstack/.venv/lib/python3.10/site-packages/click/core.py", line 1404, in invoke\n return ctx.invoke(self.callback, **ctx.params)\n File "/opt/code/localstack/.venv/lib/python3.10/site-packages/click/core.py", line 760, in invoke\n return __callback(*args, **kwargs)\n File "/opt/code/localstack/localstack/utils/analytics/cli.py", line 66, in publisher_wrapper\n return fn(*args, **kwargs)\n File "/opt/code/localstack/localstack/cli/localstack.py", line 166, in cmd_start\n bootstrap.start_infra_in_docker_detached(console)\n File "/opt/code/localstack/localstack/utils/bootstrap.py", line 641, in start_infra_in_docker_detached\n container.truncate_log()\n File "/opt/code/localstack/localstack/utils/bootstrap.py", line 454, in truncate_log\n with open(self.logfile, "wb") as fd:\nFileNotFoundError: [Errno 2] No such file or directory: \'/var/lib/localstack/tmp/localstack_main_container.log\'\n')
```
|
https://api.github.com/repos/localstack/localstack/pulls/7944
|
2023-03-23T21:17:27Z
|
2023-03-23T21:43:47Z
|
2023-03-23T21:43:47Z
|
2023-03-24T01:48:35Z
| 120
|
localstack/localstack
| 29,398
|
Make `MultiDiscrete` a `Tuple`-like space
|
diff --git a/gym/spaces/multi_discrete.py b/gym/spaces/multi_discrete.py
index e4d30a0fa7e..4ab7b41b27a 100644
--- a/gym/spaces/multi_discrete.py
+++ b/gym/spaces/multi_discrete.py
@@ -1,5 +1,7 @@
import numpy as np
+from gym.logger import warn
from .space import Space
+from .discrete import Discrete
class MultiDiscrete(Space):
@@ -24,7 +26,6 @@ class MultiDiscrete(Space):
"""
def __init__(self, nvec, dtype=np.int64):
-
"""
nvec: vector of counts of each categorical variable
"""
@@ -54,5 +55,19 @@ def from_jsonable(self, sample_n):
def __repr__(self):
return "MultiDiscrete({})".format(self.nvec)
+ def __getitem__(self, index):
+ nvec = self.nvec[index]
+ if nvec.ndim == 0:
+ subspace = Discrete(nvec)
+ else:
+ subspace = MultiDiscrete(nvec, self.dtype)
+ subspace.np_random.set_state(self.np_random.get_state()) # for reproducibility
+ return subspace
+
+ def __len__(self):
+ if self.nvec.ndim >= 2:
+ warn("Get length of a multi-dimensional MultiDiscrete space.")
+ return len(self.nvec)
+
def __eq__(self, other):
return isinstance(other, MultiDiscrete) and np.all(self.nvec == other.nvec)
diff --git a/gym/spaces/tests/test_spaces.py b/gym/spaces/tests/test_spaces.py
index 5a43f154d60..1d427789809 100644
--- a/gym/spaces/tests/test_spaces.py
+++ b/gym/spaces/tests/test_spaces.py
@@ -1,6 +1,5 @@
import json # note: ujson fails this test due to float equality
import copy
-from collections import OrderedDict
import numpy as np
import pytest
@@ -244,6 +243,10 @@ def convert_sample_hashable(sample):
return sample
+def sample_equal(sample1, sample2):
+ return convert_sample_hashable(sample1) == convert_sample_hashable(sample2)
+
+
@pytest.mark.parametrize(
"space",
[
@@ -277,9 +280,7 @@ def test_seed_reproducibility(space):
space2.seed(None)
assert space1.seed(0) == space2.seed(0)
-
- sample1, sample2 = space1.sample(), space2.sample()
- assert convert_sample_hashable(sample1) == convert_sample_hashable(sample2)
+ assert sample_equal(space1.sample(), space2.sample())
@pytest.mark.parametrize(
@@ -314,3 +315,54 @@ def test_seed_subspace_incorrelated(space):
]
assert len(states) == len(set(states))
+
+
+def test_multidiscrete_as_tuple():
+ # 1D multi-discrete
+ space = MultiDiscrete([3, 4, 5])
+
+ assert space.shape == (3,)
+ assert space[0] == Discrete(3)
+ assert space[0:1] == MultiDiscrete([3])
+ assert space[0:2] == MultiDiscrete([3, 4])
+ assert space[:] == space and space[:] is not space
+ assert len(space) == 3
+
+ # 2D multi-discrete
+ space = MultiDiscrete([[3, 4, 5], [6, 7, 8]])
+
+ assert space.shape == (2, 3)
+ assert space[0, 1] == Discrete(4)
+ assert space[0] == MultiDiscrete([3, 4, 5])
+ assert space[0:1] == MultiDiscrete([[3, 4, 5]])
+ assert space[0:2, :] == MultiDiscrete([[3, 4, 5], [6, 7, 8]])
+ assert space[:, 0:1] == MultiDiscrete([[3], [6]])
+ assert space[0:2, 0:2] == MultiDiscrete([[3, 4], [6, 7]])
+ assert space[:] == space and space[:] is not space
+ assert space[:, :] == space and space[:, :] is not space
+
+
+def test_multidiscrete_subspace_reproducibility():
+ # 1D multi-discrete
+ space = MultiDiscrete([100, 200, 300])
+ space.seed(None)
+
+ assert sample_equal(space[0].sample(), space[0].sample())
+ assert sample_equal(space[0:1].sample(), space[0:1].sample())
+ assert sample_equal(space[0:2].sample(), space[0:2].sample())
+ assert sample_equal(space[:].sample(), space[:].sample())
+ assert sample_equal(space[:].sample(), space.sample())
+
+ # 2D multi-discrete
+ space = MultiDiscrete([[300, 400, 500], [600, 700, 800]])
+ space.seed(None)
+
+ assert sample_equal(space[0, 1].sample(), space[0, 1].sample())
+ assert sample_equal(space[0].sample(), space[0].sample())
+ assert sample_equal(space[0:1].sample(), space[0:1].sample())
+ assert sample_equal(space[0:2, :].sample(), space[0:2, :].sample())
+ assert sample_equal(space[:, 0:1].sample(), space[:, 0:1].sample())
+ assert sample_equal(space[0:2, 0:2].sample(), space[0:2, 0:2].sample())
+ assert sample_equal(space[:].sample(), space[:].sample())
+ assert sample_equal(space[:, :].sample(), space[:, :].sample())
+ assert sample_equal(space[:, :].sample(), space.sample())
|
Add `__getitem__` and `__len__` method for `MultiDiscrete` space.
As exampled in `Tuple`:
https://github.com/openai/gym/blob/2bd436bf5362f813fa2db4e8c940e36cadd59ddc/gym/spaces/tuple.py#L5-L11
this PR makes:
```python
t = Tuple((Discrete(2), Discrete(3)))
md = MultiDiscrete([2, 3])
t[0] # -> Discrete(2)
md[0] # -> Discrete(2)
```
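A quick sketch of how the new indexing behaves on a 2-D space, mirroring the tests added in this PR:
```python
from gym.spaces import Discrete, MultiDiscrete

space = MultiDiscrete([[3, 4, 5], [6, 7, 8]])

# Scalar indexing collapses to a Discrete subspace...
assert space[0, 1] == Discrete(4)
# ...while slicing yields a MultiDiscrete over the selected counts.
assert space[0] == MultiDiscrete([3, 4, 5])
assert space[:, 0:1] == MultiDiscrete([[3], [6]])
# space[:] is an equal but distinct copy of the whole space.
assert space[:] == space and space[:] is not space
```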
|
https://api.github.com/repos/openai/gym/pulls/2364
|
2021-08-27T09:30:36Z
|
2021-09-11T16:54:53Z
|
2021-09-11T16:54:53Z
|
2021-09-11T17:01:16Z
| 1,368
|
openai/gym
| 5,833
|
Fixed false positives
|
diff --git a/sherlock/resources/data.json b/sherlock/resources/data.json
index 363c8b1ed..57a9fbb97 100644
--- a/sherlock/resources/data.json
+++ b/sherlock/resources/data.json
@@ -1789,7 +1789,8 @@
"username_unclaimed": "noonewouldeverusethis7"
},
"Star Citizen": {
- "errorType": "status_code",
+ "errorMsg": "404",
+ "errorType": "message",
"url": "https://robertsspaceindustries.com/citizens/{}",
"urlMain": "https://robertsspaceindustries.com/",
"username_claimed": "blue",
@@ -2075,7 +2076,7 @@
},
"Whonix Forum": {
"errorType": "status_code",
- "url": "https://forums.whonix.org/u/{}",
+ "url": "https://forums.whonix.org/u/{}/summary",
"urlMain": "https://forums.whonix.org/",
"username_claimed": "red",
"username_unclaimed": "noonewouldeverusethis7"
@@ -2094,8 +2095,8 @@
"url": "https://en.wikipedia.org/wiki/Special:CentralAuth/{}?uselang=qqx",
"urlMain": "https://www.wikipedia.org/",
"username_claimed": "Hoadlck",
- "username_unclaimed": "noonewouldeverusethis7"
- },
+ "username_unclaimed": "noonewouldeverusethis8"
+ },
"Windy": {
"errorType": "status_code",
"url": "https://community.windy.com/user/{}",
|
Fixed the false positive with the Star Citizen/Roberts Space Industries website, so it will now correctly report existing accounts and no longer display false positives. Whonix now also correctly reports existing accounts while not displaying false positives.
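To illustrate the Star Citizen change, here is a hedged sketch of what the new `message`-type check amounts to (simplified pseudo-logic, not Sherlock's actual internals; the `requests` usage is an assumption):
```python
import requests

def star_citizen_username_exists(username: str) -> bool:
    # data.json now uses errorType "message" with errorMsg "404": an
    # unclaimed profile is detected by the page *content* rather than
    # the HTTP status code, which was the false-positive-prone check.
    url = f"https://robertsspaceindustries.com/citizens/{username}"
    resp = requests.get(url, timeout=10)
    return "404" not in resp.text
```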
|
https://api.github.com/repos/sherlock-project/sherlock/pulls/1468
|
2022-09-21T01:47:34Z
|
2022-09-23T20:36:25Z
|
2022-09-23T20:36:25Z
|
2022-09-23T20:36:25Z
| 407
|
sherlock-project/sherlock
| 36,330
|
[extractor/pornhub] Fix download error in UK.
|
diff --git a/yt_dlp/extractor/pornhub.py b/yt_dlp/extractor/pornhub.py
index 2f5a572a5b9..11c14d08fe3 100644
--- a/yt_dlp/extractor/pornhub.py
+++ b/yt_dlp/extractor/pornhub.py
@@ -61,6 +61,7 @@ def _real_initialize(self):
def _set_age_cookies(self, host):
self._set_cookie(host, 'age_verified', '1')
self._set_cookie(host, 'accessAgeDisclaimerPH', '1')
+ self._set_cookie(host, 'accessAgeDisclaimerUK', '1')
self._set_cookie(host, 'accessPH', '1')
def _login(self, host):
|
This fix resolves the problem of not being able to download Pornhub videos in the UK region.
**IMPORTANT**: PRs without the template will be CLOSED
### Description of your *pull request* and other information
<!--
Explanation of your *pull request* in arbitrary form goes here. Please **make sure the description explains the purpose and effect** of your *pull request* and is worded well enough to be understood. Provide as much **context and examples** as possible
-->
This fix resolves the problem of not being able to download Pornhub videos in the UK region.
Fixes #
Download issues with Pornhub in the United Kingdom region
<details open><summary>Template</summary> <!-- OPEN is intentional -->
<!--
# PLEASE FOLLOW THE GUIDE BELOW
- You will be asked some questions, please read them **carefully** and answer honestly
- Put an `x` into all the boxes `[ ]` relevant to your *pull request* (like [x])
- Use *Preview* tab to see how your *pull request* will actually look like
-->
### Before submitting a *pull request* make sure you have:
- [x] At least skimmed through [contributing guidelines](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#developer-instructions) including [yt-dlp coding conventions](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#yt-dlp-coding-conventions)
- [x] [Searched](https://github.com/yt-dlp/yt-dlp/search?q=is%3Apr&type=Issues) the bugtracker for similar pull requests
- [x] Checked the code with [flake8](https://pypi.python.org/pypi/flake8) and [ran relevant tests](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#developer-instructions)
### In order to be accepted and merged into yt-dlp each piece of code must be in public domain or released under [Unlicense](http://unlicense.org/). Check all of the following options that apply:
- [ ] I am the original author of this code and I am willing to release it under [Unlicense](http://unlicense.org/)
- [x] I am not the original author of this code but it is in public domain or released under [Unlicense](http://unlicense.org/) (provide reliable evidence)
### What is the purpose of your *pull request*?
- [x] Fix or improvement to an extractor (Make sure to add/update tests)
- [ ] New extractor ([Piracy websites will not be accepted](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#is-the-website-primarily-used-for-piracy))
- [ ] Core bug fix/improvement
- [ ] New feature (It is strongly [recommended to open an issue first](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#adding-new-feature-or-making-overarching-changes))
<!-- Do NOT edit/remove anything below this! -->
</details>
|
https://api.github.com/repos/yt-dlp/yt-dlp/pulls/7591
|
2023-07-14T16:53:33Z
|
2023-07-15T20:54:19Z
|
2023-07-15T20:54:19Z
|
2023-07-15T20:54:20Z
| 176
|
yt-dlp/yt-dlp
| 7,397
|
Correctly handle single-byte Content-Range
|
diff --git a/httpie/downloads.py b/httpie/downloads.py
index 4bd136b109..3c749a53ec 100644
--- a/httpie/downloads.py
+++ b/httpie/downloads.py
@@ -81,7 +81,7 @@ def parse_content_range(content_range: str, resumed_from: int) -> int:
# last-byte-pos value, is invalid. The recipient of an invalid
# byte-content-range- spec MUST ignore it and any content
# transferred along with it."
- if (first_byte_pos >= last_byte_pos
+ if (first_byte_pos > last_byte_pos
or (instance_length is not None
and instance_length <= last_byte_pos)):
raise ContentRangeError(
diff --git a/tests/test_downloads.py b/tests/test_downloads.py
index f32043b82a..969021d9db 100644
--- a/tests/test_downloads.py
+++ b/tests/test_downloads.py
@@ -30,6 +30,9 @@ def test_Content_Range_parsing(self):
assert parse('bytes 100-199/200', 100) == 200
assert parse('bytes 100-199/*', 100) == 200
+ # single byte
+ assert parse('bytes 100-100/*', 100) == 101
+
# missing
pytest.raises(ContentRangeError, parse, None, 100)
@@ -45,9 +48,6 @@ def test_Content_Range_parsing(self):
# invalid byte-range-resp-spec
pytest.raises(ContentRangeError, parse, 'bytes 100-99/199', 100)
- # invalid byte-range-resp-spec
- pytest.raises(ContentRangeError, parse, 'bytes 100-100/*', 100)
-
@pytest.mark.parametrize('header, expected_filename', [
('attachment; filename=hello-WORLD_123.txt', 'hello-WORLD_123.txt'),
('attachment; filename=".hello-WORLD_123.txt"', 'hello-WORLD_123.txt'),
|
HTTPie fails if it continues a download with a single byte left. For example:
```
$ http -b -d example.org
Downloading 1.23 kB to "index.html"
Done. 1.23 kB in 0.00043s (2.81 MB/s)
$ python3 -c 'import sys; sys.stdout.write(open("index.html").read()[:-1])' > index-short.html
$ http -b -d -c -o index-short.html example.org
http: error: ContentRangeError: Invalid Content-Range returned: 'bytes 1255-1255/1256'
```
The range is inclusive on both ends.
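A minimal sketch of the corrected validity check (mirroring the condition in `parse_content_range` above; the header parsing itself is elided):
```python
from typing import Optional

def is_valid_byte_range(first_byte_pos: int, last_byte_pos: int,
                        instance_length: Optional[int]) -> bool:
    # Both positions are inclusive, so first == last is a legal
    # single-byte range; only first > last, or a last byte at or past
    # the instance length, is invalid.
    if first_byte_pos > last_byte_pos:
        return False
    if instance_length is not None and instance_length <= last_byte_pos:
        return False
    return True

assert is_valid_byte_range(1255, 1255, 1256)  # 'bytes 1255-1255/1256' now accepted
assert not is_valid_byte_range(100, 99, 199)  # still rejected
```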
|
https://api.github.com/repos/httpie/cli/pulls/1032
|
2021-02-13T20:08:28Z
|
2021-02-14T12:30:59Z
|
2021-02-14T12:30:59Z
|
2021-02-14T12:32:53Z
| 445
|
httpie/cli
| 33,948
|
liquid.com - withdrawals fix #10758
|
diff --git a/js/liquid.js b/js/liquid.js
index 87e881e424c4..eaf76e3c57f0 100644
--- a/js/liquid.js
+++ b/js/liquid.js
@@ -987,31 +987,34 @@ module.exports = class liquid extends Exchange {
const currency = this.currency (code);
const request = {
// 'auth_code': '', // optional 2fa code
- 'currency': currency['id'],
- 'address': address,
- 'amount': amount,
- // 'payment_id': tag, // for XRP only
- // 'memo_type': 'text', // 'text', 'id' or 'hash', for XLM only
- // 'memo_value': tag, // for XLM only
+ 'crypto_withdrawal': {
+ 'currency': currency['id'],
+ 'address': address,
+ 'amount': amount,
+ // 'payment_id': tag, // for XRP only
+ // 'memo_type': 'text', // 'text', 'id' or 'hash', for XLM only
+ // 'memo_value': tag, // for XLM only
+ },
};
if (tag !== undefined) {
if (code === 'XRP') {
- request['payment_id'] = tag;
+ request['crypto_withdrawal']['payment_id'] = tag;
} else if (code === 'XLM') {
- request['memo_type'] = 'text'; // overrideable via params
- request['memo_value'] = tag;
+ request['crypto_withdrawal']['memo_type'] = 'text'; // overrideable via params
+ request['crypto_withdrawal']['memo_value'] = tag;
} else {
throw new NotSupported (this.id + ' withdraw() only supports a tag along the address for XRP or XLM');
}
}
const networks = this.safeValue (this.options, 'networks', {});
- let network = this.safeStringUpper (params, 'network'); // this line allows the user to specify either ERC20 or ETH
+ const paramsCwArray = this.safeValue (params, 'crypto_withdrawal', {});
+ let network = this.safeStringUpper (paramsCwArray, 'network'); // this line allows the user to specify either ERC20 or ETH
network = this.safeString (networks, network, network); // handle ERC20>ETH alias
if (network !== undefined) {
- request['network'] = network;
- params = this.omit (params, 'network');
+ request['crypto_withdrawal']['network'] = network;
+ params['crypto_withdrawal'] = this.omit (params['crypto_withdrawal'], 'network');
}
- const response = await this.privatePostCryptoWithdrawals (this.extend (request, params));
+ const response = await this.privatePostCryptoWithdrawals (this.deepExtend (request, params));
//
// {
// "id": 1353,
|
https://api.github.com/repos/ccxt/ccxt/pulls/10802
|
2021-12-07T17:46:30Z
|
2021-12-07T19:12:00Z
|
2021-12-07T19:12:00Z
|
2021-12-07T19:15:25Z
| 670
|
ccxt/ccxt
| 13,525
|
|
Redirect duplicate
|
diff --git a/Methodology and Resources/Active Directory Attack.md b/Methodology and Resources/Active Directory Attack.md
index 917bef75e6..75bb712cf2 100644
--- a/Methodology and Resources/Active Directory Attack.md
+++ b/Methodology and Resources/Active Directory Attack.md
@@ -224,15 +224,7 @@ Use the correct collector
* AzureHound for Azure Active Directory
* SharpHound for local Active Directory
-* use [AzureHound](https://posts.specterops.io/introducing-bloodhound-4-0-the-azure-update-9b2b26c5e350)
- ```powershell
- # require: Install-Module -name Az -AllowClobber
- # require: Install-Module -name AzureADPreview -AllowClobber
- Connect-AzureAD
- Connect-AzAccount
- . .\AzureHound.ps1
- Invoke-AzureHound
- ```
+* use [AzureHound](https://github.com/BloodHoundAD/AzureHound) (more info: [Cloud - Azure Pentest](Cloud%20-%20Azure%20Pentest.md/#azure-recon-tools))
* use [BloodHound](https://github.com/BloodHoundAD/BloodHound)
```powershell
@@ -4051,4 +4043,4 @@ CME 10.XXX.XXX.XXX:445 HOSTNAME-01 [+] DOMAIN\COMPUTER$ 31d6cfe0d16ae
* [Sapphire tickets - The Hacker Recipes](https://www.thehacker.recipes/ad/movement/kerberos/forged-tickets/sapphire)
* [Exploiting RBCD Using a Normal User Account - tiraniddo.dev - Friday, 13 May 2022](https://www.tiraniddo.dev/2022/05/exploiting-rbcd-using-normal-user.html)
* [Exploring SCCM by Unobfuscating Network Access Accounts - @_xpn_ - Posted on 2022-07-09](https://blog.xpnsec.com/unobfuscating-network-access-accounts/)
-* [.NET Advanced Code Auditing XmlSerializer Deserialization Vulnerability - April 2, 2019 by znlive](https://znlive.com/xmlserializer-deserialization-vulnerability)
\ No newline at end of file
+* [.NET Advanced Code Auditing XmlSerializer Deserialization Vulnerability - April 2, 2019 by znlive](https://znlive.com/xmlserializer-deserialization-vulnerability)
|
I think you will have to be careful with the relative link conversion from the GitHub repo `Cloud%20-%20Azure%20Pentest.md#azure-recon-tools` to the website `Cloud%20-%20Azure%20Pentest/#azure-recon-tools`
|
https://api.github.com/repos/swisskyrepo/PayloadsAllTheThings/pulls/595
|
2022-11-04T13:45:04Z
|
2022-11-04T14:15:55Z
|
2022-11-04T14:15:55Z
|
2022-11-07T10:50:28Z
| 563
|
swisskyrepo/PayloadsAllTheThings
| 8,611
|
fix for is_terminal
|
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 2ef99e5e8..21386b2a1 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -5,6 +5,12 @@ All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
+## [13.3.4] - 2023-04-12
+
+### Fixed
+
+- Fixed for `is_terminal` ignoring FORCE_COLOR https://github.com/Textualize/rich/pull/2923
+
## [13.3.3] - 2023-02-27
### Added
@@ -1930,6 +1936,8 @@ Major version bump for a breaking change to `Text.stylize signature`, which corr
- First official release, API still to be stabilized
+[13.3.4]: https://github.com/textualize/rich/compare/v13.3.3...v13.3.4
+[13.3.3]: https://github.com/textualize/rich/compare/v13.3.2...v13.3.3
[13.3.2]: https://github.com/textualize/rich/compare/v13.3.1...v13.3.2
[13.3.1]: https://github.com/textualize/rich/compare/v13.3.0...v13.3.1
[13.3.0]: https://github.com/textualize/rich/compare/v13.2.0...v13.3.0
diff --git a/rich/console.py b/rich/console.py
index 8a0fdcd9b..cd6f5e57e 100644
--- a/rich/console.py
+++ b/rich/console.py
@@ -952,6 +952,7 @@ def is_terminal(self) -> bool:
force_color = self._environ.get("FORCE_COLOR")
if force_color is not None:
self._force_terminal = True
+ return True
isatty: Optional[Callable[[], bool]] = getattr(self.file, "isatty", None)
try:
@@ -2000,7 +2001,6 @@ def _check_buffer(self) -> None:
self._record_buffer.extend(self._buffer[:])
if self._buffer_index == 0:
-
if self.is_jupyter: # pragma: no cover
from .jupyter import display
diff --git a/tests/test_console.py b/tests/test_console.py
index 3bcddefd0..990dce143 100644
--- a/tests/test_console.py
+++ b/tests/test_console.py
@@ -979,3 +979,15 @@ def test_force_color_jupyter():
file=io.StringIO(), _environ={"FORCE_COLOR": "1"}, force_jupyter=True
)
assert not console.is_terminal
+
+
+def test_force_color():
+ console = Console(
+ file=io.StringIO(),
+ _environ={
+ "FORCE_COLOR": "1",
+ "TERM": "xterm-256color",
+ "COLORTERM": "truecolor",
+ },
+ )
+ assert console.color_system in ("truecolor", "windows")
|
Fixes `FORCE_COLOR` not being respected.
Fixes https://github.com/Textualize/rich/issues/2859
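A small usage sketch of the behaviour after this fix, using the same `_environ` hook as the test above:
```python
import io
from rich.console import Console

# The file is not a TTY, but FORCE_COLOR now short-circuits
# is_terminal to True, so a color system is still detected.
console = Console(file=io.StringIO(), _environ={"FORCE_COLOR": "1"})
assert console.is_terminal
```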
|
https://api.github.com/repos/Textualize/rich/pulls/2923
|
2023-04-12T16:35:16Z
|
2023-04-12T16:48:19Z
|
2023-04-12T16:48:19Z
|
2023-04-12T16:48:20Z
| 760
|
Textualize/rich
| 48,015
|
Auto focus chat input
|
diff --git a/website/src/components/Chat/ChatForm.tsx b/website/src/components/Chat/ChatForm.tsx
index 97a98ff2ff..47206b35fc 100644
--- a/website/src/components/Chat/ChatForm.tsx
+++ b/website/src/components/Chat/ChatForm.tsx
@@ -1,8 +1,9 @@
-import { Box, CircularProgress, Flex, Textarea } from "@chakra-ui/react";
+import { Box, CircularProgress, Flex, Textarea, useBreakpointValue } from "@chakra-ui/react";
import { Send } from "lucide-react";
import { useTranslation } from "next-i18next";
-import { forwardRef, KeyboardEvent, SyntheticEvent, useCallback } from "react";
+import { forwardRef, KeyboardEvent, SyntheticEvent, useCallback, useEffect } from "react";
import TextareaAutosize from "react-textarea-autosize";
+import { useFallbackRef } from "src/hooks/ui/useFallbackRef";
import { QueueInfo } from "src/lib/chat_stream";
import { ChatConfigDrawer } from "./ChatConfigMobile";
@@ -15,7 +16,7 @@ type ChatFormProps = {
};
// eslint-disable-next-line react/display-name
-export const ChatForm = forwardRef<HTMLTextAreaElement, ChatFormProps>((props, ref) => {
+export const ChatForm = forwardRef<HTMLTextAreaElement, ChatFormProps>((props, forwardedRef) => {
const { isSending, onSubmit: onSubmit, queueInfo } = props;
const { t } = useTranslation("chat");
const handleSubmit = useCallback(
@@ -34,6 +35,16 @@ export const ChatForm = forwardRef<HTMLTextAreaElement, ChatFormProps>((props, r
},
[onSubmit]
);
+
+ const ref = useFallbackRef(forwardedRef);
+ const isDeskTop = useBreakpointValue({ base: false, md: true });
+
+ useEffect(() => {
+ if (isDeskTop) {
+ ref.current?.focus();
+ }
+ }, [isDeskTop, ref]);
+
return (
<Box as="form" maxWidth={{ base: "3xl", "2xl": "4xl" }} onSubmit={handleSubmit} className="py-2 w-full mx-auto">
<div className="relative">
diff --git a/website/src/hooks/ui/useFallbackRef.ts b/website/src/hooks/ui/useFallbackRef.ts
new file mode 100644
index 0000000000..0dab2c2f53
--- /dev/null
+++ b/website/src/hooks/ui/useFallbackRef.ts
@@ -0,0 +1,7 @@
+import { ForwardedRef, RefObject, useRef } from "react";
+
+export const useFallbackRef = <T>(maybeRef?: ForwardedRef<T>) => {
+ const ref = useRef(null);
+
+ return (maybeRef || ref) as RefObject<T>;
+};
|
I decided to focus only on desktop, since it is bad practice to auto-focus on mobile (the mobile keyboard takes up a lot of space), but let's see what the user feedback is.
|
https://api.github.com/repos/LAION-AI/Open-Assistant/pulls/2998
|
2023-05-01T16:06:43Z
|
2023-05-01T18:01:01Z
|
2023-05-01T18:01:01Z
|
2023-05-01T18:01:03Z
| 640
|
LAION-AI/Open-Assistant
| 37,572
|
[inference] removed redundancy init_batch
|
diff --git a/colossalai/inference/core/request_handler.py b/colossalai/inference/core/request_handler.py
index 585f879456f2..80d77d09759f 100644
--- a/colossalai/inference/core/request_handler.py
+++ b/colossalai/inference/core/request_handler.py
@@ -171,7 +171,7 @@ def schedule(self):
if self.running_list.ready_for_prefill():
for seq in self.running_list.prefill:
seq.mark_running()
- self.prefill_batch.init_batch(self.running_list.prefill)
+ self.prefill_batch.add_seqs(self.running_list.prefill)
return self.prefill_batch
if not self.running_batch.is_empty:
diff --git a/colossalai/inference/struct.py b/colossalai/inference/struct.py
index 22b5b5a3ab2f..766e54ab1415 100644
--- a/colossalai/inference/struct.py
+++ b/colossalai/inference/struct.py
@@ -188,24 +188,6 @@ def __post_init__(self):
if self.fd_inter_tensor is None:
self.fd_inter_tensor = FDIntermTensors()
- def init_batch(self, seqs: List["Sequence"] = None):
- """
- Initializes inference batches by input sentence list.
-
- Args:
- seqs (List["Sequence"]): List of input sequence.
- """
-
- if seqs is not None:
- if not isinstance(seqs, list):
- seqs = [seqs]
- for seq in seqs:
- if seq in self.sequences_set:
- logger.warning(f"The sequence(request_id {seq.request_id}) is already in sequences_set.")
- continue
-
- self.sequences_set.add(seq)
-
def init_fd_tensors(self):
if not self.fd_inter_tensor.is_initialized:
self.fd_inter_tensor.initialize(
@@ -273,19 +255,19 @@ def abort_seq(self, seq: "Sequence") -> "Sequence":
self.sequences_set.discard(seq)
return seq
- def add_seqs(self, seqs: List["Sequence"]) -> None:
+ def add_seqs(self, seqs: Union[Sequence, List[Sequence]]) -> None:
"""
Add new sequence to batch
Args:
seqs (List["Sequence"]): The list of new sequences.
"""
-
- if not isinstance(seqs, list):
+        # convert a single sequence to a list
+ if isinstance(seqs, Sequence):
seqs = [seqs]
for seq in seqs:
- if self.sequences_set and seq in self.sequences_set:
+ if seq in self.sequences_set:
logger.warning(f"The sequence(request_id {seq.request_id}) is already in sequences_set.")
continue
self.sequences_set.add(seq)
diff --git a/tests/test_infer/test_config_and_struct.py b/tests/test_infer/test_config_and_struct.py
index 16f5bcc7f0b2..e0736518ca95 100755
--- a/tests/test_infer/test_config_and_struct.py
+++ b/tests/test_infer/test_config_and_struct.py
@@ -60,9 +60,8 @@ def check_config_and_inference():
num_heads=2,
head_dim=128,
)
- batch.init_batch([sequence])
- batch.add_seqs([sequence2, sequence3])
batch.add_seqs([sequence])
+ batch.add_seqs([sequence2, sequence3])
assert batch.is_empty == False
assert batch.get_batch_size() == 3
|
## 📌 Checklist before creating the PR
- [ ] I have created an issue for this PR for traceability
- [ ] The title follows the standard format: `[doc/gemini/tensor/...]: A concise description`
- [ ] I have added relevant tags if possible for us to better distinguish different PRs
## 🚨 Issue number
> Link this PR to your issue with words like fixed to automatically close the linked issue upon merge
>
> e.g. `fixed #1234`, `closed #1234`, `resolved #1234`
## 📝 What does this PR do?
> Summarize your work here.
> if you have any plots/diagrams/screenshots/tables, please attach them here.
## 💥 Checklist before requesting a review
- [ ] I have linked my PR to an issue ([instruction](https://docs.github.com/en/issues/tracking-your-work-with-issues/linking-a-pull-request-to-an-issue))
- [ ] My issue clearly describes the problem/feature/proposal, with diagrams/charts/table/code if possible
- [ ] I have performed a self-review of my code
- [ ] I have added thorough tests.
- [ ] I have added docstrings for all the functions/methods I implemented
## ⭐️ Do you enjoy contributing to Colossal-AI?
- [ ] 🌝 Yes, I do.
- [ ] 🌚 No, I don't.
Tell us more if you don't enjoy contributing to Colossal-AI.
|
https://api.github.com/repos/hpcaitech/ColossalAI/pulls/5353
|
2024-02-02T03:40:32Z
|
2024-02-02T03:44:15Z
|
2024-02-02T03:44:15Z
|
2024-02-02T03:44:31Z
| 791
|
hpcaitech/ColossalAI
| 11,206
|
Clarify missing files error message.
|
diff --git a/gpt_engineer/db.py b/gpt_engineer/db.py
index c3db67c103..0d7e64eed6 100644
--- a/gpt_engineer/db.py
+++ b/gpt_engineer/db.py
@@ -18,7 +18,7 @@ def __getitem__(self, key):
full_path = self.path / key
if not full_path.is_file():
- raise KeyError(key)
+ raise KeyError(f"File '{key}' could not be found in '{self.path}'")
with full_path.open("r", encoding="utf-8") as f:
return f.read()
|
What do you think about clarifying the error message when a file is missing? I had trouble understanding what was wrong when I missed some files such as the prompt "main_prompt" or the "feedback" file.
This commit changes the error message to help the user understand what needs to be done to execute the program correctly.
i.e. when missing the "feedback" file, the error before this commit was:
`KeyError: 'feedback'`
and now it is:
`KeyError: "File 'feedback' could not be found in 'C:\\xxx\\xxxx\\gpt-engineer\\projects\\test'"`
As the figure below shows:

|
https://api.github.com/repos/gpt-engineer-org/gpt-engineer/pulls/384
|
2023-06-24T21:00:32Z
|
2023-06-25T14:44:37Z
|
2023-06-25T14:44:37Z
|
2023-06-25T14:44:38Z
| 140
|
gpt-engineer-org/gpt-engineer
| 33,282
|
Explicitly annotate this with `Final[str]` to make it work in mypyc 1.0.0+.
|
diff --git a/src/blib2to3/pgen2/tokenize.py b/src/blib2to3/pgen2/tokenize.py
index 257dbef4a19..a6353d154c9 100644
--- a/src/blib2to3/pgen2/tokenize.py
+++ b/src/blib2to3/pgen2/tokenize.py
@@ -425,7 +425,7 @@ def generate_tokens(
logical line; continuation lines are included.
"""
lnum = parenlev = continued = 0
- numchars: Final = "0123456789"
+ numchars: Final[str] = "0123456789"
contstr, needcont = "", 0
contline: Optional[str] = None
indents = [0]
|
I filed relevant mypyc bug here: https://github.com/mypyc/mypyc/issues/990
This is an error only in mypyc 1.0.0+
|
https://api.github.com/repos/psf/black/pulls/3645
|
2023-04-13T23:03:10Z
|
2023-04-14T00:12:05Z
|
2023-04-14T00:12:05Z
|
2023-04-14T18:08:27Z
| 174
|
psf/black
| 24,042
|
Switch Docker release to be based on Debian instead of Alpine
|
diff --git a/CHANGELOG.md b/CHANGELOG.md
index c0901cbe1a..86543af234 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -57,6 +57,7 @@ If you depend on these features, please raise your voice in
* Fix SNI-related reproducibility issues when exporting to curl/httpie commands. (@dkasak)
* Add option `export_preserve_original_ip` to force exported command to connect to IP from original request. Only supports curl at the moment. (@dkasak)
* Major proxy protocol testing (@r00t-)
+* Switch Docker image release to be based on Debian (@PeterDaveHello)
* --- TODO: add new PRs above this line ---
* ... and various other fixes, documentation improvements, dependency version bumps, etc.
diff --git a/release/docker/Dockerfile b/release/docker/Dockerfile
index 0e9ef3fd0f..3916cabd06 100644
--- a/release/docker/Dockerfile
+++ b/release/docker/Dockerfile
@@ -1,36 +1,16 @@
-FROM alpine:3.12
-
-ENV LANG=en_US.UTF-8
+FROM python:3.9-slim-buster
ARG WHEEL_MITMPROXY
ARG WHEEL_BASENAME_MITMPROXY
+RUN useradd -mU mitmproxy
+RUN apt-get update \
+ && apt-get install -y gosu \
+ && rm -rf /var/lib/apt/lists/*
+
COPY $WHEEL_MITMPROXY /home/mitmproxy/
-
-# Add our user first to make sure the ID get assigned consistently,
-# regardless of whatever dependencies get added.
-RUN addgroup -S mitmproxy && adduser -S -G mitmproxy mitmproxy \
- && apk add --no-cache \
- su-exec \
- git \
- g++ \
- libffi \
- libffi-dev \
- libstdc++ \
- openssl \
- openssl-dev \
- python3 \
- python3-dev \
- && python3 -m ensurepip --upgrade \
- && pip3 install -U pip \
- && LDFLAGS=-L/lib pip3 install -U /home/mitmproxy/${WHEEL_BASENAME_MITMPROXY} \
- && apk del --purge \
- git \
- g++ \
- libffi-dev \
- openssl-dev \
- python3-dev \
- && rm -rf ~/.cache/pip /home/mitmproxy/${WHEEL_BASENAME_MITMPROXY}
+RUN pip3 install --no-cache-dir -U /home/mitmproxy/${WHEEL_BASENAME_MITMPROXY} \
+ && rm -rf /home/mitmproxy/${WHEEL_BASENAME_MITMPROXY}
VOLUME /home/mitmproxy/.mitmproxy
diff --git a/release/docker/docker-entrypoint.sh b/release/docker/docker-entrypoint.sh
index 84ea81e6a9..1f267d2f31 100755
--- a/release/docker/docker-entrypoint.sh
+++ b/release/docker/docker-entrypoint.sh
@@ -1,4 +1,4 @@
-#!/bin/sh
+#!/bin/bash
# WARNING: do not change the shebang - the Docker base image might not have what you want!
set -o errexit
@@ -11,7 +11,7 @@ MITMPROXY_PATH="/home/mitmproxy/.mitmproxy"
if [[ "$1" = "mitmdump" || "$1" = "mitmproxy" || "$1" = "mitmweb" ]]; then
mkdir -p "$MITMPROXY_PATH"
chown -R mitmproxy:mitmproxy "$MITMPROXY_PATH"
- su-exec mitmproxy "$@"
+ gosu mitmproxy "$@"
else
exec "$@"
fi
|
#### Description
As @mhils invited, this rewrites the Docker image to replace Alpine Linux. The final image size is about 179MB, compared with the original 123MB, which should not be too big :D
It's a large rewrite and I'm not very familiar with mitmproxy, so please help test that its functionality is as expected, thanks.
fix #3712
#### Checklist
- [ ] I have updated tests where applicable.
- [x] I have added an entry to the CHANGELOG.
|
https://api.github.com/repos/mitmproxy/mitmproxy/pulls/4493
|
2021-03-11T15:12:09Z
|
2021-03-11T15:49:08Z
|
2021-03-11T15:49:07Z
|
2021-03-11T16:06:20Z
| 858
|
mitmproxy/mitmproxy
| 27,591
|
Updated playbooks_prompts.rst
|
diff --git a/docs/docsite/rst/user_guide/playbooks_prompts.rst b/docs/docsite/rst/user_guide/playbooks_prompts.rst
index f54ebe23d161e4..79e24f7b718541 100644
--- a/docs/docsite/rst/user_guide/playbooks_prompts.rst
+++ b/docs/docsite/rst/user_guide/playbooks_prompts.rst
@@ -94,7 +94,7 @@ Depending on your platform at most the following crypt schemes are supported:
If you need to put in special characters (i.e `{%`) that might create templating errors, use the ``unsafe`` option::
vars_prompt:
- - name: "my_password_with_wierd_chars"
+ - name: "my_password_with_weird_chars"
prompt: "Enter password"
unsafe: yes
private: yes
|
##### SUMMARY
Fixed typo: "wierd" > "weird"
##### ISSUE TYPE
- Docs Pull Request
+label: docsite_pr
|
https://api.github.com/repos/ansible/ansible/pulls/62003
|
2019-09-09T15:56:13Z
|
2019-09-10T01:27:01Z
|
2019-09-10T01:27:01Z
|
2019-10-08T13:21:10Z
| 187
|
ansible/ansible
| 49,613
|
[NFC] polish ./colossalai/amp/torch_amp/__init__.py code style
|
diff --git a/colossalai/amp/torch_amp/__init__.py b/colossalai/amp/torch_amp/__init__.py
index 8943b86d6aaf..893cc890d68e 100644
--- a/colossalai/amp/torch_amp/__init__.py
+++ b/colossalai/amp/torch_amp/__init__.py
@@ -1,9 +1,12 @@
+from typing import Optional
+
import torch.nn as nn
-from torch.optim import Optimizer
from torch.nn.modules.loss import _Loss
+from torch.optim import Optimizer
+
from colossalai.context import Config
-from .torch_amp import TorchAMPOptimizer, TorchAMPModel, TorchAMPLoss
-from typing import Optional
+
+from .torch_amp import TorchAMPLoss, TorchAMPModel, TorchAMPOptimizer
def convert_to_torch_amp(model: nn.Module,
|
https://api.github.com/repos/hpcaitech/ColossalAI/pulls/1836
|
2022-11-08T11:21:47Z
|
2022-11-08T11:35:19Z
|
2022-11-08T11:35:19Z
|
2022-11-08T11:35:19Z
| 198
|
hpcaitech/ColossalAI
| 11,644
|
|
Fix references to letsencrypt.client
|
diff --git a/MANIFEST.in b/MANIFEST.in
index b628121e1d6..f9364d64f4a 100644
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -3,8 +3,7 @@ include CHANGES.rst
include CONTRIBUTING.md
include linter_plugin.py
include letsencrypt/EULA
-
-recursive-include letsencrypt/client/tests/testdata *
+recursive-include letsencrypt/tests/testdata *
recursive-include acme/schemata *.json
recursive-include acme/jose/testdata *
diff --git a/docs/api/client/proof_of_possession.rst b/docs/api/client/proof_of_possession.rst
deleted file mode 100644
index 9f1ea079366..00000000000
--- a/docs/api/client/proof_of_possession.rst
+++ /dev/null
@@ -1,5 +0,0 @@
-:mod:`letsencrypt.client.proof_of_possession`
---------------------------------------------------
-
-.. automodule:: letsencrypt.client.proof_of_possession
- :members:
diff --git a/docs/api/proof_of_possession.rst b/docs/api/proof_of_possession.rst
new file mode 100644
index 00000000000..db8c6c56319
--- /dev/null
+++ b/docs/api/proof_of_possession.rst
@@ -0,0 +1,5 @@
+:mod:`letsencrypt.proof_of_possession`
+--------------------------------------
+
+.. automodule:: letsencrypt.proof_of_possession
+ :members:
diff --git a/letsencrypt/continuity_auth.py b/letsencrypt/continuity_auth.py
index 8bfc0bfeb11..739e33d4347 100644
--- a/letsencrypt/continuity_auth.py
+++ b/letsencrypt/continuity_auth.py
@@ -19,7 +19,7 @@ class ContinuityAuthenticator(object):
:ivar proof_of_pos: Performs "proofOfPossession" challenges.
:type proof_of_pos:
- :class:`letsencrypt.client.proof_of_possession.Proof_of_Possession`
+ :class:`letsencrypt.proof_of_possession.Proof_of_Possession`
"""
zope.interface.implements(interfaces.IAuthenticator)
@@ -32,7 +32,7 @@ def __init__(self, config, installer):
:type config: :class:`letsencrypt.interfaces.IConfig`
:param installer: Let's Encrypt Installer.
- :type installer: :class:`letsencrypt.client.interfaces.IInstaller`
+ :type installer: :class:`letsencrypt.interfaces.IInstaller`
"""
self.rec_token = recovery_token.RecoveryToken(
diff --git a/letsencrypt_nginx/configurator.py b/letsencrypt_nginx/configurator.py
index ffb9bd3b26b..d2deee15ad9 100644
--- a/letsencrypt_nginx/configurator.py
+++ b/letsencrypt_nginx/configurator.py
@@ -271,7 +271,7 @@ def _make_server_ssl(self, vhost):
the existing one?
:param vhost: The vhost to add SSL to.
- :type vhost: :class:`~letsencrypt.client.plugins.nginx.obj.VirtualHost`
+ :type vhost: :class:`~letsencrypt_nginx.obj.VirtualHost`
"""
ssl_block = [['listen', '443 ssl'],
diff --git a/letsencrypt_nginx/dvsni.py b/letsencrypt_nginx/dvsni.py
index 534c5a8d367..5c188099c87 100644
--- a/letsencrypt_nginx/dvsni.py
+++ b/letsencrypt_nginx/dvsni.py
@@ -78,7 +78,7 @@ def _mod_config(self, ll_addrs):
"""Modifies Nginx config to include challenge server blocks.
:param list ll_addrs: list of lists of
- :class:`letsencrypt.client.plugins.apache.obj.Addr` to apply
+ :class:`letsencrypt_nginx.obj.Addr` to apply
:raises errors.LetsEncryptMisconfigurationError:
Unable to find a suitable HTTP block to include DVSNI hosts.
@@ -115,7 +115,7 @@ def _make_server_block(self, achall, addrs):
"""Creates a server block for a DVSNI challenge.
:param achall: Annotated DVSNI challenge.
- :type achall: :class:`letsencrypt.client.achallenges.DVSNI`
+ :type achall: :class:`letsencrypt.achallenges.DVSNI`
:param list addrs: addresses of challenged domain
:class:`list` of type :class:`~nginx.obj.Addr`
|
Cleanup after #401.
|
https://api.github.com/repos/certbot/certbot/pulls/409
|
2015-05-12T22:08:41Z
|
2015-05-13T18:59:50Z
|
2015-05-13T18:59:50Z
|
2016-05-06T19:21:29Z
| 1,051
|
certbot/certbot
| 1,972
|
spelling mistake trining -> training
|
diff --git a/keras/datasets/imdb.py b/keras/datasets/imdb.py
index fcd3c41737f..f911b4363e3 100644
--- a/keras/datasets/imdb.py
+++ b/keras/datasets/imdb.py
@@ -35,7 +35,7 @@ def load_data(path='imdb.npz', num_words=None, skip_top=0,
Note that the 'out of vocabulary' character is only used for
words that were present in the training set but are not included
because they're not making the `num_words` cut here.
- Words that were not seen in the trining set but are in the test set
+ Words that were not seen in the training set but are in the test set
have simply been skipped.
"""
path = get_file(path,
diff --git a/keras/datasets/reuters.py b/keras/datasets/reuters.py
index 22f88bd9758..749f640a52d 100644
--- a/keras/datasets/reuters.py
+++ b/keras/datasets/reuters.py
@@ -33,7 +33,7 @@ def load_data(path='reuters.npz', num_words=None, skip_top=0,
Note that the 'out of vocabulary' character is only used for
words that were present in the training set but are not included
because they're not making the `num_words` cut here.
- Words that were not seen in the trining set but are in the test set
+ Words that were not seen in the training set but are in the test set
have simply been skipped.
"""
path = get_file(path, origin='https://s3.amazonaws.com/text-datasets/reuters.npz')
|
Spelling mistake, as in the title.
|
https://api.github.com/repos/keras-team/keras/pulls/5597
|
2017-03-04T01:26:33Z
|
2017-03-04T01:46:15Z
|
2017-03-04T01:46:15Z
|
2017-03-21T02:09:27Z
| 392
|
keras-team/keras
| 47,422
|
bpo-40241: What's New in Python 3.9: opaque PyGC_Head
|
diff --git a/Doc/whatsnew/3.9.rst b/Doc/whatsnew/3.9.rst
index ccc84cced1090b..b20cd14565ae12 100644
--- a/Doc/whatsnew/3.9.rst
+++ b/Doc/whatsnew/3.9.rst
@@ -1098,6 +1098,10 @@ Porting to Python 3.9
and refers to a constant string.
(Contributed by Serhiy Storchaka in :issue:`38650`.)
+* The :c:type:`PyGC_Head` structure is now opaque. It is only defined in the
+ internal C API (``pycore_gc.h``).
+ (Contributed by Victor Stinner in :issue:`40241`.)
+
Removed
-------
|
<!--
Thanks for your contribution!
Please read this comment in its entirety. It's quite important.
# Pull Request title
It should be in the following format:
```
bpo-NNNN: Summary of the changes made
```
Where: bpo-NNNN refers to the issue number in the https://bugs.python.org.
Most PRs will require an issue number. Trivial changes, like fixing a typo, do not need an issue.
# Backport Pull Request title
If this is a backport PR (PR made against branches other than `master`),
please ensure that the PR title is in the following format:
```
[X.Y] <title from the original PR> (GH-NNNN)
```
Where: [X.Y] is the branch name, e.g. [3.6].
GH-NNNN refers to the PR number from `master`.
-->
<!-- issue-number: [bpo-40241](https://bugs.python.org/issue40241) -->
https://bugs.python.org/issue40241
<!-- /issue-number -->
|
https://api.github.com/repos/python/cpython/pulls/20586
|
2020-06-02T01:27:48Z
|
2020-06-02T10:02:59Z
|
2020-06-02T10:02:59Z
|
2020-06-02T10:03:16Z
| 190
|
python/cpython
| 4,252
|
[RLlib] CRR Tests fixes.
|
diff --git a/rllib/BUILD b/rllib/BUILD
index eac79f3842c05..9ad6dcae8e4e4 100644
--- a/rllib/BUILD
+++ b/rllib/BUILD
@@ -793,11 +793,11 @@ py_test(
srcs = ["algorithms/cql/tests/test_cql.py"]
)
-# CRRTrainer
+# CRR
py_test(
name = "test_crr",
- tags = ["team:ml", "trainers_dir"],
- size = "small",
+ tags = ["team:ml", "algorithms_dir"],
+ size = "medium",
srcs = ["algorithms/crr/tests/test_crr.py"]
)
diff --git a/rllib/algorithms/crr/__init__.py b/rllib/algorithms/crr/__init__.py
index 3ae23af257d3d..592549abc109b 100644
--- a/rllib/algorithms/crr/__init__.py
+++ b/rllib/algorithms/crr/__init__.py
@@ -1,4 +1,4 @@
-from .crr import CRR, CRRConfig
+from ray.rllib.algorithms.crr.crr import CRR, CRRConfig
__all__ = [
"CRR",
diff --git a/rllib/algorithms/crr/crr_config.py b/rllib/algorithms/crr/crr_config.py
deleted file mode 100644
index 7df6e2c4b21f8..0000000000000
--- a/rllib/algorithms/crr/crr_config.py
+++ /dev/null
@@ -1,120 +0,0 @@
-import logging
-from typing import Optional, List
-
-from ray.rllib.algorithms.crr import CRR
-from ray.rllib.agents.trainer_config import TrainerConfig
-
-logger = logging.getLogger(__name__)
-
-
-class CRRConfig(TrainerConfig):
- def __init__(self, trainer_class=None):
- super().__init__(trainer_class=trainer_class or CRR)
-
- # fmt: off
- # __sphinx_doc_begin__
- # CRR-specific settings.
- self.weight_type = "bin"
- self.temperature = 1.0
- self.max_weight = 20.0
- self.advantage_type = "mean"
- self.n_action_sample = 4
- self.twin_q = True
- self.target_update_grad_intervals = 100
- self.replay_buffer_config = {
- "type": "ReplayBuffer",
- "capacity": 50000,
- # How many steps of the model to sample before learning starts.
- "learning_starts": 1000,
- "replay_batch_size": 32,
- # The number of contiguous environment steps to replay at once. This
- # may be set to greater than 1 to support recurrent models.
- "replay_sequence_length": 1,
- }
- self.actor_hiddens = [256, 256]
- self.actor_hidden_activation = "relu"
- self.critic_hiddens = [256, 256]
- self.critic_hidden_activation = "relu"
- self.critic_lr = 3e-4
- self.actor_lr = 3e-4
- self.tau = 5e-3
- # __sphinx_doc_end__
- # fmt: on
-
- # overriding the trainer config default
- self.num_workers = 0 # offline RL does not need rollout workers
-
- def training(
- self,
- *,
- weight_type: Optional[str] = None,
- temperature: Optional[float] = None,
- max_weight: Optional[float] = None,
- advantage_type: Optional[str] = None,
- n_action_sample: Optional[int] = None,
- twin_q: Optional[bool] = None,
- target_update_grad_intervals: Optional[int] = None,
- replay_buffer_config: Optional[dict] = None,
- actor_hiddens: Optional[List[int]] = None,
- actor_hidden_activation: Optional[str] = None,
- critic_hiddens: Optional[List[int]] = None,
- critic_hidden_activation: Optional[str] = None,
- tau: Optional[float] = None,
- **kwargs,
- ) -> "CRRConfig":
-
- """
- === CRR configs
-
- Args:
- weight_type: weight type to use `bin` | `exp`.
- temperature: the exponent temperature used in exp weight type.
- max_weight: the max weight limit for exp weight type.
- advantage_type: The way we reduce q values to v_t values `max` | `mean`.
- n_action_sample: the number of actions to sample for v_t estimation.
- twin_q: if True, uses pessimistic q estimation.
- target_update_grad_intervals: The frequency at which we update the
- target copy of the model in terms of the number of gradient updates
- applied to the main model.
- replay_buffer_config: The config dictionary for replay buffer.
- actor_hiddens: The number of hidden units in the actor's fc network.
- actor_hidden_activation: The activation used in the actor's fc network.
- critic_hiddens: The number of hidden units in the critic's fc network.
- critic_hidden_activation: The activation used in the critic's fc network.
- tau: Polyak averaging coefficient
- (making it 1 is reduces it to a hard update).
- **kwargs: forward compatibility kwargs
-
- Returns:
- This updated CRRConfig object.
- """
- super().training(**kwargs)
-
- if weight_type is not None:
- self.weight_type = weight_type
- if temperature is not None:
- self.temperature = temperature
- if max_weight is not None:
- self.max_weight = max_weight
- if advantage_type is not None:
- self.advantage_type = advantage_type
- if n_action_sample is not None:
- self.n_action_sample = n_action_sample
- if twin_q is not None:
- self.twin_q = twin_q
- if target_update_grad_intervals is not None:
- self.target_update_grad_intervals = target_update_grad_intervals
- if replay_buffer_config is not None:
- self.replay_buffer_config = replay_buffer_config
- if actor_hiddens is not None:
- self.actor_hiddens = actor_hiddens
- if actor_hidden_activation is not None:
- self.actor_hidden_activation = actor_hidden_activation
- if critic_hiddens is not None:
- self.critic_hiddens = critic_hiddens
- if critic_hidden_activation is not None:
- self.critic_hidden_activation = critic_hidden_activation
- if tau is not None:
- self.tau = tau
-
- return self
|
<!-- Thank you for your contribution! Please review https://github.com/ray-project/ray/blob/master/CONTRIBUTING.rst before opening a pull request. -->
CRR Tests fixes.
<!-- Please add a reviewer to the assignee section when you create a PR. If you don't have the access to it, we will shortly find a reviewer and assign them to your PR. -->
## Why are these changes needed?
<!-- Please give a short summary of the change and the problem this solves. -->
## Related issue number
<!-- For example: "Closes #1234" -->
## Checks
- [x] I've run `scripts/format.sh` to lint the changes in this PR.
- [ ] I've included any doc changes needed for https://docs.ray.io/en/master/.
- [ ] I've made sure the tests are passing. Note that there might be a few flaky tests, see the recent failures at https://flakey-tests.ray.io/
- Testing Strategy
- [ ] Unit tests
- [ ] Release tests
- [ ] This PR is not tested :(
|
https://api.github.com/repos/ray-project/ray/pulls/25586
|
2022-06-08T15:47:39Z
|
2022-06-08T17:18:56Z
|
2022-06-08T17:18:56Z
|
2022-06-08T17:18:56Z
| 1,532
|
ray-project/ray
| 19,691
|
[tune] Unflattened lookup for ProgressReporter
|
diff --git a/python/ray/tune/progress_reporter.py b/python/ray/tune/progress_reporter.py
index dc0b3ba722459..c0bccc62153b2 100644
--- a/python/ray/tune/progress_reporter.py
+++ b/python/ray/tune/progress_reporter.py
@@ -5,7 +5,7 @@
from ray.tune.result import (EPISODE_REWARD_MEAN, MEAN_ACCURACY, MEAN_LOSS,
TRAINING_ITERATION, TIME_TOTAL_S, TIMESTEPS_TOTAL)
-from ray.tune.utils import flatten_dict
+from ray.tune.utils import unflattened_lookup
try:
from collections.abc import Mapping
@@ -466,9 +466,9 @@ def _get_trial_info(trial, parameters, metrics):
parameters (list[str]): Names of trial parameters to include.
metrics (list[str]): Names of metrics to include.
"""
- result = flatten_dict(trial.last_result)
- config = flatten_dict(trial.config)
+ result = trial.last_result
+ config = trial.config
trial_info = [str(trial), trial.status, str(trial.location)]
- trial_info += [config.get(param) for param in parameters]
- trial_info += [result.get(metric) for metric in metrics]
+ trial_info += [unflattened_lookup(param, config) for param in parameters]
+ trial_info += [unflattened_lookup(metric, result) for metric in metrics]
return trial_info
diff --git a/python/ray/tune/tests/test_progress_reporter.py b/python/ray/tune/tests/test_progress_reporter.py
index c8afa18ed46c7..4141c82333791 100644
--- a/python/ray/tune/tests/test_progress_reporter.py
+++ b/python/ray/tune/tests/test_progress_reporter.py
@@ -22,15 +22,15 @@
EXPECTED_RESULT_2 = """Result logdir: /foo
Number of trials: 5 (1 PENDING, 3 RUNNING, 1 TERMINATED)
-+--------------+------------+-------+-----+-----+
-| Trial name | status | loc | a | b |
-|--------------+------------+-------+-----+-----|
-| 00000 | TERMINATED | here | 0 | 0 |
-| 00001 | PENDING | here | 1 | 2 |
-| 00002 | RUNNING | here | 2 | 4 |
-| 00003 | RUNNING | here | 3 | 6 |
-| 00004 | RUNNING | here | 4 | 8 |
-+--------------+------------+-------+-----+-----+"""
++--------------+------------+-------+-----+-----+---------+---------+
+| Trial name | status | loc | a | b | n/k/0 | n/k/1 |
+|--------------+------------+-------+-----+-----+---------+---------|
+| 00000 | TERMINATED | here | 0 | 0 | 0 | 0 |
+| 00001 | PENDING | here | 1 | 2 | 1 | 2 |
+| 00002 | RUNNING | here | 2 | 4 | 2 | 4 |
+| 00003 | RUNNING | here | 3 | 6 | 3 | 6 |
+| 00004 | RUNNING | here | 4 | 8 | 4 | 8 |
++--------------+------------+-------+-----+-----+---------+---------+"""
EXPECTED_RESULT_3 = """Result logdir: /foo
Number of trials: 5 (1 PENDING, 3 RUNNING, 1 TERMINATED)
@@ -246,21 +246,29 @@ def testProgressStr(self):
t.trial_id = "%05d" % i
t.local_dir = "/foo"
t.location = "here"
- t.config = {"a": i, "b": i * 2}
- t.evaluated_params = t.config
+ t.config = {"a": i, "b": i * 2, "n": {"k": [i, 2 * i]}}
+ t.evaluated_params = {
+ "a": i,
+ "b": i * 2,
+ "n/k/0": i,
+ "n/k/1": 2 * i
+ }
t.last_result = {
"config": {
"a": i,
- "b": i * 2
+ "b": i * 2,
+ "n": {
+ "k": [i, 2 * i]
+ }
},
"metric_1": i / 2,
"metric_2": i / 4
}
t.__str__ = lambda self: self.trial_id
trials.append(t)
- # One metric, all parameters
+ # One metric, two parameters
prog1 = trial_progress_str(
- trials, ["metric_1"], None, fmt="psql", max_rows=3)
+ trials, ["metric_1"], ["a", "b"], fmt="psql", max_rows=3)
print(prog1)
assert prog1 == EXPECTED_RESULT_1
diff --git a/python/ray/tune/utils/__init__.py b/python/ray/tune/utils/__init__.py
index 42d9abc890d7d..0eed502b8ce15 100644
--- a/python/ray/tune/utils/__init__.py
+++ b/python/ray/tune/utils/__init__.py
@@ -1,6 +1,6 @@
from ray.tune.utils.util import deep_update, flatten_dict, get_pinned_object, \
- merge_dicts, pin_in_object_store, UtilMonitor, validate_save_restore, \
- warn_if_slow
+ merge_dicts, pin_in_object_store, unflattened_lookup, UtilMonitor, \
+ validate_save_restore, warn_if_slow
__all__ = [
"deep_update",
@@ -8,6 +8,7 @@
"get_pinned_object",
"merge_dicts",
"pin_in_object_store",
+ "unflattened_lookup",
"UtilMonitor",
"validate_save_restore",
"warn_if_slow",
diff --git a/python/ray/tune/utils/util.py b/python/ray/tune/utils/util.py
index 195d155aa27a6..60fe732b6c74f 100644
--- a/python/ray/tune/utils/util.py
+++ b/python/ray/tune/utils/util.py
@@ -2,7 +2,7 @@
import logging
import threading
import time
-from collections import defaultdict
+from collections import defaultdict, deque, Mapping, Sequence
from threading import Thread
import numpy as np
@@ -216,6 +216,27 @@ def flatten_dict(dt, delimiter="/"):
return dt
+def unflattened_lookup(flat_key, lookup, delimiter="/", default=None):
+ """
+ Unflatten `flat_key` and iteratively look up in `lookup`. E.g.
+ `flat_key="a/0/b"` will try to return `lookup["a"][0]["b"]`.
+ """
+ keys = deque(flat_key.split(delimiter))
+ base = lookup
+ while keys:
+ key = keys.popleft()
+ try:
+ if isinstance(base, Mapping):
+ base = base[key]
+ elif isinstance(base, Sequence):
+ base = base[int(key)]
+ else:
+ raise KeyError()
+ except KeyError:
+ return default
+ return base
+
+
def _to_pinnable(obj):
"""Converts obj to a form that can be pinned in object store memory.
|
<!-- Thank you for your contribution! Please review https://github.com/ray-project/ray/blob/master/CONTRIBUTING.rst before opening a pull request. -->
## Why are these changes needed?
`ProgressReporter` calls `_get_trial_info()` to get the data to display. For the lookup, `_get_trial_info()` flattened the `result` and `config` dicts - however, `flatten_dict()` does not flatten lists. This led to empty lookups for nested config values, as described in #9307.
With this fix, `_get_trial_info()` uses a new util function `unflattened_lookup()` instead, which works the other way around: the lookup keys are split and iteratively queried from the data dict.
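As a concrete illustration, a minimal standalone sketch of that lookup (error handling is slightly broader than in the diff, which catches only `KeyError`):
```python
from collections import deque
from collections.abc import Mapping, Sequence

def unflattened_lookup(flat_key, lookup, delimiter="/", default=None):
    """Split `flat_key` on `delimiter` and walk `lookup` one step at a time."""
    keys = deque(flat_key.split(delimiter))
    base = lookup
    while keys:
        key = keys.popleft()
        try:
            if isinstance(base, Mapping):
                base = base[key]
            elif isinstance(base, Sequence):
                base = base[int(key)]  # list positions arrive as strings
            else:
                return default
        except (KeyError, IndexError, ValueError):
            return default
    return base

config = {"a": 1, "n": {"k": [10, 20]}}
assert unflattened_lookup("n/k/1", config) == 20             # nested list index
assert unflattened_lookup("n/x", config, default=-1) == -1   # missing key
```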
## Related issue number
Should fix #9307
## Checks
- [x] I've run `scripts/format.sh` to lint the changes in this PR.
- [x] I've included any doc changes needed for https://docs.ray.io/en/latest/.
- [x] I've made sure the tests are passing. Note that there might be a few flaky tests, see the recent failure rates at https://ray-travis-tracker.herokuapp.com/.
- Testing Strategy
- [x] Unit tests
- [ ] Release tests
- [ ] This PR is not tested (please justify below)
|
https://api.github.com/repos/ray-project/ray/pulls/9525
|
2020-07-16T18:14:09Z
|
2020-07-17T20:52:55Z
|
2020-07-17T20:52:55Z
|
2020-07-20T07:41:36Z
| 1,760
|
ray-project/ray
| 19,421
|
Log fatal error if plasma manager or local scheduler heartbeats take too long.
|
diff --git a/src/common/CMakeLists.txt b/src/common/CMakeLists.txt
index 303ecf4ab4eaf..27cb1a969c8e1 100644
--- a/src/common/CMakeLists.txt
+++ b/src/common/CMakeLists.txt
@@ -79,7 +79,7 @@ function(define_test test_name library)
add_executable(${test_name} test/${test_name}.cc ${ARGN})
add_dependencies(${test_name} hiredis flatbuffers_ep)
target_link_libraries(${test_name} common ${FLATBUFFERS_STATIC_LIB} ${library})
- target_compile_options(${test_name} PUBLIC "-DPLASMA_TEST -DLOCAL_SCHEDULER_TEST -DCOMMON_TEST -DRAY_COMMON_LOG_LEVEL=4 -DRAY_TIMEOUT=50")
+ target_compile_options(${test_name} PUBLIC "-DPLASMA_TEST -DLOCAL_SCHEDULER_TEST -DCOMMON_TEST -DRAY_COMMON_LOG_LEVEL=4")
endfunction()
define_test(common_tests "")
diff --git a/src/common/common.cc b/src/common/common.cc
index 91b36797cad9b..e032ae3ecf8ec 100644
--- a/src/common/common.cc
+++ b/src/common/common.cc
@@ -1,5 +1,6 @@
#include "common.h"
+#include <chrono>
#include <stdio.h>
#include <unistd.h>
#include <sys/types.h>
@@ -65,3 +66,10 @@ char *ObjectID_to_string(ObjectID obj_id, char *id_string, int id_length) {
return id_string;
}
+
+int64_t current_time_ms() {
+ std::chrono::milliseconds ms_since_epoch =
+ std::chrono::duration_cast<std::chrono::milliseconds>(
+ std::chrono::system_clock::now().time_since_epoch());
+ return ms_since_epoch.count();
+}
diff --git a/src/common/common.h b/src/common/common.h
index 9560123d7ab84..6c09913acbbd9 100644
--- a/src/common/common.h
+++ b/src/common/common.h
@@ -232,4 +232,11 @@ bool DBClientID_equal(DBClientID first_id, DBClientID second_id);
extern const unsigned char NIL_DIGEST[DIGEST_SIZE];
+/**
+ * Return the current time in milliseconds since the Unix epoch.
+ *
+ * @return The number of milliseconds since the Unix epoch.
+ */
+int64_t current_time_ms();
+
#endif
diff --git a/src/local_scheduler/local_scheduler.cc b/src/local_scheduler/local_scheduler.cc
index f6500d3e0d408..1454f5e1575aa 100644
--- a/src/local_scheduler/local_scheduler.cc
+++ b/src/local_scheduler/local_scheduler.cc
@@ -405,6 +405,9 @@ LocalSchedulerState *LocalSchedulerState_init(
start_worker(state, NIL_ACTOR_ID);
}
+ /* Initialize the time at which the previous heartbeat was sent. */
+ state->previous_heartbeat_time = current_time_ms();
+
return state;
}
@@ -1122,6 +1125,17 @@ void handle_actor_creation_callback(ActorID actor_id,
int heartbeat_handler(event_loop *loop, timer_id id, void *context) {
LocalSchedulerState *state = (LocalSchedulerState *) context;
SchedulingAlgorithmState *algorithm_state = state->algorithm_state;
+
+ /* Check that the last heartbeat was not sent too long ago. */
+ int64_t current_time = current_time_ms();
+ CHECK(current_time >= state->previous_heartbeat_time);
+ if (current_time - state->previous_heartbeat_time >
+ NUM_HEARTBEATS_TIMEOUT * HEARTBEAT_TIMEOUT_MILLISECONDS) {
+ LOG_FATAL("The last heartbeat was sent %" PRId64 " milliseconds ago.",
+ current_time - state->previous_heartbeat_time);
+ }
+ state->previous_heartbeat_time = current_time;
+
LocalSchedulerInfo info;
/* Ask the scheduling algorithm to fill out the scheduler info struct. */
provide_scheduler_info(state, algorithm_state, &info);
diff --git a/src/local_scheduler/local_scheduler_shared.h b/src/local_scheduler/local_scheduler_shared.h
index 94627b7abeb3d..8b35c5ec4d91d 100644
--- a/src/local_scheduler/local_scheduler_shared.h
+++ b/src/local_scheduler/local_scheduler_shared.h
@@ -78,6 +78,9 @@ struct LocalSchedulerState {
* available_gpus.size() == dynamic_resources[ResourceIndex_GPU] should
* always be true. */
std::vector<int> available_gpus;
+ /** The time (in milliseconds since the Unix epoch) when the most recent
+ * heartbeat was sent. */
+ int64_t previous_heartbeat_time;
};
/** Contains all information associated with a local scheduler client. */
diff --git a/src/plasma/plasma_manager.cc b/src/plasma/plasma_manager.cc
index e9308a29570b5..b54cb8fdc5028 100644
--- a/src/plasma/plasma_manager.cc
+++ b/src/plasma/plasma_manager.cc
@@ -236,6 +236,9 @@ struct PlasmaManagerState {
ObjectWaitRequests *object_wait_requests_remote;
/** Initialize an empty hash map for the cache of local available object. */
AvailableObject *local_available_objects;
+ /** The time (in milliseconds since the Unix epoch) when the most recent
+ * heartbeat was sent. */
+ int64_t previous_heartbeat_time;
};
PlasmaManagerState *g_manager_state = NULL;
@@ -553,6 +556,8 @@ PlasmaManagerState *PlasmaManagerState_init(const char *store_socket_name,
/* Add the callback that processes the notification to the event loop. */
event_loop_add_file(state->loop, plasma_fd, EVENT_LOOP_READ,
process_object_notification, state);
+ /* Initialize the time at which the previous heartbeat was sent. */
+ state->previous_heartbeat_time = current_time_ms();
return state;
}
@@ -1590,6 +1595,17 @@ void process_message(event_loop *loop,
int heartbeat_handler(event_loop *loop, timer_id id, void *context) {
PlasmaManagerState *state = (PlasmaManagerState *) context;
+
+ /* Check that the last heartbeat was not sent too long ago. */
+ int64_t current_time = current_time_ms();
+ CHECK(current_time >= state->previous_heartbeat_time);
+ if (current_time - state->previous_heartbeat_time >
+ NUM_HEARTBEATS_TIMEOUT * HEARTBEAT_TIMEOUT_MILLISECONDS) {
+ LOG_FATAL("The last heartbeat was sent %" PRId64 " milliseconds ago.",
+ current_time - state->previous_heartbeat_time);
+ }
+ state->previous_heartbeat_time = current_time;
+
plasma_manager_send_heartbeat(state->db);
return HEARTBEAT_TIMEOUT_MILLISECONDS;
}
diff --git a/src/plasma/plasma_manager.h b/src/plasma/plasma_manager.h
index f28224570c57e..e9247a4f15a83 100644
--- a/src/plasma/plasma_manager.h
+++ b/src/plasma/plasma_manager.h
@@ -12,11 +12,9 @@
#endif
/* Timeouts are in milliseconds. */
-#ifndef RAY_TIMEOUT
#define MANAGER_TIMEOUT 1000
-#else
-#define MANAGER_TIMEOUT RAY_TIMEOUT
-#endif
+
+#define NUM_HEARTBEATS_TIMEOUT 100
/* The buffer size in bytes. Data will get transfered in multiples of this */
#define BUFSIZE 4096
|
https://api.github.com/repos/ray-project/ray/pulls/676
|
2017-06-16T05:35:41Z
|
2017-06-16T19:11:02Z
|
2017-06-16T19:11:02Z
|
2017-06-16T19:11:04Z
| 1,659
|
ray-project/ray
| 19,257
|
|
Escape paths when passing them to glob
|
diff --git a/server.py b/server.py
index d040604998..b2e16716ba 100644
--- a/server.py
+++ b/server.py
@@ -132,12 +132,12 @@ def get_embeddings(self):
@routes.get("/extensions")
async def get_extensions(request):
files = glob.glob(os.path.join(
- self.web_root, 'extensions/**/*.js'), recursive=True)
+ glob.escape(self.web_root), 'extensions/**/*.js'), recursive=True)
extensions = list(map(lambda f: "/" + os.path.relpath(f, self.web_root).replace("\\", "/"), files))
for name, dir in nodes.EXTENSION_WEB_DIRS.items():
- files = glob.glob(os.path.join(dir, '**/*.js'), recursive=True)
+ files = glob.glob(os.path.join(glob.escape(dir), '**/*.js'), recursive=True)
extensions.extend(list(map(lambda f: "/extensions/" + urllib.parse.quote(
name) + "/" + os.path.relpath(f, dir).replace("\\", "/"), files)))
|
Try to prevent JS search from breaking on pathnames with square brackets.
Tested by renaming my ComfyUI path to `ComfyUI[test]`, then starting without the patch -> no custom nodes show up, nor the "Manager" button. After checking out the branch with my change and starting again, the Manager button shows up and custom nodes seem to work.
Note that while this is the only use of glob I could find in the ComfyUI code, there are probably custom nodes with the same issue, so I don't know if this will actually make for a reasonable UX if there are square brackets in any pathname.
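For reference, a small demonstration of the escaping behavior (the path is hypothetical):
```python
import glob
import os

base = "/home/user/ComfyUI[test]"  # hypothetical install path with glob metacharacters

# Unescaped, "[test]" is parsed as a character class, so nothing under the
# real directory would match:
broken = glob.glob(os.path.join(base, "extensions/**/*.js"), recursive=True)

# glob.escape() neutralizes metacharacters in the fixed prefix while leaving
# the wildcard suffix intact:
fixed = glob.glob(os.path.join(glob.escape(base), "extensions/**/*.js"), recursive=True)
```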
|
https://api.github.com/repos/comfyanonymous/ComfyUI/pulls/1557
|
2023-09-19T12:21:01Z
|
2023-09-19T17:16:48Z
|
2023-09-19T17:16:48Z
|
2023-09-19T17:16:48Z
| 225
|
comfyanonymous/ComfyUI
| 17,943
|
Fallback to filename for title in vlc_telnet
|
diff --git a/homeassistant/components/vlc_telnet/media_player.py b/homeassistant/components/vlc_telnet/media_player.py
index 85a39197ca81..303c5cfc85a4 100644
--- a/homeassistant/components/vlc_telnet/media_player.py
+++ b/homeassistant/components/vlc_telnet/media_player.py
@@ -156,6 +156,12 @@ def update(self):
self._media_artist = info.get(0, {}).get("artist")
self._media_title = info.get(0, {}).get("title")
+ if not self._media_title:
+ # Fall back to filename.
+ data_info = info.get("data")
+ if data_info:
+ self._media_title = data_info["filename"]
+
except (CommandError, LuaError, ParseError) as err:
_LOGGER.error("Command error: %s", err)
except (ConnErr, EOFError) as err:
|
PR extracted from #44776.
## Proposed change
Handle the case where metadata is missing more smoothly.
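A minimal sketch of the fallback; the layout of `info` (track metadata under key `0`, stream data under `"data"`) is inferred from the diff above:
```python
# Assumed shape of the VLC info dict, based on the diff above.
info = {0: {"artist": "Some Artist"}, "data": {"filename": "track01.mp3"}}

media_title = info.get(0, {}).get("title")
if not media_title:
    # Fall back to the filename when the title tag is missing.
    data_info = info.get("data")
    if data_info:
        media_title = data_info["filename"]

assert media_title == "track01.mp3"
```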
## Type of change
- [ ] Dependency upgrade
- [x] Bugfix (non-breaking change which fixes an issue)
- [ ] New integration (thank you!)
- [ ] New feature (which adds functionality to an existing integration)
- [ ] Breaking change (fix/feature causing existing functionality to break)
- [ ] Code quality improvements to existing code or addition of tests
## Additional information
- This PR fixes or closes issue: fixes #
- This PR is related to issue:
- Link to documentation pull request:
## Checklist
- [x] The code change is tested and works locally.
- [x] Local tests pass. **Your PR cannot be merged unless tests pass**
- [x] There is no commented out code in this PR.
- [x] I have followed the [development checklist][dev-checklist]
- [x] The code has been formatted using Black (`black --fast homeassistant tests`)
- [ ] Tests have been added to verify that the new code works.
If user exposed functionality or configuration variables are added/changed:
- [ ] Documentation added/updated for [www.home-assistant.io][docs-repository]
If the code communicates with devices, web services, or third-party tools:
- [ ] The [manifest file][manifest-docs] has all fields filled out correctly.
Updated and included derived files by running: `python3 -m script.hassfest`.
- [ ] New or updated dependencies have been added to `requirements_all.txt`.
Updated by running `python3 -m script.gen_requirements_all`.
- [ ] Untested files have been added to `.coveragerc`.
The integration reached or maintains the following [Integration Quality Scale][quality-scale]:
- [ ] No score or internal
- [ ] 🥈 Silver
- [ ] 🥇 Gold
- [ ] 🏆 Platinum
To help with the load of incoming pull requests:
- [ ] I have reviewed two other [open pull requests][prs] in this repository.
[prs]: https://github.com/home-assistant/core/pulls?q=is%3Aopen+is%3Apr+-author%3A%40me+-draft%3Atrue+-label%3Awaiting-for-upstream+sort%3Acreated-desc+review%3Anone
[dev-checklist]: https://developers.home-assistant.io/docs/en/development_checklist.html
[manifest-docs]: https://developers.home-assistant.io/docs/en/creating_integration_manifest.html
[quality-scale]: https://developers.home-assistant.io/docs/en/next/integration_quality_scale_index.html
[docs-repository]: https://github.com/home-assistant/home-assistant.io
|
https://api.github.com/repos/home-assistant/core/pulls/48438
|
2021-03-28T16:33:59Z
|
2021-03-28T17:39:37Z
|
2021-03-28T17:39:37Z
|
2021-03-29T19:04:08Z
| 207
|
home-assistant/core
| 39,146
|
make the function *call* more visible
|
diff --git a/README.md b/README.md
index 855e2ac..9049fdb 100644
--- a/README.md
+++ b/README.md
@@ -555,7 +555,7 @@ for x in range(7):
def some_func():
return x
funcs.append(some_func)
- results.append(some_func())
+ results.append(some_func()) # note the function call here
funcs_results = [func() for func in funcs]
```
|
It is easy to overlook the `()` in that appending operation, so I propose adding a short comment.
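A self-contained version of the snippet for reference, showing what the comment is guarding against:
```python
funcs, results = [], []
for x in range(3):
    def some_func():
        return x
    funcs.append(some_func)
    results.append(some_func())  # note the function call here

print(results)               # [0, 1, 2] -- evaluated while x still changes
print([f() for f in funcs])  # [2, 2, 2] -- closures all see the final x
```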
|
https://api.github.com/repos/satwikkansal/wtfpython/pulls/104
|
2018-12-05T10:23:24Z
|
2018-12-05T19:45:59Z
|
2018-12-05T19:45:59Z
|
2018-12-05T19:46:00Z
| 108
|
satwikkansal/wtfpython
| 25,869
|
Update pdf2word.py
|
diff --git a/ppstructure/pdf2word/README.md b/ppstructure/pdf2word/README.md
index 11bfec8ab7..93023ecde0 100644
--- a/ppstructure/pdf2word/README.md
+++ b/ppstructure/pdf2word/README.md
@@ -1,6 +1,6 @@
# PDF2Word
-PDF2Word is a PDF-to-Word conversion application built by PaddleOCR community developer [whjdark](https://github.com/whjdark) on top of the PP-StructureV2 layout analysis and recovery models. It ships as a directly installable exe, **so Windows users can run it offline without configuring an environment**
+PDF2Word is a PDF-to-Word conversion application built by PaddleOCR community developer [whjdark](https://github.com/whjdark) on top of the PP-StructureV2 layout analysis and recovery models. It ships as a directly installable exe, **so Windows users can run it without configuring an environment**
## 1. Usage
@@ -20,18 +20,20 @@ PDF2Word是PaddleOCR社区开发者 [whjdark](https://github.com/whjdark) 基于
> - The first installation can take 1-2 minutes depending on the device
> - Word files opened in Office and WPS render differently; use Office as the reference
> - The application is packaged with [QPT](https://github.com/QPT-Family/QPT); thanks to [GT-ZhangAcer](https://github.com/GT-ZhangAcer) for supporting the packaging process
+> - The application does not support pirated Windows systems. If the installation reports errors or missing dependencies, use the `paddleocr` whl package for the PDF2Word feature instead; see this [link](https://github.com/PaddlePaddle/PaddleOCR/blob/release/2.6/ppstructure/docs/quickstart.md) for details
### Launching the UI from a script
-For the first run, change the working directory to `/ppstructure/pdf2word`, then run the code
+For the first run, change the working directory to the PaddleOCR repository, then run the code
```
+cd ./ppstructure/pdf2word
python pdf2word.py
```
### PaddleOCR whl package
-For Linux and Mac users, or anyone who already has a Python environment, **installing the `paddleocr` whl package to use the layout recovery feature directly is recommended**; see this [link](https://github.com/PaddlePaddle/PaddleOCR/blob/release/2.6/ppstructure/docs/quickstart.md) for details
+For Linux and Mac users, or anyone who already has a Python environment, **installing the `paddleocr` whl package to use the PDF2Word feature directly is recommended**; see this [link](https://github.com/PaddlePaddle/PaddleOCR/blob/release/2.6/ppstructure/docs/quickstart.md) for details
<a name="download"></a>
@@ -42,9 +44,10 @@ python pdf2word.py
<div align="center">
<img src="https://user-images.githubusercontent.com/50011306/186369636-35f2008b-df5a-4784-b1f5-cebebcb2b7a5.jpg" width = "150" height = "150" />
</div>
+
## 3. Release notes
-v0.2: adds PDF parsing. Only the full build is provided, bundling all dependencies and model files to minimize installation failures
+v0.2: adds PDF parsing. Only the full build is provided, bundling all dependencies and model files to minimize installation failures. If installation still fails, the `paddleocr` whl package is recommended
v0.1: the initial release, offered in 3 variants:
diff --git a/ppstructure/pdf2word/pdf2word.py b/ppstructure/pdf2word/pdf2word.py
index a287fb248c..735fa5350a 100644
--- a/ppstructure/pdf2word/pdf2word.py
+++ b/ppstructure/pdf2word/pdf2word.py
@@ -213,8 +213,8 @@ def run(self):
class APP_Image2Doc(QWidget):
def __init__(self):
super().__init__()
- self.setFixedHeight(100)
- self.setFixedWidth(420)
+ # self.setFixedHeight(100)
+ # self.setFixedWidth(520)
# settings
self.imagePaths = []
@@ -481,7 +481,7 @@ def handleCBChangeSignal(self):
def handleThreadException(self, message):
self._thread.quit()
- QMessageBox.information(self, message)
+ QMessageBox.information(self, 'Error', message)
def main():
|
Update pdf2word.py
|
https://api.github.com/repos/PaddlePaddle/PaddleOCR/pulls/8053
|
2022-10-22T15:55:43Z
|
2022-10-22T15:55:50Z
|
2022-10-22T15:55:50Z
|
2022-10-22T15:55:51Z
| 1,049
|
PaddlePaddle/PaddleOCR
| 42,747
|
Fix system test for Memorystore memcached
|
diff --git a/tests/system/providers/google/cloud/cloud_memorystore/example_cloud_memorystore_memcached.py b/tests/system/providers/google/cloud/cloud_memorystore/example_cloud_memorystore_memcached.py
index 4a41a1449a908..df268320cc91c 100644
--- a/tests/system/providers/google/cloud/cloud_memorystore/example_cloud_memorystore_memcached.py
+++ b/tests/system/providers/google/cloud/cloud_memorystore/example_cloud_memorystore_memcached.py
@@ -46,7 +46,12 @@
LOCATION = "europe-north1"
# [START howto_operator_memcached_instance]
-MEMCACHED_INSTANCE = {"name": "", "node_count": 1, "node_config": {"cpu_count": 1, "memory_size_mb": 1024}}
+MEMCACHED_INSTANCE = {
+ "name": "",
+ "node_count": 1,
+ "node_config": {"cpu_count": 1, "memory_size_mb": 1024},
+ "zones": [LOCATION + "-a"],
+}
# [END howto_operator_memcached_instance]
|
Fixed the system test for Google Memorystore Memcached by adding a zone to the instance node specification.
|
https://api.github.com/repos/apache/airflow/pulls/26962
|
2022-10-10T08:28:17Z
|
2022-12-03T21:04:55Z
|
2022-12-03T21:04:54Z
|
2022-12-03T21:04:57Z
| 237
|
apache/airflow
| 14,229
|
Bug Fix: Unicode files
|
diff --git a/gpt_engineer/applications/cli/file_selector.py b/gpt_engineer/applications/cli/file_selector.py
index 8173e93d75..2bf2d9e49d 100644
--- a/gpt_engineer/applications/cli/file_selector.py
+++ b/gpt_engineer/applications/cli/file_selector.py
@@ -104,7 +104,9 @@ def ask_for_files(self) -> FilesDict:
# selected files contains paths that are relative to the project path
try:
# to open the file we need the path from the cwd
- with open(Path(self.project_path) / file_path, "r") as content:
+ with open(
+ Path(self.project_path) / file_path, "r", encoding="utf-8"
+ ) as content:
content_dict[str(file_path)] = content.read()
except FileNotFoundError:
print(f"Warning: File not found {file_path}")
|
Provide UTF-8 encoding to the file open command. Previously done in https://github.com/gpt-engineer-org/gpt-engineer/pull/801 but lost in merges.
Addresses https://github.com/gpt-engineer-org/gpt-engineer/issues/1032
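A small reproduction of the failure mode this guards against (file name and contents are illustrative):
```python
from pathlib import Path

path = Path("example_utf8.txt")  # hypothetical project file
path.write_text("naïve café 日本語", encoding="utf-8")

# Without an explicit encoding, open() falls back to the platform default
# (often cp1252 on Windows), which can raise UnicodeDecodeError on these
# bytes; passing encoding="utf-8" reads the file reliably everywhere.
with open(path, "r", encoding="utf-8") as f:
    content = f.read()
```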
|
https://api.github.com/repos/gpt-engineer-org/gpt-engineer/pulls/1044
|
2024-03-04T11:03:43Z
|
2024-03-04T14:11:45Z
|
2024-03-04T14:11:45Z
|
2024-03-04T14:11:45Z
| 203
|
gpt-engineer-org/gpt-engineer
| 33,044
|
Added more precise message levels for success and warning messages
|
diff --git a/django/contrib/admin/actions.py b/django/contrib/admin/actions.py
index d11ba3d1a86e0..a56c6a6168b6d 100644
--- a/django/contrib/admin/actions.py
+++ b/django/contrib/admin/actions.py
@@ -3,6 +3,7 @@
"""
from django.core.exceptions import PermissionDenied
+from django.contrib import messages
from django.contrib.admin import helpers
from django.contrib.admin.util import get_deleted_objects, model_ngettext
from django.db import router
@@ -47,7 +48,7 @@ def delete_selected(modeladmin, request, queryset):
queryset.delete()
modeladmin.message_user(request, _("Successfully deleted %(count)d %(items)s.") % {
"count": n, "items": model_ngettext(modeladmin.opts, n)
- })
+ }, messages.SUCCESS)
# Return None to display the change list page again.
return None
diff --git a/django/contrib/admin/options.py b/django/contrib/admin/options.py
index f7bfca455f879..a1ab3a95807e9 100644
--- a/django/contrib/admin/options.py
+++ b/django/contrib/admin/options.py
@@ -829,7 +829,7 @@ def response_add(self, request, obj, post_url_continue=None):
# the presence of keys in request.POST.
if "_continue" in request.POST:
msg = _('The %(name)s "%(obj)s" was added successfully. You may edit it again below.') % msg_dict
- self.message_user(request, msg)
+ self.message_user(request, msg, messages.SUCCESS)
if post_url_continue is None:
post_url_continue = reverse('admin:%s_%s_change' %
(opts.app_label, opts.model_name),
@@ -847,11 +847,11 @@ def response_add(self, request, obj, post_url_continue=None):
(escape(pk_value), escapejs(obj)))
elif "_addanother" in request.POST:
msg = _('The %(name)s "%(obj)s" was added successfully. You may add another %(name)s below.') % msg_dict
- self.message_user(request, msg)
+ self.message_user(request, msg, messages.SUCCESS)
return HttpResponseRedirect(request.path)
else:
msg = _('The %(name)s "%(obj)s" was added successfully.') % msg_dict
- self.message_user(request, msg)
+ self.message_user(request, msg, messages.SUCCESS)
return self.response_post_save_add(request, obj)
def response_change(self, request, obj):
@@ -865,27 +865,27 @@ def response_change(self, request, obj):
msg_dict = {'name': force_text(opts.verbose_name), 'obj': force_text(obj)}
if "_continue" in request.POST:
msg = _('The %(name)s "%(obj)s" was changed successfully. You may edit it again below.') % msg_dict
- self.message_user(request, msg)
+ self.message_user(request, msg, messages.SUCCESS)
if "_popup" in request.REQUEST:
return HttpResponseRedirect(request.path + "?_popup=1")
else:
return HttpResponseRedirect(request.path)
elif "_saveasnew" in request.POST:
msg = _('The %(name)s "%(obj)s" was added successfully. You may edit it again below.') % msg_dict
- self.message_user(request, msg)
+ self.message_user(request, msg, messages.SUCCESS)
return HttpResponseRedirect(reverse('admin:%s_%s_change' %
(opts.app_label, opts.model_name),
args=(pk_value,),
current_app=self.admin_site.name))
elif "_addanother" in request.POST:
msg = _('The %(name)s "%(obj)s" was changed successfully. You may add another %(name)s below.') % msg_dict
- self.message_user(request, msg)
+ self.message_user(request, msg, messages.SUCCESS)
return HttpResponseRedirect(reverse('admin:%s_%s_add' %
(opts.app_label, opts.model_name),
current_app=self.admin_site.name))
else:
msg = _('The %(name)s "%(obj)s" was changed successfully.') % msg_dict
- self.message_user(request, msg)
+ self.message_user(request, msg, messages.SUCCESS)
return self.response_post_save_change(request, obj)
def response_post_save_add(self, request, obj):
@@ -964,7 +964,7 @@ def response_action(self, request, queryset):
# Reminder that something needs to be selected or nothing will happen
msg = _("Items must be selected in order to perform "
"actions on them. No items have been changed.")
- self.message_user(request, msg)
+ self.message_user(request, msg, messages.WARNING)
return None
if not select_across:
@@ -982,7 +982,7 @@ def response_action(self, request, queryset):
return HttpResponseRedirect(request.get_full_path())
else:
msg = _("No action selected.")
- self.message_user(request, msg)
+ self.message_user(request, msg, messages.WARNING)
return None
@csrf_protect_m
@@ -1224,7 +1224,7 @@ def changelist_view(self, request, extra_context=None):
else:
msg = _("Items must be selected in order to perform "
"actions on them. No items have been changed.")
- self.message_user(request, msg)
+ self.message_user(request, msg, messages.WARNING)
action_failed = True
# Actions with confirmation
@@ -1269,7 +1269,7 @@ def changelist_view(self, request, extra_context=None):
changecount) % {'count': changecount,
'name': name,
'obj': force_text(obj)}
- self.message_user(request, msg)
+ self.message_user(request, msg, messages.SUCCESS)
return HttpResponseRedirect(request.get_full_path())
@@ -1346,7 +1346,11 @@ def delete_view(self, request, object_id, extra_context=None):
self.log_deletion(request, obj, obj_display)
self.delete_model(request, obj)
- self.message_user(request, _('The %(name)s "%(obj)s" was deleted successfully.') % {'name': force_text(opts.verbose_name), 'obj': force_text(obj_display)})
+ self.message_user(request, _(
+ 'The %(name)s "%(obj)s" was deleted successfully.') % {
+ 'name': force_text(opts.verbose_name),
+ 'obj': force_text(obj_display)},
+ messages.SUCCESS)
if not self.has_change_permission(request, None):
return HttpResponseRedirect(reverse('admin:index',
|
Related ticket: https://code.djangoproject.com/ticket/20111
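For illustration, the same pattern in a hypothetical `ModelAdmin` action (the `Book` model and `published` field are made up):
```python
from django.contrib import admin, messages

class BookAdmin(admin.ModelAdmin):  # hypothetical admin for a Book model
    actions = ["publish"]

    def publish(self, request, queryset):
        updated = queryset.update(published=True)
        if updated:
            # Rendered with the green "success" styling instead of plain INFO.
            self.message_user(
                request, "Published %d book(s)." % updated, messages.SUCCESS)
        else:
            self.message_user(request, "No books were updated.", messages.WARNING)
```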
|
https://api.github.com/repos/django/django/pulls/938
|
2013-03-22T09:01:37Z
|
2013-04-08T11:56:57Z
|
2013-04-08T11:56:57Z
|
2014-06-29T21:43:15Z
| 1,433
|
django/django
| 51,603
|
fixes #4083 (#4148)
|
diff --git a/.travis.yml b/.travis.yml
index b35f0ebbf2e..ca06f07d04e 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -94,11 +94,13 @@ matrix:
# Only build pushes to the master branch, PRs, and branches beginning with
-# `test-`. This reduces the number of simultaneous Travis runs, which speeds
-# turnaround time on review since there is a cap of 5 simultaneous runs.
+# `test-` or of the form `digit(s).digit(s).x`. This reduces the number of
+# simultaneous Travis runs, which speeds turnaround time on review since there
+# is a cap on the number of simultaneous runs.
branches:
only:
- master
+ - /^\d+\.\d+\.x$/
- /^test-.*$/
# container-based infrastructure
|
(cherry picked from commit dd8253b3d69329b6f23662eab4499a17dc4da9e7)
|
https://api.github.com/repos/certbot/certbot/pulls/4156
|
2017-02-02T02:15:52Z
|
2017-02-02T03:01:12Z
|
2017-02-02T03:01:12Z
|
2017-02-02T03:01:15Z
| 201
|
certbot/certbot
| 3,359
|
v2.31.0
|
diff --git a/HISTORY.md b/HISTORY.md
index aaa05e2832..bbe6dd425b 100644
--- a/HISTORY.md
+++ b/HISTORY.md
@@ -6,6 +6,33 @@ dev
- \[Short description of non-trivial change.\]
+2.31.0 (2023-05-22)
+-------------------
+
+**Security**
+- Versions of Requests between v2.3.0 and v2.30.0 are vulnerable to potential
+ forwarding of `Proxy-Authorization` headers to destination servers when
+ following HTTPS redirects.
+
+ When proxies are defined with user info (https://user:pass@proxy:8080), Requests
+ will construct a `Proxy-Authorization` header that is attached to the request to
+ authenticate with the proxy.
+
+ In cases where Requests receives a redirect response, it previously reattached
+ the `Proxy-Authorization` header incorrectly, resulting in the value being
+ sent through the tunneled connection to the destination server. Users who rely on
+ defining their proxy credentials in the URL are *strongly* encouraged to upgrade
+ to Requests 2.31.0+ to prevent unintentional leakage and rotate their proxy
+ credentials once the change has been fully deployed.
+
+ Users who do not use a proxy or do not supply their proxy credentials through
+ the user information portion of their proxy URL are not subject to this
+ vulnerability.
+
+ Full details can be read in our [Github Security Advisory](https://github.com/psf/requests/security/advisories/GHSA-j8r2-6x86-q33q)
+ and [CVE-2023-32681](https://nvd.nist.gov/vuln/detail/CVE-2023-32681).
+
+
2.30.0 (2023-05-03)
-------------------
diff --git a/requests/__version__.py b/requests/__version__.py
index e0f3373d0d..5063c3f8ee 100644
--- a/requests/__version__.py
+++ b/requests/__version__.py
@@ -5,8 +5,8 @@
__title__ = "requests"
__description__ = "Python HTTP for Humans."
__url__ = "https://requests.readthedocs.io"
-__version__ = "2.30.0"
-__build__ = 0x023000
+__version__ = "2.31.0"
+__build__ = 0x023100
__author__ = "Kenneth Reitz"
__author_email__ = "[email protected]"
__license__ = "Apache 2.0"
|
2.31.0 (2023-05-22)
-------------------
**Security**
- Versions of Requests between v2.3.0 and v2.30.0 are vulnerable to potential
forwarding of `Proxy-Authorization` headers to destination servers when
following HTTPS redirects.
When proxies are defined with user info (https://user:pass@proxy:8080), Requests
will construct a `Proxy-Authorization` header that is attached to the request to
authenticate with the proxy.
In cases where Requests receives a redirect response, it previously reattached
the `Proxy-Authorization` header incorrectly, resulting in the value being
sent through the tunneled connection to the destination server. Users who rely on
defining their proxy credentials in the URL are *strongly* encouraged to upgrade
to Requests 2.31.0+ to prevent unintentional leakage and rotate their proxy
credentials once the change has been fully deployed.
Users who do not use a proxy or do not supply their proxy credentials through
the user information portion of their proxy URL are not subject to this
vulnerability.
Full details can be read in our [Github Security Advisory](https://github.com/psf/requests/security/advisories/GHSA-j8r2-6x86-q33q)
and [CVE-2023-32681](https://nvd.nist.gov/vuln/detail/CVE-2023-32681).
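The affected configuration, sketched with hypothetical proxy credentials:
```python
import requests

# Credentials supplied via the URL's userinfo portion (hypothetical values);
# requests derives a Proxy-Authorization header from them for the proxy.
proxies = {"https": "https://user:pass@proxy.example:8080"}

# On 2.3.0 <= requests < 2.31.0, an HTTPS redirect could re-attach that
# header to the request tunneled through to the destination server.
response = requests.get("https://example.org", proxies=proxies)
```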
|
https://api.github.com/repos/psf/requests/pulls/6457
|
2023-05-22T15:10:03Z
|
2023-05-22T15:10:33Z
|
2023-05-22T15:10:33Z
|
2023-07-21T19:14:11Z
| 600
|
psf/requests
| 32,137
|
CLN: remove checks for inferred_dtype==unicode
|
diff --git a/pandas/_testing.py b/pandas/_testing.py
index 1fdc5d478aaf6..018551224c582 100644
--- a/pandas/_testing.py
+++ b/pandas/_testing.py
@@ -613,8 +613,8 @@ def _check_types(l, r, obj="Index"):
assert_attr_equal("dtype", l, r, obj=obj)
# allow string-like to have different inferred_types
- if l.inferred_type in ("string", "unicode"):
- assert r.inferred_type in ("string", "unicode")
+ if l.inferred_type in ("string"):
+ assert r.inferred_type in ("string")
else:
assert_attr_equal("inferred_type", l, r, obj=obj)
diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py
index 39e8e9008a844..59256f6924b79 100644
--- a/pandas/core/algorithms.py
+++ b/pandas/core/algorithms.py
@@ -201,7 +201,7 @@ def _ensure_arraylike(values):
"""
if not is_array_like(values):
inferred = lib.infer_dtype(values, skipna=False)
- if inferred in ["mixed", "string", "unicode"]:
+ if inferred in ["mixed", "string"]:
if isinstance(values, tuple):
values = list(values)
values = construct_1d_object_array_from_listlike(values)
diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py
index 1dbdb8dbba48b..2a09bd7e54a8e 100644
--- a/pandas/core/dtypes/cast.py
+++ b/pandas/core/dtypes/cast.py
@@ -670,7 +670,7 @@ def infer_dtype_from_array(arr, pandas_dtype: bool = False):
# don't force numpy coerce with nan's
inferred = lib.infer_dtype(arr, skipna=False)
- if inferred in ["string", "bytes", "unicode", "mixed", "mixed-integer"]:
+ if inferred in ["string", "bytes", "mixed", "mixed-integer"]:
return (np.object_, arr)
arr = np.asarray(arr)
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index f2f53f564da76..47daaa4958411 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -910,7 +910,7 @@ def _format_data(self, name=None):
# do we want to justify (only do so for non-objects)
is_justify = not (
- self.inferred_type in ("string", "unicode")
+ self.inferred_type in ("string")
or (
self.inferred_type == "categorical" and is_object_dtype(self.categories)
)
@@ -2860,7 +2860,6 @@ def _convert_scalar_indexer(self, key, kind=None):
"mixed-integer-float",
"integer-na",
"string",
- "unicode",
"mixed",
]:
self._invalid_indexer("label", key)
diff --git a/pandas/io/parquet.py b/pandas/io/parquet.py
index 4be62b886f076..98f2eb3929b59 100644
--- a/pandas/io/parquet.py
+++ b/pandas/io/parquet.py
@@ -51,7 +51,7 @@ def validate_dataframe(df: DataFrame):
raise ValueError("to_parquet only supports IO with DataFrames")
# must have value column names (strings only)
- if df.columns.inferred_type not in {"string", "unicode", "empty"}:
+ if df.columns.inferred_type not in {"string", "empty"}:
raise ValueError("parquet must have string column names")
# index level names must be strings
diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py
index 41db6ed0ef503..84a8b5b2a94fe 100755
--- a/pandas/io/parsers.py
+++ b/pandas/io/parsers.py
@@ -1304,7 +1304,7 @@ def _validate_usecols_arg(usecols):
usecols_dtype = lib.infer_dtype(usecols, skipna=False)
- if usecols_dtype not in ("empty", "integer", "string", "unicode"):
+ if usecols_dtype not in ("empty", "integer", "string"):
raise ValueError(msg)
usecols = set(usecols)
diff --git a/pandas/io/stata.py b/pandas/io/stata.py
index b8e04ad55dde1..cee5f3d280991 100644
--- a/pandas/io/stata.py
+++ b/pandas/io/stata.py
@@ -2334,7 +2334,7 @@ def _encode_strings(self):
dtype = column.dtype
if dtype.type == np.object_:
inferred_dtype = infer_dtype(column, skipna=True)
- if not ((inferred_dtype in ("string", "unicode")) or len(column) == 0):
+ if not ((inferred_dtype in ("string")) or len(column) == 0):
col = column.name
raise ValueError(
f"""\
|
https://api.github.com/repos/pandas-dev/pandas/pulls/31020
|
2020-01-14T22:14:59Z
|
2020-01-15T02:58:07Z
|
2020-01-15T02:58:07Z
|
2020-01-15T03:22:54Z
| 1,182
|
pandas-dev/pandas
| 45,123
|
|
gdax: parseFloat => this.safeFloat
|
diff --git a/js/gdax.js b/js/gdax.js
index 9994ebe94fcb..a5ebb61ca6c9 100644
--- a/js/gdax.js
+++ b/js/gdax.js
@@ -165,13 +165,13 @@ module.exports = class gdax extends Exchange {
'precision': precision,
'limits': {
'amount': {
- 'min': parseFloat (market['base_min_size']),
- 'max': parseFloat (market['base_max_size']),
+ 'min': this.safeFloat (market, 'base_min_size'),
+ 'max': this.safeFloat (market, 'base_max_size'),
},
'price': priceLimits,
'cost': {
- 'min': parseFloat (market['min_market_funds']),
- 'max': parseFloat (market['max_market_funds']),
+ 'min': this.safeFloat (market, 'min_market_funds'),
+ 'max': this.safeFloat (market, 'max_market_funds'),
},
},
'taker': taker,
@@ -190,9 +190,9 @@ module.exports = class gdax extends Exchange {
let balance = balances[b];
let currency = balance['currency'];
let account = {
- 'free': parseFloat (balance['available']),
- 'used': parseFloat (balance['hold']),
- 'total': parseFloat (balance['balance']),
+ 'free': this.safeFloat (balance, 'available'),
+ 'used': this.safeFloat (balance, 'hold'),
+ 'total': this.safeFloat (balance, 'balance'),
};
result[currency] = account;
}
@@ -219,9 +219,9 @@ module.exports = class gdax extends Exchange {
let bid = undefined;
let ask = undefined;
if ('bid' in ticker)
- bid = parseFloat (ticker['bid']);
+ bid = this.safeFloat (ticker, 'bid');
if ('ask' in ticker)
- ask = parseFloat (ticker['ask']);
+ ask = this.safeFloat (ticker, 'ask');
return {
'symbol': symbol,
'timestamp': timestamp,
@@ -238,7 +238,7 @@ module.exports = class gdax extends Exchange {
'change': undefined,
'percentage': undefined,
'average': undefined,
- 'baseVolume': parseFloat (ticker['volume']),
+ 'baseVolume': this.safeFloat (ticker, 'volume'),
'quoteVolume': undefined,
'info': ticker,
};
@@ -271,7 +271,7 @@ module.exports = class gdax extends Exchange {
if (market)
feeCurrency = market['quote'];
fee = {
- 'cost': parseFloat (trade['fill_fees']),
+ 'cost': this.safeFloat (trade, 'fill_fees'),
'currency': feeCurrency,
'rate': undefined,
};
@@ -290,8 +290,8 @@ module.exports = class gdax extends Exchange {
'symbol': symbol,
'type': type,
'side': side,
- 'price': parseFloat (trade['price']),
- 'amount': parseFloat (trade['size']),
+ 'price': this.safeFloat (trade, 'price'),
+ 'amount': this.safeFloat (trade, 'size'),
'fee': fee,
};
}
|
More on PHP notices
|
https://api.github.com/repos/ccxt/ccxt/pulls/1590
|
2018-01-29T16:53:36Z
|
2018-01-29T17:30:24Z
|
2018-01-29T17:30:24Z
|
2018-01-29T17:30:24Z
| 725
|
ccxt/ccxt
| 13,490
|
Add a comment about `tmpname + '^'`.
|
diff --git a/tests/test_downloader_handlers.py b/tests/test_downloader_handlers.py
index c69bd3da17d..8835267fe3c 100644
--- a/tests/test_downloader_handlers.py
+++ b/tests/test_downloader_handlers.py
@@ -109,6 +109,7 @@ class FileTestCase(unittest.TestCase):
def setUp(self):
self.tmpname = self.mktemp()
+ # add a special char to check that they are handled correctly
with open(self.tmpname + '^', 'w') as f:
f.write('0123456789')
handler = create_instance(FileDownloadHandler, None, get_crawler())
|
I had no idea why this is needed or how it should be used (https://github.com/scrapy/scrapy/pull/5285/files#r844017508, https://github.com/scrapy/scrapy/pull/5682/files/726680c7125ab3a6622b12e25d45dbfedc5a39b3#r1027288466), so I tracked it down to 3012030b2faea43b00cb8ae233321c1f9eb8a579 and added this comment.
|
https://api.github.com/repos/scrapy/scrapy/pulls/5736
|
2022-11-25T12:48:05Z
|
2022-11-25T13:19:51Z
|
2022-11-25T13:19:51Z
|
2022-11-25T13:20:44Z
| 141
|
scrapy/scrapy
| 34,907
|
Update CHANGES.md for 21.7b0 release
|
diff --git a/CHANGES.md b/CHANGES.md
index b3224e1c5b3..30f809d5057 100644
--- a/CHANGES.md
+++ b/CHANGES.md
@@ -1,6 +1,6 @@
# Change Log
-## Unreleased
+## 21.7b0
### _Black_
@@ -9,6 +9,21 @@
- Add primer support and test for code piped into black via STDIN (#2315)
- Fix internal error when `FORCE_OPTIONAL_PARENTHESES` feature is enabled (#2332)
- Accept empty stdin (#2346)
+- Provide a more useful error when parsing fails during AST safety checks (#2304)
+
+### Docker
+
+- Add new `latest_release` tag automation to follow latest black release on docker
+ images (#2374)
+
+### Integrations
+
+- The vim plugin now searches upwards from the directory containing the current buffer
+ instead of the current working directory for pyproject.toml. (#1871)
+- The vim plugin now reads the correct string normalization option in pyproject.toml
+ (#1869)
+- The vim plugin no longer crashes Black when there's boolean values in pyproject.toml
+ (#1869)
## 21.6b0
@@ -20,7 +35,6 @@
- Fixed option usage when using the `--code` flag (#2259)
- Do not call `uvloop.install()` when _Black_ is used as a library (#2303)
- Added `--required-version` option to require a specific version to be running (#2300)
-- Provide a more useful error when parsing fails during AST safety checks (#2304)
- Fix incorrect custom breakpoint indices when string group contains fake f-strings
(#2311)
- Fix regression where `R` prefixes would be lowercased for docstrings (#2285)
@@ -29,15 +43,8 @@
### Integrations
-- The vim plugin now searches upwards from the directory containing the current buffer
- instead of the current working directory for pyproject.toml. (#1871)
-
-### Integrations
-
-- The vim plugin now reads the correct string normalization option in pyproject.toml
- (#1869)
-- The vim plugin no longer crashes Black when there's boolean values in pyproject.toml
- (#1869)
+- The official Black action now supports choosing what version to use, and supports the
+ major 3 OSes. (#1940)
## 21.5b2
@@ -58,11 +65,6 @@
- Add a lower bound for the `aiohttp-cors` dependency. Only 0.4.0 or higher is
supported. (#2231)
-### Integrations
-
-- The official Black action now supports choosing what version to use, and supports the
- major 3 OSes. (#1940)
-
### Packaging
- Release self-contained x86_64 MacOS binaries as part of the GitHub release pipeline
|
- Here we go ...
|
https://api.github.com/repos/psf/black/pulls/2376
|
2021-07-16T14:32:22Z
|
2021-07-16T14:42:47Z
|
2021-07-16T14:42:47Z
|
2021-07-16T14:42:50Z
| 651
|
psf/black
| 24,109
|
resolve line-too-long in premade_models
|
diff --git a/keras/premade_models/linear.py b/keras/premade_models/linear.py
index 3d11430f8de..b9f54fac79e 100644
--- a/keras/premade_models/linear.py
+++ b/keras/premade_models/linear.py
@@ -80,14 +80,15 @@ def __init__(
units: Positive integer, output dimension without the batch size.
activation: Activation function to use.
If you don't specify anything, no activation is applied.
- use_bias: whether to calculate the bias/intercept for this model. If set
- to False, no bias/intercept will be used in calculations, e.g., the data
- is already centered.
+ use_bias: whether to calculate the bias/intercept for this model. If
+ set to False, no bias/intercept will be used in calculations, e.g.,
+ the data is already centered.
kernel_initializer: Initializer for the `kernel` weights matrices.
bias_initializer: Initializer for the bias vector.
kernel_regularizer: regularizer for kernel vectors.
bias_regularizer: regularizer for bias vector.
- **kwargs: The keyword arguments that are passed on to BaseLayer.__init__.
+ **kwargs: The keyword arguments that are passed on to
+ BaseLayer.__init__.
"""
self.units = units
diff --git a/keras/premade_models/wide_deep.py b/keras/premade_models/wide_deep.py
index 50989255629..f474dfe4765 100644
--- a/keras/premade_models/wide_deep.py
+++ b/keras/premade_models/wide_deep.py
@@ -44,7 +44,8 @@ class WideDeepModel(keras_training.Model):
dnn_model = keras.Sequential([keras.layers.Dense(units=64),
keras.layers.Dense(units=1)])
combined_model = WideDeepModel(linear_model, dnn_model)
- combined_model.compile(optimizer=['sgd', 'adam'], loss='mse', metrics=['mse'])
+ combined_model.compile(optimizer=['sgd', 'adam'],
+ loss='mse', metrics=['mse'])
# define dnn_inputs and linear_inputs as separate numpy arrays or
# a single numpy array if dnn_inputs is same as linear_inputs.
combined_model.fit([linear_inputs, dnn_inputs], y, epochs)
@@ -66,7 +67,8 @@ class WideDeepModel(keras_training.Model):
dnn_model.compile('rmsprop', 'mse')
dnn_model.fit(dnn_inputs, y, epochs)
combined_model = WideDeepModel(linear_model, dnn_model)
- combined_model.compile(optimizer=['sgd', 'adam'], loss='mse', metrics=['mse'])
+ combined_model.compile(optimizer=['sgd', 'adam'],
+ loss='mse', metrics=['mse'])
combined_model.fit([linear_inputs, dnn_inputs], y, epochs)
```
@@ -76,14 +78,14 @@ def __init__(self, linear_model, dnn_model, activation=None, **kwargs):
"""Create a Wide & Deep Model.
Args:
- linear_model: a premade LinearModel, its output must match the output of
- the dnn model.
+ linear_model: a premade LinearModel, its output must match the output
+ of the dnn model.
dnn_model: a `tf.keras.Model`, its output must match the output of the
linear model.
activation: Activation function. Set it to None to maintain a linear
activation.
- **kwargs: The keyword arguments that are passed on to BaseLayer.__init__.
- Allowed keyword arguments include `name`.
+ **kwargs: The keyword arguments that are passed on to
+ BaseLayer.__init__. Allowed keyword arguments include `name`.
"""
super().__init__(**kwargs)
base_layer.keras_premade_model_gauge.get_cell("WideDeep").set(True)
@@ -171,12 +173,12 @@ def _make_train_function(self):
# Training updates
updates = []
linear_updates = linear_optimizer.get_updates(
- params=self.linear_model.trainable_weights, # pylint: disable=protected-access
+ params=self.linear_model.trainable_weights,
loss=self.total_loss,
)
updates += linear_updates
dnn_updates = dnn_optimizer.get_updates(
- params=self.dnn_model.trainable_weights, # pylint: disable=protected-access
+ params=self.dnn_model.trainable_weights,
loss=self.total_loss,
)
updates += dnn_updates
|
https://api.github.com/repos/keras-team/keras/pulls/16608
|
2022-05-27T00:16:27Z
|
2022-05-27T06:48:10Z
|
2022-05-27T06:48:10Z
|
2022-06-07T18:15:37Z
| 1,004
|
keras-team/keras
| 47,830
|
|
Improved FLOPS computation
|
diff --git a/models/yolo.py b/models/yolo.py
index 0080056a444..2ef1574a8b7 100644
--- a/models/yolo.py
+++ b/models/yolo.py
@@ -192,8 +192,8 @@ def autoshape(self): # add autoShape module
copy_attr(m, self, include=('yaml', 'nc', 'hyp', 'names', 'stride'), exclude=()) # copy attributes
return m
- def info(self, verbose=False): # print model information
- model_info(self, verbose)
+ def info(self, verbose=False, img_size=640): # print model information
+ model_info(self, verbose, img_size)
def parse_model(d, ch): # model_dict, input_channels(3)
diff --git a/utils/torch_utils.py b/utils/torch_utils.py
index cdd21b519d6..e5ef2607a8c 100644
--- a/utils/torch_utils.py
+++ b/utils/torch_utils.py
@@ -139,8 +139,8 @@ def fuse_conv_and_bn(conv, bn):
return fusedconv
-def model_info(model, verbose=False):
- # Plots a line-by-line description of a PyTorch model
+def model_info(model, verbose=False, img_size=640):
+ # Model information. img_size may be int or list, i.e. img_size=640 or img_size=[640, 320]
n_p = sum(x.numel() for x in model.parameters()) # number parameters
n_g = sum(x.numel() for x in model.parameters() if x.requires_grad) # number gradients
if verbose:
@@ -152,8 +152,10 @@ def model_info(model, verbose=False):
try: # FLOPS
from thop import profile
- flops = profile(deepcopy(model), inputs=(torch.zeros(1, 3, 64, 64),), verbose=False)[0] / 1E9 * 2
- fs = ', %.1f GFLOPS' % (flops * 100) # 640x640 FLOPS
+ stride = int(model.stride.max())
+ flops = profile(deepcopy(model), inputs=(torch.zeros(1, 3, stride, stride),), verbose=False)[0] / 1E9 * 2
+ img_size = img_size if isinstance(img_size, list) else [img_size, img_size] # expand if int/float
+ fs = ', %.1f GFLOPS' % (flops * img_size[0] / stride * img_size[1] / stride) # 640x640 FLOPS
except ImportError:
fs = ''
|
This PR improves FLOPS computation and allows passing an `img_size` argument for the first time (previous FLOPS computations were fixed at 640x640 img-size).
To view model FLOPS, `pip install thop` and then run any normal command:
<img width="1259" alt="Screenshot 2020-11-14 at 14 25 24" src="https://user-images.githubusercontent.com/26833433/99147975-509e7580-2685-11eb-924d-04e3d9d797d0.png">
Or call the model.info() method directly with an optional `img_size` argument:
```python
import torch
model = torch.hub.load('ultralytics/yolov5', 'yolov5s', pretrained=True)
model.info()
model.info(img_size=320)
model.info(img_size=[320, 640])
# OUTPUT
# Model Summary: 232 layers, 7459581 parameters, 0 gradients, 17.5 GFLOPS
# Model Summary: 232 layers, 7459581 parameters, 0 gradients, 4.4 GFLOPS
# Model Summary: 232 layers, 7459581 parameters, 0 gradients, 8.7 GFLOPS
```
## 🛠️ PR Summary
<sub>Made with ❤️ by [Ultralytics Actions](https://github.com/ultralytics/actions)<sub>
### 🌟 Summary
Updated YOLOv5 model and utility functions to provide more detailed information including FLOPS based on varying image sizes.
### 📊 Key Changes
- The `info` method in `models/yolo.py` now takes an additional `img_size` argument.
- The `model_info` function in `utils/torch_utils.py` has been updated to:
- Accept `img_size` as an argument, which can be an integer or a list.
- Use `img_size` to calculate FLOPS correctly for different input image dimensions.
- Compute FLOPS based on provided stride and image dimensions for greater accuracy.
### 🎯 Purpose & Impact
- **Purpose**: To offer users a more accurate calculation of FLOPS that reflects the model's complexity based on different input resolutions, rather than a fixed size.
- **Impact**: Users now can get precise model information pertinent to their specific use case, leading to better understanding of the model's computational demand for different image sizes. This could be particularly useful for optimizing performance or ensuring compatibility with hardware constraints. 🚀💡
|
https://api.github.com/repos/ultralytics/yolov5/pulls/1398
|
2020-11-14T13:26:20Z
|
2020-11-14T13:39:47Z
|
2020-11-14T13:39:46Z
|
2024-01-19T20:28:11Z
| 613
|
ultralytics/yolov5
| 25,167
|
build(deps): bump pdf2docx from 0.5.6 to 0.5.7
|
diff --git a/requirements_with_versions.txt b/requirements_with_versions.txt
index 51457558d1..6d118214dc 100644
--- a/requirements_with_versions.txt
+++ b/requirements_with_versions.txt
@@ -8,7 +8,7 @@ simplegui==0.1.1
utils==1.0.2
Tubes==0.2.1
modules==1.0.0
-pdf2docx==0.5.6
+pdf2docx==0.5.7
pong==1.5
beautifulsoup4==4.12.2
dictator==0.3.1
|
Bumps [pdf2docx]() from 0.5.6 to 0.5.7.
[](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores)
Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`.
---
<details>
<summary>Dependabot commands and options</summary>
<br />
You can trigger Dependabot actions by commenting on this PR:
- `@dependabot rebase` will rebase this PR
- `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it
- `@dependabot merge` will merge this PR after your CI passes on it
- `@dependabot squash and merge` will squash and merge this PR after your CI passes on it
- `@dependabot cancel merge` will cancel a previously requested merge and block automerging
- `@dependabot reopen` will reopen this PR if it is closed
- `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually
- `@dependabot show <dependency name> ignore conditions` will show all of the ignore conditions of the specified dependency
- `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself)
- `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself)
- `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
</details>
|
https://api.github.com/repos/geekcomputers/Python/pulls/2088
|
2024-01-10T18:11:13Z
|
2024-01-10T18:25:13Z
|
2024-01-10T18:25:13Z
|
2024-01-10T18:25:20Z
| 140
|
geekcomputers/Python
| 31,027
|
Allow --mount-dependencies for python modules which are not in a separate directory
|
diff --git a/localstack/dev/run/configurators.py b/localstack/dev/run/configurators.py
index 488a9bf537113..7f4a335c20315 100644
--- a/localstack/dev/run/configurators.py
+++ b/localstack/dev/run/configurators.py
@@ -294,8 +294,8 @@ def __call__(self, cfg: ContainerConfiguration):
# find dependencies from the host
for dep_path in self.host_paths.venv_dir.glob("lib/python3.*/site-packages/*"):
- # filter out everything that heuristically cannot be a source directory
- if not dep_path.is_dir():
+ # filter out everything that heuristically cannot be a source path
+ if not self._can_be_source_path(dep_path):
continue
if dep_path.name.endswith(".dist-info"):
continue
@@ -316,6 +316,9 @@ def __call__(self, cfg: ContainerConfiguration):
cfg.volumes.append(VolumeBind(str(dep_path), target_path))
+ def _can_be_source_path(self, path: Path) -> bool:
+ return path.is_dir() or (path.name.endswith(".py") and not path.name.startswith("__"))
+
def _has_mount(self, volumes: VolumeMappings, target_path: str) -> bool:
return True if volumes.find_target_mapping(target_path) else False
|
## Motivation
Currently, in the dev script, we only mount python modules which are in their own subfolder.
However, the assumption that all dependencies are located in a subfolder does not hold:
For example, `nest-asyncio` is only one file, and is not located in a separate folder in `site-packages`. (This is a dependency for serverless tests in -ext, and without it, test collection fails).
We should therefore also mount python files in `site-packages`, unless they start with `__` (sometimes linked editable installs have a python finder file like this: `__editable___localstack_core_2_2_1_dev0_finder.py`).
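A minimal, standalone sketch of that filtering rule (the helper mirrors the `_can_be_source_path` method added in the diff; the example file names are illustrative):
```python
from pathlib import Path

def can_be_source_path(path: Path) -> bool:
    # Directories (regular packages) always qualify; single-file modules
    # qualify unless they are dunder-prefixed shims such as the
    # __editable___..._finder.py files left behind by editable installs.
    return path.is_dir() or (path.name.endswith(".py") and not path.name.startswith("__"))

print(can_be_source_path(Path("nest_asyncio.py")))                 # True
print(can_be_source_path(Path("__editable___example_finder.py")))  # False
```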
## Changes
* Mount files from `site-packages` as dependencies as well, unless they start with a dunder
|
https://api.github.com/repos/localstack/localstack/pulls/9289
|
2023-10-04T14:49:40Z
|
2023-10-04T19:21:22Z
|
2023-10-04T19:21:22Z
|
2023-10-04T19:21:28Z
| 301
|
localstack/localstack
| 28,835
|
Work around Basic Authentication for challenge dir in Apache
|
diff --git a/certbot-apache/certbot_apache/http_01.py b/certbot-apache/certbot_apache/http_01.py
index e463f38804b..cce93a64695 100644
--- a/certbot-apache/certbot_apache/http_01.py
+++ b/certbot-apache/certbot_apache/http_01.py
@@ -11,30 +11,43 @@
class ApacheHttp01(common.TLSSNI01):
"""Class that performs HTTP-01 challenges within the Apache configurator."""
- CONFIG_TEMPLATE22 = """\
+ CONFIG_TEMPLATE22_PRE = """\
RewriteEngine on
RewriteRule ^/\\.well-known/acme-challenge/([A-Za-z0-9-_=]+)$ {0}/$1 [L]
+ """
+ CONFIG_TEMPLATE22_POST = """\
<Directory {0}>
Order Allow,Deny
Allow from all
</Directory>
+ <Location /.well-known/acme-challenge>
+ Order Allow,Deny
+ Allow from all
+ </Location>
"""
- CONFIG_TEMPLATE24 = """\
+ CONFIG_TEMPLATE24_PRE = """\
RewriteEngine on
RewriteRule ^/\\.well-known/acme-challenge/([A-Za-z0-9-_=]+)$ {0}/$1 [END]
-
+ """
+ CONFIG_TEMPLATE24_POST = """\
<Directory {0}>
Require all granted
</Directory>
+ <Location /.well-known/acme-challenge>
+ Require all granted
+ </Location>
"""
def __init__(self, *args, **kwargs):
super(ApacheHttp01, self).__init__(*args, **kwargs)
- self.challenge_conf = os.path.join(
+ self.challenge_conf_pre = os.path.join(
+ self.configurator.conf("challenge-location"),
+ "le_http_01_challenge_pre.conf")
+ self.challenge_conf_post = os.path.join(
self.configurator.conf("challenge-location"),
- "le_http_01_challenge.conf")
+ "le_http_01_challenge_post.conf")
self.challenge_dir = os.path.join(
self.configurator.config.work_dir,
"http_challenges")
@@ -79,24 +92,32 @@ def _mod_config(self):
chall.domain, filter_defaults=False,
port=str(self.configurator.config.http01_port))
if vh:
- self._set_up_include_directive(vh)
+ self._set_up_include_directives(vh)
else:
for vh in self._relevant_vhosts():
- self._set_up_include_directive(vh)
+ self._set_up_include_directives(vh)
self.configurator.reverter.register_file_creation(
- True, self.challenge_conf)
+ True, self.challenge_conf_pre)
+ self.configurator.reverter.register_file_creation(
+ True, self.challenge_conf_post)
if self.configurator.version < (2, 4):
- config_template = self.CONFIG_TEMPLATE22
+ config_template_pre = self.CONFIG_TEMPLATE22_PRE
+ config_template_post = self.CONFIG_TEMPLATE22_POST
else:
- config_template = self.CONFIG_TEMPLATE24
+ config_template_pre = self.CONFIG_TEMPLATE24_PRE
+ config_template_post = self.CONFIG_TEMPLATE24_POST
- config_text = config_template.format(self.challenge_dir)
+ config_text_pre = config_template_pre.format(self.challenge_dir)
+ config_text_post = config_template_post.format(self.challenge_dir)
- logger.debug("writing a config file with text:\n %s", config_text)
- with open(self.challenge_conf, "w") as new_conf:
- new_conf.write(config_text)
+ logger.debug("writing a pre config file with text:\n %s", config_text_pre)
+ with open(self.challenge_conf_pre, "w") as new_conf:
+ new_conf.write(config_text_pre)
+ logger.debug("writing a post config file with text:\n %s", config_text_post)
+ with open(self.challenge_conf_post, "w") as new_conf:
+ new_conf.write(config_text_post)
def _relevant_vhosts(self):
http01_port = str(self.configurator.config.http01_port)
@@ -137,14 +158,17 @@ def _set_up_challenge(self, achall):
return response
- def _set_up_include_directive(self, vhost):
- """Includes override configuration to the beginning of VirtualHost.
- Note that this include isn't added to Augeas search tree"""
+ def _set_up_include_directives(self, vhost):
+ """Includes override configuration to the beginning and to the end of
+ VirtualHost. Note that this include isn't added to Augeas search tree"""
if vhost not in self.moded_vhosts:
logger.debug(
"Adding a temporary challenge validation Include for name: %s " +
"in: %s", vhost.name, vhost.filep)
self.configurator.parser.add_dir_beginning(
- vhost.path, "Include", self.challenge_conf)
+ vhost.path, "Include", self.challenge_conf_pre)
+ self.configurator.parser.add_dir(
+ vhost.path, "Include", self.challenge_conf_post)
+
self.moded_vhosts.add(vhost)
diff --git a/certbot-apache/certbot_apache/tests/http_01_test.py b/certbot-apache/certbot_apache/tests/http_01_test.py
index 64a76649abf..9ed4ee509f0 100644
--- a/certbot-apache/certbot_apache/tests/http_01_test.py
+++ b/certbot-apache/certbot_apache/tests/http_01_test.py
@@ -158,23 +158,31 @@ def common_perform_test(self, achalls, vhosts):
for vhost in vhosts:
if not vhost.ssl:
matches = self.config.parser.find_dir("Include",
- self.http.challenge_conf,
+ self.http.challenge_conf_pre,
+ vhost.path)
+ self.assertEqual(len(matches), 1)
+ matches = self.config.parser.find_dir("Include",
+ self.http.challenge_conf_post,
vhost.path)
self.assertEqual(len(matches), 1)
self.assertTrue(os.path.exists(challenge_dir))
def _test_challenge_conf(self):
- with open(self.http.challenge_conf) as f:
- conf_contents = f.read()
+ with open(self.http.challenge_conf_pre) as f:
+ pre_conf_contents = f.read()
+
+ with open(self.http.challenge_conf_post) as f:
+ post_conf_contents = f.read()
+
+ self.assertTrue("RewriteEngine on" in pre_conf_contents)
+ self.assertTrue("RewriteRule" in pre_conf_contents)
- self.assertTrue("RewriteEngine on" in conf_contents)
- self.assertTrue("RewriteRule" in conf_contents)
- self.assertTrue(self.http.challenge_dir in conf_contents)
+ self.assertTrue(self.http.challenge_dir in post_conf_contents)
if self.config.version < (2, 4):
- self.assertTrue("Allow from all" in conf_contents)
+ self.assertTrue("Allow from all" in post_conf_contents)
else:
- self.assertTrue("Require all granted" in conf_contents)
+ self.assertTrue("Require all granted" in post_conf_contents)
def _test_challenge_file(self, achall):
name = os.path.join(self.http.challenge_dir, achall.chall.encode("token"))
|
An alternative to this is: #5462
Unfortunately, the way that Apache merges the configuration directives is different for `mod_rewrite` and `<Location>` / `<Directory>` directives.
To work around basic auth in VirtualHosts, the challenge override `Include` had to be split in two: the first part handles overrides for `RewriteRule`, and the second handles overrides for `<Directory>` and `<Location>` directives.
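For concreteness, a standalone sketch of the two Apache >= 2.4 templates from this PR, rendered for an illustrative challenge directory (the path below is made up; in certbot it comes from the configurator's work dir):
```python
# Reproduction of the PR's Apache >= 2.4 templates, rendered standalone.
CONFIG_TEMPLATE24_PRE = """\
RewriteEngine on
RewriteRule ^/\\.well-known/acme-challenge/([A-Za-z0-9-_=]+)$ {0}/$1 [END]
"""
CONFIG_TEMPLATE24_POST = """\
<Directory {0}>
    Require all granted
</Directory>
<Location /.well-known/acme-challenge>
    Require all granted
</Location>
"""

challenge_dir = "/var/lib/letsencrypt/http_challenges"  # illustrative path
print(CONFIG_TEMPLATE24_PRE.format(challenge_dir))   # included at the top of the vhost
print(CONFIG_TEMPLATE24_POST.format(challenge_dir))  # included at the end of the vhost
```
The pre file is included at the beginning of the VirtualHost and the post file at the end, matching the `add_dir_beginning` / `add_dir` calls in the diff.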
|
https://api.github.com/repos/certbot/certbot/pulls/5461
|
2018-01-23T13:51:56Z
|
2018-01-24T00:46:37Z
|
2018-01-24T00:46:37Z
|
2018-01-24T00:46:37Z
| 1,659
|
certbot/certbot
| 15
|
Fix for older versions of pydantic
|
diff --git a/deepspeed/runtime/config_utils.py b/deepspeed/runtime/config_utils.py
index 81ef972ac0c4..08a50785ceb9 100755
--- a/deepspeed/runtime/config_utils.py
+++ b/deepspeed/runtime/config_utils.py
@@ -109,6 +109,7 @@ class Config:
use_enum_values = True
allow_population_by_field_name = True
extra = "forbid"
+ arbitrary_types_allowed = True
class pp_int(int):
|
resolves #2609
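For context, a minimal sketch of what `arbitrary_types_allowed` changes in the pydantic v1 inner-`Config` style used here (the model and field below are illustrative, not DeepSpeed code):
```python
import pydantic  # pydantic v1 API

class Handle:
    """An arbitrary non-pydantic type, standing in for e.g. torch objects."""

class ExampleConfig(pydantic.BaseModel):
    # Without arbitrary_types_allowed = True, pydantic v1 raises
    # "RuntimeError: no validator found for <class 'Handle'>" at class
    # definition time, which is the failure this one-line change avoids.
    handle: Handle = None

    class Config:
        arbitrary_types_allowed = True
```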
|
https://api.github.com/repos/microsoft/DeepSpeed/pulls/2611
|
2022-12-15T19:07:27Z
|
2022-12-15T21:16:25Z
|
2022-12-15T21:16:25Z
|
2022-12-15T21:16:25Z
| 111
|
microsoft/DeepSpeed
| 10,401
|