Dataset columns:
- title: string, lengths 2–169
- diff: string, lengths 235–19.5k
- body: string, lengths 0–30.5k
- url: string, lengths 48–84
- created_at: string, length 20
- closed_at: string, length 20
- merged_at: string, length 20
- updated_at: string, length 20
- diff_len: float64, values 101–3.99k
- repo_name: string, 83 distinct values
- __index_level_0__: int64, values 15–52.7k
Added keras version of FAN for plaidml backends
|
diff --git a/lib/plaidml_tools.py b/lib/plaidml_tools.py
index 972ae8a00b..67d2e3658f 100644
--- a/lib/plaidml_tools.py
+++ b/lib/plaidml_tools.py
@@ -202,5 +202,5 @@ def setup_plaidml(loglevel):
os.environ["KERAS_BACKEND"] = "plaidml.keras.backend"
plaid = PlaidMLStats(loglevel)
plaid.load_active_devices()
- logger.info("Using GPU: %s", plaid.active_devices)
+ logger.info("Using GPU: %s", [plaid.ids[i] for i in plaid.active_devices])
logger.info("Successfully set up for PlaidML")
diff --git a/plugins/extract/align/fan_amd.py b/plugins/extract/align/fan_amd.py
new file mode 100644
index 0000000000..25930b5953
--- /dev/null
+++ b/plugins/extract/align/fan_amd.py
@@ -0,0 +1,268 @@
+#!/usr/bin/env python3
+""" Facial landmarks extractor for faceswap.py
+ Code adapted and modified from:
+ https://github.com/1adrianb/face-alignment
+"""
+import cv2
+import numpy as np
+import keras
+from keras import backend as K
+
+from ._base import Aligner, logger
+
+
+class Align(Aligner):
+ """ Perform transformation to align and get landmarks """
+ def __init__(self, **kwargs):
+ git_model_id = 9
+ model_filename = "face-alignment-network_2d4_keras_v1.h5"
+ super().__init__(git_model_id=git_model_id,
+ model_filename=model_filename,
+ colorspace="RGB",
+ input_size=256,
+ **kwargs)
+ self.vram = 2240
+ self.model = None
+ self.reference_scale = 195
+ self.supports_plaidml = True
+
+ def initialize(self, *args, **kwargs):
+ """ Initialization tasks to run prior to alignments """
+ try:
+ super().initialize(*args, **kwargs)
+ logger.info("Initializing Face Alignment Network...")
+ logger.debug("fan initialize: (args: %s kwargs: %s)", args, kwargs)
+ self.model = FAN(self.model_path)
+ self.init.set()
+ logger.info("Initialized Face Alignment Network.")
+ except Exception as err:
+ self.error.set()
+ raise err
+
+ # DETECTED FACE BOUNDING BOX PROCESSING
+ def align_image(self, detected_face, image):
+ """ Get center and scale, crop and align image around center """
+ logger.trace("Aligning image around center")
+ center, scale = self.get_center_scale(detected_face)
+ image = self.crop(image, center, scale)
+ logger.trace("Aligned image around center")
+ return dict(image=image, center=center, scale=scale)
+
+ def get_center_scale(self, detected_face):
+ """ Get the center and set scale of bounding box """
+ logger.trace("Calculating center and scale")
+ center = np.array([(detected_face.left + detected_face.right) / 2.0,
+ (detected_face.top + detected_face.bottom) / 2.0])
+
+ center[1] -= detected_face.height * 0.12
+
+ scale = (detected_face.width + detected_face.height) / self.reference_scale
+
+ logger.trace("Calculated center and scale: %s, %s", center, scale)
+ return center, scale
+
+ def crop(self, image, center, scale): # pylint:disable=too-many-locals
+ """ Crop image around the center point """
+ logger.trace("Cropping image")
+ is_color = image.ndim > 2
+ v_ul = self.transform([1, 1], center, scale, self.input_size).astype(np.int)
+ v_br = self.transform([self.input_size, self.input_size],
+ center,
+ scale,
+ self.input_size).astype(np.int)
+ if is_color:
+ new_dim = np.array([v_br[1] - v_ul[1],
+ v_br[0] - v_ul[0],
+ image.shape[2]],
+ dtype=np.int32)
+ new_img = np.zeros(new_dim, dtype=np.uint8)
+ else:
+ new_dim = np.array([v_br[1] - v_ul[1],
+ v_br[0] - v_ul[0]],
+ dtype=np.int)
+ new_img = np.zeros(new_dim, dtype=np.uint8)
+ height = image.shape[0]
+ width = image.shape[1]
+ new_x = np.array([max(1, -v_ul[0] + 1), min(v_br[0], width) - v_ul[0]],
+ dtype=np.int32)
+ new_y = np.array([max(1, -v_ul[1] + 1),
+ min(v_br[1], height) - v_ul[1]],
+ dtype=np.int32)
+ old_x = np.array([max(1, v_ul[0] + 1), min(v_br[0], width)],
+ dtype=np.int32)
+ old_y = np.array([max(1, v_ul[1] + 1), min(v_br[1], height)],
+ dtype=np.int32)
+ if is_color:
+ new_img[new_y[0] - 1:new_y[1],
+ new_x[0] - 1:new_x[1]] = image[old_y[0] - 1:old_y[1],
+ old_x[0] - 1:old_x[1], :]
+ else:
+ new_img[new_y[0] - 1:new_y[1],
+ new_x[0] - 1:new_x[1]] = image[old_y[0] - 1:old_y[1],
+ old_x[0] - 1:old_x[1]]
+
+ if new_img.shape[0] < self.input_size:
+ interpolation = cv2.INTER_CUBIC # pylint:disable=no-member
+ else:
+ interpolation = cv2.INTER_AREA # pylint:disable=no-member
+
+ new_img = cv2.resize(new_img, # pylint:disable=no-member
+ dsize=(int(self.input_size), int(self.input_size)),
+ interpolation=interpolation)
+ logger.trace("Cropped image")
+ return new_img
+
+ @staticmethod
+ def transform(point, center, scale, resolution):
+ """ Transform Image """
+ logger.trace("Transforming Points")
+ pnt = np.array([point[0], point[1], 1.0])
+ hscl = 200.0 * scale
+ eye = np.eye(3)
+ eye[0, 0] = resolution / hscl
+ eye[1, 1] = resolution / hscl
+ eye[0, 2] = resolution * (-center[0] / hscl + 0.5)
+ eye[1, 2] = resolution * (-center[1] / hscl + 0.5)
+ eye = np.linalg.inv(eye)
+ retval = np.matmul(eye, pnt)[0:2]
+ logger.trace("Transformed Points: %s", retval)
+ return retval
+
+ def predict_landmarks(self, feed_dict):
+ """ Predict the 68 point landmarks """
+ logger.trace("Predicting Landmarks")
+ image = np.expand_dims(
+ feed_dict["image"].transpose((2, 0, 1)).astype(np.float32) / 255.0, 0)
+ prediction = self.model.predict(image)[-1]
+ pts_img = self.get_pts_from_predict(prediction, feed_dict["center"], feed_dict["scale"])
+ retval = [(int(pt[0]), int(pt[1])) for pt in pts_img]
+ logger.trace("Predicted Landmarks: %s", retval)
+ return retval
+
+ def get_pts_from_predict(self, prediction, center, scale):
+ """ Get points from predictor """
+ logger.trace("Obtain points from prediction")
+ var_b = prediction.reshape((prediction.shape[0],
+ prediction.shape[1] * prediction.shape[2]))
+ var_c = var_b.argmax(1).reshape((prediction.shape[0],
+ 1)).repeat(2,
+ axis=1).astype(np.float)
+ var_c[:, 0] %= prediction.shape[2]
+ var_c[:, 1] = np.apply_along_axis(
+ lambda x: np.floor(x / prediction.shape[2]),
+ 0,
+ var_c[:, 1])
+
+ for i in range(prediction.shape[0]):
+ pt_x, pt_y = int(var_c[i, 0]), int(var_c[i, 1])
+ if pt_x > 0 and pt_x < 63 and pt_y > 0 and pt_y < 63:
+ diff = np.array([prediction[i, pt_y, pt_x+1]
+ - prediction[i, pt_y, pt_x-1],
+ prediction[i, pt_y+1, pt_x]
+ - prediction[i, pt_y-1, pt_x]])
+
+ var_c[i] += np.sign(diff)*0.25
+
+ var_c += 0.5
+ retval = [self.transform(var_c[i], center, scale, prediction.shape[2])
+ for i in range(prediction.shape[0])]
+ logger.trace("Obtained points from prediction: %s", retval)
+
+ return retval
+
+
+class TorchBatchNorm2D(keras.engine.base_layer.Layer):
+ """" Required for FAN_keras model """
+ def __init__(self, axis=-1, momentum=0.99, epsilon=1e-3, **kwargs):
+ super(TorchBatchNorm2D, self).__init__(**kwargs)
+ self.supports_masking = True
+ self.axis = axis
+ self.momentum = momentum
+ self.epsilon = epsilon
+ self._epsilon_const = K.constant(self.epsilon, dtype='float32')
+
+ self.built = False
+ self.gamma = None
+ self.beta = None
+ self.moving_mean = None
+ self.moving_variance = None
+
+ def build(self, input_shape):
+ dim = input_shape[self.axis]
+ if dim is None:
+ raise ValueError("Axis {} of input tensor should have a "
+ "defined dimension but the layer received "
+ "an input with shape {}."
+ .format(str(self.axis), str(input_shape)))
+ shape = (dim,)
+ self.gamma = self.add_weight(shape=shape,
+ name='gamma',
+ initializer='ones',
+ regularizer=None,
+ constraint=None)
+ self.beta = self.add_weight(shape=shape,
+ name='beta',
+ initializer='zeros',
+ regularizer=None,
+ constraint=None)
+ self.moving_mean = self.add_weight(shape=shape,
+ name='moving_mean',
+ initializer='zeros',
+ trainable=False)
+ self.moving_variance = self.add_weight(shape=shape,
+ name='moving_variance',
+ initializer='ones',
+ trainable=False)
+ self.built = True
+
+ def call(self, inputs, **kwargs):
+ input_shape = K.int_shape(inputs)
+
+ broadcast_shape = [1] * len(input_shape)
+ broadcast_shape[self.axis] = input_shape[self.axis]
+
+ broadcast_moving_mean = K.reshape(self.moving_mean, broadcast_shape)
+ broadcast_moving_variance = K.reshape(self.moving_variance,
+ broadcast_shape)
+ broadcast_gamma = K.reshape(self.gamma, broadcast_shape)
+ broadcast_beta = K.reshape(self.beta, broadcast_shape)
+ invstd = (
+ K.ones(shape=broadcast_shape, dtype='float32')
+ / K.sqrt(broadcast_moving_variance + self._epsilon_const)
+ )
+
+ return((inputs - broadcast_moving_mean)
+ * invstd
+ * broadcast_gamma
+ + broadcast_beta)
+
+ def get_config(self):
+ config = {'axis': self.axis,
+ 'momentum': self.momentum,
+ 'epsilon': self.epsilon}
+ base_config = super(TorchBatchNorm2D, self).get_config()
+ return dict(list(base_config.items()) + list(config.items()))
+
+
+class FAN(object):
+ """
+ Converted from pyTorch from
+ https://github.com/1adrianb/face-alignment
+ """
+ def __init__(self, model_path):
+ self.model_path = model_path
+ self.model = None
+ self.load_model()
+
+ def load_model(self):
+ """ Load the Keras Model """
+ logger.verbose("Initializing Face Alignment Network model (Keras version).")
+ self.model = keras.models.load_model(
+ self.model_path,
+ custom_objects={'TorchBatchNorm2D': TorchBatchNorm2D}
+ )
+
+ def predict(self, feed_item):
+ """ Predict landmarks in session """
+ d = self.model.predict(feed_item)
+ return [d[-1].reshape((68, 64, 64))]
|
This PR adds a Keras version of the FAN aligner to support PlaidML backends.
I currently import keras in the global scope. If this leads to multiprocessing problems we can move the import behind a backend check, but I'd prefer not to if we don't need to.
This is not tested with multithreading/multiprocessing, with an NVIDIA card, or on Windows.
Please test whether it breaks the "normal" behaviour.
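For reference, a minimal sketch of what the "import behind a backend check" alternative could look like (the function name and `backend` argument are hypothetical and not part of this PR; the custom-layer handling is omitted for brevity):
```python
import os


def load_fan_model(model_path, backend="plaidml"):
    """Hypothetical deferred import: keras is only imported after the
    backend has been selected, instead of at module import time."""
    if backend == "plaidml":
        os.environ["KERAS_BACKEND"] = "plaidml.keras.backend"
    import keras  # deferred so spawned processes bind to the chosen backend
    return keras.models.load_model(model_path)
```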
|
https://api.github.com/repos/deepfakes/faceswap/pulls/756
|
2019-06-11T18:22:20Z
|
2019-06-22T17:58:37Z
|
2019-06-22T17:58:37Z
|
2019-06-22T18:33:24Z
| 3,000
|
deepfakes/faceswap
| 18,769
|
Add open data, Poland government
|
diff --git a/README.md b/README.md
index cc5bd3c1c2..a162a44696 100644
--- a/README.md
+++ b/README.md
@@ -700,6 +700,7 @@ API | Description | Auth | HTTPS | CORS |
| [Open Government, Mexico](https://www.inegi.org.mx/datos/) | Mexican Statistical Government Open Data | No | Yes | Unknown |
| [Open Government, New Zealand](https://www.data.govt.nz/) | New Zealand Government Open Data | No | Yes | Unknown |
| [Open Government, Peru](https://www.datosabiertos.gob.pe/) | Peru Government Open Data | No | Yes | Unknown |
+| [Open Government, Poland](https://dane.gov.pl/en) | Poland Government Open Data | No | Yes | Yes |
| [Open Government, Romania](http://data.gov.ro/) | Romania Government Open Data | No | No | Unknown |
| [Open Government, Russia](https://data.gov.ru/?language=en) | Open Data Portal Russia | `apiKey` | No | Unknown |
| [Open Government, Saudi Arabia](https://data.gov.sa) | Saudi Arabia Government Open Data | No | Yes | Unknown |
|
<!-- Thank you for taking the time to work on a Pull Request for this project! -->
<!-- To ensure your PR is dealt with swiftly please check the following: -->
The Dane.gov.pl website serves as the Central Public Data Repository in Poland and also provides data via an API
- [x] My submission is formatted according to the guidelines in the [contributing guide](/CONTRIBUTING.md)
- [x] My addition is ordered alphabetically
- [x] My submission has a useful description
- [x] The description does not end with punctuation
- [x] Each table column is padded with one space on either side
- [x] I have searched the repository for any relevant issues or pull requests
- [x] Any category I am creating has the minimum requirement of 3 items
- [x] All changes have been [squashed][squash-link] into a single commit
[squash-link]: <https://github.com/todotxt/todo.txt-android/wiki/Squash-All-Commits-Related-to-a-Single-Issue-into-a-Single-Commit>
|
https://api.github.com/repos/public-apis/public-apis/pulls/1920
|
2021-08-03T13:09:27Z
|
2021-09-02T12:48:26Z
|
2021-09-02T12:48:26Z
|
2021-09-02T14:01:03Z
| 265
|
public-apis/public-apis
| 35,823
|
Fix bokeh slider by adding in side effects to package json
|
diff --git a/e2e/specs/st_bokeh_chart.spec.js b/e2e/specs/st_bokeh_chart.spec.js
deleted file mode 100644
index 915fc411e91e..000000000000
--- a/e2e/specs/st_bokeh_chart.spec.js
+++ /dev/null
@@ -1,30 +0,0 @@
-/**
- * Copyright (c) Streamlit Inc. (2018-2022) Snowflake Inc. (2022)
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-describe("st.bokeh_chart", () => {
- before(() => {
- cy.loadApp("http://localhost:3000/");
- });
-
- beforeEach(() => {
- return cy.get(".stBokehChart").should("have.length", 3);
- });
-
- it("shows left and right graph", () => {
- cy.getIndexed(".stBokehChart", 1).find("canvas");
- cy.getIndexed(".stBokehChart", 2).find("canvas");
- });
-});
diff --git a/e2e_flaky/specs/st_bokeh_chart.spec.ts b/e2e_flaky/specs/st_bokeh_chart.spec.ts
deleted file mode 100644
index aca5c77e5c0c..000000000000
--- a/e2e_flaky/specs/st_bokeh_chart.spec.ts
+++ /dev/null
@@ -1,31 +0,0 @@
-/**
- * Copyright (c) Streamlit Inc. (2018-2022) Snowflake Inc. (2022)
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/// <reference types="cypress" />
-
-describe("st.bokeh_chart", () => {
- before(() => {
- cy.loadApp("http://localhost:3000/");
- });
-
- it("displays a bokeh chart", () => {
- cy.get(".element-container .stBokehChart").should(
- "have.css",
- "height",
- "600px"
- );
- });
-});
diff --git a/e2e/scripts/st_bokeh_chart.py b/e2e_playwright/st_bokeh_chart.py
similarity index 52%
rename from e2e/scripts/st_bokeh_chart.py
rename to e2e_playwright/st_bokeh_chart.py
index 68b491c808fb..6fbbc77efec7 100644
--- a/e2e/scripts/st_bokeh_chart.py
+++ b/e2e_playwright/st_bokeh_chart.py
@@ -12,6 +12,9 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+import numpy as np
+from bokeh.layouts import column, row
+from bokeh.models import ColumnDataSource, CustomJS, Slider
from bokeh.plotting import figure
import streamlit as st
@@ -38,3 +41,38 @@
with col2:
st.bokeh_chart(right_chart, use_container_width=True)
+
+x = np.linspace(0, 10, 500)
+y = np.sin(x)
+
+source = ColumnDataSource(data=dict(x=x, y=y))
+
+plot = figure(y_range=(-10, 10), width=400, height=400)
+
+plot.line("x", "y", source=source, line_width=3, line_alpha=0.6)
+
+amp = Slider(start=0.1, end=10, value=1, step=0.1, title="Amplitude")
+freq = Slider(start=0.1, end=10, value=1, step=0.1, title="Frequency")
+phase = Slider(start=-6.4, end=6.4, value=0, step=0.1, title="Phase")
+offset = Slider(start=-9, end=9, value=0, step=0.1, title="Offset")
+
+callback = CustomJS(
+ args=dict(source=source, amp=amp, freq=freq, phase=phase, offset=offset),
+ code="""
+ const A = amp.value
+ const k = freq.value
+ const phi = phase.value
+ const B = offset.value
+
+ const x = source.data.x
+ const y = Array.from(x, (x) => B + A*Math.sin(k*x+phi))
+ source.data = { x, y }
+""",
+)
+
+amp.js_on_change("value", callback)
+freq.js_on_change("value", callback)
+phase.js_on_change("value", callback)
+offset.js_on_change("value", callback)
+
+st.bokeh_chart(row(plot, column(amp, freq, phase, offset)))
diff --git a/e2e_playwright/st_bokeh_chart_test.py b/e2e_playwright/st_bokeh_chart_test.py
new file mode 100644
index 000000000000..4a329f8a6e85
--- /dev/null
+++ b/e2e_playwright/st_bokeh_chart_test.py
@@ -0,0 +1,28 @@
+# Copyright (c) Streamlit Inc. (2018-2022) Snowflake Inc. (2022)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from playwright.sync_api import Page, expect
+
+
+def test_bokeh_chart(themed_app: Page):
+ """Test that st.bokeh_chart renders correctly."""
+ bokeh_chart_elements = themed_app.locator("[data-testid=stBokehChart]")
+ expect(bokeh_chart_elements).to_have_count(4)
+
+ expect(bokeh_chart_elements.nth(0).locator("canvas").nth(0)).to_be_visible()
+ expect(bokeh_chart_elements.nth(1).locator("canvas").nth(0)).to_be_visible()
+ expect(bokeh_chart_elements.nth(2).locator("canvas").nth(0)).to_be_visible()
+
+ # show a bokeh slider
+ expect(bokeh_chart_elements.nth(3).locator("canvas").nth(0)).to_be_visible()
diff --git a/frontend/lib/package.json b/frontend/lib/package.json
index 1319a28a7fe0..1fdc36cda4cb 100644
--- a/frontend/lib/package.json
+++ b/frontend/lib/package.json
@@ -5,7 +5,7 @@
"license": "Apache-2.0",
"main": "dist/index.js",
"types": "dist/index.d.ts",
- "sideEffects": false,
+ "sideEffects": ["**/vendor/bokeh/**"],
"files": [
"dist"
],
diff --git a/frontend/lib/src/vendor/bokeh/bokeh-api-2.4.3.esm.min.d.ts b/frontend/lib/src/vendor/bokeh/bokeh-api-2.4.3.esm.min.d.ts
new file mode 100644
index 000000000000..d2e9f340f06e
--- /dev/null
+++ b/frontend/lib/src/vendor/bokeh/bokeh-api-2.4.3.esm.min.d.ts
@@ -0,0 +1,5 @@
+// Including this polyfill type declaration prevents typescript checker from
+// parsing bokeh-api-2.4.3 JS source file that has TS deems invalid (\u2118)
+// https://mothereff.in/js-variables
+declare const plugin;
+export default plugin;
\ No newline at end of file
diff --git a/frontend/lib/src/vendor/bokeh/bokeh-gl-2.4.3.esm.min.d.ts b/frontend/lib/src/vendor/bokeh/bokeh-gl-2.4.3.esm.min.d.ts
new file mode 100644
index 000000000000..397965023ba9
--- /dev/null
+++ b/frontend/lib/src/vendor/bokeh/bokeh-gl-2.4.3.esm.min.d.ts
@@ -0,0 +1,5 @@
+// Including this polyfill type declaration prevents typescript checker from
+// parsing bokeh-gl-2.4.3 JS source file that has TS deems invalid (\u2118)
+// https://mothereff.in/js-variables
+declare const plugin;
+export default plugin;
\ No newline at end of file
diff --git a/frontend/lib/src/vendor/bokeh/bokeh-tables-2.4.3.esm.min.d.ts b/frontend/lib/src/vendor/bokeh/bokeh-tables-2.4.3.esm.min.d.ts
new file mode 100644
index 000000000000..b1ffab3f51fa
--- /dev/null
+++ b/frontend/lib/src/vendor/bokeh/bokeh-tables-2.4.3.esm.min.d.ts
@@ -0,0 +1,5 @@
+// Including this polyfill type declaration prevents typescript checker from
+// parsing bokeh-tables-2.4.3 JS source file that has TS deems invalid (\u2118)
+// https://mothereff.in/js-variables
+declare const plugin;
+export default plugin;
\ No newline at end of file
diff --git a/frontend/lib/src/vendor/bokeh/bokeh-widgets-2.4.3.esm.min.d.ts b/frontend/lib/src/vendor/bokeh/bokeh-widgets-2.4.3.esm.min.d.ts
new file mode 100644
index 000000000000..06c9c94463f5
--- /dev/null
+++ b/frontend/lib/src/vendor/bokeh/bokeh-widgets-2.4.3.esm.min.d.ts
@@ -0,0 +1,5 @@
+// Including this polyfill type declaration prevents typescript checker from
+// parsing bokeh-widgets-2.4.3 JS source file that has TS deems invalid (\u2118)
+// https://mothereff.in/js-variables
+declare const plugin;
+export default plugin;
\ No newline at end of file
|
<!--
⚠️ BEFORE CONTRIBUTING PLEASE READ OUR CONTRIBUTING GUIDELINES!
https://github.com/streamlit/streamlit/wiki/Contributing
-->
## Describe your changes
- need to add an exception to `sideEffects` in `package.json` as we are doing side effect imports in `BokehChart.tsx`
- add a test in e2e to make sure this regression doesn't happen
- migrate e2e cypress test to playwright
## GitHub Issue Link (if applicable)
closes #7171
## Testing Plan
- Explanation of why no additional tests are needed
- Unit Tests (JS and/or Python)
- not sure of how to add a unit test for this
- E2E Tests
- added a test
- Any manual testing needed?
---
**Contribution License Agreement**
By submitting this pull request you agree that all contributions to this project are made under the Apache 2.0 license.
|
https://api.github.com/repos/streamlit/streamlit/pulls/7441
|
2023-09-27T21:41:26Z
|
2023-09-29T18:29:56Z
|
2023-09-29T18:29:56Z
|
2023-09-29T18:29:56Z
| 2,545
|
streamlit/streamlit
| 21,907
|
Use enums in rdw
|
diff --git a/homeassistant/components/rdw/binary_sensor.py b/homeassistant/components/rdw/binary_sensor.py
index 80f4a425212305..81d3e448b78fc9 100644
--- a/homeassistant/components/rdw/binary_sensor.py
+++ b/homeassistant/components/rdw/binary_sensor.py
@@ -7,7 +7,7 @@
from vehicle import Vehicle
from homeassistant.components.binary_sensor import (
- DEVICE_CLASS_PROBLEM,
+ BinarySensorDeviceClass,
BinarySensorEntity,
BinarySensorEntityDescription,
)
@@ -48,7 +48,7 @@ class RDWBinarySensorEntityDescription(
RDWBinarySensorEntityDescription(
key="pending_recall",
name="Pending Recall",
- device_class=DEVICE_CLASS_PROBLEM,
+ device_class=BinarySensorDeviceClass.PROBLEM,
is_on_fn=lambda vehicle: vehicle.pending_recall,
),
)
diff --git a/homeassistant/components/rdw/sensor.py b/homeassistant/components/rdw/sensor.py
index f2c8d93a8a2fff..04f525c61b821b 100644
--- a/homeassistant/components/rdw/sensor.py
+++ b/homeassistant/components/rdw/sensor.py
@@ -8,7 +8,7 @@
from vehicle import Vehicle
from homeassistant.components.sensor import (
- DEVICE_CLASS_DATE,
+ SensorDeviceClass,
SensorEntity,
SensorEntityDescription,
)
@@ -43,13 +43,13 @@ class RDWSensorEntityDescription(
RDWSensorEntityDescription(
key="apk_expiration",
name="APK Expiration",
- device_class=DEVICE_CLASS_DATE,
+ device_class=SensorDeviceClass.DATE,
value_fn=lambda vehicle: vehicle.apk_expiration,
),
RDWSensorEntityDescription(
key="ascription_date",
name="Ascription Date",
- device_class=DEVICE_CLASS_DATE,
+ device_class=SensorDeviceClass.DATE,
value_fn=lambda vehicle: vehicle.ascription_date,
),
)
|
<!--
You are amazing! Thanks for contributing to our project!
Please, DO NOT DELETE ANY TEXT from this template! (unless instructed).
-->
## Breaking change
<!--
If your PR contains a breaking change for existing users, it is important
to tell them what breaks, how to make it work again and why we did this.
This piece of text is published with the release notes, so it helps if you
write it towards our users, not us.
Note: Remove this section if this PR is NOT a breaking change.
-->
## Proposed change
<!--
Describe the big picture of your changes here to communicate to the
maintainers why we should accept this pull request. If it fixes a bug
or resolves a feature request, be sure to link to that issue in the
additional information section.
-->
Use enums in rdw
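For illustration, the new enum members are string-backed, so they compare equal to the old string constants; a quick sketch (assuming the enum values match the old constants, as they do in core at the time of this change):
```python
from homeassistant.components.sensor import SensorDeviceClass

# The enum member is a str subclass, so existing comparisons against the
# old constant value ("date") keep working unchanged.
assert SensorDeviceClass.DATE == "date"
assert isinstance(SensorDeviceClass.DATE, str)
```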
## Type of change
<!--
What type of change does your PR introduce to Home Assistant?
NOTE: Please, check only 1! box!
If your PR requires multiple boxes to be checked, you'll most likely need to
split it into multiple PRs. This makes things easier and faster to code review.
-->
- [ ] Dependency upgrade
- [ ] Bugfix (non-breaking change which fixes an issue)
- [ ] New integration (thank you!)
- [ ] New feature (which adds functionality to an existing integration)
- [ ] Breaking change (fix/feature causing existing functionality to break)
- [x] Code quality improvements to existing code or addition of tests
## Additional information
<!--
Details are important, and help maintainers processing your PR.
Please be sure to fill out additional details, if applicable.
-->
- This PR fixes or closes issue: fixes #
- This PR is related to issue:
- Link to documentation pull request:
## Checklist
<!--
Put an `x` in the boxes that apply. You can also fill these out after
creating the PR. If you're unsure about any of them, don't hesitate to ask.
We're here to help! This is simply a reminder of what we are going to look
for before merging your code.
-->
- [ ] The code change is tested and works locally.
- [x] Local tests pass. **Your PR cannot be merged unless tests pass**
- [x] There is no commented out code in this PR.
- [x] I have followed the [development checklist][dev-checklist]
- [x] The code has been formatted using Black (`black --fast homeassistant tests`)
- [ ] Tests have been added to verify that the new code works.
If user exposed functionality or configuration variables are added/changed:
- [ ] Documentation added/updated for [www.home-assistant.io][docs-repository]
If the code communicates with devices, web services, or third-party tools:
- [ ] The [manifest file][manifest-docs] has all fields filled out correctly.
Updated and included derived files by running: `python3 -m script.hassfest`.
- [ ] New or updated dependencies have been added to `requirements_all.txt`.
Updated by running `python3 -m script.gen_requirements_all`.
- [ ] For the updated dependencies - a link to the changelog, or at minimum a diff between library versions is added to the PR description.
- [ ] Untested files have been added to `.coveragerc`.
The integration reached or maintains the following [Integration Quality Scale][quality-scale]:
<!--
The Integration Quality Scale scores an integration on the code quality
and user experience. Each level of the quality scale consists of a list
of requirements. We highly recommend getting your integration scored!
-->
- [ ] No score or internal
- [ ] 🥈 Silver
- [ ] 🥇 Gold
- [ ] 🏆 Platinum
<!--
This project is very active and we have a high turnover of pull requests.
Unfortunately, the number of incoming pull requests is higher than what our
reviewers can review and merge so there is a long backlog of pull requests
waiting for review. You can help here!
By reviewing another pull request, you will help raise the code quality of
that pull request and the final review will be faster. This way the general
pace of pull request reviews will go up and your wait time will go down.
When picking a pull request to review, try to choose one that hasn't yet
been reviewed.
Thanks for helping out!
-->
To help with the load of incoming pull requests:
- [ ] I have reviewed two other [open pull requests][prs] in this repository.
[prs]: https://github.com/home-assistant/core/pulls?q=is%3Aopen+is%3Apr+-author%3A%40me+-draft%3Atrue+-label%3Awaiting-for-upstream+sort%3Acreated-desc+review%3Anone
<!--
Thank you for contributing <3
Below, some useful links you could explore:
-->
[dev-checklist]: https://developers.home-assistant.io/docs/en/development_checklist.html
[manifest-docs]: https://developers.home-assistant.io/docs/en/creating_integration_manifest.html
[quality-scale]: https://developers.home-assistant.io/docs/en/next/integration_quality_scale_index.html
[docs-repository]: https://github.com/home-assistant/home-assistant.io
|
https://api.github.com/repos/home-assistant/core/pulls/62059
|
2021-12-16T13:25:09Z
|
2021-12-16T14:10:54Z
|
2021-12-16T14:10:54Z
|
2021-12-17T15:02:06Z
| 442
|
home-assistant/core
| 39,425
|
[tune/rllib/wandb] Flatten result dict so that nested result dicts are shown in W&B logger
|
diff --git a/python/ray/tune/integration/wandb.py b/python/ray/tune/integration/wandb.py
index 66d13ee4e8a1f..b00d3cdd866fa 100644
--- a/python/ray/tune/integration/wandb.py
+++ b/python/ray/tune/integration/wandb.py
@@ -7,6 +7,7 @@
from ray.tune import Trainable
from ray.tune.function_runner import FunctionRunner
from ray.tune.logger import Logger
+from ray.tune.utils import flatten_dict
try:
import wandb
@@ -145,11 +146,16 @@ def run(self):
def _handle_result(self, result):
config_update = result.get("config", {}).copy()
log = {}
+ flat_result = flatten_dict(result, delimiter="/")
- for k, v in result.items():
- if k in self._to_config:
+ for k, v in flat_result.items():
+ if any(
+ k.startswith(item + "/") or k == item
+ for item in self._to_config):
config_update[k] = v
- elif k in self._exclude:
+ elif any(
+ k.startswith(item + "/") or k == item
+ for item in self._exclude):
continue
elif not isinstance(v, Number):
continue
|
<!-- Thank you for your contribution! Please review https://github.com/ray-project/ray/blob/master/CONTRIBUTING.rst before opening a pull request. -->
<!-- Please add a reviewer to the assignee section when you create a PR. If you don't have the access to it, we will shortly find a reviewer and assign them to your PR. -->
## Why are these changes needed?
The Weights & Biases logger does not flatten the result dict, therefore nested dicts will not be considered. This PR flattens the result dict, similar to most other loggers.
<!-- Please give a short summary of the change and the problem this solves. -->
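For illustration, a minimal sketch of the flattening behaviour this relies on (not the actual `ray.tune.utils.flatten_dict` implementation):
```python
def flatten_dict(nested, delimiter="/", prefix=""):
    """Flatten nested dicts into 'a/b/c' style keys."""
    flat = {}
    for key, value in nested.items():
        full_key = f"{prefix}{delimiter}{key}" if prefix else str(key)
        if isinstance(value, dict):
            flat.update(flatten_dict(value, delimiter, full_key))
        else:
            flat[full_key] = value
    return flat

result = {"episode_reward_mean": 10.5, "info": {"learner": {"policy_loss": 0.2}}}
print(flatten_dict(result))
# {'episode_reward_mean': 10.5, 'info/learner/policy_loss': 0.2}
```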
## Related issue number
<!-- For example: "Closes #1234" -->
## Checks
- [X] I've run `scripts/format.sh` to lint the changes in this PR.
- [ ] I've included any doc changes needed for https://docs.ray.io/en/latest/.
- [ ] I've made sure the tests are passing. Note that there might be a few flaky tests, see the recent failure rates at https://ray-travis-tracker.herokuapp.com/.
- Testing Strategy
- [ ] Unit tests
- [ ] Release tests
- [ ] This PR is not tested (please justify below)
|
https://api.github.com/repos/ray-project/ray/pulls/10429
|
2020-08-30T15:59:23Z
|
2020-08-31T22:28:47Z
|
2020-08-31T22:28:47Z
|
2020-08-31T22:28:47Z
| 304
|
ray-project/ray
| 19,266
|
Add fingerprint for 2018 Honda Accord Sport 2.0L Canadian version
|
diff --git a/selfdrive/car/honda/values.py b/selfdrive/car/honda/values.py
index be5fd24dfefa5c..0d82ff947e702a 100644
--- a/selfdrive/car/honda/values.py
+++ b/selfdrive/car/honda/values.py
@@ -204,6 +204,7 @@ class CAR:
b'77959-TBX-H230\x00\x00',
],
(Ecu.combinationMeter, 0x18da60f1, None): [
+ b'78109-TVC-C010\x00\x00',
b'78109-TVA-A210\x00\x00',
b'78109-TVC-A010\x00\x00',
b'78109-TVC-A020\x00\x00',
|
***** Car Fingerprinting *****
Added the fingerprint for the subject vehicle following https://github.com/commaai/openpilot/wiki/Fingerprinting; the missing firmware was:
( ecu = combinationMeter,
fwVersion = "78109-TVC-C010\000\000",
address = 416964849, -> 0x18da60f1
subAddress = 0 )
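As a quick check, the decimal address reported by the fingerprinting output matches the hex ECU address used in `values.py`:
```python
>>> hex(416964849)
'0x18da60f1'
```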
|
https://api.github.com/repos/commaai/openpilot/pulls/21831
|
2021-08-02T23:55:57Z
|
2021-08-03T06:41:10Z
|
2021-08-03T06:41:10Z
|
2021-08-03T06:41:10Z
| 170
|
commaai/openpilot
| 9,858
|
optimization
|
diff --git a/sorts/selection_sort.py b/sorts/selection_sort.py
index 43ad26a7bf27..6a9c063d3364 100644
--- a/sorts/selection_sort.py
+++ b/sorts/selection_sort.py
@@ -35,7 +35,8 @@ def selection_sort(collection):
for k in range(i + 1, length):
if collection[k] < collection[least]:
least = k
- collection[least], collection[i] = (collection[i], collection[least])
+ if least != i:
+ collection[least], collection[i] = (collection[i], collection[least])
return collection
|
We don't need to swap if `least == i`.
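For context, a self-contained sketch of the optimized loop from the diff (the surrounding function is reconstructed here only to make the example runnable):
```python
def selection_sort(collection):
    """Selection sort that skips the redundant self-swap."""
    length = len(collection)
    for i in range(length - 1):
        least = i
        for k in range(i + 1, length):
            if collection[k] < collection[least]:
                least = k
        if least != i:  # skip the no-op swap when position i already holds the minimum
            collection[least], collection[i] = collection[i], collection[least]
    return collection

print(selection_sort([3, 1, 2]))  # [1, 2, 3]
```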
|
https://api.github.com/repos/TheAlgorithms/Python/pulls/1303
|
2019-10-08T02:49:27Z
|
2019-10-08T08:25:01Z
|
2019-10-08T08:25:00Z
|
2019-10-08T10:32:59Z
| 149
|
TheAlgorithms/Python
| 29,414
|
Backport PR #58126: BLD: Build wheels with numpy 2.0rc1
|
diff --git a/pyproject.toml b/pyproject.toml
index c225ed80dcb10..b2764b137a1f8 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -6,12 +6,9 @@ requires = [
"meson==1.2.1",
"wheel",
"Cython==3.0.5", # Note: sync with setup.py, environment.yml and asv.conf.json
- # Any NumPy version should be fine for compiling. Users are unlikely
- # to get a NumPy<1.25 so the result will be compatible with all relevant
- # NumPy versions (if not it is presumably compatible with their version).
- # Pin <2.0 for releases until tested against an RC. But explicitly allow
- # testing the `.dev0` nightlies (which require the extra index).
- "numpy>1.22.4,<=2.0.0.dev0",
+ # Force numpy higher than 2.0rc1, so that built wheels are compatible
+ # with both numpy 1 and 2
+ "numpy>=2.0.0rc1",
"versioneer[toml]"
]
|
- [ ] closes #xxxx (Replace xxxx with the GitHub issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
|
https://api.github.com/repos/pandas-dev/pandas/pulls/58127
|
2024-04-03T02:04:55Z
|
2024-04-03T02:57:16Z
|
2024-04-03T02:57:16Z
|
2024-04-03T02:57:16Z
| 279
|
pandas-dev/pandas
| 44,791
|
Remove orphan comment
|
diff --git a/gym/envs/mujoco/__init__.py b/gym/envs/mujoco/__init__.py
index c81f0dec3fe..fc4cc058be1 100644
--- a/gym/envs/mujoco/__init__.py
+++ b/gym/envs/mujoco/__init__.py
@@ -1,3 +1,5 @@
+from gym.envs.mujoco.mujoco_env import MujocoEnv # isort:skip
+
# ^^^^^ so that user gets the correct error
# message if mujoco is not installed correctly
from gym.envs.mujoco.ant import AntEnv
@@ -7,7 +9,6 @@
from gym.envs.mujoco.humanoidstandup import HumanoidStandupEnv
from gym.envs.mujoco.inverted_double_pendulum import InvertedDoublePendulumEnv
from gym.envs.mujoco.inverted_pendulum import InvertedPendulumEnv
-from gym.envs.mujoco.mujoco_env import MujocoEnv
from gym.envs.mujoco.pusher import PusherEnv
from gym.envs.mujoco.reacher import ReacherEnv
from gym.envs.mujoco.swimmer import SwimmerEnv
|
The comment used to refer to a line that is no longer there, like:
```python
from gym.envs.mujoco.mujoco_env import MujocoEnv
# ^^^^^ so that user gets the correct error
# message if mujoco is not installed correctly
```
Now the comment is useless.
|
https://api.github.com/repos/openai/gym/pulls/2774
|
2022-04-23T15:29:43Z
|
2022-04-24T16:15:16Z
|
2022-04-24T16:15:16Z
|
2022-04-24T16:15:17Z
| 272
|
openai/gym
| 5,067
|
upscaler_utils: Reduce logging
|
diff --git a/modules/upscaler_utils.py b/modules/upscaler_utils.py
index b5e5a80caa5..17223ca0da1 100644
--- a/modules/upscaler_utils.py
+++ b/modules/upscaler_utils.py
@@ -69,10 +69,8 @@ def upscale_with_model(
for y, h, row in grid.tiles:
newrow = []
for x, w, tile in row:
- logger.debug("Tile (%d, %d) %s...", x, y, tile)
output = upscale_pil_patch(model, tile)
scale_factor = output.width // tile.width
- logger.debug("=> %s (scale factor %s)", output, scale_factor)
newrow.append([x * scale_factor, w * scale_factor, output])
p.update(1)
newtiles.append([y * scale_factor, h * scale_factor, newrow])
|
## Description
* upscale_with_model: Remove the debug logging that occurs inside the tile loop, as it produces an excessive amount of noise when running with DEBUG log levels.
## Screenshots/videos:
This is excessive:
```
2024-03-02 07:50:34 DEBUG [modules.modelloader] Loaded <spandrel.__helpers.model_descriptor.ImageModelDescriptor object at 0x000001E855FC8760> from F:\stablediffusion\stable-diffusion-webui-1-8-0-rc\models\ESRGAN\BSRGANx2.pth (device=None, half=False, dtype=None)
2024-03-02 07:50:34 DEBUG [modules.upscaler_utils] Tile (0, 0) <PIL.Image.Image image mode=RGB size=192x192 at 0x1E855FCBC10>...
2024-03-02 07:50:34 DEBUG [modules.upscaler_utils] => <PIL.Image.Image image mode=RGB size=384x384 at 0x1E855FEEB30> (scale factor 2)
2024-03-02 07:50:34 DEBUG [modules.upscaler_utils] Tile (166, 0) <PIL.Image.Image image mode=RGB size=192x192 at 0x1E855FCBCA0>...
2024-03-02 07:50:34 DEBUG [modules.upscaler_utils] => <PIL.Image.Image image mode=RGB size=384x384 at 0x1E855FEC340> (scale factor 2)
2024-03-02 07:50:34 DEBUG [modules.upscaler_utils] Tile (332, 0) <PIL.Image.Image image mode=RGB size=192x192 at 0x1E855FCB9A0>...
2024-03-02 07:50:34 DEBUG [modules.upscaler_utils] => <PIL.Image.Image image mode=RGB size=384x384 at 0x1E855FEEA10> (scale factor 2)
2024-03-02 07:50:34 DEBUG [modules.upscaler_utils] Tile (499, 0) <PIL.Image.Image image mode=RGB size=192x192 at 0x1E855FCBB20>...
2024-03-02 07:50:34 DEBUG [modules.upscaler_utils] => <PIL.Image.Image image mode=RGB size=384x384 at 0x1E855FED420> (scale factor 2)
2024-03-02 07:50:34 DEBUG [modules.upscaler_utils] Tile (665, 0) <PIL.Image.Image image mode=RGB size=192x192 at 0x1E855FCBD30>...
2024-03-02 07:50:34 DEBUG [modules.upscaler_utils] => <PIL.Image.Image image mode=RGB size=384x384 at 0x1E855FEDFF0> (scale factor 2)
2024-03-02 07:50:34 DEBUG [modules.upscaler_utils] Tile (832, 0) <PIL.Image.Image image mode=RGB size=192x192 at 0x1E855FCBBE0>...
2024-03-02 07:50:34 DEBUG [modules.upscaler_utils] => <PIL.Image.Image image mode=RGB size=384x384 at 0x1E855FEF9D0> (scale factor 2)
2024-03-02 07:50:34 DEBUG [modules.upscaler_utils] Tile (0, 166) <PIL.Image.Image image mode=RGB size=192x192 at 0x1E855FCBAF0>...
2024-03-02 07:50:34 DEBUG [modules.upscaler_utils] => <PIL.Image.Image image mode=RGB size=384x384 at 0x1E855FEE800> (scale factor 2)
2024-03-02 07:50:34 DEBUG [modules.upscaler_utils] Tile (166, 166) <PIL.Image.Image image mode=RGB size=192x192 at 0x1E855FCBBB0>...
2024-03-02 07:50:34 DEBUG [modules.upscaler_utils] => <PIL.Image.Image image mode=RGB size=384x384 at 0x1E855FEE560> (scale factor 2)
2024-03-02 07:50:34 DEBUG [modules.upscaler_utils] Tile (332, 166) <PIL.Image.Image image mode=RGB size=192x192 at 0x1E8553FFA00>...
2024-03-02 07:50:34 DEBUG [modules.upscaler_utils] => <PIL.Image.Image image mode=RGB size=384x384 at 0x1E855FEE6E0> (scale factor 2)
2024-03-02 07:50:34 DEBUG [modules.upscaler_utils] Tile (499, 166) <PIL.Image.Image image mode=RGB size=192x192 at 0x1E8553FFFD0>...
2024-03-02 07:50:34 DEBUG [modules.upscaler_utils] => <PIL.Image.Image image mode=RGB size=384x384 at 0x1E855FED300> (scale factor 2)
2024-03-02 07:50:34 DEBUG [modules.upscaler_utils] Tile (665, 166) <PIL.Image.Image image mode=RGB size=192x192 at 0x1E8553FFD30>...
2024-03-02 07:50:34 DEBUG [modules.upscaler_utils] => <PIL.Image.Image image mode=RGB size=384x384 at 0x1E855FECFA0> (scale factor 2)
2024-03-02 07:50:34 DEBUG [modules.upscaler_utils] Tile (832, 166) <PIL.Image.Image image mode=RGB size=192x192 at 0x1E8553FFB20>...
2024-03-02 07:50:35 DEBUG [modules.upscaler_utils] => <PIL.Image.Image image mode=RGB size=384x384 at 0x1E855FED780> (scale factor 2)
2024-03-02 07:50:35 DEBUG [modules.upscaler_utils] Tile (0, 332) <PIL.Image.Image image mode=RGB size=192x192 at 0x1E8553FF760>...
2024-03-02 07:50:35 DEBUG [modules.upscaler_utils] => <PIL.Image.Image image mode=RGB size=384x384 at 0x1E855FED570> (scale factor 2)
2024-03-02 07:50:35 DEBUG [modules.upscaler_utils] Tile (166, 332) <PIL.Image.Image image mode=RGB size=192x192 at 0x1E8553FEF50>...
2024-03-02 07:50:35 DEBUG [modules.upscaler_utils] => <PIL.Image.Image image mode=RGB size=384x384 at 0x1E855FECD60> (scale factor 2)
2024-03-02 07:50:35 DEBUG [modules.upscaler_utils] Tile (332, 332) <PIL.Image.Image image mode=RGB size=192x192 at 0x1E8553FF8B0>...
2024-03-02 07:50:35 DEBUG [modules.upscaler_utils] => <PIL.Image.Image image mode=RGB size=384x384 at 0x1E855FEE260> (scale factor 2)
2024-03-02 07:50:35 DEBUG [modules.upscaler_utils] Tile (499, 332) <PIL.Image.Image image mode=RGB size=192x192 at 0x1E8553FF9A0>...
2024-03-02 07:50:35 DEBUG [modules.upscaler_utils] => <PIL.Image.Image image mode=RGB size=384x384 at 0x1E855FEE020> (scale factor 2)
2024-03-02 07:50:35 DEBUG [modules.upscaler_utils] Tile (665, 332) <PIL.Image.Image image mode=RGB size=192x192 at 0x1E8553FF880>...
2024-03-02 07:50:35 DEBUG [modules.upscaler_utils] => <PIL.Image.Image image mode=RGB size=384x384 at 0x1E855FEDEA0> (scale factor 2)
2024-03-02 07:50:35 DEBUG [modules.upscaler_utils] Tile (832, 332) <PIL.Image.Image image mode=RGB size=192x192 at 0x1E8553FF790>...
2024-03-02 07:50:35 DEBUG [modules.upscaler_utils] => <PIL.Image.Image image mode=RGB size=384x384 at 0x1E855FEC220> (scale factor 2)
2024-03-02 07:50:35 DEBUG [modules.upscaler_utils] Tile (0, 499) <PIL.Image.Image image mode=RGB size=192x192 at 0x1E8553FD330>...
2024-03-02 07:50:35 DEBUG [modules.upscaler_utils] => <PIL.Image.Image image mode=RGB size=384x384 at 0x1E855FED180> (scale factor 2)
2024-03-02 07:50:35 DEBUG [modules.upscaler_utils] Tile (166, 499) <PIL.Image.Image image mode=RGB size=192x192 at 0x1E8553FD2D0>...
2024-03-02 07:50:35 DEBUG [modules.upscaler_utils] => <PIL.Image.Image image mode=RGB size=384x384 at 0x1E855FEC0A0> (scale factor 2)
2024-03-02 07:50:35 DEBUG [modules.upscaler_utils] Tile (332, 499) <PIL.Image.Image image mode=RGB size=192x192 at 0x1E8553FED10>...
2024-03-02 07:50:35 DEBUG [modules.upscaler_utils] => <PIL.Image.Image image mode=RGB size=384x384 at 0x1E855444A30> (scale factor 2)
2024-03-02 07:50:35 DEBUG [modules.upscaler_utils] Tile (499, 499) <PIL.Image.Image image mode=RGB size=192x192 at 0x1E8553FECE0>...
2024-03-02 07:50:35 DEBUG [modules.upscaler_utils] => <PIL.Image.Image image mode=RGB size=384x384 at 0x1E855444CD0> (scale factor 2)
2024-03-02 07:50:35 DEBUG [modules.upscaler_utils] Tile (665, 499) <PIL.Image.Image image mode=RGB size=192x192 at 0x1E8553FF100>...
2024-03-02 07:50:35 DEBUG [modules.upscaler_utils] => <PIL.Image.Image image mode=RGB size=384x384 at 0x1E855444BB0> (scale factor 2)
2024-03-02 07:50:35 DEBUG [modules.upscaler_utils] Tile (832, 499) <PIL.Image.Image image mode=RGB size=192x192 at 0x1E8553FF040>...
2024-03-02 07:50:35 DEBUG [modules.upscaler_utils] => <PIL.Image.Image image mode=RGB size=384x384 at 0x1E855446350> (scale factor 2)
2024-03-02 07:50:35 DEBUG [modules.upscaler_utils] Tile (0, 665) <PIL.Image.Image image mode=RGB size=192x192 at 0x1E8553FF1F0>...
2024-03-02 07:50:35 DEBUG [modules.upscaler_utils] => <PIL.Image.Image image mode=RGB size=384x384 at 0x1E6FC2D73D0> (scale factor 2)
2024-03-02 07:50:35 DEBUG [modules.upscaler_utils] Tile (166, 665) <PIL.Image.Image image mode=RGB size=192x192 at 0x1E8553FF0D0>...
2024-03-02 07:50:35 DEBUG [modules.upscaler_utils] => <PIL.Image.Image image mode=RGB size=384x384 at 0x1E6FC2D7DC0> (scale factor 2)
2024-03-02 07:50:35 DEBUG [modules.upscaler_utils] Tile (332, 665) <PIL.Image.Image image mode=RGB size=192x192 at 0x1E8553FF3A0>...
2024-03-02 07:50:35 DEBUG [modules.upscaler_utils] => <PIL.Image.Image image mode=RGB size=384x384 at 0x1E6FC2D7F70> (scale factor 2)
2024-03-02 07:50:35 DEBUG [modules.upscaler_utils] Tile (499, 665) <PIL.Image.Image image mode=RGB size=192x192 at 0x1E8553FF370>...
2024-03-02 07:50:35 DEBUG [modules.upscaler_utils] => <PIL.Image.Image image mode=RGB size=384x384 at 0x1E6FC2D77F0> (scale factor 2)
2024-03-02 07:50:35 DEBUG [modules.upscaler_utils] Tile (665, 665) <PIL.Image.Image image mode=RGB size=192x192 at 0x1E8553FF4F0>...
2024-03-02 07:50:35 DEBUG [modules.upscaler_utils] => <PIL.Image.Image image mode=RGB size=384x384 at 0x1E6FC2D6800> (scale factor 2)
2024-03-02 07:50:35 DEBUG [modules.upscaler_utils] Tile (832, 665) <PIL.Image.Image image mode=RGB size=192x192 at 0x1E8553FF4C0>...
2024-03-02 07:50:35 DEBUG [modules.upscaler_utils] => <PIL.Image.Image image mode=RGB size=384x384 at 0x1E6FC2D7C70> (scale factor 2)
2024-03-02 07:50:35 DEBUG [modules.upscaler_utils] Tile (0, 832) <PIL.Image.Image image mode=RGB size=192x192 at 0x1E8553FF640>...
2024-03-02 07:50:35 DEBUG [modules.upscaler_utils] => <PIL.Image.Image image mode=RGB size=384x384 at 0x1E6FC2D5D50> (scale factor 2)
2024-03-02 07:50:35 DEBUG [modules.upscaler_utils] Tile (166, 832) <PIL.Image.Image image mode=RGB size=192x192 at 0x1E8553FF610>...
2024-03-02 07:50:35 DEBUG [modules.upscaler_utils] => <PIL.Image.Image image mode=RGB size=384x384 at 0x1E6FC2D6230> (scale factor 2)
2024-03-02 07:50:35 DEBUG [modules.upscaler_utils] Tile (332, 832) <PIL.Image.Image image mode=RGB size=192x192 at 0x1E8553FFDC0>...
2024-03-02 07:50:36 DEBUG [modules.upscaler_utils] => <PIL.Image.Image image mode=RGB size=384x384 at 0x1E8553FFAF0> (scale factor 2)
2024-03-02 07:50:36 DEBUG [modules.upscaler_utils] Tile (499, 832) <PIL.Image.Image image mode=RGB size=192x192 at 0x1E8553FFC70>...
2024-03-02 07:50:36 DEBUG [modules.upscaler_utils] => <PIL.Image.Image image mode=RGB size=384x384 at 0x1E8553FF430> (scale factor 2)
2024-03-02 07:50:36 DEBUG [modules.upscaler_utils] Tile (665, 832) <PIL.Image.Image image mode=RGB size=192x192 at 0x1E8553FD240>...
2024-03-02 07:50:36 DEBUG [modules.upscaler_utils] => <PIL.Image.Image image mode=RGB size=384x384 at 0x1E8553FEC50> (scale factor 2)
2024-03-02 07:50:36 DEBUG [modules.upscaler_utils] Tile (832, 832) <PIL.Image.Image image mode=RGB size=192x192 at 0x1E8553FD210>...
2024-03-02 07:50:36 DEBUG [modules.upscaler_utils] => <PIL.Image.Image image mode=RGB size=384x384 at 0x1E8553FD1B0> (scale factor 2)
tiled upscale: 100%|████████████████████████████████████████████████████████████████████████| 36/36 [00:01<00:00, 20.07it/s]
```
## Checklist:
- [x] I have read [contributing wiki page](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Contributing)
- [x] I have performed a self-review of my own code
- [x] My code follows the [style guidelines](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Contributing#code-style)
- [x] My code passes [tests](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Tests)
|
https://api.github.com/repos/AUTOMATIC1111/stable-diffusion-webui/pulls/15084
|
2024-03-02T16:12:49Z
|
2024-03-04T03:50:04Z
|
2024-03-04T03:50:03Z
|
2024-03-04T03:50:04Z
| 200
|
AUTOMATIC1111/stable-diffusion-webui
| 39,803
|
map eta: use pair
|
diff --git a/selfdrive/ui/qt/maps/map_eta.cc b/selfdrive/ui/qt/maps/map_eta.cc
index 13e11e5597aed0..9dbbad28450a3b 100644
--- a/selfdrive/ui/qt/maps/map_eta.cc
+++ b/selfdrive/ui/qt/maps/map_eta.cc
@@ -30,12 +30,12 @@ void MapETA::paintEvent(QPaintEvent *event) {
void MapETA::updateETA(float s, float s_typical, float d) {
// ETA
auto eta_t = QDateTime::currentDateTime().addSecs(s).time();
- auto eta = format_24h ? std::array{eta_t.toString("HH:mm"), tr("eta")}
- : std::array{eta_t.toString("h:mm a").split(' ')[0], eta_t.toString("a")};
+ auto eta = format_24h ? std::pair{eta_t.toString("HH:mm"), tr("eta")}
+ : std::pair{eta_t.toString("h:mm a").split(' ')[0], eta_t.toString("a")};
// Remaining time
- auto remaining = s < 3600 ? std::array{QString::number(int(s / 60)), tr("min")}
- : std::array{QString("%1:%2").arg((int)s / 3600).arg(((int)s % 3600) / 60, 2, 10, QLatin1Char('0')), tr("hr")};
+ auto remaining = s < 3600 ? std::pair{QString::number(int(s / 60)), tr("min")}
+ : std::pair{QString("%1:%2").arg((int)s / 3600).arg(((int)s % 3600) / 60, 2, 10, QLatin1Char('0')), tr("hr")};
QString color = "#25DA6E";
if (s / s_typical > 1.5)
color = "#DA3025";
@@ -44,13 +44,13 @@ void MapETA::updateETA(float s, float s_typical, float d) {
// Distance
float num = uiState()->scene.is_metric ? (d / 1000.0) : (d * METER_TO_MILE);
- auto distance = std::array{QString::number(num, 'f', num < 100 ? 1 : 0),
- uiState()->scene.is_metric ? tr("km") : tr("mi")};
+ auto distance = std::pair{QString::number(num, 'f', num < 100 ? 1 : 0),
+ uiState()->scene.is_metric ? tr("km") : tr("mi")};
eta_doc.setHtml(QString(R"(<body><table><tr style="vertical-align:bottom;"><td><b>%1</b></td><td>%2</td>
<td style="padding-left:40px;color:%3;"><b>%4</b></td><td style="padding-right:40px;color:%3;">%5</td>
<td><b>%6</b></td><td>%7</td></tr></body>)")
- .arg(eta[0], eta[1], color, remaining[0], remaining[1], distance[0], distance[1]));
+ .arg(eta.first, eta.second, color, remaining.first, remaining.second, distance.first, distance.second));
setVisible(d >= MANEUVER_TRANSITION_THRESHOLD);
update();
|
split from https://github.com/commaai/openpilot/pull/29151
|
https://api.github.com/repos/commaai/openpilot/pulls/29460
|
2023-08-18T05:30:06Z
|
2023-08-18T05:42:12Z
|
2023-08-18T05:42:12Z
|
2023-08-18T05:42:13Z
| 760
|
commaai/openpilot
| 9,127
|
fix some errors when starting the command on macOS
|
diff --git a/interpreter/terminal_interface/profiles/defaults/local.py b/interpreter/terminal_interface/profiles/defaults/local.py
index 44125a51d..c42584daf 100644
--- a/interpreter/terminal_interface/profiles/defaults/local.py
+++ b/interpreter/terminal_interface/profiles/defaults/local.py
@@ -43,7 +43,7 @@
# Run the new llamafile in the background
if os.path.exists(llamafile_path):
- subprocess.Popen([llamafile_path, "-ngl", "9999"])
+ subprocess.Popen(f'"{llamafile_path}" ' + ' '.join(["-ngl", "9999"]), shell=True)
else:
error_message = "The llamafile does not exist or is corrupted. Please ensure it has been downloaded correctly or try again."
print(error_message)
|
### Describe the changes you have made:
When running a local model on macOS (M2, ARM), an exec format error can occur, so I need to call Popen with shell=True. But when the model path contains a space (as in my case, where my oi_dir is '/Users/lishuo121/Library/Application Support/open-interpreter/models/phi-2.Q5_K_M.llamafile'), a "not found" error occurs, so I made this change to fix the problem.
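A minimal sketch of the quoting this change performs (the path below is illustrative, not taken from the repository):
```python
import subprocess

# Illustrative path containing a space, like the one described above.
llamafile_path = "/Users/example/Library/Application Support/models/phi-2.Q5_K_M.llamafile"

# Without the quotes the shell would split the path at the space and fail
# with a "not found" error; quoting keeps it as a single argument.
cmd = f'"{llamafile_path}" ' + " ".join(["-ngl", "9999"])
subprocess.Popen(cmd, shell=True)
```
Using `shlex.quote(llamafile_path)` instead of manual double quotes would additionally handle paths that themselves contain quote characters.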
### Reference any relevant issues (e.g. "Fixes #000"):
### Pre-Submission Checklist (optional but appreciated):
- [x] I have included relevant documentation updates (stored in /docs)
- [x] I have read `docs/CONTRIBUTING.md`
- [x] I have read `docs/ROADMAP.md`
### OS Tests (optional but appreciated):
- [ ] Tested on Windows
- [x] Tested on MacOS
- [ ] Tested on Linux
|
https://api.github.com/repos/OpenInterpreter/open-interpreter/pulls/1062
|
2024-03-09T09:01:18Z
|
2024-03-11T01:56:39Z
|
2024-03-11T01:56:39Z
|
2024-03-11T01:57:33Z
| 183
|
OpenInterpreter/open-interpreter
| 40,670
|
remove deprecated code from FeedExporter
|
diff --git a/scrapy/contrib/feedexport.py b/scrapy/contrib/feedexport.py
index 92664220ccc..95354d6fe74 100644
--- a/scrapy/contrib/feedexport.py
+++ b/scrapy/contrib/feedexport.py
@@ -151,15 +151,7 @@ def __init__(self, settings):
@classmethod
def from_crawler(cls, crawler):
- if len(get_func_args(cls)) < 1:
- # FIXME: remove for scrapy 0.17
- import warnings
- from scrapy.exceptions import ScrapyDeprecationWarning
- warnings.warn("%s must receive a settings object as first constructor argument." % cls.__name__,
- ScrapyDeprecationWarning, stacklevel=2)
- o = cls()
- else:
- o = cls(crawler.settings)
+ o = cls(crawler.settings)
crawler.signals.connect(o.open_spider, signals.spider_opened)
crawler.signals.connect(o.close_spider, signals.spider_closed)
crawler.signals.connect(o.item_scraped, signals.item_scraped)
|
https://api.github.com/repos/scrapy/scrapy/pulls/1155
|
2015-04-14T15:20:07Z
|
2015-04-14T17:09:10Z
|
2015-04-14T17:09:10Z
|
2015-04-14T17:09:10Z
| 242
|
scrapy/scrapy
| 34,903
|
|
examples: fix typos in generate-csr.sh script comments
|
diff --git a/examples/generate-csr.sh b/examples/generate-csr.sh
index fa9327095d9..c4a3af01628 100755
--- a/examples/generate-csr.sh
+++ b/examples/generate-csr.sh
@@ -1,7 +1,7 @@
#!/bin/sh
# This script generates a simple SAN CSR to be used with Let's Encrypt
-# CA. Mostly intedened for "auth --csr" testing, but, since its easily
-# auditable, feel free to adjust it and use on you production web
+# CA. Mostly intended for "auth --csr" testing, but, since it's easily
+# auditable, feel free to adjust it and use it on your production web
# server.
if [ "$#" -lt 1 ]
|
Minor comment fixups.
|
https://api.github.com/repos/certbot/certbot/pulls/702
|
2015-08-23T14:29:25Z
|
2015-08-24T20:45:35Z
|
2015-08-24T20:45:35Z
|
2018-11-29T15:32:14Z
| 176
|
certbot/certbot
| 939
|
Manual Pengguna -> Panduan Pengguna
|
diff --git a/doc/translations/README-id-ID.md b/doc/translations/README-id-ID.md
index f309c53944e..4f8ec4284b6 100644
--- a/doc/translations/README-id-ID.md
+++ b/doc/translations/README-id-ID.md
@@ -35,7 +35,7 @@ Untuk mendapatkan daftar opsi lanjut gunakan:
python sqlmap.py -hh
Anda dapat mendapatkan contoh penggunaan [di sini](https://asciinema.org/a/46601).
-Untuk mendapatkan gambaran singkat kemampuan sqlmap, daftar fitur yang didukung, deskripsi dari semua opsi, berikut dengan contohnya, Anda disarankan untuk membaca [manual pengguna](https://github.com/sqlmapproject/sqlmap/wiki/Usage).
+Untuk mendapatkan gambaran singkat kemampuan sqlmap, daftar fitur yang didukung, deskripsi dari semua opsi, berikut dengan contohnya, Anda disarankan untuk membaca [Panduan Pengguna](https://github.com/sqlmapproject/sqlmap/wiki/Usage).
Tautan
----
|
User Manual -> Panduan Pengguna
|
https://api.github.com/repos/sqlmapproject/sqlmap/pulls/2537
|
2017-05-18T06:00:36Z
|
2017-05-18T10:13:14Z
|
2017-05-18T10:13:14Z
|
2020-07-23T18:22:35Z
| 261
|
sqlmapproject/sqlmap
| 15,079
|
Add Aqueduct to the list
|
diff --git a/README.md b/README.md
index 11436827..6d3df9eb 100644
--- a/README.md
+++ b/README.md
@@ -1763,6 +1763,7 @@ be
* [Chaos Genius](https://github.com/chaos-genius/chaos_genius/) - ML powered analytics engine for outlier/anomaly detection and root cause analysis.
* [MLEM](https://github.com/iterative/mlem) - Version and deploy your ML models following GitOps principles
* [DockerDL](https://github.com/matifali/dockerdl) - Ready to use deeplearning docker images.
+* [Aqueduct](https://github.com/aqueducthq/aqueduct) - Aqueduct enables you to easily define, run, and manage AI & ML tasks on any cloud infrastructure.
<a name="books"></a>
## Books
|
Aqueduct is an open-source MLOps framework that allows you to write code in vanilla Python, run that code on any cloud infrastructure you'd like to use, and gain visibility into the execution and performance of your models and predictions. @josephmisiti
|
https://api.github.com/repos/josephmisiti/awesome-machine-learning/pulls/930
|
2023-04-15T07:49:34Z
|
2023-05-07T21:59:28Z
|
2023-05-07T21:59:28Z
|
2023-05-07T21:59:28Z
| 196
|
josephmisiti/awesome-machine-learning
| 52,180
|
Clarify Python versions supported by requests
|
diff --git a/docs/index.rst b/docs/index.rst
index 5eb643e128..d1cbc85694 100644
--- a/docs/index.rst
+++ b/docs/index.rst
@@ -87,7 +87,7 @@ Requests is ready for today's web.
- Chunked Requests
- Thread-safety
-Requests supports Python 2.6 — 3.5, and runs great on PyPy.
+Requests officially supports Python 2.6 — 2.7 and 3.3 — 3.5, and runs great on PyPy.
The User Guide
|
Remove some confusion in the Python versions supported by Requests.
|
https://api.github.com/repos/psf/requests/pulls/3480
|
2016-08-09T02:51:05Z
|
2016-08-09T03:00:55Z
|
2016-08-09T03:00:55Z
|
2021-09-08T03:00:53Z
| 132
|
psf/requests
| 32,772
|
Make boulder-start.sh more robust & helpful
|
diff --git a/tests/boulder-start.sh b/tests/boulder-start.sh
index e17716b54f5..0af5dfb97d4 100755
--- a/tests/boulder-start.sh
+++ b/tests/boulder-start.sh
@@ -1,6 +1,23 @@
-#!/bin/sh -xe
+#!/bin/bash
# Download and run Boulder instance for integration testing
+
+# ugh, go version output is like:
+# go version go1.4.2 linux/amd64
+GOVER=`go version | cut -d" " -f3 | cut -do -f2`
+
+# version comparison
+function verlte {
+ [ "$1" = "`echo -e "$1\n$2" | sort -V | head -n1`" ]
+}
+
+if ! verlte 1.5 "$GOVER" ; then
+ echo "We require go version 1.5 or later; you have... $GOVER"
+ exit 1
+fi
+
+set -xe
+
export GOPATH="${GOPATH:-/tmp/go}"
# `/...` avoids `no buildable Go source files` errors, for more info
@@ -8,7 +25,11 @@ export GOPATH="${GOPATH:-/tmp/go}"
go get -d github.com/letsencrypt/boulder/...
cd $GOPATH/src/github.com/letsencrypt/boulder
# goose is needed for ./test/create_db.sh
-go get bitbucket.org/liamstask/goose/cmd/goose
+if ! go get bitbucket.org/liamstask/goose/cmd/goose ; then
+ echo Problems installing goose... perhaps rm -rf \$GOPATH \("$GOPATH"\)
+ echo and try again...
+ exit 1
+fi
./test/create_db.sh
./start.py &
# Hopefully start.py bootstraps before integration test is started...
|
Don't try to run with Go versions that are going to fail, and try to detect broken states (e.g. a stale $GOPATH) if a previous run left one behind.
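For illustration only, the `sort -V`-based version check in the script is roughly equivalent to this Python sketch for plain dot-separated versions (the real check stays in bash):
```python
def version_tuple(version):
    """'1.4.2' -> (1, 4, 2); tuples then compare the way `sort -V` does here."""
    return tuple(int(part) for part in version.split("."))

go_version = "1.4.2"  # e.g. parsed from `go version go1.4.2 linux/amd64`
if version_tuple(go_version) < version_tuple("1.5"):
    raise SystemExit(f"We require go version 1.5 or later; you have... {go_version}")
```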
|
https://api.github.com/repos/certbot/certbot/pulls/780
|
2015-09-17T02:18:34Z
|
2015-09-23T18:18:06Z
|
2015-09-23T18:18:06Z
|
2016-05-06T19:21:52Z
| 415
|
certbot/certbot
| 3,458
|
Add new AWS IPv6 SSRF Endpoint
|
diff --git a/Server Side Request Forgery/README.md b/Server Side Request Forgery/README.md
index db9ad9d93f..3dd0a41f4c 100644
--- a/Server Side Request Forgery/README.md
+++ b/Server Side Request Forgery/README.md
@@ -550,13 +550,15 @@ Example of a PDF attachment using HTML
The AWS Instance Metadata Service is a service available within Amazon EC2 instances that allows those instances to access metadata about themselves. - [Docs](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-metadata.html#instancedata-data-categories)
-* Old endpoint: `http://169.254.169.254/latest/meta-data/`
-* New endpoint requires the header `X-aws-ec2-metadata-token`
+* IPv4 endpoint (old): `http://169.254.169.254/latest/meta-data/`
+* IPv4 endpoint (new) requires the header `X-aws-ec2-metadata-token`
```powershell
export TOKEN=`curl -X PUT -H "X-aws-ec2-metadata-token-ttl-seconds: 21600" "http://169.254.169.254/latest/api/token"`
curl -H "X-aws-ec2-metadata-token:$TOKEN" -v "http://169.254.169.254/latest/meta-data"
```
+* IPv6 endpoint: `http://[fd00:ec2::254]/latest/meta-data/`
+
In case of a WAF, you might want to try different ways to connect to the API.
* DNS record pointing to the AWS API IP
```powershell
@@ -583,6 +585,7 @@ In case of a WAF, you might want to try different ways to connect to the API.
http://[::ffff:a9fe:a9fe] IPV6 Compressed
http://[0:0:0:0:0:ffff:a9fe:a9fe] IPV6 Expanded
http://[0:0:0:0:0:ffff:169.254.169.254] IPV6/IPV4
+ http://[fd00:ec2::254] IPV6
```
|
Documentation: https://docs.aws.amazon.com/AWSEC2/latest/WindowsGuide/instancedata-data-retrieval.html
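For completeness, a hypothetical Python version of the curl commands above, pointed at the new IPv6 endpoint; it only works from an EC2 instance with the IPv6 IMDS endpoint enabled.
```python
import requests

# IMDSv2: fetch a session token first, then use it to read metadata.
token = requests.put(
    "http://[fd00:ec2::254]/latest/api/token",
    headers={"X-aws-ec2-metadata-token-ttl-seconds": "21600"},
    timeout=2,
).text
metadata = requests.get(
    "http://[fd00:ec2::254]/latest/meta-data/",
    headers={"X-aws-ec2-metadata-token": token},
    timeout=2,
)
print(metadata.text)
```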
|
https://api.github.com/repos/swisskyrepo/PayloadsAllTheThings/pulls/651
|
2023-06-28T22:52:09Z
|
2023-06-29T08:59:10Z
|
2023-06-29T08:59:10Z
|
2023-06-29T08:59:11Z
| 486
|
swisskyrepo/PayloadsAllTheThings
| 8,568
|
add parent child relationships in nodes
|
diff --git a/gpt_index/data_structs/node_v2.py b/gpt_index/data_structs/node_v2.py
index d5e78be1da3db..25658bebaacd2 100644
--- a/gpt_index/data_structs/node_v2.py
+++ b/gpt_index/data_structs/node_v2.py
@@ -8,17 +8,16 @@
It is often used as an atomic unit of data in various indices.
"""
+import logging
+import warnings
from dataclasses import dataclass, field
from enum import Enum, auto
-from typing import Any, Dict, Optional
-import warnings
+from typing import Any, Dict, List, Optional
from dataclasses_json import DataClassJsonMixin
from gpt_index.schema import BaseDocument
-import logging
-
_logger = logging.getLogger(__name__)
@@ -29,12 +28,16 @@ class DocumentRelationship(str, Enum):
SOURCE: The node is the source document.
PREVIOUS: The node is the previous node in the document.
NEXT: The node is the next node in the document.
+ PARENT: The node is the parent node in the document.
+ CHILD: The node is a child node in the document.
"""
SOURCE = auto()
PREVIOUS = auto()
NEXT = auto()
+ PARENT = auto()
+ CHILD = auto()
class NodeType(str, Enum):
@@ -51,7 +54,7 @@ class Node(BaseDocument):
text (str): The text of the node.
doc_id (Optional[str]): The document id of the node.
embeddings (Optional[List[float]]): The embeddings of the node.
- relationships (Dict[DocumentRelationship, str]): The relationships of the node.
+ relationships (Dict[DocumentRelationship, Any]): The relationships of the node.
"""
@@ -66,7 +69,7 @@ def __post_init__(self) -> None:
node_info: Optional[Dict[str, Any]] = None
# document relationships
- relationships: Dict[DocumentRelationship, str] = field(default_factory=dict)
+ relationships: Dict[DocumentRelationship, Any] = field(default_factory=dict)
@property
def ref_doc_id(self) -> Optional[str]:
@@ -82,6 +85,8 @@ def prev_node_id(self) -> str:
"""Prev node id."""
if DocumentRelationship.PREVIOUS not in self.relationships:
raise ValueError("Node does not have previous node")
+ if not isinstance(self.relationships[DocumentRelationship.PREVIOUS], str):
+ raise ValueError("Previous node must be a string")
return self.relationships[DocumentRelationship.PREVIOUS]
@property
@@ -89,8 +94,28 @@ def next_node_id(self) -> str:
"""Next node id."""
if DocumentRelationship.NEXT not in self.relationships:
raise ValueError("Node does not have next node")
+ if not isinstance(self.relationships[DocumentRelationship.NEXT], str):
+ raise ValueError("Next node must be a string")
return self.relationships[DocumentRelationship.NEXT]
+ @property
+ def parent_node_id(self) -> str:
+ """Parent node id."""
+ if DocumentRelationship.PARENT not in self.relationships:
+ raise ValueError("Node does not have parent node")
+ if not isinstance(self.relationships[DocumentRelationship.PARENT], str):
+ raise ValueError("Parent node must be a string")
+ return self.relationships[DocumentRelationship.PARENT]
+
+ @property
+ def child_node_ids(self) -> List[str]:
+ """Child node ids."""
+ if DocumentRelationship.CHILD not in self.relationships:
+ raise ValueError("Node does not have child nodes")
+ if not isinstance(self.relationships[DocumentRelationship.CHILD], list):
+ raise ValueError("Child nodes must be a list")
+ return self.relationships[DocumentRelationship.CHILD]
+
def get_text(self) -> str:
"""Get text."""
text = super().get_text()
|
Add parent and child relationships to node v2. Useful for trees such as JSON and YAML.
Is the Union OK (for multiple children), or should children be split out into a separate attribute?
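A rough sketch of how the new relationships could be used; the constructor keywords and node ids below are assumptions based on the dataclass fields shown in the diff.
```python
from gpt_index.data_structs.node_v2 import DocumentRelationship, Node

# Hypothetical ids; PARENT maps to a single id, CHILD maps to a list of ids.
parent = Node(text="root object", doc_id="node-root")
child = Node(
    text='{"key": "value"}',
    doc_id="node-child",
    relationships={DocumentRelationship.PARENT: "node-root"},
)
parent.relationships[DocumentRelationship.CHILD] = ["node-child"]

print(child.parent_node_id)   # "node-root"
print(parent.child_node_ids)  # ["node-child"]
```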
|
https://api.github.com/repos/run-llama/llama_index/pulls/1121
|
2023-04-09T18:19:58Z
|
2023-04-12T02:05:03Z
|
2023-04-12T02:05:03Z
|
2023-04-12T02:05:04Z
| 868
|
run-llama/llama_index
| 6,808
|
Chart: allow using krb5.conf with CeleryExecutor
|
diff --git a/chart/templates/configmaps/configmap.yaml b/chart/templates/configmaps/configmap.yaml
index 29b2a2dffb318..6cf47fb334cf1 100644
--- a/chart/templates/configmaps/configmap.yaml
+++ b/chart/templates/configmaps/configmap.yaml
@@ -63,8 +63,8 @@ data:
{{ tpl (.Files.Get "files/pod-template-file.kubernetes-helm-yaml") . | nindent 4 }}
{{- end }}
{{- end }}
+{{- end }}
{{- if .Values.kerberos.enabled }}
krb5.conf: |
{{ tpl .Values.kerberos.config . | nindent 4 }}
{{- end }}
-{{- end }}
diff --git a/chart/tests/test_configmap.py b/chart/tests/test_configmap.py
index bc0cda9e0017e..562e470b4e269 100644
--- a/chart/tests/test_configmap.py
+++ b/chart/tests/test_configmap.py
@@ -63,3 +63,14 @@ def test_airflow_local_settings(self):
"# Well hello RELEASE-NAME!"
== jmespath.search('data."airflow_local_settings.py"', docs[0]).strip()
)
+
+ def test_kerberos_config_available_with_celery_executor(self):
+ docs = render_chart(
+ values={
+ "executor": "CeleryExecutor",
+ "kerberos": {"enabled": True, "config": "krb5content"},
+ },
+ show_only=["templates/configmaps/configmap.yaml"],
+ )
+
+ assert jmespath.search('data."krb5.conf"', docs[0]) == "\nkrb5content\n"
|
For some reason kerberos can be used only when setting `executor: KubernetesExecutor` or `CeleryKubernetesExecutor` in the helm chart - see https://github.com/apache/airflow/blob/main/chart/templates/configmaps/configmap.yaml#L57 .
This PR allows using kerberos when setting `executor: CeleryExecutor` as well.
|
https://api.github.com/repos/apache/airflow/pulls/16822
|
2021-07-05T16:25:43Z
|
2021-07-06T12:48:57Z
|
2021-07-06T12:48:57Z
|
2021-07-07T17:35:09Z
| 366
|
apache/airflow
| 14,237
|
Fixed #23859 -- Fixed a migration crash when a field is renamed that is part of an index_together
|
diff --git a/django/db/migrations/operations/fields.py b/django/db/migrations/operations/fields.py
index 435d4a0936825..afa3fde818b77 100644
--- a/django/db/migrations/operations/fields.py
+++ b/django/db/migrations/operations/fields.py
@@ -177,13 +177,14 @@ def state_forwards(self, app_label, state):
(self.new_name if n == self.old_name else n, f)
for n, f in state.models[app_label, self.model_name.lower()].fields
]
- # Fix unique_together to refer to the new field
+ # Fix index/unique_together to refer to the new field
options = state.models[app_label, self.model_name.lower()].options
- if "unique_together" in options:
- options['unique_together'] = [
- [self.new_name if n == self.old_name else n for n in unique]
- for unique in options['unique_together']
- ]
+ for option in ('index_together', 'unique_together'):
+ if option in options:
+ options[option] = [
+ [self.new_name if n == self.old_name else n for n in together]
+ for together in options[option]
+ ]
def database_forwards(self, app_label, schema_editor, from_state, to_state):
to_model = to_state.render().get_model(app_label, self.model_name)
diff --git a/docs/releases/1.7.2.txt b/docs/releases/1.7.2.txt
index 976210367d6aa..8420b5b30383c 100644
--- a/docs/releases/1.7.2.txt
+++ b/docs/releases/1.7.2.txt
@@ -59,3 +59,6 @@ Bugfixes
* Fixed a custom field type validation error with MySQL backend when
``db_type`` returned ``None`` (:ticket:`23761`).
+
+* Fixed a migration crash when a field is renamed that is part of an
+ ``index_together`` (:ticket:`23859`).
diff --git a/tests/migrations/test_operations.py b/tests/migrations/test_operations.py
index 6e7e8d2059a84..09dc25fd26cbb 100644
--- a/tests/migrations/test_operations.py
+++ b/tests/migrations/test_operations.py
@@ -49,7 +49,7 @@ def make_test_state(self, app_label, operation, **kwargs):
def set_up_test_model(self, app_label, second_model=False, third_model=False,
related_model=False, mti_model=False, proxy_model=False,
- unique_together=False, options=False, db_table=None):
+ unique_together=False, options=False, db_table=None, index_together=False):
"""
Creates a test model state and database table.
"""
@@ -81,6 +81,7 @@ def set_up_test_model(self, app_label, second_model=False, third_model=False,
# Make the "current" state
model_options = {
"swappable": "TEST_SWAP_MODEL",
+ "index_together": [["pink", "weight"]] if index_together else [],
"unique_together": [["pink", "weight"]] if unique_together else [],
}
if options:
@@ -984,7 +985,7 @@ def test_rename_field(self):
"""
Tests the RenameField operation.
"""
- project_state = self.set_up_test_model("test_rnfl", unique_together=True)
+ project_state = self.set_up_test_model("test_rnfl", unique_together=True, index_together=True)
# Test the state alteration
operation = migrations.RenameField("Pony", "pink", "blue")
self.assertEqual(operation.describe(), "Rename field pink on Pony to blue")
@@ -995,6 +996,9 @@ def test_rename_field(self):
# Make sure the unique_together has the renamed column too
self.assertIn("blue", new_state.models["test_rnfl", "pony"].options['unique_together'][0])
self.assertNotIn("pink", new_state.models["test_rnfl", "pony"].options['unique_together'][0])
+ # Make sure the index_together has the renamed column too
+ self.assertIn("blue", new_state.models["test_rnfl", "pony"].options['index_together'][0])
+ self.assertNotIn("pink", new_state.models["test_rnfl", "pony"].options['index_together'][0])
# Test the database alteration
self.assertColumnExists("test_rnfl_pony", "pink")
self.assertColumnNotExists("test_rnfl_pony", "blue")
|
Ticket: https://code.djangoproject.com/ticket/23859
|
https://api.github.com/repos/django/django/pulls/3563
|
2014-11-17T18:12:54Z
|
2014-11-17T18:52:50Z
|
2014-11-17T18:52:50Z
|
2014-11-17T20:51:46Z
| 1,046
|
django/django
| 51,261
|
added machine learning crash course to courses.md
|
diff --git a/courses.md b/courses.md
index 9700e753..e7dc1b6b 100644
--- a/courses.md
+++ b/courses.md
@@ -20,3 +20,4 @@ The following is a list of free or paid online courses on machine learning, stat
* [Reinforcement Learning](https://www.udacity.com/course/reinforcement-learning--ud600) - free
* [Machine Learning for Trading](https://www.udacity.com/course/machine-learning-for-trading--ud501) - free
* [Mining of Massive Datasets](https://www.youtube.com/watch?v=xoA5v9AO7S0&list=PLLssT5z_DsK9JDLcT8T62VtzwyW9LNepV) (YouTube playlist)- Course [website](http://mmds.org/) has info about accompanying book, free chapters, and Stanford's [MOOC](https://lagunita.stanford.edu/courses/course-v1:ComputerScience+MMDS+SelfPaced/about)
+* [Machine Learning Crash Course (Google)](https://developers.google.com/machine-learning/crash-course/) - free
\ No newline at end of file
|
https://api.github.com/repos/josephmisiti/awesome-machine-learning/pulls/481
|
2018-03-01T02:31:12Z
|
2018-03-07T03:07:45Z
|
2018-03-07T03:07:45Z
|
2018-03-07T03:07:45Z
| 266
|
josephmisiti/awesome-machine-learning
| 52,067
|
|
Update README.rst for fixxing the issue #996
|
diff --git a/README.rst b/README.rst
index 565ba5dee1..7d7036c036 100644
--- a/README.rst
+++ b/README.rst
@@ -128,9 +128,9 @@ and always provides the latest version) is to use `pip`_:
.. code-block:: bash
# Make sure we have an up-to-date version of pip and setuptools:
- $ pip install --upgrade pip setuptools
+ $ python -m pip install --upgrade pip setuptools
- $ pip install --upgrade httpie
+ $ python -m pip install --upgrade httpie
(If ``pip`` installation fails for some reason, you can try
|
fixes: #996
|
https://api.github.com/repos/httpie/cli/pulls/1005
|
2020-12-21T06:42:22Z
|
2020-12-21T10:38:01Z
|
2020-12-21T10:38:01Z
|
2020-12-31T08:23:52Z
| 156
|
httpie/cli
| 34,129
|
Action improvements and python version update.
|
diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml
index ff3983489..97c08e80b 100644
--- a/.github/workflows/codeql-analysis.yml
+++ b/.github/workflows/codeql-analysis.yml
@@ -2,7 +2,7 @@ name: "CodeQL"
on:
push:
- branches: [master, ]
+ branches: [master]
pull_request:
# The branches below must be a subset of the branches above
branches: [master]
@@ -19,7 +19,7 @@ jobs:
uses: actions/checkout@v2
with:
# We must fetch at least the immediate parents so that if this is
- # a pull request then we can checkout the head.
+ # a pull request then we can check out the head.
fetch-depth: 2
# If this run was triggered by a pull request event, then checkout
diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml
index ed784b999..3219a77dc 100644
--- a/.github/workflows/test.yml
+++ b/.github/workflows/test.yml
@@ -18,13 +18,14 @@ jobs:
test:
strategy:
matrix:
- python: ["3.6", "3.7", "3.8", "3.9"]
- runs-on: ubuntu-20.04
+ python: [3.7, 3.8, 3.9, 3.10, 3.11]
+ runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- - uses: actions/setup-python@v2
+ - uses: actions/setup-python@v3
with:
python-version: ${{ matrix.python }}
+ cache: poetry
- name: Setup Graphviz
uses: ts-graphviz/setup-graphviz@v1
- name: Install poetry
|
Removed 3.6 from the Python versions (it fails to build) and added 3.10 and 3.11.
Updated the Python setup action and added poetry caching to make the workflow more efficient.
Now runs on ubuntu-latest.
Small OCD corrections in codeql.
|
https://api.github.com/repos/mingrammer/diagrams/pulls/843
|
2023-01-31T07:19:44Z
|
2023-05-22T23:34:51Z
|
2023-05-22T23:34:51Z
|
2023-05-22T23:34:51Z
| 442
|
mingrammer/diagrams
| 52,585
|
Contribution to MongoDB topic
|
diff --git a/README.md b/README.md
index 8c022ab96..57cf985ed 100644
--- a/README.md
+++ b/README.md
@@ -1259,18 +1259,27 @@ as key-value pair, document-oriented, etc.
<details>
<summary>What is a document? What is a collection?</summary><br><b>
+
+ * A document is a record in MongoDB, which is stored in BSON (Binary JSON) format and is the basic unit of data in MongoDB.
+ * A collection is a group of related documents stored in a single database in MongoDB.
</b></details>
<details>
<summary>What is an aggregator?</summary><br><b>
+
+ * An aggregator is a framework in MongoDB that performs operations on a set of data to return a single computed result.
</b></details>
<details>
<summary>What is better? Embedded documents or referenced?</summary><br><b>
+
+ * There is no definitive answer to which is better, it depends on the specific use case and requirements. Some explainations : Embedded documents provide atomic updates, while referenced documents allow for better normalization.
</b></details>
<details>
<summary>Have you performed data retrieval optimizations in Mongo? If not, can you think about ways to optimize a slow data retrieval?</summary><br><b>
+
+ * Some ways to optimize data retrieval in MongoDB are: indexing, proper schema design, query optimization and database load balancing.
</b></details>
##### Queries
@@ -1285,6 +1294,9 @@ as key-value pair, document-oriented, etc.
<details>
<summary>What is the difference between find() and find_one()?</code></summary><br><b>
+
+ * `find()` returns all documents that match the query conditions.
+ * find_one() returns only one document that matches the query conditions (or null if no match is found).
</b></details>
<details>
@@ -2029,7 +2041,7 @@ This is where data is stored and also where different processing takes place (e.
<details>
<summary>What is a master node?</summary><br><b>
-Par of a master node responsibilites:
+Part of a master node responsibilites:
* Track the status of all the nodes in the cluster
* Verify replicas are working and the data is available from every data node.
* No hot nodes (no data node that works much harder than other nodes)
|
I have answered some of the MongoDB questions.
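As a small illustration of the `find()` vs `find_one()` answer, a hypothetical pymongo snippet (assumes a MongoDB instance on localhost; the database and collection names are made up):
```python
from pymongo import MongoClient

client = MongoClient("mongodb://localhost:27017")
users = client["shop"]["users"]

# find() returns a cursor over every matching document.
for doc in users.find({"country": "DE"}):
    print(doc["_id"])

# find_one() returns a single matching document, or None if nothing matches.
print(users.find_one({"country": "DE"}))
```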
|
https://api.github.com/repos/bregman-arie/devops-exercises/pulls/343
|
2023-01-31T17:33:03Z
|
2023-02-02T10:59:41Z
|
2023-02-02T10:59:41Z
|
2023-02-02T10:59:42Z
| 545
|
bregman-arie/devops-exercises
| 17,427
|
Feat fixllm client kwargs
|
diff --git a/metagpt/provider/fireworks_api.py b/metagpt/provider/fireworks_api.py
index 5fe86fc1c..638b0703d 100644
--- a/metagpt/provider/fireworks_api.py
+++ b/metagpt/provider/fireworks_api.py
@@ -85,10 +85,9 @@ def __init_fireworks(self):
self._init_client()
self.model = self.config.fireworks_api_model # `self.model` should after `_make_client` to rewrite it
- def _make_client_kwargs(self) -> (dict, dict):
+ def _make_client_kwargs(self) -> dict:
kwargs = dict(api_key=self.config.fireworks_api_key, base_url=self.config.fireworks_api_base)
- async_kwargs = kwargs.copy()
- return kwargs, async_kwargs
+ return kwargs
def _update_costs(self, usage: CompletionUsage):
if self.config.calc_usage and usage:
diff --git a/metagpt/provider/open_llm_api.py b/metagpt/provider/open_llm_api.py
index 2893f5b30..976e95c57 100644
--- a/metagpt/provider/open_llm_api.py
+++ b/metagpt/provider/open_llm_api.py
@@ -48,10 +48,9 @@ def __init_openllm(self):
self._init_client()
self.model = self.config.open_llm_api_model # `self.model` should after `_make_client` to rewrite it
- def _make_client_kwargs(self) -> (dict, dict):
+ def _make_client_kwargs(self) -> dict:
kwargs = dict(api_key="sk-xxx", base_url=self.config.open_llm_api_base)
- async_kwargs = kwargs.copy()
- return kwargs, async_kwargs
+ return kwargs
def _calc_usage(self, messages: list[dict], rsp: str) -> CompletionUsage:
usage = CompletionUsage(prompt_tokens=0, completion_tokens=0, total_tokens=0)
|
**Features**
<!-- Clear and direct description of the submit features. -->
<!-- If it's a bug fix, please also paste the issue link. -->
- fix openllm/fireworks client_kwargs after a previous PR deleted the sync client
**Feature Docs**
<!-- The RFC, tutorial, or use cases about the feature if it's a pretty big update. If not, there is no need to fill. -->
**Influence**
<!-- Tell me the impact of the new feature and I'll focus on it. -->
**Result**
<!-- The screenshot/log of unittest/running result -->
**Other**
<!-- Something else about this PR. -->
|
https://api.github.com/repos/geekan/MetaGPT/pulls/640
|
2023-12-28T01:36:39Z
|
2023-12-28T02:48:10Z
|
2023-12-28T02:48:10Z
|
2023-12-28T08:12:04Z
| 430
|
geekan/MetaGPT
| 16,920
|
Revert "Improve SSE User Experience"
|
diff --git a/fastchat/serve/openai_api_server.py b/fastchat/serve/openai_api_server.py
index 6aad721d9d..b83c4152f5 100644
--- a/fastchat/serve/openai_api_server.py
+++ b/fastchat/serve/openai_api_server.py
@@ -22,7 +22,6 @@
from fastapi.security.http import HTTPAuthorizationCredentials, HTTPBearer
import httpx
from pydantic import BaseSettings
-from sse_starlette.sse import EventSourceResponse
import shortuuid
import tiktoken
import uvicorn
@@ -470,7 +469,7 @@ async def create_completion(request: CompletionRequest):
if request.stream:
generator = generate_completion_stream_generator(request, request.n)
- return EventSourceResponse(response_stream, ping=600)
+ return StreamingResponse(generator, media_type="text/event-stream")
else:
text_completions = []
for text in request.prompt:
diff --git a/pyproject.toml b/pyproject.toml
index dc91725d30..4c7ef76d9d 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -16,7 +16,7 @@ dependencies = [
"accelerate", "fastapi", "gradio==3.35.2", "httpx", "markdown2[all]", "nh3", "numpy",
"peft", "prompt_toolkit>=3.0.0", "pydantic", "requests", "rich>=10.0.0", "sentencepiece",
"shortuuid", "shortuuid", "tiktoken", "tokenizers>=0.12.1", "torch",
- "transformers>=4.28.0,<4.29.0", "uvicorn", "wandb", "sse-starlette",
+ "transformers>=4.28.0,<4.29.0", "uvicorn", "wandb",
]
[project.optional-dependencies]
|
Reverts lm-sys/FastChat#1223.
I tested it with https://github.com/lm-sys/FastChat/blob/fcf88ff0259a2a735a52fef091090204b302895a/tests/test_openai_api.py#L1-L3 but met the following errors:
```
Traceback (most recent call last):
File "/home/ubuntu/anaconda3/envs/fastchat/lib/python3.9/site-packages/openai/api_requestor.py", line 335, in handle_error_response
error_data = resp["error"]
TypeError: string indices must be integers
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/home/ubuntu/FastChat/tests/test_openai_api.py", line 107, in <module>
test_completion_stream(model)
File "/home/ubuntu/FastChat/tests/test_openai_api.py", line 30, in test_completion_stream
res = openai.Completion.create(
File "/home/ubuntu/anaconda3/envs/fastchat/lib/python3.9/site-packages/openai/api_resources/completion.py", line 25, in create
return super().create(*args, **kwargs)
File "/home/ubuntu/anaconda3/envs/fastchat/lib/python3.9/site-packages/openai/api_resources/abstract/engine_api_resource.py", line 153, in create
response, _, api_key = requestor.request(
File "/home/ubuntu/anaconda3/envs/fastchat/lib/python3.9/site-packages/openai/api_requestor.py", line 230, in request
resp, got_stream = self._interpret_response(result, stream)
File "/home/ubuntu/anaconda3/envs/fastchat/lib/python3.9/site-packages/openai/api_requestor.py", line 624, in _interpret_response
self._interpret_response_line(
File "/home/ubuntu/anaconda3/envs/fastchat/lib/python3.9/site-packages/openai/api_requestor.py", line 687, in _interpret_response_line
raise self.handle_error_response(
File "/home/ubuntu/anaconda3/envs/fastchat/lib/python3.9/site-packages/openai/api_requestor.py", line 337, in handle_error_response
raise error.APIError(
openai.error.APIError: Invalid response object from API: 'Internal Server Error' (HTTP response code was 500)
```
```
Traceback (most recent call last):
File "/home/ubuntu/anaconda3/envs/fastchat/lib/python3.9/site-packages/openai/api_requestor.py", line 677, in _interpret_response_line
data = json.loads(rbody)
File "/home/ubuntu/anaconda3/envs/fastchat/lib/python3.9/json/__init__.py", line 346, in loads
return _default_decoder.decode(s)
File "/home/ubuntu/anaconda3/envs/fastchat/lib/python3.9/json/decoder.py", line 337, in decode
obj, end = self.raw_decode(s, idx=_w(s, 0).end())
File "/home/ubuntu/anaconda3/envs/fastchat/lib/python3.9/json/decoder.py", line 355, in raw_decode
raise JSONDecodeError("Expecting value", s, err.value) from None
json.decoder.JSONDecodeError: Expecting value: line 1 column 1 (char 0)
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/home/ubuntu/FastChat/tests/test_openai_api.py", line 107, in <module>
test_completion_stream(model)
File "/home/ubuntu/FastChat/tests/test_openai_api.py", line 34, in test_completion_stream
for chunk in res:
File "/home/ubuntu/anaconda3/envs/fastchat/lib/python3.9/site-packages/openai/api_resources/abstract/engine_api_resource.py", line 166, in <genexpr>
return (
File "/home/ubuntu/anaconda3/envs/fastchat/lib/python3.9/site-packages/openai/api_requestor.py", line 617, in <genexpr>
self._interpret_response_line(
File "/home/ubuntu/anaconda3/envs/fastchat/lib/python3.9/site-packages/openai/api_requestor.py", line 679, in _interpret_response_line
raise error.APIError(
openai.error.APIError: HTTP code 200 from API (data: {"id": "cmpl-3VGFL52CCiQUYZycvVdTXv", "object": "text_completion", "model": "vicuna-7b-v1.3", "choices": [{"index": 0, "text": ",", "logprobs": null, "finish_reason": null}]})
```
|
https://api.github.com/repos/lm-sys/FastChat/pulls/1875
|
2023-07-06T11:17:12Z
|
2023-07-06T11:18:11Z
|
2023-07-06T11:18:11Z
|
2023-07-06T11:22:45Z
| 433
|
lm-sys/FastChat
| 41,575
|
fix(website): update diagrams png
|
diff --git a/website/static/img/advanced_web_service_with_on-premise.png b/website/static/img/advanced_web_service_with_on-premise.png
index 83298236c..c2edf4d74 100644
Binary files a/website/static/img/advanced_web_service_with_on-premise.png and b/website/static/img/advanced_web_service_with_on-premise.png differ
diff --git a/website/static/img/advanced_web_service_with_on-premise_colored.png b/website/static/img/advanced_web_service_with_on-premise_colored.png
index f775cd50a..d404d3c29 100644
Binary files a/website/static/img/advanced_web_service_with_on-premise_colored.png and b/website/static/img/advanced_web_service_with_on-premise_colored.png differ
diff --git a/website/static/img/simple_web_service_with_db_cluster_diagram.png b/website/static/img/simple_web_service_with_db_cluster_diagram.png
index 96a74505c..e92681c77 100644
Binary files a/website/static/img/simple_web_service_with_db_cluster_diagram.png and b/website/static/img/simple_web_service_with_db_cluster_diagram.png differ
|
Fix for https://github.com/mingrammer/diagrams/issues/753
@mingrammer
I found 2 other PNGs in https://diagrams.mingrammer.com/docs/getting-started/examples that were not updated; I will add one more commit.
|
https://api.github.com/repos/mingrammer/diagrams/pulls/790
|
2022-11-09T12:57:58Z
|
2023-01-08T07:47:27Z
|
2023-01-08T07:47:27Z
|
2023-01-08T07:47:27Z
| 256
|
mingrammer/diagrams
| 52,621
|
fix bilibili bangumi page
|
diff --git a/src/you_get/extractors/bilibili.py b/src/you_get/extractors/bilibili.py
index 24821d77fc..5ed7f28d05 100644
--- a/src/you_get/extractors/bilibili.py
+++ b/src/you_get/extractors/bilibili.py
@@ -284,12 +284,6 @@ def vc_entry(self, **kwargs):
self.streams['vc']['size'] = int(item['video_size'])
def bangumi_entry(self, **kwargs):
- bangumi_id = re.search(r'(\d+)', self.url).group(1)
- frag = urllib.parse.urlparse(self.url).fragment
- if frag:
- episode_id = frag
- else:
- episode_id = re.search(r'first_ep_id\s*=\s*"(\d+)"', self.page) or re.search(r'\/ep(\d+)', self.url).group(1)
data = json.loads(re.search(r'__INITIAL_STATE__=(.+);\(function', self.page).group(1))
cid = data['epInfo']['cid']
# index_title = data['epInfo']['index_title']
|
delete a useless old regex which sometimes causes an error
~~~
you-get --debug https://www.bilibili.com/bangumi/play/ss25681/?spm_id_from=333.
334.b_62696c695f62616e67756d69.5
......
File "c:\users\fengl\appdata\local\programs\python\python36\lib\site-packages\you_get\extractors\bilibili.py", line 267, in bangumi_entry
episode_id = re.search(r'first_ep_id\s*=\s*"(\d+)"', self.page) or re.search(r'\/ep(\d+)', self.url).group(1)
AttributeError: 'NoneType' object has no attribute 'group'
~~~
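The retained code path simply parses the page's `__INITIAL_STATE__` JSON; a minimal standalone sketch of that approach (the sample HTML string is made up):
```python
import json
import re

page = 'window.__INITIAL_STATE__={"epInfo": {"cid": 12345}};(function(){})();'
data = json.loads(re.search(r'__INITIAL_STATE__=(.+);\(function', page).group(1))
print(data['epInfo']['cid'])  # 12345
```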
|
https://api.github.com/repos/soimort/you-get/pulls/2664
|
2018-12-01T14:49:42Z
|
2018-12-01T20:13:07Z
|
2018-12-01T20:13:07Z
|
2018-12-01T20:13:07Z
| 264
|
soimort/you-get
| 21,015
|
Support GatedRepoError + use raise from
|
diff --git a/src/transformers/utils/hub.py b/src/transformers/utils/hub.py
index ffed74360d052..f4c9e9b3c66eb 100644
--- a/src/transformers/utils/hub.py
+++ b/src/transformers/utils/hub.py
@@ -41,6 +41,7 @@
from huggingface_hub.file_download import REGEX_COMMIT_HASH, http_get
from huggingface_hub.utils import (
EntryNotFoundError,
+ GatedRepoError,
LocalEntryNotFoundError,
RepositoryNotFoundError,
RevisionNotFoundError,
@@ -428,21 +429,26 @@ def cached_file(
use_auth_token=use_auth_token,
local_files_only=local_files_only,
)
-
- except RepositoryNotFoundError:
+ except GatedRepoError as e:
+ raise EnvironmentError(
+ "You are trying to access a gated repo.\nMake sure to request access at "
+ f"https://huggingface.co/{path_or_repo_id} and pass a token having permission to this repo either "
+ "by logging in with `huggingface-cli login` or by passing `token=<your_token>`."
+ ) from e
+ except RepositoryNotFoundError as e:
raise EnvironmentError(
f"{path_or_repo_id} is not a local folder and is not a valid model identifier "
- "listed on 'https://huggingface.co/models'\nIf this is a private repository, make sure to "
- "pass a token having permission to this repo with `use_auth_token` or log in with "
- "`huggingface-cli login` and pass `use_auth_token=True`."
- )
- except RevisionNotFoundError:
+ "listed on 'https://huggingface.co/models'\nIf this is a private repository, make sure to pass a token "
+ "having permission to this repo either by logging in with `huggingface-cli login` or by passing "
+ "`token=<your_token>`"
+ ) from e
+ except RevisionNotFoundError as e:
raise EnvironmentError(
f"{revision} is not a valid git identifier (branch name, tag name or commit id) that exists "
"for this model name. Check the model page at "
f"'https://huggingface.co/{path_or_repo_id}' for available revisions."
- )
- except LocalEntryNotFoundError:
+ ) from e
+ except LocalEntryNotFoundError as e:
# We try to see if we have a cached version (not up to date):
resolved_file = try_to_load_from_cache(path_or_repo_id, full_filename, cache_dir=cache_dir, revision=revision)
if resolved_file is not None and resolved_file != _CACHED_NO_EXIST:
@@ -454,8 +460,8 @@ def cached_file(
f" cached files and it looks like {path_or_repo_id} is not the path to a directory containing a file named"
f" {full_filename}.\nCheckout your internet connection or see how to run the library in offline mode at"
" 'https://huggingface.co/docs/transformers/installation#offline-mode'."
- )
- except EntryNotFoundError:
+ ) from e
+ except EntryNotFoundError as e:
if not _raise_exceptions_for_missing_entries:
return None
if revision is None:
@@ -463,7 +469,7 @@ def cached_file(
raise EnvironmentError(
f"{path_or_repo_id} does not appear to have a file named {full_filename}. Checkout "
f"'https://huggingface.co/{path_or_repo_id}/{revision}' for available files."
- )
+ ) from e
except HTTPError as err:
# First we try to see if we have a cached version (not up to date):
resolved_file = try_to_load_from_cache(path_or_repo_id, full_filename, cache_dir=cache_dir, revision=revision)
@@ -611,6 +617,13 @@ def has_file(
try:
hf_raise_for_status(r)
return True
+ except GatedRepoError as e:
+ logger.error(e)
+ raise EnvironmentError(
+ f"{path_or_repo} is a gated repository. Make sure to request access at "
+ f"https://huggingface.co/{path_or_repo} and pass a token having permission to this repo either by "
+ "logging in with `huggingface-cli login` or by passing `token=<your_token>`."
+ ) from e
except RepositoryNotFoundError as e:
logger.error(e)
raise EnvironmentError(f"{path_or_repo} is not a local folder or a valid repository name on 'https://hf.co'.")
|
(PR started after comment from @osanseviero [on slack](https://huggingface.slack.com/archives/C03V11RNS7P/p1690185858721759?thread_ts=1689956871.406059&cid=C03V11RNS7P) -private link)
This PR adds 2 things:
- raise a more custom error in case of `GatedRepoError` when downloading a file. `GatedRepoError` is a subclass of `RepositoryNotFoundError` in which the repo is actually found but the user doesn't have access to it (the inheritance is there for backward compatibility)
- when raising an EnvironmentError in `utils/hub.py` I think it's best to use Python's `raise ... from ...` syntax. This makes debugging much easier for both users and maintainers.
At the moment `GatedRepoError` is triggered only if a token is passed, but a [PR in moon-landing](https://github.com/huggingface/moon-landing/pull/7106) (private link) is opened to also trigger a gated repo error for unauthenticated users.
**Note:** there might be some tests to adapt and I'm willing to do it once the logic is approved
(EDIT: I just checked and in the lowest version of `huggingface_hub` that is supported (0.14.1), GatedRepoError [already exists](https://github.com/huggingface/huggingface_hub/blob/v0.14.1/src/huggingface_hub/utils/_errors.py#L108) so no import issue to worry about)
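A minimal sketch of the `raise ... from ...` pattern, with placeholder exception and repo names rather than the actual hub code:
```python
class GatedRepoError(Exception):
    pass

def cached_file(repo_id: str) -> str:
    try:
        raise GatedRepoError(f"{repo_id} is gated")  # simulate the failing hub call
    except GatedRepoError as e:
        # `from e` keeps the original traceback chained to the friendlier error,
        # so both users and maintainers can see the underlying cause.
        raise EnvironmentError(
            f"You are trying to access a gated repo. Make sure to request access "
            f"at https://huggingface.co/{repo_id} and pass a token."
        ) from e
```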
|
https://api.github.com/repos/huggingface/transformers/pulls/25034
|
2023-07-24T10:06:26Z
|
2023-07-24T13:12:39Z
|
2023-07-24T13:12:39Z
|
2023-07-24T13:13:10Z
| 1,010
|
huggingface/transformers
| 12,757
|
[3.10] GH-95494: Fix transport EOF handling in OpenSSL 3.0 (GH-95495)
|
diff --git a/Lib/test/test_ssl.py b/Lib/test/test_ssl.py
index addfb6ce43d809..c28a574f8e052d 100644
--- a/Lib/test/test_ssl.py
+++ b/Lib/test/test_ssl.py
@@ -155,7 +155,6 @@ def data_file(*name):
OP_SINGLE_ECDH_USE = getattr(ssl, "OP_SINGLE_ECDH_USE", 0)
OP_CIPHER_SERVER_PREFERENCE = getattr(ssl, "OP_CIPHER_SERVER_PREFERENCE", 0)
OP_ENABLE_MIDDLEBOX_COMPAT = getattr(ssl, "OP_ENABLE_MIDDLEBOX_COMPAT", 0)
-OP_IGNORE_UNEXPECTED_EOF = getattr(ssl, "OP_IGNORE_UNEXPECTED_EOF", 0)
# Ubuntu has patched OpenSSL and changed behavior of security level 2
# see https://bugs.python.org/issue41561#msg389003
@@ -1199,8 +1198,7 @@ def test_options(self):
# SSLContext also enables these by default
default |= (OP_NO_COMPRESSION | OP_CIPHER_SERVER_PREFERENCE |
OP_SINGLE_DH_USE | OP_SINGLE_ECDH_USE |
- OP_ENABLE_MIDDLEBOX_COMPAT |
- OP_IGNORE_UNEXPECTED_EOF)
+ OP_ENABLE_MIDDLEBOX_COMPAT)
self.assertEqual(default, ctx.options)
with warnings_helper.check_warnings():
ctx.options |= ssl.OP_NO_TLSv1
@@ -2362,6 +2360,20 @@ def test_bio_read_write_data(self):
self.assertEqual(buf, b'foo\n')
self.ssl_io_loop(sock, incoming, outgoing, sslobj.unwrap)
+ def test_transport_eof(self):
+ client_context, server_context, hostname = testing_context()
+ with socket.socket(socket.AF_INET) as sock:
+ sock.connect(self.server_addr)
+ incoming = ssl.MemoryBIO()
+ outgoing = ssl.MemoryBIO()
+ sslobj = client_context.wrap_bio(incoming, outgoing,
+ server_hostname=hostname)
+ self.ssl_io_loop(sock, incoming, outgoing, sslobj.do_handshake)
+
+ # Simulate EOF from the transport.
+ incoming.write_eof()
+ self.assertRaises(ssl.SSLEOFError, sslobj.read)
+
@support.requires_resource('network')
class NetworkedTests(unittest.TestCase):
diff --git a/Misc/NEWS.d/next/Library/2022-07-30-23-01-43.gh-issue-95495.RA-q1d.rst b/Misc/NEWS.d/next/Library/2022-07-30-23-01-43.gh-issue-95495.RA-q1d.rst
new file mode 100644
index 00000000000000..d0f4ccbdd3e39f
--- /dev/null
+++ b/Misc/NEWS.d/next/Library/2022-07-30-23-01-43.gh-issue-95495.RA-q1d.rst
@@ -0,0 +1,7 @@
+When built against OpenSSL 3.0, the :mod:`ssl` module had a bug where it
+reported unauthenticated EOFs (i.e. without close_notify) as a clean TLS-level
+EOF. It now raises :exc:`~ssl.SSLEOFError`, matching the behavior in previous
+versions of OpenSSL. The :attr:`~ssl.SSLContext.options` attribute on
+:class:`~ssl.SSLContext` also no longer includes
+:data:`~ssl.OP_IGNORE_UNEXPECTED_EOF` by default. This option may be set to
+specify the previous OpenSSL 3.0 behavior.
diff --git a/Modules/_ssl.c b/Modules/_ssl.c
index 360eb864ad3620..7a28f2d37f6c55 100644
--- a/Modules/_ssl.c
+++ b/Modules/_ssl.c
@@ -671,6 +671,16 @@ PySSL_SetError(PySSLSocket *sslsock, int ret, const char *filename, int lineno)
ERR_GET_REASON(e) == SSL_R_CERTIFICATE_VERIFY_FAILED) {
type = state->PySSLCertVerificationErrorObject;
}
+#if defined(SSL_R_UNEXPECTED_EOF_WHILE_READING)
+ /* OpenSSL 3.0 changed transport EOF from SSL_ERROR_SYSCALL with
+ * zero return value to SSL_ERROR_SSL with a special error code. */
+ if (ERR_GET_LIB(e) == ERR_LIB_SSL &&
+ ERR_GET_REASON(e) == SSL_R_UNEXPECTED_EOF_WHILE_READING) {
+ p = PY_SSL_ERROR_EOF;
+ type = state->PySSLEOFErrorObject;
+ errstr = "EOF occurred in violation of protocol";
+ }
+#endif
break;
}
default:
@@ -3133,10 +3143,6 @@ _ssl__SSLContext_impl(PyTypeObject *type, int proto_version)
#endif
#ifdef SSL_OP_SINGLE_ECDH_USE
options |= SSL_OP_SINGLE_ECDH_USE;
-#endif
-#ifdef SSL_OP_IGNORE_UNEXPECTED_EOF
- /* Make OpenSSL 3.0.0 behave like 1.1.1 */
- options |= SSL_OP_IGNORE_UNEXPECTED_EOF;
#endif
SSL_CTX_set_options(self->ctx, options);
|
GH-25309 enabled SSL_OP_IGNORE_UNEXPECTED_EOF by default, with a comment
that it restores OpenSSL 1.1.1 behavior, but this wasn't quite right.
That option causes OpenSSL to treat transport EOF as the same as
close_notify (i.e. SSL_ERROR_ZERO_RETURN), whereas Python actually has
distinct SSLEOFError and SSLZeroReturnError exceptions. (The latter is
usually mapped to a zero return from read.) In OpenSSL 1.1.1, the ssl
module would raise them for transport EOF and close_notify,
respectively. In OpenSSL 3.0, both act like close_notify.
Fix this by, instead, just detecting SSL_R_UNEXPECTED_EOF_WHILE_READING
and mapping that to the other exception type.
There doesn't seem to have been any unit test of this error, so fill in
the missing one. This had to be done with the BIO path because it's
actually slightly tricky to simulate a transport EOF with Python's fd
based APIs. (If you instruct the server to close the socket, it gets
confused, probably because the server's SSL object is still referencing
the now dead fd?)
(cherry picked from commit 420bbb783b43216cc897dc8914851899db37a31d)
Co-authored-by: David Benjamin <[email protected]>
<!-- gh-issue-number: gh-95494 -->
* Issue: gh-95494
<!-- /gh-issue-number -->
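A rough standalone variation on the new test, assuming a Python build that includes this fix: feeding an EOF into the incoming BIO makes the ssl module report an error (an SSLEOFError on fixed interpreters) instead of a clean TLS-level EOF.
```python
import ssl

ctx = ssl.create_default_context()
incoming, outgoing = ssl.MemoryBIO(), ssl.MemoryBIO()
sslobj = ctx.wrap_bio(incoming, outgoing, server_hostname="example.com")

incoming.write_eof()              # simulate the transport closing abruptly
try:
    sslobj.read()                 # triggers the handshake, which hits the EOF
except ssl.SSLError as exc:       # SSLEOFError on interpreters with this fix
    print(type(exc).__name__, exc)
```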
|
https://api.github.com/repos/python/cpython/pulls/103007
|
2023-03-24T15:19:55Z
|
2023-03-27T14:14:24Z
|
2023-03-27T14:14:24Z
|
2023-03-27T14:14:59Z
| 1,133
|
python/cpython
| 4,762
|
Remove duplicated inference unit tests
|
diff --git a/tests/unit/inference/test_inference.py b/tests/unit/inference/test_inference.py
index c9073e66b67b..775a20cc9fba 100644
--- a/tests/unit/inference/test_inference.py
+++ b/tests/unit/inference/test_inference.py
@@ -90,37 +90,6 @@ def verify_models():
pytest.fail(f"Model(s) do not have an assigned task: {_missing_task_models}")
-# Fixture to add skips for certain configurations
[email protected]()
-def invalid_test(model_w_task, dtype, enable_cuda_graph, enable_triton):
- model, task = model_w_task
- msg = ""
- if enable_cuda_graph and (torch_info["cuda_version"] == "0.0"):
- msg = "CUDA not detected, cannot use CUDA Graph"
- elif enable_cuda_graph and pkg_version.parse(torch.__version__) < pkg_version.parse("1.10"):
- msg = "CUDA Graph is only available in torch versions >= 1.10"
- elif "gpt-j-6b" in model:
- if dtype != torch.half:
- msg = f"Not enough GPU memory to run {model} with dtype {dtype}"
- elif enable_cuda_graph:
- msg = f"Not enough GPU memory to run {model} with CUDA Graph enabled"
- elif "gpt-neox-20b" in model: # TODO: remove this when neox issues resolved
- msg = "Skipping gpt-neox-20b for now"
- elif ("gpt-neox-20b" in model) and (dtype != torch.half):
- msg = f"Not enough GPU memory to run {model} with dtype {dtype}"
- elif ("bloom" in model) and (dtype != torch.half):
- msg = f"Bloom models only support half precision, cannot use dtype {dtype}"
- elif ("bert" not in model.lower()) and enable_cuda_graph:
- msg = "Non bert/roberta models do no support CUDA Graph"
- elif enable_triton and not (dtype in [torch.half]):
- msg = "Triton is for fp16"
- elif enable_triton and not deepspeed.HAS_TRITON:
- msg = "triton needs to be installed for the test"
- elif ("bert" not in model.lower()) and enable_triton:
- msg = "Triton kernels do not support Non bert/roberta models yet"
- return msg
-
-
""" Fixtures for inference config """
@@ -257,6 +226,36 @@ def verify_injection(module):
verify_injection(model)
+# Verify that test is valid
+def validate_test(model_w_task, dtype, enable_cuda_graph, enable_triton):
+ model, task = model_w_task
+ msg = ""
+ if enable_cuda_graph and (torch_info["cuda_version"] == "0.0"):
+ msg = "CUDA not detected, cannot use CUDA Graph"
+ elif enable_cuda_graph and pkg_version.parse(torch.__version__) < pkg_version.parse("1.10"):
+ msg = "CUDA Graph is only available in torch versions >= 1.10"
+ elif "gpt-j-6b" in model:
+ if dtype != torch.half:
+ msg = f"Not enough GPU memory to run {model} with dtype {dtype}"
+ elif enable_cuda_graph:
+ msg = f"Not enough GPU memory to run {model} with CUDA Graph enabled"
+ elif "gpt-neox-20b" in model: # TODO: remove this when neox issues resolved
+ msg = "Skipping gpt-neox-20b for now"
+ elif ("gpt-neox-20b" in model) and (dtype != torch.half):
+ msg = f"Not enough GPU memory to run {model} with dtype {dtype}"
+ elif ("bloom" in model) and (dtype != torch.half):
+ msg = f"Bloom models only support half precision, cannot use dtype {dtype}"
+ elif ("bert" not in model.lower()) and enable_cuda_graph:
+ msg = "Non bert/roberta models do no support CUDA Graph"
+ elif enable_triton and not (dtype in [torch.half]):
+ msg = "Triton is for fp16"
+ elif enable_triton and not deepspeed.HAS_TRITON:
+ msg = "triton needs to be installed for the test"
+ elif ("bert" not in model.lower()) and enable_triton:
+ msg = "Triton kernels do not support Non bert/roberta models yet"
+ return msg
+
+
@pytest.mark.inference
class TestModelTask(DistributedTest):
world_size = 1
@@ -270,11 +269,11 @@ def test(
query,
inf_kwargs,
assert_fn,
- invalid_test,
perf_meas=True,
):
- if invalid_test:
- pytest.skip(invalid_test)
+ invalid_test_msg = validate_test(model_w_task, dtype, enable_cuda_graph, enable_triton)
+ if invalid_test_msg:
+ pytest.skip(invalid_test_msg)
model, task = model_w_task
local_rank = int(os.getenv("LOCAL_RANK", "0"))
@@ -353,10 +352,10 @@ def test(
query,
inf_kwargs,
assert_fn,
- invalid_test,
):
- if invalid_test:
- pytest.skip(invalid_test)
+ invalid_test_msg = validate_test(model_w_task, dtype, enable_cuda_graph=False, enable_triton=False)
+ if invalid_test_msg:
+ pytest.skip(invalid_test_msg)
model, task = model_w_task
local_rank = int(os.getenv("LOCAL_RANK", "0"))
@@ -434,7 +433,6 @@ def test(
ids=["t5", "roberta"],
)
@pytest.mark.parametrize("dtype", [torch.float], ids=["fp32"])
[email protected]("enable_cuda_graph", [False], ids=["noCG"])
class TestInjectionPolicy(DistributedTest):
world_size = [1, 2]
@@ -445,12 +443,11 @@ def test(
query,
inf_kwargs,
assert_fn,
- invalid_test,
dtype,
- enable_cuda_graph,
):
- if invalid_test:
- pytest.skip(invalid_test)
+ invalid_test_msg = validate_test(model_w_task, dtype, enable_cuda_graph=False, enable_triton=False)
+ if invalid_test_msg:
+ pytest.skip(invalid_test_msg)
model, task = model_w_task
local_rank = int(os.getenv("LOCAL_RANK", "0"))
@@ -485,7 +482,6 @@ def test(
],
)
@pytest.mark.parametrize("dtype", [torch.float16], ids=["fp16"])
[email protected]("enable_cuda_graph", [False], ids=["noCG"])
class TestAutoTensorParallelism(DistributedTest):
world_size = [2]
@@ -495,12 +491,11 @@ def test(
query,
inf_kwargs,
assert_fn,
- invalid_test,
dtype,
- enable_cuda_graph,
):
- if invalid_test:
- pytest.skip(invalid_test)
+ invalid_test_msg = validate_test(model_w_task, dtype, enable_cuda_graph=False, enable_triton=False)
+ if invalid_test_msg:
+ pytest.skip(invalid_test_msg)
model, task = model_w_task
local_rank = int(os.getenv("LOCAL_RANK", "0"))
|
In our inference unit tests, we were seeing duplicated tests due to the use of the `invalid_test` fixture. Because this fixture used the `model_w_task`, `dtype`, `enable_cuda_graph`, and `enable_triton` fixtures, any test that used `invalid_test` but did not itself use all of those fixtures would have duplicated tests generated. A short example to show the problem:
```python
@pytest.fixture
def invalid_test(model_w_task, dtype):
pass
@pytest.fixture(params=[("gpt2", "text-generation")], ids=["gpt2"])
def model_w_task(request):
return request.param
@pytest.fixture(params=["fp16", "fp32"])
def dtype(request):
return request.param
class TestExample(DistributedTest):
def test(self, model_w_task):
pass
```
The generated tests would be:
```
test[gpt2-fp16]
test[gpt2-fp32]
```
So even though the test does not use the `dtype` fixture directly, both parametrized tests were still generated.
Resolved that issue by making the invalid-test check a plain function that is called inside the tests.
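A minimal sketch of the fix (simplified; the real tests derive from `DistributedTest` and check more conditions): the validation becomes a plain function called inside the test, so the unused `dtype` parametrization no longer multiplies the generated tests.
```python
import pytest

def validate_test(model_w_task, dtype):
    model, task = model_w_task
    return f"{model} does not support {dtype}" if dtype == "fp32" else ""

@pytest.fixture(params=[("gpt2", "text-generation")], ids=["gpt2"])
def model_w_task(request):
    return request.param

@pytest.fixture(params=["fp16", "fp32"])
def dtype(request):
    return request.param

class TestExample:
    def test(self, model_w_task):                      # only test[gpt2] is generated
        msg = validate_test(model_w_task, dtype="fp16")
        if msg:
            pytest.skip(msg)
```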
|
https://api.github.com/repos/microsoft/DeepSpeed/pulls/3951
|
2023-07-13T17:47:59Z
|
2023-07-18T22:41:24Z
|
2023-07-18T22:41:24Z
|
2023-07-20T21:12:19Z
| 1,652
|
microsoft/DeepSpeed
| 10,010
|
Fix:Kinesis:Timestamp in get records
|
diff --git a/localstack/services/kinesis/kinesis_listener.py b/localstack/services/kinesis/kinesis_listener.py
index 04d90dab6f623..d6196e53b953e 100644
--- a/localstack/services/kinesis/kinesis_listener.py
+++ b/localstack/services/kinesis/kinesis_listener.py
@@ -80,7 +80,6 @@ def return_response(self, method, path, data, headers, response):
action = headers.get('X-Amz-Target')
data = self.decode_content(data or '{}')
response._content = self.replace_in_encoded(response.content or '')
-
records = []
if action in (ACTION_CREATE_STREAM, ACTION_DELETE_STREAM):
event_type = (event_publisher.EVENT_KINESIS_CREATE_STREAM if action == ACTION_CREATE_STREAM
@@ -150,7 +149,7 @@ def return_response(self, method, path, data, headers, response):
for record in records:
if sdk_v2:
- record['ApproximateArrivalTimestamp'] = int(record['ApproximateArrivalTimestamp'] * 1000)
+ record['ApproximateArrivalTimestamp'] = int(record['ApproximateArrivalTimestamp'])
if not isinstance(record['Data'], str):
record['Data'] = base64.encodebytes(bytearray(record['Data']['data']))
diff --git a/tests/integration/test_kinesis.py b/tests/integration/test_kinesis.py
index 0026385eb6e58..c647ffac59755 100644
--- a/tests/integration/test_kinesis.py
+++ b/tests/integration/test_kinesis.py
@@ -1,6 +1,7 @@
from datetime import datetime
import logging
import unittest
+from time import sleep
from localstack.utils.aws import aws_stack
from localstack.utils.common import retry, short_uid
from localstack.utils.kinesis import kinesis_connector
@@ -41,6 +42,30 @@ def assert_consumers(count):
client.deregister_stream_consumer(StreamARN=stream_arn, ConsumerName=consumer_name)
assert_consumers(0)
+ def test_get_records(self):
+ client = aws_stack.connect_to_service('kinesis')
+ stream_name = 'test-%s' % short_uid()
+
+ client.create_stream(StreamName=stream_name, ShardCount=1)
+ sleep(3)
+ client.put_records(StreamName=stream_name, Records=[{'Data': 'SGVsbG8gd29ybGQ=', 'PartitionKey': '1'}])
+
+ response = client.describe_stream(StreamName=stream_name)
+
+ sequence_number = response.get('StreamDescription').get('Shards')[0].get('SequenceNumberRange'). \
+ get('StartingSequenceNumber')
+
+ shard_id = response.get('StreamDescription').get('Shards')[0].get('ShardId')
+
+ response = client.get_shard_iterator(StreamName=stream_name, ShardId=shard_id,
+ ShardIteratorType='AT_SEQUENCE_NUMBER',
+ StartingSequenceNumber=sequence_number)
+
+ response = client.get_records(ShardIterator=response.get('ShardIterator'))
+
+ self.assertEqual(len(response.get('Records')), 1)
+ self.assertIn('Data', response.get('Records')[0])
+
class TestKinesisPythonClient(unittest.TestCase):
|
Fix:Kinesis:Timestamp in get records
https://github.com/localstack/localstack/issues/3488
|
https://api.github.com/repos/localstack/localstack/pulls/3515
|
2021-01-26T16:17:28Z
|
2021-01-29T20:48:25Z
|
2021-01-29T20:48:25Z
|
2021-01-29T20:48:25Z
| 712
|
localstack/localstack
| 28,607
|
Move manual configuration of MQTT lock to the integration key
|
diff --git a/homeassistant/components/mqtt/__init__.py b/homeassistant/components/mqtt/__init__.py
index 02a98f6ce90300..40e4c6105295b6 100644
--- a/homeassistant/components/mqtt/__init__.py
+++ b/homeassistant/components/mqtt/__init__.py
@@ -194,6 +194,7 @@
vol.Optional(Platform.BUTTON.value): cv.ensure_list,
vol.Optional(Platform.FAN.value): cv.ensure_list,
vol.Optional(Platform.LIGHT.value): cv.ensure_list,
+ vol.Optional(Platform.LOCK.value): cv.ensure_list,
}
)
diff --git a/homeassistant/components/mqtt/lock.py b/homeassistant/components/mqtt/lock.py
index 66efe9fece7e38..5dc0a974d26a1f 100644
--- a/homeassistant/components/mqtt/lock.py
+++ b/homeassistant/components/mqtt/lock.py
@@ -1,6 +1,7 @@
"""Support for MQTT locks."""
from __future__ import annotations
+import asyncio
import functools
import voluptuous as vol
@@ -27,8 +28,10 @@
from .mixins import (
MQTT_ENTITY_COMMON_SCHEMA,
MqttEntity,
+ async_get_platform_config_from_yaml,
async_setup_entry_helper,
async_setup_platform_helper,
+ warn_for_legacy_schema,
)
CONF_PAYLOAD_LOCK = "payload_lock"
@@ -53,7 +56,7 @@
}
)
-PLATFORM_SCHEMA = mqtt.MQTT_RW_PLATFORM_SCHEMA.extend(
+PLATFORM_SCHEMA_MODERN = mqtt.MQTT_RW_SCHEMA.extend(
{
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_OPTIMISTIC, default=DEFAULT_OPTIMISTIC): cv.boolean,
@@ -66,7 +69,13 @@
}
).extend(MQTT_ENTITY_COMMON_SCHEMA.schema)
-DISCOVERY_SCHEMA = PLATFORM_SCHEMA.extend({}, extra=vol.REMOVE_EXTRA)
+# Configuring MQTT Locks under the lock platform key is deprecated in HA Core 2022.6
+PLATFORM_SCHEMA = vol.All(
+ cv.PLATFORM_SCHEMA.extend(PLATFORM_SCHEMA_MODERN.schema),
+ warn_for_legacy_schema(lock.DOMAIN),
+)
+
+DISCOVERY_SCHEMA = PLATFORM_SCHEMA_MODERN.extend({}, extra=vol.REMOVE_EXTRA)
async def async_setup_platform(
@@ -75,7 +84,8 @@ async def async_setup_platform(
async_add_entities: AddEntitiesCallback,
discovery_info: DiscoveryInfoType | None = None,
) -> None:
- """Set up MQTT lock panel through configuration.yaml."""
+ """Set up MQTT locks configured under the lock platform key (deprecated)."""
+ # Deprecated in HA Core 2022.6
await async_setup_platform_helper(
hass, lock.DOMAIN, config, async_add_entities, _async_setup_entity
)
@@ -86,7 +96,17 @@ async def async_setup_entry(
config_entry: ConfigEntry,
async_add_entities: AddEntitiesCallback,
) -> None:
- """Set up MQTT lock dynamically through MQTT discovery."""
+ """Set up MQTT lock through configuration.yaml and dynamically through MQTT discovery."""
+ # load and initialize platform config from configuration.yaml
+ await asyncio.gather(
+ *(
+ _async_setup_entity(hass, async_add_entities, config, config_entry)
+ for config in await async_get_platform_config_from_yaml(
+ hass, lock.DOMAIN, PLATFORM_SCHEMA_MODERN
+ )
+ )
+ )
+ # setup for discovery
setup = functools.partial(
_async_setup_entity, hass, async_add_entities, config_entry=config_entry
)
diff --git a/tests/components/mqtt/test_lock.py b/tests/components/mqtt/test_lock.py
index 86e21a261a3327..ef752ef8749a75 100644
--- a/tests/components/mqtt/test_lock.py
+++ b/tests/components/mqtt/test_lock.py
@@ -1,4 +1,5 @@
"""The tests for the MQTT lock platform."""
+import copy
from unittest.mock import patch
import pytest
@@ -44,6 +45,7 @@
help_test_setting_attribute_via_mqtt_json_message,
help_test_setting_attribute_with_template,
help_test_setting_blocked_attribute_via_mqtt_json_message,
+ help_test_setup_manual_entity_from_yaml,
help_test_unique_id,
help_test_update_with_json_attrs_bad_JSON,
help_test_update_with_json_attrs_not_dict,
@@ -675,3 +677,15 @@ async def test_encoding_subscribable_topics(
attribute,
attribute_value,
)
+
+
+async def test_setup_manual_entity_from_yaml(hass, caplog, tmp_path):
+ """Test setup manual configured MQTT entity."""
+ platform = LOCK_DOMAIN
+ config = copy.deepcopy(DEFAULT_CONFIG[platform])
+ config["name"] = "test"
+ del config["platform"]
+ await help_test_setup_manual_entity_from_yaml(
+ hass, caplog, tmp_path, platform, config
+ )
+ assert hass.states.get(f"{platform}.test") is not None
|
<!--
You are amazing! Thanks for contributing to our project!
Please, DO NOT DELETE ANY TEXT from this template! (unless instructed).
-->
## Proposed change
<!--
Describe the big picture of your changes here to communicate to the
maintainers why we should accept this pull request. If it fixes a bug
or resolves a feature request, be sure to link to that issue in the
additional information section.
-->
Move manual configuration of MQTT lock to the integration key
This is an addition to #71676 and adds the lock platform
## Type of change
<!--
What type of change does your PR introduce to Home Assistant?
NOTE: Please, check only 1! box!
If your PR requires multiple boxes to be checked, you'll most likely need to
split it into multiple PRs. This makes things easier and faster to code review.
-->
- [ ] Dependency upgrade
- [ ] Bugfix (non-breaking change which fixes an issue)
- [ ] New integration (thank you!)
- [ ] New feature (which adds functionality to an existing integration)
- [ ] Breaking change (fix/feature causing existing functionality to break)
- [x] Code quality improvements to existing code or addition of tests
## Additional information
<!--
Details are important, and help maintainers processing your PR.
Please be sure to fill out additional details, if applicable.
-->
- This PR fixes or closes issue: fixes #
- This PR is related to issue:
- Link to documentation pull request: https://github.com/home-assistant/home-assistant.io/pull/22830
## Checklist
<!--
Put an `x` in the boxes that apply. You can also fill these out after
creating the PR. If you're unsure about any of them, don't hesitate to ask.
We're here to help! This is simply a reminder of what we are going to look
for before merging your code.
-->
- [x] The code change is tested and works locally.
- [x] Local tests pass. **Your PR cannot be merged unless tests pass**
- [x] There is no commented out code in this PR.
- [x] I have followed the [development checklist][dev-checklist]
- [x] The code has been formatted using Black (`black --fast homeassistant tests`)
- [x] Tests have been added to verify that the new code works.
If user exposed functionality or configuration variables are added/changed:
- [x] Documentation added/updated for [www.home-assistant.io][docs-repository]
If the code communicates with devices, web services, or third-party tools:
- [ ] The [manifest file][manifest-docs] has all fields filled out correctly.
Updated and included derived files by running: `python3 -m script.hassfest`.
- [ ] New or updated dependencies have been added to `requirements_all.txt`.
Updated by running `python3 -m script.gen_requirements_all`.
- [ ] For the updated dependencies - a link to the changelog, or at minimum a diff between library versions is added to the PR description.
- [ ] Untested files have been added to `.coveragerc`.
The integration reached or maintains the following [Integration Quality Scale][quality-scale]:
<!--
The Integration Quality Scale scores an integration on the code quality
and user experience. Each level of the quality scale consists of a list
of requirements. We highly recommend getting your integration scored!
-->
- [ ] No score or internal
- [ ] 🥈 Silver
- [ ] 🥇 Gold
- [ ] 🏆 Platinum
<!--
This project is very active and we have a high turnover of pull requests.
Unfortunately, the number of incoming pull requests is higher than what our
reviewers can review and merge so there is a long backlog of pull requests
waiting for review. You can help here!
By reviewing another pull request, you will help raise the code quality of
that pull request and the final review will be faster. This way the general
pace of pull request reviews will go up and your wait time will go down.
When picking a pull request to review, try to choose one that hasn't yet
been reviewed.
Thanks for helping out!
-->
To help with the load of incoming pull requests:
- [ ] I have reviewed two other [open pull requests][prs] in this repository.
[prs]: https://github.com/home-assistant/core/pulls?q=is%3Aopen+is%3Apr+-author%3A%40me+-draft%3Atrue+-label%3Awaiting-for-upstream+sort%3Acreated-desc+review%3Anone+-status%3Afailure
<!--
Thank you for contributing <3
Below, some useful links you could explore:
-->
[dev-checklist]: https://developers.home-assistant.io/docs/en/development_checklist.html
[manifest-docs]: https://developers.home-assistant.io/docs/en/creating_integration_manifest.html
[quality-scale]: https://developers.home-assistant.io/docs/en/next/integration_quality_scale_index.html
[docs-repository]: https://github.com/home-assistant/home-assistant.io
|
https://api.github.com/repos/home-assistant/core/pulls/72271
|
2022-05-21T12:17:43Z
|
2022-05-21T17:07:26Z
|
2022-05-21T17:07:26Z
|
2022-05-22T19:01:43Z
| 1,096
|
home-assistant/core
| 38,723
|
Update issue templates
|
diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md
new file mode 100644
index 0000000000..01b6fb85e2
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/bug_report.md
@@ -0,0 +1,19 @@
+---
+name: Bug report
+about: Create a report to help us improve
+title: ''
+labels: kind/triage
+assignees: ''
+
+---
+
+#### Problem Description
+A clear and concise description of what the bug is.
+
+#### Steps to reproduce the behavior:
+1.
+2.
+3.
+
+#### System Information
+Paste the output of "mitmproxy --version" here.
diff --git a/.github/ISSUE_TEMPLATE/feature_request.md b/.github/ISSUE_TEMPLATE/feature_request.md
new file mode 100644
index 0000000000..8e8080dbfc
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/feature_request.md
@@ -0,0 +1,20 @@
+---
+name: Feature request
+about: Suggest an idea for this project
+title: ''
+labels: kind/feature
+assignees: ''
+
+---
+
+**Is your feature request related to a problem? Please describe.**
+A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]
+
+#### Describe the solution you'd like
+A clear and concise description of what you want to happen.
+
+#### Describe alternatives you've considered
+A clear and concise description of any alternative solutions or features you've considered.
+
+#### Additional context
+Add any other context or screenshots about the feature request here.
|
GitHub has new issue templates; here's a first stab at them for us.
@Kriechi, you tend to have opinions on these kinds of things? 😉
|
https://api.github.com/repos/mitmproxy/mitmproxy/pulls/3686
|
2019-11-04T14:43:40Z
|
2019-11-07T19:20:39Z
|
2019-11-07T19:20:39Z
|
2019-11-07T19:20:45Z
| 387
|
mitmproxy/mitmproxy
| 27,999
|
Adding Flask-Misaka to the extensions page
|
diff --git a/flask_website/listings/extensions.py b/flask_website/listings/extensions.py
index f6f02ad342..91f9781d4a 100644
--- a/flask_website/listings/extensions.py
+++ b/flask_website/listings/extensions.py
@@ -524,7 +524,17 @@ def docserver(self):
github='aromanovich/flask-webtest',
docs='http://flask-webtest.readthedocs.org/',
approved=False
- )
+ ),
+ Extension('Flask-Misaka', 'David Baumgold',
+ description='''
+ A simple extension to integrate the
+ <a href="http://misaka.61924.nl/">Misaka</a> module for efficiently
+ parsing Markdown.
+ ''',
+ docs='https://flask-misaka.readthedocs.org/en/latest/',
+ github='singingwolfboy/flask-misaka',
+ approved=True,
+ ),
]
|
I'd like to submit the Flask extension I wrote, [Flask-Misaka](https://github.com/singingwolfboy/flask-misaka), for review for inclusion on the extensions registry page.
|
https://api.github.com/repos/pallets/flask/pulls/851
|
2013-08-29T04:13:40Z
|
2014-03-21T20:53:01Z
|
2014-03-21T20:53:01Z
|
2020-11-14T06:52:44Z
| 214
|
pallets/flask
| 20,299
|
feat(starfish): Add the delta columns to WSV
|
diff --git a/static/app/views/starfish/utils/generatePerformanceEventView.tsx b/static/app/views/starfish/utils/generatePerformanceEventView.tsx
index 4b096598672ad..3970852fceb64 100644
--- a/static/app/views/starfish/utils/generatePerformanceEventView.tsx
+++ b/static/app/views/starfish/utils/generatePerformanceEventView.tsx
@@ -125,8 +125,11 @@ export function generateWebServiceEventView(
'transaction',
'http.method',
'tps()',
+ 'tps_percent_change()',
'p95(transaction.duration)',
+ 'percentile_percent_change(transaction.duration,0.95)',
'http_error_count()',
+ 'http_error_count_percent_change()',
'time_spent_percentage()',
'sum(transaction.duration)',
];
diff --git a/static/app/views/starfish/views/webServiceView/endpointList.tsx b/static/app/views/starfish/views/webServiceView/endpointList.tsx
index 71854f2296244..ffb9e0cd1e97e 100644
--- a/static/app/views/starfish/views/webServiceView/endpointList.tsx
+++ b/static/app/views/starfish/views/webServiceView/endpointList.tsx
@@ -33,10 +33,13 @@ import {DataTitles} from 'sentry/views/starfish/views/spans/types';
import {EndpointDataRow} from 'sentry/views/starfish/views/webServiceView/endpointDetails';
const COLUMN_TITLES = [
- 'Endpoint',
+ t('Endpoint'),
DataTitles.throughput,
+ t('Change'),
DataTitles.p95,
+ t('Change'),
DataTitles.errorCount,
+ t('Change'),
DataTitles.timeSpent,
];
@@ -131,21 +134,38 @@ function EndpointList({eventView, location, organization, setError}: Props) {
}
if (
- field.startsWith(
- 'equation|(percentile_range(transaction.duration,0.95,lessOrEquals,'
- )
+ [
+ 'percentile_percent_change(transaction.duration,0.95)',
+ 'http_error_count_percent_change()',
+ ].includes(field)
) {
const deltaValue = dataRow[field] as number;
const trendDirection = deltaValue < 0 ? 'good' : deltaValue > 0 ? 'bad' : 'neutral';
return (
<NumberContainer>
- <TrendingDuration trendDirection={trendDirection}>
+ <PercentChangeCell trendDirection={trendDirection}>
+ {tct('[sign][delta]', {
+ sign: deltaValue >= 0 ? '+' : '-',
+ delta: formatPercentage(Math.abs(deltaValue), 2),
+ })}
+ </PercentChangeCell>
+ </NumberContainer>
+ );
+ }
+
+ if (field === 'tps_percent_change()') {
+ const deltaValue = dataRow[field] as number;
+ const trendDirection = deltaValue > 0 ? 'good' : deltaValue < 0 ? 'bad' : 'neutral';
+
+ return (
+ <NumberContainer>
+ <PercentChangeCell trendDirection={trendDirection}>
{tct('[sign][delta]', {
sign: deltaValue >= 0 ? '+' : '-',
delta: formatPercentage(Math.abs(deltaValue), 2),
})}
- </TrendingDuration>
+ </PercentChangeCell>
</NumberContainer>
);
}
@@ -319,7 +339,9 @@ function EndpointList({eventView, location, organization, setError}: Props) {
export default EndpointList;
-const TrendingDuration = styled('div')<{trendDirection: 'good' | 'bad' | 'neutral'}>`
+export const PercentChangeCell = styled('div')<{
+ trendDirection: 'good' | 'bad' | 'neutral';
+}>`
color: ${p =>
p.trendDirection === 'good'
? p.theme.successText
diff --git a/static/app/views/starfish/views/webServiceView/failureDetailPanel/failureDetailTable.tsx b/static/app/views/starfish/views/webServiceView/failureDetailPanel/failureDetailTable.tsx
index 58cdca9558c3c..401f83b37eaa8 100644
--- a/static/app/views/starfish/views/webServiceView/failureDetailPanel/failureDetailTable.tsx
+++ b/static/app/views/starfish/views/webServiceView/failureDetailPanel/failureDetailTable.tsx
@@ -7,13 +7,16 @@ import GridEditable, {GridColumnHeader} from 'sentry/components/gridEditable';
import {Alignments} from 'sentry/components/gridEditable/sortLink';
import Link from 'sentry/components/links/link';
import Pagination from 'sentry/components/pagination';
-import {t} from 'sentry/locale';
+import {t, tct} from 'sentry/locale';
import {Organization} from 'sentry/types';
import {TableData, TableDataRow} from 'sentry/utils/discover/discoverQuery';
import EventView from 'sentry/utils/discover/eventView';
import {getFieldRenderer} from 'sentry/utils/discover/fieldRenderers';
+import {NumberContainer} from 'sentry/utils/discover/styles';
+import {formatPercentage} from 'sentry/utils/formatters';
import {TableColumn} from 'sentry/views/discover/table/types';
import {EndpointDataRow} from 'sentry/views/starfish/views/endpointDetails';
+import {PercentChangeCell} from 'sentry/views/starfish/views/webServiceView/endpointList';
import {FailureSpike} from 'sentry/views/starfish/views/webServiceView/types';
type Props = {
@@ -30,12 +33,17 @@ const COLUMN_ORDER = [
{
key: 'transaction',
name: t('Endpoint'),
- width: 400,
+ width: 450,
},
{
key: 'http_error_count()',
name: t('5xx Responses'),
- width: 100,
+ width: 150,
+ },
+ {
+ key: 'http_error_count_percent_change()',
+ name: t('Change'),
+ width: 80,
},
];
@@ -67,7 +75,7 @@ export default function FailureDetailTable({
const fieldRenderer = getFieldRenderer(field, tableData.meta, false);
const rendered = fieldRenderer(dataRow, {organization, location});
- if (column.key === 'transaction') {
+ if (field === 'transaction') {
const prefix = dataRow['http.method'] ? `${dataRow['http.method']} ` : '';
const queryParams = {
start: spike ? new Date(spike.startTimestamp) : undefined,
@@ -82,6 +90,22 @@ export default function FailureDetailTable({
);
}
+ if (field === 'http_error_count_percent_change()') {
+ const deltaValue = dataRow[field] as number;
+ const trendDirection = deltaValue < 0 ? 'good' : deltaValue > 0 ? 'bad' : 'neutral';
+
+ return (
+ <NumberContainer>
+ <PercentChangeCell trendDirection={trendDirection}>
+ {tct('[sign][delta]', {
+ sign: deltaValue >= 0 ? '+' : '-',
+ delta: formatPercentage(Math.abs(deltaValue), 2),
+ })}
+ </PercentChangeCell>
+ </NumberContainer>
+ );
+ }
+
return rendered;
}
|
Also adds it to the 500 responses slide out panel
Requires https://github.com/getsentry/sentry/pull/50737 to be deployed (it's deployed)
Note: Noticing some funky behaviour with the http error count change; will dig into that in a follow-up.
|
https://api.github.com/repos/getsentry/sentry/pulls/50739
|
2023-06-12T15:17:08Z
|
2023-06-12T16:47:50Z
|
2023-06-12T16:47:50Z
|
2024-03-15T21:08:02Z
| 1,628
|
getsentry/sentry
| 43,801
|
Add release video as blog post
|
diff --git a/docs/blog/2023-04-10-open-assistant-livestream-just-chatting/index.mdx b/docs/blog/2023-04-10-open-assistant-livestream-just-chatting/index.mdx
new file mode 100644
index 0000000000..3bc051099d
--- /dev/null
+++ b/docs/blog/2023-04-10-open-assistant-livestream-just-chatting/index.mdx
@@ -0,0 +1,21 @@
+---
+title: AI Alignment Livestream (aka OpenAssistant "Just Chatting")
+description: AI Alignment Livestream (aka OpenAssistant "Just Chatting")
+authors: [yk]
+tags: [open-assistant, youtube]
+image: https://img.youtube.com/vi/5IymlBZDw-0/0.jpg
+---
+
+import ReactPlayer from "react-player";
+
+Livestream playing around with Open Assistant and AI allignement :)
+
+https://open-assistant.io/chat
+
+<ReactPlayer
+ controls
+ width="100%"
+ url="https://www.youtube.com/embed/5IymlBZDw-0"
+/>
+
+<!--truncate-->
diff --git a/docs/blog/2023-04-15-open-assistant-released/index.mdx b/docs/blog/2023-04-15-open-assistant-released/index.mdx
new file mode 100644
index 0000000000..096304638e
--- /dev/null
+++ b/docs/blog/2023-04-15-open-assistant-released/index.mdx
@@ -0,0 +1,42 @@
+---
+title: OpenAssistant RELEASED! The world's best open-source Chat AI!
+description: OpenAssistant RELEASED! The world's best open-source Chat AI!
+authors: [yk]
+tags: [open-assistant, youtube]
+image: https://img.youtube.com/vi/ddG2fM9i4Kk/0.jpg
+---
+
+import ReactPlayer from "react-player";
+
+We're excited to announce the release of OpenAssistant!
+
+The future of AI development depends heavily on high quality datasets and models
+being made publicly available, and that's exactly what this project does.
+
+Our team has worked tirelessly over the past several months collecting large
+amounts of text-based input and feedback to create an incredibly diverse and
+unique dataset designed specifically for training language models or other AI
+applications.
+
+With over 600k human-generated data points covering a wide range of topics and
+styles of writing, our dataset will be an invaluable tool for any developer
+looking to create state-of-the-art instruction models!
+
+To make things even better, we are making this entire dataset free and
+accessible to all who wish to use it. Check it out today at our HF org:
+[OpenAssistant](https://huggingface.co/OpenAssistant)
+
+On top of that, we've trained very powerful models that you can try right now
+at:
+
+[open-assistant.io/chat](https://open-assistant.io/chat)
+
+Watch the annoucement video:
+
+<ReactPlayer
+ controls
+ width="100%"
+ url="https://www.youtube.com/embed/ddG2fM9i4Kk"
+/>
+
+<!--truncate-->
|
- add release blog post
- add livestreaming chatting blog post
|
https://api.github.com/repos/LAION-AI/Open-Assistant/pulls/2546
|
2023-04-15T17:34:01Z
|
2023-04-15T20:42:04Z
|
2023-04-15T20:42:04Z
|
2023-04-15T21:01:00Z
| 733
|
LAION-AI/Open-Assistant
| 37,267
|
[Instagram] Improve thumbnail extraction
|
diff --git a/yt_dlp/extractor/instagram.py b/yt_dlp/extractor/instagram.py
index 6ed20d9c6d3..c96ba9f18de 100644
--- a/yt_dlp/extractor/instagram.py
+++ b/yt_dlp/extractor/instagram.py
@@ -234,7 +234,9 @@ def _real_extract(self, url):
media, lambda x: x['edge_media_to_caption']['edges'][0]['node']['text'],
compat_str) or media.get('caption')
title = media.get('title')
- thumbnail = media.get('display_src') or media.get('display_url')
+ display_resources = media.get('display_resources')
+ if not display_resources:
+ display_resources = [{'src': media.get('display_src')}, {'src': media.get('display_url')}]
duration = float_or_none(media.get('video_duration'))
timestamp = int_or_none(media.get('taken_at_timestamp') or media.get('date'))
uploader = try_get(media, lambda x: x['owner']['full_name'])
@@ -252,6 +254,12 @@ def get_count(keys, kind):
comment_count = get_count(
('preview_comment', 'to_comment', 'to_parent_comment'), 'comment')
+ thumbnails = [{
+ 'url': thumbnail['src'],
+ 'width': thumbnail.get('config_width'),
+ 'height': thumbnail.get('config_height'),
+ } for thumbnail in display_resources if thumbnail.get('src')]
+
comments = []
for comment in try_get(media, lambda x: x['edge_media_to_parent_comment']['edges']):
comment_dict = comment.get('node', {})
@@ -326,7 +334,7 @@ def get_count(keys, kind):
'title': title or 'Video by %s' % uploader_id,
'description': description,
'duration': duration,
- 'thumbnail': thumbnail,
+ 'thumbnails': thumbnails,
'timestamp': timestamp,
'uploader_id': uploader_id,
'uploader': uploader,
|
## Please follow the guide below
- You will be asked some questions, please read them **carefully** and answer honestly
- Put an `x` into all the boxes [ ] relevant to your *pull request* (like that [x])
- Use *Preview* tab to see how your *pull request* will actually look like
---
### Before submitting a *pull request* make sure you have:
- [x] At least skimmed through [contributing guidelines](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#developer-instructions) including [yt-dlp coding conventions](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#yt-dlp-coding-conventions)
- [x] [Searched](https://github.com/yt-dlp/yt-dlp/search?q=is%3Apr&type=Issues) the bugtracker for similar pull requests
- [x] Checked the code with [flake8](https://pypi.python.org/pypi/flake8)
### In order to be accepted and merged into yt-dlp each piece of code must be in public domain or released under [Unlicense](http://unlicense.org/). Check one of the following options:
- [x] I am the original author of this code and I am willing to release it under [Unlicense](http://unlicense.org/)
- [ ] I am not the original author of this code but it is in public domain or released under [Unlicense](http://unlicense.org/) (provide reliable evidence)
### What is the purpose of your *pull request*?
- [ ] Bug fix
- [x] Improvement
- [ ] New extractor
- [ ] New feature
---
### Description of your *pull request* and other information
Explanation of your *pull request* in arbitrary form goes here. Please make sure the description explains the purpose and effect of your *pull request* and is worded well enough to be understood. Provide as much context and examples as possible.
|
https://api.github.com/repos/yt-dlp/yt-dlp/pulls/1496
|
2021-10-31T12:13:57Z
|
2021-11-04T03:22:10Z
|
2021-11-04T03:22:10Z
|
2021-11-04T03:22:10Z
| 448
|
yt-dlp/yt-dlp
| 7,609
|
[MRG][DOC] Add thanks for infrastructure supporters
|
diff --git a/doc/about.rst b/doc/about.rst
index 2c02bf82d371e..fb95db2c81fb6 100644
--- a/doc/about.rst
+++ b/doc/about.rst
@@ -31,6 +31,7 @@ citations to the following paper:
year={2011}
}
+
Funding
-------
@@ -61,6 +62,7 @@ The `PSF <http://www.python.org/psf/>`_ helped find and manage funding for our
`tinyclues <http://www.tinyclues.com/>`_ funded the 2011 international Granada
sprint.
+
Donating to the project
~~~~~~~~~~~~~~~~~~~~~~~
@@ -101,11 +103,8 @@ for code sprints, as well as towards the organization budget of the project [#f1
.. [#f1] Regarding the organization budget in particular, we might use some of the donated funds to pay for other project expenses such as DNS, hosting or continuous integration services.
-
-
-
The 2013' Paris international sprint
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|center-div| |telecom| |tinyclues| |afpy| |FNRS|
@@ -152,3 +151,18 @@ The 2013' Paris international sprint
</div>
*For more information on this sprint, see* `here <https://github.com/scikit-learn/administrative/blob/master/sprint_paris_2013/proposal.rst>`_
+
+
+Infrastructure support
+----------------------
+
+- We would like to thank `Rackspace <http://www.rackspace.com>`_ for providing
+ us with a free `Rackspace Cloud <http://www.rackspace.com/cloud/>`_ account to
+ automatically build the documentation and the example gallery from for the
+ development version of scikit-learn using `this tool
+ <https://github.com/scikit-learn/sklearn-docbuilder>`_.
+
+- We would also like to thank `Shining Panda
+ <https://www.shiningpanda-ci.com/>`_ for free CPU time on their Continuous
+ Integration server.
+
|
https://api.github.com/repos/scikit-learn/scikit-learn/pulls/2761
|
2014-01-16T23:20:00Z
|
2014-01-17T01:30:54Z
|
2014-01-17T01:30:54Z
|
2014-06-13T11:56:41Z
| 480
|
scikit-learn/scikit-learn
| 46,426
|
|
[wrzuta.pl] Remove extractor (closes #20684)
|
diff --git a/youtube_dl/extractor/extractors.py b/youtube_dl/extractor/extractors.py
index 0e3ccb82d1b..676ad3f7d17 100644
--- a/youtube_dl/extractor/extractors.py
+++ b/youtube_dl/extractor/extractors.py
@@ -1418,10 +1418,6 @@
from .wimp import WimpIE
from .wistia import WistiaIE
from .worldstarhiphop import WorldStarHipHopIE
-from .wrzuta import (
- WrzutaIE,
- WrzutaPlaylistIE,
-)
from .wsj import (
WSJIE,
WSJArticleIE,
diff --git a/youtube_dl/extractor/wrzuta.py b/youtube_dl/extractor/wrzuta.py
deleted file mode 100644
index 0f53f1bcb85..00000000000
--- a/youtube_dl/extractor/wrzuta.py
+++ /dev/null
@@ -1,158 +0,0 @@
-# coding: utf-8
-from __future__ import unicode_literals
-
-import re
-
-from .common import InfoExtractor
-from ..utils import (
- ExtractorError,
- int_or_none,
- qualities,
- remove_start,
-)
-
-
-class WrzutaIE(InfoExtractor):
- IE_NAME = 'wrzuta.pl'
-
- _VALID_URL = r'https?://(?P<uploader>[0-9a-zA-Z]+)\.wrzuta\.pl/(?P<typ>film|audio)/(?P<id>[0-9a-zA-Z]+)'
-
- _TESTS = [{
- 'url': 'http://laboratoriumdextera.wrzuta.pl/film/aq4hIZWrkBu/nike_football_the_last_game',
- 'md5': '9e67e05bed7c03b82488d87233a9efe7',
- 'info_dict': {
- 'id': 'aq4hIZWrkBu',
- 'ext': 'mp4',
- 'title': 'Nike Football: The Last Game',
- 'duration': 307,
- 'uploader_id': 'laboratoriumdextera',
- 'description': 'md5:7fb5ef3c21c5893375fda51d9b15d9cd',
- },
- 'skip': 'Redirected to wrzuta.pl',
- }, {
- 'url': 'http://vexling.wrzuta.pl/audio/01xBFabGXu6/james_horner_-_into_the_na_39_vi_world_bonus',
- 'md5': 'f80564fb5a2ec6ec59705ae2bf2ba56d',
- 'info_dict': {
- 'id': '01xBFabGXu6',
- 'ext': 'mp3',
- 'title': 'James Horner - Into The Na\'vi World [Bonus]',
- 'description': 'md5:30a70718b2cd9df3120fce4445b0263b',
- 'duration': 95,
- 'uploader_id': 'vexling',
- },
- }]
-
- def _real_extract(self, url):
- mobj = re.match(self._VALID_URL, url)
- video_id = mobj.group('id')
- typ = mobj.group('typ')
- uploader = mobj.group('uploader')
-
- webpage, urlh = self._download_webpage_handle(url, video_id)
-
- if urlh.geturl() == 'http://www.wrzuta.pl/':
- raise ExtractorError('Video removed', expected=True)
-
- quality = qualities(['SD', 'MQ', 'HQ', 'HD'])
-
- audio_table = {'flv': 'mp3', 'webm': 'ogg', '???': 'mp3'}
-
- embedpage = self._download_json('http://www.wrzuta.pl/npp/embed/%s/%s' % (uploader, video_id), video_id)
-
- formats = []
- for media in embedpage['url']:
- fmt = media['type'].split('@')[0]
- if typ == 'audio':
- ext = audio_table.get(fmt, fmt)
- else:
- ext = fmt
-
- formats.append({
- 'format_id': '%s_%s' % (ext, media['quality'].lower()),
- 'url': media['url'],
- 'ext': ext,
- 'quality': quality(media['quality']),
- })
-
- self._sort_formats(formats)
-
- return {
- 'id': video_id,
- 'title': self._og_search_title(webpage),
- 'thumbnail': self._og_search_thumbnail(webpage),
- 'formats': formats,
- 'duration': int_or_none(embedpage['duration']),
- 'uploader_id': uploader,
- 'description': self._og_search_description(webpage),
- 'age_limit': embedpage.get('minimalAge', 0),
- }
-
-
-class WrzutaPlaylistIE(InfoExtractor):
- """
- this class covers extraction of wrzuta playlist entries
- the extraction process bases on following steps:
- * collect information of playlist size
- * download all entries provided on
- the playlist webpage (the playlist is split
- on two pages: first directly reached from webpage
- second: downloaded on demand by ajax call and rendered
- using the ajax call response)
- * in case size of extracted entries not reached total number of entries
- use the ajax call to collect the remaining entries
- """
-
- IE_NAME = 'wrzuta.pl:playlist'
- _VALID_URL = r'https?://(?P<uploader>[0-9a-zA-Z]+)\.wrzuta\.pl/playlista/(?P<id>[0-9a-zA-Z]+)'
- _TESTS = [{
- 'url': 'http://miromak71.wrzuta.pl/playlista/7XfO4vE84iR/moja_muza',
- 'playlist_mincount': 14,
- 'info_dict': {
- 'id': '7XfO4vE84iR',
- 'title': 'Moja muza',
- },
- }, {
- 'url': 'http://heroesf70.wrzuta.pl/playlista/6Nj3wQHx756/lipiec_-_lato_2015_muzyka_swiata',
- 'playlist_mincount': 144,
- 'info_dict': {
- 'id': '6Nj3wQHx756',
- 'title': 'Lipiec - Lato 2015 Muzyka Świata',
- },
- }, {
- 'url': 'http://miromak71.wrzuta.pl/playlista/7XfO4vE84iR',
- 'only_matching': True,
- }]
-
- def _real_extract(self, url):
- mobj = re.match(self._VALID_URL, url)
- playlist_id = mobj.group('id')
- uploader = mobj.group('uploader')
-
- webpage = self._download_webpage(url, playlist_id)
-
- playlist_size = int_or_none(self._html_search_regex(
- (r'<div[^>]+class=["\']playlist-counter["\'][^>]*>\d+/(\d+)',
- r'<div[^>]+class=["\']all-counter["\'][^>]*>(.+?)</div>'),
- webpage, 'playlist size', default=None))
-
- playlist_title = remove_start(
- self._og_search_title(webpage), 'Playlista: ')
-
- entries = []
- if playlist_size:
- entries = [
- self.url_result(entry_url)
- for _, entry_url in re.findall(
- r'<a[^>]+href=(["\'])(http.+?)\1[^>]+class=["\']playlist-file-page',
- webpage)]
- if playlist_size > len(entries):
- playlist_content = self._download_json(
- 'http://%s.wrzuta.pl/xhr/get_playlist_offset/%s' % (uploader, playlist_id),
- playlist_id,
- 'Downloading playlist JSON',
- 'Unable to download playlist JSON')
- entries.extend([
- self.url_result(entry['filelink'])
- for entry in playlist_content.get('files', []) if entry.get('filelink')])
-
- return self.playlist_result(entries, playlist_id, playlist_title)
|
Wrzuta.pl was shut down in 2017.
(Apparently the domain was then repurposed for something unrelated.)
|
https://api.github.com/repos/ytdl-org/youtube-dl/pulls/20801
|
2019-04-24T14:45:21Z
|
2019-04-26T22:12:16Z
|
2019-04-26T22:12:16Z
|
2019-04-26T22:16:55Z
| 1,912
|
ytdl-org/youtube-dl
| 50,191
|
Add mysqlclient-python and PyMySQL
|
diff --git a/README.md b/README.md
index 37e3eb946..b798ecab2 100644
--- a/README.md
+++ b/README.md
@@ -294,6 +294,8 @@ long, literate-programming-style documentation generator.
* Relational Databases
* [mysql-python](http://sourceforge.net/projects/mysql-python/) - The MySQL database connector for Python.
+ * [mysqlclient](https://github.com/PyMySQL/mysqlclient-python) - mysql-python fork supporting Python 3.
+ * [PyMySQL](https://github.com/PyMySQL/PyMySQL) - Pure Python MySQL driver compatible to mysql-python.
* [mysql-connector-python](https://pypi.python.org/pypi/mysql-connector-python) - A pure Python MySQL driver from Oracle.
* [oursql](https://pythonhosted.org/oursql/) - A better MySQL connector for Python with support for native prepared statements and BLOBs.
* [psycopg2](http://initd.org/psycopg/) - The most popular PostgreSQL adapter for Python.
|
https://api.github.com/repos/vinta/awesome-python/pulls/138
|
2014-07-14T06:35:08Z
|
2014-07-14T14:12:14Z
|
2014-07-14T14:12:14Z
|
2014-07-14T14:12:14Z
| 236
|
vinta/awesome-python
| 27,295
|
|
Taxi.py: Improve inline documentation
|
diff --git a/gym/envs/toy_text/taxi.py b/gym/envs/toy_text/taxi.py
index 425892c7734..7aa888ac706 100644
--- a/gym/envs/toy_text/taxi.py
+++ b/gym/envs/toy_text/taxi.py
@@ -15,6 +15,7 @@
"+---------+",
]
+
class TaxiEnv(discrete.DiscreteEnv):
"""
The Taxi Problem
@@ -45,74 +46,88 @@ class TaxiEnv(discrete.DiscreteEnv):
- magenta: destination
- yellow: empty taxi
- green: full taxi
- - other letters: locations
-
+ - other letters (R, G, B and Y): locations for passengers and destinations
+
+ actions:
+ - 0: south
+ - 1: north
+ - 2: east
+ - 3: west
+ - 4: pickup
+ - 5: dropoff
+
+ state space is represented by:
+ (taxi_row, taxi_col, passenger_location, destination)
"""
metadata = {'render.modes': ['human', 'ansi']}
def __init__(self):
- self.desc = np.asarray(MAP,dtype='c')
+ self.desc = np.asarray(MAP, dtype='c')
self.locs = locs = [(0,0), (0,4), (4,0), (4,3)]
- nS = 500
- nR = 5
- nC = 5
- maxR = nR-1
- maxC = nC-1
- isd = np.zeros(nS)
- nA = 6
- P = {s : {a : [] for a in range(nA)} for s in range(nS)}
- for row in range(5):
- for col in range(5):
- for passidx in range(5):
- for destidx in range(4):
- state = self.encode(row, col, passidx, destidx)
- if passidx < 4 and passidx != destidx:
- isd[state] += 1
- for a in range(nA):
+ num_states = 500
+ num_rows = 5
+ num_columns = 5
+ max_row = num_rows - 1
+ max_col = num_columns - 1
+ initial_state_distrib = np.zeros(num_states)
+ num_actions = 6
+ P = {state: {action: []
+ for action in range(num_actions)} for state in range(num_states)}
+ for row in range(num_rows):
+ for col in range(num_columns):
+ for pass_idx in range(len(locs) + 1): # +1 for being inside taxi
+ for dest_idx in range(len(locs)):
+ state = self.encode(row, col, pass_idx, dest_idx)
+ if pass_idx < 4 and pass_idx != dest_idx:
+ initial_state_distrib[state] += 1
+ for action in range(num_actions):
# defaults
- newrow, newcol, newpassidx = row, col, passidx
- reward = -1
+ new_row, new_col, new_pass_idx = row, col, pass_idx
+ reward = -1 # default reward when there is no pickup/dropoff
done = False
- taxiloc = (row, col)
-
- if a==0:
- newrow = min(row+1, maxR)
- elif a==1:
- newrow = max(row-1, 0)
- if a==2 and self.desc[1+row,2*col+2]==b":":
- newcol = min(col+1, maxC)
- elif a==3 and self.desc[1+row,2*col]==b":":
- newcol = max(col-1, 0)
- elif a==4: # pickup
- if (passidx < 4 and taxiloc == locs[passidx]):
- newpassidx = 4
- else:
+ taxi_loc = (row, col)
+
+ if action == 0:
+ new_row = min(row + 1, max_row)
+ elif action == 1:
+ new_row = max(row - 1, 0)
+ if action == 2 and self.desc[1 + row, 2 * col + 2] == b":":
+ new_col = min(col + 1, max_col)
+ elif action == 3 and self.desc[1 + row, 2 * col] == b":":
+ new_col = max(col - 1, 0)
+ elif action == 4: # pickup
+ if (pass_idx < 4 and taxi_loc == locs[pass_idx]):
+ new_pass_idx = 4
+ else: # passenger not at location
reward = -10
- elif a==5: # dropoff
- if (taxiloc == locs[destidx]) and passidx==4:
- newpassidx = destidx
+ elif action == 5: # dropoff
+ if (taxi_loc == locs[dest_idx]) and pass_idx == 4:
+ new_pass_idx = dest_idx
done = True
reward = 20
- elif (taxiloc in locs) and passidx==4:
- newpassidx = locs.index(taxiloc)
- else:
+ elif (taxi_loc in locs) and pass_idx == 4:
+ new_pass_idx = locs.index(taxi_loc)
+ else: # dropoff at wrong location
reward = -10
- newstate = self.encode(newrow, newcol, newpassidx, destidx)
- P[state][a].append((1.0, newstate, reward, done))
- isd /= isd.sum()
- discrete.DiscreteEnv.__init__(self, nS, nA, P, isd)
-
- def encode(self, taxirow, taxicol, passloc, destidx):
+ new_state = self.encode(
+ new_row, new_col, new_pass_idx, dest_idx)
+ P[state][action].append(
+ (1.0, new_state, reward, done))
+ initial_state_distrib /= initial_state_distrib.sum()
+ discrete.DiscreteEnv.__init__(
+ self, num_states, num_actions, P, initial_state_distrib)
+
+ def encode(self, taxi_row, taxi_col, pass_loc, dest_idx):
# (5) 5, 5, 4
- i = taxirow
+ i = taxi_row
i *= 5
- i += taxicol
+ i += taxi_col
i *= 5
- i += passloc
+ i += pass_loc
i *= 4
- i += destidx
+ i += dest_idx
return i
def decode(self, i):
@@ -132,18 +147,21 @@ def render(self, mode='human'):
out = self.desc.copy().tolist()
out = [[c.decode('utf-8') for c in line] for line in out]
- taxirow, taxicol, passidx, destidx = self.decode(self.s)
+ taxi_row, taxi_col, pass_idx, dest_idx = self.decode(self.s)
+
def ul(x): return "_" if x == " " else x
- if passidx < 4:
- out[1+taxirow][2*taxicol+1] = utils.colorize(out[1+taxirow][2*taxicol+1], 'yellow', highlight=True)
- pi, pj = self.locs[passidx]
- out[1+pi][2*pj+1] = utils.colorize(out[1+pi][2*pj+1], 'blue', bold=True)
- else: # passenger in taxi
- out[1+taxirow][2*taxicol+1] = utils.colorize(ul(out[1+taxirow][2*taxicol+1]), 'green', highlight=True)
-
- di, dj = self.locs[destidx]
- out[1+di][2*dj+1] = utils.colorize(out[1+di][2*dj+1], 'magenta')
- outfile.write("\n".join(["".join(row) for row in out])+"\n")
+ if pass_idx < 4:
+ out[1 + taxi_row][2 * taxi_col + 1] = utils.colorize(
+ out[1 + taxi_row][2 * taxi_col + 1], 'yellow', highlight=True)
+ pi, pj = self.locs[pass_idx]
+ out[1 + pi][2 * pj + 1] = utils.colorize(out[1 + pi][2 * pj + 1], 'blue', bold=True)
+ else: # passenger in taxi
+ out[1 + taxi_row][2 * taxi_col + 1] = utils.colorize(
+ ul(out[1 + taxi_row][2 * taxi_col + 1]), 'green', highlight=True)
+
+ di, dj = self.locs[dest_idx]
+ out[1 + di][2 * dj + 1] = utils.colorize(out[1 + di][2 * dj + 1], 'magenta')
+ outfile.write("\n".join(["".join(row) for row in out]) + "\n")
if self.lastaction is not None:
outfile.write(" ({})\n".format(["South", "North", "East", "West", "Pickup", "Dropoff"][self.lastaction]))
else: outfile.write("\n")
|
* Use more descriptive variable names
* Fix formatting as per PEP8
* Add some inline documentation for better understanding (a short worked example of the state encoding is sketched below)
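For reference, the state indexing these renamed variables feed into is a plain mixed-radix scheme (5 rows × 5 columns × 5 passenger locations × 4 destinations = 500 states); a minimal standalone sketch, not the gym source itself:
```python
def encode(taxi_row, taxi_col, pass_loc, dest_idx):
    # Pack (row, col, passenger location, destination) into one state index.
    return ((taxi_row * 5 + taxi_col) * 5 + pass_loc) * 4 + dest_idx

def decode(state):
    # Invert the packing, peeling off the least-significant "digit" first.
    state, dest_idx = divmod(state, 4)
    state, pass_loc = divmod(state, 5)
    taxi_row, taxi_col = divmod(state, 5)
    return taxi_row, taxi_col, pass_loc, dest_idx

assert encode(3, 1, 2, 0) == 328
assert decode(328) == (3, 1, 2, 0)
```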
|
https://api.github.com/repos/openai/gym/pulls/1077
|
2018-06-16T18:47:20Z
|
2019-02-16T00:11:18Z
|
2019-02-16T00:11:18Z
|
2019-02-16T00:11:19Z
| 2,213
|
openai/gym
| 5,112
|
[gemini] update the gpt example
|
diff --git a/colossalai/nn/parallel/zero_wrapper.py b/colossalai/nn/parallel/zero_wrapper.py
index 504625e62d30..be8d1da7c24e 100644
--- a/colossalai/nn/parallel/zero_wrapper.py
+++ b/colossalai/nn/parallel/zero_wrapper.py
@@ -32,16 +32,19 @@ def zero_model_wrapper(model: nn.Module, zero_stage: int = 1, gemini_config: Opt
>>> config_dict = dict(device=torch.cuda.current_device(), hidden_dim=1024, placement_policy='auto')
>>> model = zero_model_wrapper(model, zero_stage=3, gemini_config=config_dict)
"""
- setattr(model, "_colo_zero_stage", zero_stage)
assert zero_stage in [1, 2, 3], "The stage of ZeRO should be 1, 2 or 3"
if gemini_config is None:
gemini_config = dict()
if zero_stage in [1, 2]:
- return model
+ wrapped_model = model
else:
- return GeminiDDP(model, **gemini_config)
+ wrapped_model = GeminiDDP(model, **gemini_config)
+
+ setattr(wrapped_model, "_colo_zero_stage", zero_stage)
+
+ return wrapped_model
def zero_optim_wrapper(model: nn.Module,
diff --git a/examples/language/gpt/gemini/benchmark_gemini.sh b/examples/language/gpt/gemini/benchmark_gemini.sh
index 9a630b2ffe23..3a42e13645f6 100644
--- a/examples/language/gpt/gemini/benchmark_gemini.sh
+++ b/examples/language/gpt/gemini/benchmark_gemini.sh
@@ -1,5 +1,5 @@
for MODEL_TYPE in "gpt2_medium"; do
- for DISTPLAN in "colossalai"; do
+ for DISTPLAN in "CAI_Gemini"; do
for BATCH_SIZE in 16; do
for GPUNUM in 1 2 4 8; do
for TPDEGREE in 1 2 4 8; do
diff --git a/examples/language/gpt/gemini/run_gemini.sh b/examples/language/gpt/gemini/run_gemini.sh
index 6f0710d54f01..ad4e9419c1bd 100644
--- a/examples/language/gpt/gemini/run_gemini.sh
+++ b/examples/language/gpt/gemini/run_gemini.sh
@@ -1,6 +1,6 @@
set -x
-# distplan in ["colossalai", "zero1", "zero2", "torch_ddp", "torch_zero"]
-export DISTPLAN=${DISTPLAN:-"colossalai"}
+# distplan in ["CAI_ZeRO1", "CAI_ZeRO2", "CAI_Gemini", "Pytorch_DDP", "Pytorch_ZeRO"]
+export DISTPLAN=${DISTPLAN:-"CAI_Gemini"}
# The following options only valid when DISTPLAN="colossalai"
export GPUNUM=${GPUNUM:-1}
@@ -12,6 +12,12 @@ export MODEL_TYPE=${MODEL_TYPE:-"gpt2_medium"}
export TRAIN_STEP=${TRAIN_STEP:-10}
# export PYTHONPATH=$PWD:$PYTHONPATH
+if [ ${USE_SHARD_INIT} = "True" ]; then
+ USE_SHARD_INIT="--shardinit"
+else
+ USE_SHARD_INIT=""
+fi
+
mkdir -p gemini_logs
torchrun --standalone --nproc_per_node=${GPUNUM} ./train_gpt_demo.py \
@@ -19,7 +25,7 @@ torchrun --standalone --nproc_per_node=${GPUNUM} ./train_gpt_demo.py \
--model_type=${MODEL_TYPE} \
--batch_size=${BATCH_SIZE} \
--placement=${PLACEMENT} \
---shardinit=${USE_SHARD_INIT} \
+${USE_SHARD_INIT} \
--distplan=${DISTPLAN} \
--train_step=${TRAIN_STEP} \
2>&1 | tee ./gemini_logs/${MODEL_TYPE}_${DISTPLAN}_gpu_${GPUNUM}_bs_${BATCH_SIZE}_tp_${TPDEGREE}_${PLACEMENT}.log
diff --git a/examples/language/gpt/gemini/train_gpt_demo.py b/examples/language/gpt/gemini/train_gpt_demo.py
index 02857ae9ce12..ab8a65e625cf 100644
--- a/examples/language/gpt/gemini/train_gpt_demo.py
+++ b/examples/language/gpt/gemini/train_gpt_demo.py
@@ -12,26 +12,21 @@
import colossalai
from colossalai.logging import disable_existing_loggers, get_dist_logger
-from colossalai.nn.parallel import ZeroDDP
+from colossalai.nn.optimizer import HybridAdam
+from colossalai.nn.parallel import zero_model_wrapper, zero_optim_wrapper
from colossalai.tensor import ColoParameter, ComputePattern, ComputeSpec, ProcessGroup, ReplicaSpec, ShardSpec
from colossalai.utils import get_current_device
from colossalai.utils.model.colo_init_context import ColoInitContext
CAI_VERSION = colossalai.__version__
-if version.parse(CAI_VERSION) > version.parse("0.1.10"):
- # These are added after 0.1.10
- from colossalai.nn.optimizer.gemini_optimizer import GeminiAdamOptimizer
- from colossalai.nn.parallel import GeminiDDP
- from colossalai.zero.sharded_optim import LowLevelZeroOptimizer
-
def parse_args():
parser = colossalai.get_default_parser()
parser.add_argument(
"--distplan",
type=str,
- default='colossalai',
+ default='CAI_Gemini',
help="The distributed plan [colossalai, zero1, zero2, torch_ddp, torch_zero].",
)
parser.add_argument(
@@ -48,8 +43,7 @@ def parse_args():
)
parser.add_argument(
"--shardinit",
- type=bool,
- default=False,
+ action='store_true',
help=
"Shard the tensors when init the model to shrink peak memory size on the assigned device. Valid when using colossalai as dist plan.",
)
@@ -186,57 +180,16 @@ def tensor_parallelize(model: torch.nn.Module, pg: ProcessGroup):
param.visited = True
-# Gemini + ZeRO DDP
-def build_gemini(model: torch.nn.Module, pg: ProcessGroup, placement_policy: str = "auto", ddp_flag: bool = True):
- fp16_init_scale = 2**5
- gpu_margin_mem_ratio_for_auto = 0
-
- if version.parse(CAI_VERSION) > version.parse("0.1.10"):
- model = GeminiDDP(model,
- strict_ddp_mode=ddp_flag,
- device=get_current_device(),
- placement_policy=placement_policy,
- pin_memory=True,
- hidden_dim=model.config.n_embd,
- search_range_mb=128)
- # configure the const policy
- if placement_policy == 'const':
- model.gemini_manager._placement_policy.set_const_memory_boundary(2 * 1024)
- # build a highly optimized cpu optimizer
- optimizer = GeminiAdamOptimizer(model,
- lr=1e-3,
- initial_scale=fp16_init_scale,
- gpu_margin_mem_ratio=gpu_margin_mem_ratio_for_auto)
- elif version.parse("0.1.9") <= version.parse(CAI_VERSION) <= version.parse("0.1.10"):
- from colossalai.gemini import ChunkManager, GeminiManager
- from colossalai.nn.optimizer import HybridAdam
- from colossalai.zero import ZeroOptimizer
- chunk_size = ChunkManager.search_chunk_size(model, 64 * 1024**2, 1024, filter_exlarge_params=True)
- chunk_manager = ChunkManager(chunk_size,
- pg,
- enable_distributed_storage=True,
- init_device=GeminiManager.get_default_device(placement_policy))
- gemini_manager = GeminiManager(placement_policy, chunk_manager)
- model = ZeroDDP(model, gemini_manager)
- optimizer = HybridAdam(model.parameters(), lr=1e-3)
- optimizer = ZeroOptimizer(optimizer,
- model,
- initial_scale=fp16_init_scale,
- gpu_margin_mem_ratio=gpu_margin_mem_ratio_for_auto)
- else:
- raise NotImplemented(f"CAI version {CAI_VERSION} is not supported")
- return model, optimizer
-
-
def main():
# version check
- # this example is supposed to work for versions greater than 0.1.9
- assert version.parse(CAI_VERSION) >= version.parse("0.1.9")
+ # this example is supposed to work for versions greater than 0.2.0
+ assert version.parse(CAI_VERSION) >= version.parse("0.2.0")
set_cpu_maximum_parallelism()
args = parse_args()
- if args.distplan not in ["colossalai", "torch_ddp", "torch_zero", "zero1", "zero2"]:
+ # if args.distplan not in ["colossalai", "torch_ddp", "torch_zero", "zero1", "zero2"]:
+ if args.distplan not in ["CAI_ZeRO1", "CAI_ZeRO2", "CAI_Gemini", "Pytorch_DDP", "Pytorch_ZeRO"]:
raise TypeError(f"{args.distplan} is error")
# batch size per DP degree
@@ -260,22 +213,21 @@ def main():
criterion = GPTLMLoss()
torch.manual_seed(123)
- if args.distplan == "colossalai":
+ if args.distplan.startswith("CAI"):
# all param must use the same process group.
world_size = torch.distributed.get_world_size()
shard_pg = ProcessGroup(tp_degree=world_size) if args.shardinit else None
default_dist_spec = ShardSpec([-1], [world_size]) if args.shardinit else None
+ if args.shardinit and args.distplan != "CAI_Gemini":
+ raise RuntimeError("You can only use shardinit with CAI_Gemini")
+
# build GPT model
- if version.parse(CAI_VERSION) > version.parse("0.1.10"):
- with ColoInitContext(device=get_current_device(),
- dtype=torch.half,
- default_dist_spec=default_dist_spec,
- default_pg=shard_pg):
- model = model_builder(args.model_type)(checkpoint=True)
- else:
- with ColoInitContext(device=get_current_device()):
- model = model_builder(args.model_type)(checkpoint=True)
+ with ColoInitContext(device=get_current_device(),
+ dtype=torch.half,
+ default_dist_spec=default_dist_spec,
+ default_pg=shard_pg):
+ model = model_builder(args.model_type)(checkpoint=True)
tp_pg = ProcessGroup(tp_degree=args.tp_degree)
# Tensor Parallelism (TP)
@@ -283,34 +235,49 @@ def main():
if args.tp_degree > 1:
tensor_parallelize(model, tp_pg)
- # build a Gemini model and a highly optimized cpu optimizer
- # Gemini + ZeRO DP, Note it must be used after TP
- model, optimizer = build_gemini(model, tp_pg, args.placement, args.tp_degree == 1)
+ # asign running configurations
+ gemini_config = None
+ if args.distplan.startswith("CAI_ZeRO"):
+ optim_config = dict(reduce_bucket_size=12 * 1024 * 1024, overlap_communication=True, verbose=True)
+ elif args.distplan == "CAI_Gemini":
+ gemini_config = dict(strict_ddp_mode=args.tp_degree == 1,
+ device=get_current_device(),
+ placement_policy=args.placement,
+ pin_memory=True,
+ hidden_dim=model.config.n_embd,
+ search_range_mb=128)
+ optim_config = dict(gpu_margin_mem_ratio=0.)
+ else:
+ raise RuntimeError
+
+ # build a highly optimized gpu/cpu optimizer
+ optimizer = HybridAdam(model.parameters(), lr=1e-3)
+
+ if args.distplan == "CAI_ZeRO1":
+ zero_stage = 1
+ elif args.distplan == "CAI_ZeRO2":
+ zero_stage = 2
+ elif args.distplan == "CAI_Gemini":
+ zero_stage = 3
+ else:
+ raise RuntimeError
+
+ # wrap your model and optimizer
+ model = zero_model_wrapper(model, zero_stage, gemini_config)
+ optimizer = zero_optim_wrapper(model, optimizer, optim_config=optim_config)
logger.info(get_mem_info(prefix='After init optim, '), ranks=[0])
- else:
+ elif args.distplan.startswith("Pytorch"):
assert args.tp_degree == 1, "The degree of TP should be 1 for DDP examples."
model = model_builder(args.model_type)(checkpoint=True).cuda()
-
- if args.distplan.startswith("torch"):
model = DDP(model)
- if args.distplan.endswith("ddp"):
- optimizer = torch.optim.Adam(model.parameters(), lr=0.01)
- elif args.distplan.endswith("zero"):
+ if args.distplan.endswith("DDP"):
+ optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
+ elif args.distplan.endswith("ZeRO"):
from torch.distributed.optim import ZeroRedundancyOptimizer
- optimizer = ZeroRedundancyOptimizer(model.parameters(), optimizer_class=torch.optim.Adam, lr=0.01)
- elif args.distplan.startswith("zero"):
- model = model.half()
- partition_flag = (args.distplan == "zero2")
- optimizer = torch.optim.Adam(model.parameters(), lr=0.01)
-
- optimizer = LowLevelZeroOptimizer(
- optimizer,
- reduce_bucket_size=12 * 1024 * 1024,
- overlap_communication=True,
- partition_grad=partition_flag,
- verbose=True,
- )
+ optimizer = ZeroRedundancyOptimizer(model.parameters(), optimizer_class=torch.optim.Adam, lr=1e-3)
+ else:
+ raise RuntimeError
# model is shared after TP
numel = get_model_size(model)
@@ -338,17 +305,18 @@ def main():
fwd_time = fwd_end - start
logger.info(get_mem_info(prefix=f'[{n + 1}/{NUM_STEPS}] Forward '), ranks=[0])
- if args.distplan in ["colossalai", "zero1", "zero2"]:
+ if args.distplan.startswith("CAI"):
optimizer.backward(loss)
- elif args.distplan in ["torch_ddp", "torch_zero"]:
+ elif args.distplan.startswith("Pytorch"):
loss.backward()
+ else:
+ raise RuntimeError
+
torch.cuda.synchronize()
bwd_end = time()
bwd_time = bwd_end - fwd_end
logger.info(get_mem_info(prefix=f'[{n + 1}/{NUM_STEPS}] Backward '), ranks=[0])
- if args.distplan in ["zero1", "zero2"]:
- optimizer.sync_grad()
optimizer.step()
torch.cuda.synchronize()
optim_time = time() - bwd_end
|
https://api.github.com/repos/hpcaitech/ColossalAI/pulls/2527
|
2023-01-30T05:08:04Z
|
2023-01-30T09:58:05Z
|
2023-01-30T09:58:05Z
|
2023-01-30T09:58:06Z
| 3,401
|
hpcaitech/ColossalAI
| 11,593
|
|
Option to input directory
|
diff --git a/comfy/cli_args.py b/comfy/cli_args.py
index ffae81c49d..35d44164f1 100644
--- a/comfy/cli_args.py
+++ b/comfy/cli_args.py
@@ -39,6 +39,7 @@ def __call__(self, parser, namespace, values, option_string=None):
parser.add_argument("--extra-model-paths-config", type=str, default=None, metavar="PATH", nargs='+', action='append', help="Load one or more extra_model_paths.yaml files.")
parser.add_argument("--output-directory", type=str, default=None, help="Set the ComfyUI output directory.")
parser.add_argument("--temp-directory", type=str, default=None, help="Set the ComfyUI temp directory (default is in the ComfyUI directory).")
+parser.add_argument("--input-directory", type=str, default=None, help="Set the ComfyUI input directory.")
parser.add_argument("--auto-launch", action="store_true", help="Automatically launch ComfyUI in the default browser.")
parser.add_argument("--disable-auto-launch", action="store_true", help="Disable auto launching the browser.")
parser.add_argument("--cuda-device", type=int, default=None, metavar="DEVICE_ID", help="Set the id of the cuda device this instance will use.")
diff --git a/folder_paths.py b/folder_paths.py
index 4a10c68e7e..898513b0e1 100644
--- a/folder_paths.py
+++ b/folder_paths.py
@@ -46,6 +46,10 @@ def set_temp_directory(temp_dir):
global temp_directory
temp_directory = temp_dir
+def set_input_directory(input_dir):
+ global input_directory
+ input_directory = input_dir
+
def get_output_directory():
global output_directory
return output_directory
diff --git a/main.py b/main.py
index 7c5eaee0a8..875ea1aa90 100644
--- a/main.py
+++ b/main.py
@@ -175,6 +175,11 @@ def load_extra_path_config(yaml_path):
print(f"Setting output directory to: {output_dir}")
folder_paths.set_output_directory(output_dir)
+ if args.input_directory:
+ input_dir = os.path.abspath(args.input_directory)
+ print(f"Setting input directory to: {input_dir}")
+ folder_paths.set_input_directory(input_dir)
+
if args.quick_test_for_ci:
exit(0)
|
There are options to set the output and temp directory, but the option for the input directory is missing, so this PR adds it.
|
https://api.github.com/repos/comfyanonymous/ComfyUI/pulls/1666
|
2023-10-04T22:47:31Z
|
2023-10-09T05:56:49Z
|
2023-10-09T05:56:49Z
|
2023-10-09T17:26:20Z
| 531
|
comfyanonymous/ComfyUI
| 17,906
|
Update oasst-data docs to reflect code changes
|
diff --git a/oasst-data/README.md b/oasst-data/README.md
index 947fdb62f3..4e42419d8f 100644
--- a/oasst-data/README.md
+++ b/oasst-data/README.md
@@ -19,12 +19,12 @@ Code example:
```python
# parsing OA data files with oasst_data helpers
-from oasst_data import load_trees, visit_messages_depth_first, ExportMessageNode
+from oasst_data import read_message_trees, visit_messages_depth_first, ExportMessageNode
messages: list[ExportMessageNode] = []
input_file_path = "data_file.jsonl.gz"
-for tree in load_trees(input_file_path):
+for tree in read_message_trees(input_file_path):
if tree.prompt.lang not in ["en","es"]: # filtering by language tag (optional)
continue
|
Close #2639
|
https://api.github.com/repos/LAION-AI/Open-Assistant/pulls/2660
|
2023-04-17T12:06:20Z
|
2023-04-17T14:27:11Z
|
2023-04-17T14:27:11Z
|
2023-04-17T14:27:12Z
| 193
|
LAION-AI/Open-Assistant
| 37,009
|
Improve docs of signatures of before/after/teardown callback funcs
|
diff --git a/flask/app.py b/flask/app.py
index 2f2735928f..467725c176 100644
--- a/flask/app.py
+++ b/flask/app.py
@@ -1243,7 +1243,13 @@ def add_template_global(self, f, name=None):
@setupmethod
def before_request(self, f):
- """Registers a function to run before each request."""
+ """Registers a function to run before each request.
+
+ The function will be called without any arguments.
+ If the function returns a non-None value, it's handled as
+ if it was the return value from the view and further
+ request handling is stopped.
+ """
self.before_request_funcs.setdefault(None, []).append(f)
return f
@@ -1252,6 +1258,9 @@ def before_first_request(self, f):
"""Registers a function to be run before the first request to this
instance of the application.
+ The function will be called without any arguments and its return
+ value is ignored.
+
.. versionadded:: 0.8
"""
self.before_first_request_funcs.append(f)
@@ -1298,6 +1307,8 @@ def teardown_request(self, f):
When a teardown function was called because of a exception it will
be passed an error object.
+ The return values of teardown functions are ignored.
+
.. admonition:: Debug Note
In debug mode Flask will not tear down a request on an exception
@@ -1332,6 +1343,8 @@ def teardown_appcontext(self, f):
When a teardown function was called because of an exception it will
be passed an error object.
+ The return values of teardown functions are ignored.
+
.. versionadded:: 0.9
"""
self.teardown_appcontext_funcs.append(f)
@@ -1710,8 +1723,9 @@ def handle_url_build_error(self, error, endpoint, values):
def preprocess_request(self):
"""Called before the actual request dispatching and will
- call every as :meth:`before_request` decorated function.
- If any of these function returns a value it's handled as
+ call each :meth:`before_request` decorated function, passing no
+ arguments.
+ If any of these functions returns a value, it's handled as
if it was the return value from the view and further
request handling is stopped.
|
I got bitten by the not-optimally-documented fact that the return values of `before_request` callbacks aren't ignored.
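For context, here is a minimal sketch of the behaviour the updated docstrings describe (the route and header name are made up for illustration):
```python
from flask import Flask, request

app = Flask(__name__)

@app.before_request
def require_api_key():
    # Called with no arguments before every request. Returning a non-None
    # value stops normal dispatch and is used as the response instead.
    if request.headers.get("X-Api-Key") != "expected-key":
        return "missing or invalid API key", 403

@app.route("/")
def index():
    # Only reached when every before_request callback returned None.
    return "ok"
```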
|
https://api.github.com/repos/pallets/flask/pulls/1338
|
2015-02-05T20:48:11Z
|
2015-02-05T21:11:07Z
|
2015-02-05T21:11:07Z
|
2020-11-14T06:09:03Z
| 541
|
pallets/flask
| 20,785
|
[googledrive] Fix missing "source" format
|
diff --git a/yt_dlp/extractor/googledrive.py b/yt_dlp/extractor/googledrive.py
index 9e2ccde0050..8a4cd1690e2 100644
--- a/yt_dlp/extractor/googledrive.py
+++ b/yt_dlp/extractor/googledrive.py
@@ -5,7 +5,9 @@
from ..utils import (
ExtractorError,
determine_ext,
+ extract_attributes,
get_element_by_class,
+ get_element_html_by_id,
int_or_none,
lowercase_escape,
try_get,
@@ -34,6 +36,7 @@ class GoogleDriveIE(InfoExtractor):
'ext': 'mp4',
'title': 'Big Buck Bunny.mp4',
'duration': 45,
+ 'thumbnail': 'https://drive.google.com/thumbnail?id=0ByeS4oOUV-49Zzh4R1J6R09zazQ',
}
}, {
# video can't be watched anonymously due to view count limit reached,
@@ -207,10 +210,10 @@ def get_value(key):
'export': 'download',
})
- def request_source_file(source_url, kind):
+ def request_source_file(source_url, kind, data=None):
return self._request_webpage(
source_url, video_id, note='Requesting %s file' % kind,
- errnote='Unable to request %s file' % kind, fatal=False)
+ errnote='Unable to request %s file' % kind, fatal=False, data=data)
urlh = request_source_file(source_url, 'source')
if urlh:
def add_source_format(urlh):
@@ -237,14 +240,10 @@ def add_source_format(urlh):
urlh, url, video_id, note='Downloading confirmation page',
errnote='Unable to confirm download', fatal=False)
if confirmation_webpage:
- confirm = self._search_regex(
- r'confirm=([^&"\']+)', confirmation_webpage,
- 'confirmation code', default=None)
- if confirm:
- confirmed_source_url = update_url_query(source_url, {
- 'confirm': confirm,
- })
- urlh = request_source_file(confirmed_source_url, 'confirmed source')
+ confirmed_source_url = extract_attributes(
+ get_element_html_by_id('download-form', confirmation_webpage) or '').get('action')
+ if confirmed_source_url:
+ urlh = request_source_file(confirmed_source_url, 'confirmed source', data=b'')
if urlh and urlh.headers.get('Content-Disposition'):
add_source_format(urlh)
else:
|
**IMPORTANT**: PRs without the template will be CLOSED
### Description of your *pull request* and other information
<!--
Explanation of your *pull request* in arbitrary form goes here. Please **make sure the description explains the purpose and effect** of your *pull request* and is worded well enough to be understood. Provide as much **context and examples** as possible
-->
This pull request addresses an issue where the "source" format was missing when a video was still being processed.
The problem occurred when attempting to extract video information from a source that was not yet fully processed. In such cases, the "source" format was not included in the extracted information, leading to a "Video is still being processed" error even though the source is available.
To resolve this issue, the code has been modified to ensure that the "source" format is included even when the video is still being processed, by changing the confirm regex to match the whole URL, including the uuid and at params. This ensures that all available formats are captured, providing a comprehensive set of video options for users.
Additionally, the pull request updates the relevant test cases so that they pass now that the thumbnail key is available.
Overall, this fix enhances the reliability and completeness of video extraction from the given source, improving the overall user experience.
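To illustrate the approach, here is a rough standalone sketch of pulling the confirmed URL out of the confirmation page's download form (hand-rolled with a regex that assumes `id` precedes `action` in the tag; the actual change uses yt-dlp's `get_element_html_by_id` and `extract_attributes` helpers, and the sample form HTML below is made up):
```python
import re

def extract_confirmed_url(confirmation_html):
    # Grab the action URL of <form id="download-form" ...> instead of only a
    # confirm= query value, so the uuid/at parameters are carried along.
    match = re.search(
        r'<form\b[^>]*\bid=["\']download-form["\'][^>]*\baction=["\']([^"\']+)["\']',
        confirmation_html)
    return match.group(1) if match else None

# Made-up example of what the confirmation form might look like:
page = '<form id="download-form" action="https://example.com/download?id=X&confirm=t&uuid=Y" method="post">'
assert extract_confirmed_url(page) == "https://example.com/download?id=X&confirm=t&uuid=Y"
```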
Fix #7344
<details open><summary>Template</summary> <!-- OPEN is intentional -->
<!--
# PLEASE FOLLOW THE GUIDE BELOW
- You will be asked some questions, please read them **carefully** and answer honestly
- Put an `x` into all the boxes `[ ]` relevant to your *pull request* (like [x])
- Use *Preview* tab to see how your *pull request* will actually look like
-->
### Before submitting a *pull request* make sure you have:
- [x] At least skimmed through [contributing guidelines](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#developer-instructions) including [yt-dlp coding conventions](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#yt-dlp-coding-conventions)
- [x] [Searched](https://github.com/yt-dlp/yt-dlp/search?q=is%3Apr&type=Issues) the bugtracker for similar pull requests
- [x] Checked the code with [flake8](https://pypi.python.org/pypi/flake8) and [ran relevant tests](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#developer-instructions)
### In order to be accepted and merged into yt-dlp each piece of code must be in public domain or released under [Unlicense](http://unlicense.org/). Check all of the following options that apply:
- [x] I am the original author of this code and I am willing to release it under [Unlicense](http://unlicense.org/)
- [ ] I am not the original author of this code but it is in public domain or released under [Unlicense](http://unlicense.org/) (provide reliable evidence)
### What is the purpose of your *pull request*?
- [x] Fix or improvement to an extractor (Make sure to add/update tests)
- [ ] New extractor ([Piracy websites will not be accepted](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#is-the-website-primarily-used-for-piracy))
- [ ] Core bug fix/improvement
- [ ] New feature (It is strongly [recommended to open an issue first](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#adding-new-feature-or-making-overarching-changes))
<!-- Do NOT edit/remove anything below this! -->
</details><details><summary>Copilot Summary</summary>
<!--
copilot:all
-->
### <samp>🤖 Generated by Copilot at 44f5ec9</samp>
### Summary
🖼️📮🔗
<!--
1. 🖼️ for thumbnail extraction, since this emoji depicts a framed picture or image.
2. 📮 for supporting POST requests for source files, since this emoji depicts a mailbox with a letter, which could symbolize sending or receiving data via HTTP methods.
3. 🔗 for simplifying confirmation URL extraction, since this emoji depicts a link or chain, which could symbolize a URL or a connection.
-->
Enhance GoogleDriveIE extractor with thumbnail, POST, and confirmation URL features.
> _Sing, O Muse, of the skillful coder who improved the extractor_
> _Of GoogleDriveIE, the mighty tool that fetches videos online_
> _He added thumbnail extraction, a splendid feature for the users_
> _And made it support POST requests, `source_file` to refine_
### Walkthrough
* Add thumbnail URL extraction to GoogleDriveIE extractor ([link](https://github.com/yt-dlp/yt-dlp/pull/7395/files?diff=unified&w=0#diff-f000a5dd6f0f7231185a73c67cd0b3599e4a48f119391e7a7f00f702e7b17ee0R37))
* Modify `request_source_file` function to accept optional `data` parameter for POST requests ([link](https://github.com/yt-dlp/yt-dlp/pull/7395/files?diff=unified&w=0#diff-f000a5dd6f0f7231185a73c67cd0b3599e4a48f119391e7a7f00f702e7b17ee0L210-R214))
* Use HTML-based extraction of confirmation URL instead of regex-based extraction of confirmation code ([link](https://github.com/yt-dlp/yt-dlp/pull/7395/files?diff=unified&w=0#diff-f000a5dd6f0f7231185a73c67cd0b3599e4a48f119391e7a7f00f702e7b17ee0L240-R245))
</details>
|
https://api.github.com/repos/yt-dlp/yt-dlp/pulls/7395
|
2023-06-23T09:26:17Z
|
2023-07-05T02:17:13Z
|
2023-07-05T02:17:13Z
|
2023-07-05T02:17:14Z
| 591
|
yt-dlp/yt-dlp
| 7,573
|
Add FAQ on making a blank request
|
diff --git a/docs/faq.rst b/docs/faq.rst
index 20dd814df31..2113b096435 100644
--- a/docs/faq.rst
+++ b/docs/faq.rst
@@ -405,6 +405,23 @@ or :class:`~scrapy.signals.headers_received` signals and raising a
:ref:`topics-stop-response-download` topic for additional information and examples.
+.. _faq-blank-request:
+
+How can I make a blank request?
+-------------------------------
+
+.. code-block:: python
+
+ from scrapy import Request
+
+
+ blank_request = Request("data:,")
+
+In this case, the URL is set to a data URI scheme. Data URLs allow you to include data
+in-line in web pages as if they were external resources. The "data:" scheme with an empty
+content (",") essentially creates a request to a data URL without any specific content.
+
+
Running ``runspider`` I get ``error: No spider found in file: <filename>``
--------------------------------------------------------------------------
|
This PR addresses issue #6203, adding a FAQ entry on making a blank request.
```python
yield Request(
url="data:,",
callback=self.your_call_back
)
```
suggested by: @Gallaecio
|
https://api.github.com/repos/scrapy/scrapy/pulls/6208
|
2024-01-17T15:17:53Z
|
2024-01-18T17:56:12Z
|
2024-01-18T17:56:12Z
|
2024-01-18T17:56:12Z
| 230
|
scrapy/scrapy
| 35,092
|
Update for autogen.py to include the content of constraints.py
|
diff --git a/docs/autogen.py b/docs/autogen.py
index f8fcb380f41..808307589e0 100644
--- a/docs/autogen.py
+++ b/docs/autogen.py
@@ -80,6 +80,7 @@
from keras import losses
from keras import metrics
from keras import backend
+from keras import constraints
from keras import activations
from keras import preprocessing
@@ -327,6 +328,10 @@
'page': 'backend.md',
'all_module_functions': [backend],
},
+ {
+ 'page': 'constraints.md',
+ 'all_module_classes': [constraints],
+ },
{
'page': 'utils.md',
'functions': [utils.to_categorical,
diff --git a/docs/templates/constraints.md b/docs/templates/constraints.md
index e997fd2774f..ca39abe0c23 100644
--- a/docs/templates/constraints.md
+++ b/docs/templates/constraints.md
@@ -17,7 +17,9 @@ model.add(Dense(64, kernel_constraint=max_norm(2.)))
## Available constraints
-- __max_norm(max_value=2, axis=0)__: maximum-norm constraint
-- __non_neg()__: non-negativity constraint
-- __unit_norm(axis=0)__: unit-norm constraint
-- __min_max_norm(min_value=0.0, max_value=1.0, rate=1.0, axis=0)__: minimum/maximum-norm constraint
+---
+
+{{autogenerated}}
+
+---
+
|
### Summary
Information from the docstrings of the file constraints.py was not grabbed by autogen.py because the autogenerated code block was absent in templates/constraints.md and the corresponding entry in the PAGES list in autogen.py was missing as well. I did not find a commit where it would work before, but I could have missed it.
### Related Issues
Resolves the issue #11382
### PR Overview
- [n] This PR requires new unit tests [y/n] (make sure tests are included)
- [y] This PR requires to update the documentation [y/n] (make sure the docs are up-to-date)
- [y] This PR is backwards compatible [y/n]
- [n] This PR changes the current API [y/n] (all API changes need to be approved by fchollet)
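For context, a short sketch of the kind of API the regenerated constraints.md page documents (standalone `keras` package; layer sizes are arbitrary):
```python
from keras import constraints
from keras.layers import Dense
from keras.models import Sequential

# Constrain the kernel weights of each layer; these classes live in constraints.py,
# whose docstrings the autogenerated page now picks up.
model = Sequential()
model.add(Dense(64, input_dim=32, kernel_constraint=constraints.max_norm(2.)))
model.add(Dense(1, kernel_constraint=constraints.non_neg()))
model.compile(optimizer="sgd", loss="mse")
```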
|
https://api.github.com/repos/keras-team/keras/pulls/11456
|
2018-10-22T17:50:55Z
|
2018-10-23T16:56:15Z
|
2018-10-23T16:56:15Z
|
2018-10-23T16:56:16Z
| 339
|
keras-team/keras
| 47,472
|
Revert protobuf<=3.20.1
|
diff --git a/requirements.txt b/requirements.txt
index de3239cbdd4..6313cecee57 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -12,7 +12,7 @@ scipy>=1.4.1
torch>=1.7.0
torchvision>=0.8.1
tqdm>=4.64.0
-protobuf<4.21.3 # https://github.com/ultralytics/yolov5/issues/8012
+protobuf<=3.20.1 # https://github.com/ultralytics/yolov5/issues/8012
# Logging -------------------------------------
tensorboard>=2.4.1
|
Resolve #8012 (again)
## 🛠️ PR Summary
<sub>Made with ❤️ by [Ultralytics Actions](https://github.com/ultralytics/actions)<sub>
### 🌟 Summary
Adjustment of `protobuf` version requirements for better compatibility.
### 📊 Key Changes
- Modified the required `protobuf` version in `requirements.txt` from `<4.21.3` to `<=3.20.1`.
### 🎯 Purpose & Impact
- 🛠 This change aims to address compatibility issues highlighted in issue #8012.
- 👩💻👨💻 Users can expect more stable installations and fewer conflicts with this specific dependency when setting up YOLOv5.
- 🔄 Ensuring a specific version of `protobuf` may prevent potential bugs that could arise from newer, untested versions.
|
https://api.github.com/repos/ultralytics/yolov5/pulls/8742
|
2022-07-27T14:40:28Z
|
2022-07-27T15:27:44Z
|
2022-07-27T15:27:44Z
|
2024-01-19T08:09:50Z
| 151
|
ultralytics/yolov5
| 25,178
|
[shardformer] update t5 model
|
diff --git a/colossalai/shardformer/modeling/t5.py b/colossalai/shardformer/modeling/t5.py
index 9c5ce3fb65c9..94f4fce74501 100644
--- a/colossalai/shardformer/modeling/t5.py
+++ b/colossalai/shardformer/modeling/t5.py
@@ -118,15 +118,12 @@ def t5_stack_forward(
# required mask seq length can be calculated via length of past
mask_seq_length = past_key_values[0][0].shape[2] + seq_length if past_key_values is not None else seq_length
- if attention_mask is None:
- attention_mask = torch.ones(batch_size, mask_seq_length, device=device)
- if in_decoder and encoder_attention_mask is None and encoder_hidden_states is not None:
- encoder_seq_length = encoder_hidden_states.shape[1]
- encoder_attention_mask = torch.ones(batch_size, encoder_seq_length, device=device, dtype=torch.long)
-
# initialize past_key_values with `None` if past does not exist
if past_key_values is None:
past_key_values = [None] * len(self.block)
+
+ if attention_mask is None:
+ attention_mask = torch.ones(batch_size, mask_seq_length, device=device)
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
@@ -138,7 +135,9 @@ def t5_stack_forward(
encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
if encoder_attention_mask is None:
- encoder_attention_mask = torch.ones(encoder_hidden_shape, device=inputs_embeds.device)
+ encoder_attention_mask = torch.ones(
+ encoder_hidden_shape, device=inputs_embeds.device, dtype=torch.long
+ )
encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
else:
encoder_extended_attention_mask = None
@@ -162,15 +161,8 @@ def t5_stack_forward(
torch.cuda.set_device(hidden_states.device)
if self.gradient_checkpointing and self.training:
-
- def create_custom_forward(module):
- def custom_forward(*inputs):
- return tuple(module(*inputs, use_cache, output_attentions))
-
- return custom_forward
-
- layer_outputs = checkpoint(
- create_custom_forward(layer_module),
+ layer_outputs = self._gradient_checkpointing_func(
+ layer_module.forward,
hidden_states,
extended_attention_mask,
position_bias,
@@ -180,6 +172,8 @@ def custom_forward(*inputs):
layer_head_mask,
cross_attn_layer_head_mask,
None, # past_key_value is always None with gradient checkpointing
+ use_cache,
+ output_attentions,
)
else:
layer_outputs = layer_module(
|
## 🚨 Issue number
- [ ] https://github.com/hpcaitech/ColossalAI/issues/5505
## 📝 What does this PR do?
[shardformer/modeling/t5]: Upgrade transformers from version 4.33.0 to version 4.36.0 for the `t5` model, including the `t5_stack_forward` function.
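As a rough sketch of what the gradient-checkpointing part of this change amounts to (toy module, illustrative only; in transformers 4.36 `self._gradient_checkpointing_func` is essentially a wrapper around `torch.utils.checkpoint.checkpoint`):
```python
import torch
from torch.utils.checkpoint import checkpoint

layer = torch.nn.Linear(8, 8)                       # stand-in for a T5 block
hidden_states = torch.randn(2, 8, requires_grad=True)

# Old style (transformers <= 4.33): non-tensor flags are captured in a closure,
# because only the tensor arguments are forwarded through checkpoint().
def create_custom_forward(module, use_cache=False, output_attentions=False):
    def custom_forward(*inputs):
        # a real T5 block would consume use_cache / output_attentions here
        return module(*inputs)
    return custom_forward

out_old = checkpoint(create_custom_forward(layer), hidden_states)

# New style (transformers >= 4.36): call the checkpointing helper on
# module.forward directly and pass every argument explicitly, which is why
# use_cache and output_attentions appear as extra arguments in the diff above.
out_new = checkpoint(layer.forward, hidden_states)
```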
|
https://api.github.com/repos/hpcaitech/ColossalAI/pulls/5524
|
2024-03-27T11:01:03Z
|
2024-04-03T04:23:38Z
|
2024-04-03T04:23:37Z
|
2024-04-03T04:23:38Z
| 649
|
hpcaitech/ColossalAI
| 11,301
|
⬆️ Upgrade Ruff
|
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index e59e05abe4e3e..4e34cc7ede902 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -19,7 +19,7 @@ repos:
- --py3-plus
- --keep-runtime-typing
- repo: https://github.com/charliermarsh/ruff-pre-commit
- rev: v0.0.114
+ rev: v0.0.138
hooks:
- id: ruff
args:
diff --git a/fastapi/dependencies/utils.py b/fastapi/dependencies/utils.py
index 3df5ccfc8b58e..4c817d5d0ba89 100644
--- a/fastapi/dependencies/utils.py
+++ b/fastapi/dependencies/utils.py
@@ -105,10 +105,10 @@ def check_file_field(field: ModelField) -> None:
assert parse_options_header
except ImportError:
logger.error(multipart_incorrect_install_error)
- raise RuntimeError(multipart_incorrect_install_error)
+ raise RuntimeError(multipart_incorrect_install_error) from None
except ImportError:
logger.error(multipart_not_installed_error)
- raise RuntimeError(multipart_not_installed_error)
+ raise RuntimeError(multipart_not_installed_error) from None
def get_param_sub_dependant(
diff --git a/fastapi/encoders.py b/fastapi/encoders.py
index 6bde9f4abf583..2f95bcbf6692d 100644
--- a/fastapi/encoders.py
+++ b/fastapi/encoders.py
@@ -157,7 +157,7 @@ def jsonable_encoder(
data = vars(obj)
except Exception as e:
errors.append(e)
- raise ValueError(errors)
+ raise ValueError(errors) from e
return jsonable_encoder(
data,
include=include,
diff --git a/fastapi/utils.py b/fastapi/utils.py
index b94dacecc55d8..b15f6a2cfb09b 100644
--- a/fastapi/utils.py
+++ b/fastapi/utils.py
@@ -89,7 +89,7 @@ def create_response_field(
except RuntimeError:
raise fastapi.exceptions.FastAPIError(
f"Invalid args for response field! Hint: check that {type_} is a valid pydantic field type"
- )
+ ) from None
def create_cloned_field(
diff --git a/pyproject.toml b/pyproject.toml
index 9549cc47da156..4ae3809864b2a 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -53,7 +53,7 @@ test = [
"pytest >=7.1.3,<8.0.0",
"coverage[toml] >= 6.5.0,<7.0",
"mypy ==0.982",
- "ruff ==0.0.114",
+ "ruff ==0.0.138",
"black == 22.10.0",
"isort >=5.0.6,<6.0.0",
"httpx >=0.23.0,<0.24.0",
@@ -87,7 +87,7 @@ doc = [
"pyyaml >=5.3.1,<7.0.0",
]
dev = [
- "ruff ==0.0.114",
+ "ruff ==0.0.138",
"uvicorn[standard] >=0.12.0,<0.19.0",
"pre-commit >=2.17.0,<3.0.0",
]
@@ -168,6 +168,7 @@ select = [
ignore = [
"E501", # line too long, handled by black
"B008", # do not perform function calls in argument defaults
+ "C901", # too complex
]
[tool.ruff.per-file-ignores]
@@ -178,7 +179,8 @@ ignore = [
"docs_src/dependencies/tutorial010.py" = ["F821"]
"docs_src/custom_response/tutorial007.py" = ["B007"]
"docs_src/dataclasses/tutorial003.py" = ["I001"]
-
+"docs_src/path_operation_advanced_configuration/tutorial007.py" = ["B904"]
+"docs_src/custom_request_and_route/tutorial002.py" = ["B904"]
[tool.ruff.isort]
known-third-party = ["fastapi", "pydantic", "starlette"]
|
⬆️ Upgrade Ruff
|
https://api.github.com/repos/tiangolo/fastapi/pulls/5698
|
2022-11-27T13:38:20Z
|
2022-11-27T13:59:32Z
|
2022-11-27T13:59:32Z
|
2022-11-27T13:59:33Z
| 1,009
|
tiangolo/fastapi
| 22,653
|
[diffusion] update readme
|
diff --git a/examples/images/diffusion/README.md b/examples/images/diffusion/README.md
index 324337426927..8583f3be2138 100644
--- a/examples/images/diffusion/README.md
+++ b/examples/images/diffusion/README.md
@@ -52,7 +52,7 @@ export PACKAGE_NAME=pytorch
pip install .
```
-### Install [Colossal-AI v0.1.10](https://colossalai.org/download/) From Our Official Website
+### Install [Colossal-AI v0.1.12](https://colossalai.org/download/) From Our Official Website
```
pip install colossalai==0.1.12+torch1.12cu11.3 -f https://release.colossalai.org
@@ -101,10 +101,10 @@ python main.py --logdir /tmp/ -t -b configs/train_colossalai.yaml
You can change the trainging config in the yaml file
-- accelerator: acceleratortype, default 'gpu'
-- devices: device number used for training, default 4
-- max_epochs: max training epochs
-- precision: usefp16 for training or not, default 16, you must use fp16 if you want to apply colossalai
+- devices: device number used for training, default 8
+- max_epochs: max training epochs, default 2
+- precision: the precision type used in training, default 16 (fp16), you must use fp16 if you want to apply colossalai
+- more information about the configuration of ColossalAIStrategy can be found [here](https://pytorch-lightning.readthedocs.io/en/latest/advanced/model_parallel.html#colossal-ai)
## Finetune Example
### Training on Teyvat Datasets
|
https://api.github.com/repos/hpcaitech/ColossalAI/pulls/2214
|
2022-12-28T07:42:00Z
|
2022-12-28T08:06:48Z
|
2022-12-28T08:06:48Z
|
2022-12-28T08:06:48Z
| 387
|
hpcaitech/ColossalAI
| 11,276
|
|
Remove unnecessary period in documentation
|
diff --git a/docs/testing.rst b/docs/testing.rst
index 82e10328b5..50e727620f 100644
--- a/docs/testing.rst
+++ b/docs/testing.rst
@@ -40,7 +40,7 @@ pytest.
Next, we create a `pytest fixture`_ called
:func:`client` that configures
-the application for testing and initializes a new database.::
+the application for testing and initializes a new database::
import os
import tempfile
|
Apart from this one, I also noticed that there are [two sentences](http://flask.pocoo.org/docs/dev/testing/#testing-cli-commands) that end with a period instead of a colon before a code snippet; should we change those too?
|
https://api.github.com/repos/pallets/flask/pulls/2834
|
2018-06-17T04:16:40Z
|
2018-06-18T03:43:58Z
|
2018-06-18T03:43:58Z
|
2020-11-14T03:20:32Z
| 112
|
pallets/flask
| 20,822
|
security(deps): Bump django version to 2.2.27
|
diff --git a/requirements-base.txt b/requirements-base.txt
index cc3e97c8aceac..9aa32dd7a1bc8 100644
--- a/requirements-base.txt
+++ b/requirements-base.txt
@@ -12,7 +12,7 @@ datadog==0.29.3
django-crispy-forms==1.8.1
django-picklefield==2.1.0
django-pg-zero-downtime-migrations==0.10
-Django==2.2.24
+Django==2.2.27
djangorestframework==3.11.2
drf-spectacular==0.21.0
email-reply-parser==0.5.12
diff --git a/src/sentry/web/debug_urls.py b/src/sentry/web/debug_urls.py
index 1e252038a2bdd..742c4e3701577 100644
--- a/src/sentry/web/debug_urls.py
+++ b/src/sentry/web/debug_urls.py
@@ -1,4 +1,4 @@
-from django.conf.urls import url
+from django.urls import re_path
from django.views.generic import TemplateView
import sentry.web.frontend.debug.mail
@@ -65,67 +65,67 @@
from sentry.web.frontend.debug.debug_unassigned_email import DebugUnassignedEmailView
urlpatterns = [
- url(r"^debug/mail/alert/$", sentry.web.frontend.debug.mail.alert),
- url(r"^debug/mail/note/$", DebugNoteEmailView.as_view()),
- url(r"^debug/mail/new-release/$", DebugNewReleaseEmailView.as_view()),
- url(r"^debug/mail/new-user-feedback/$", DebugNewUserFeedbackEmailView.as_view()),
- url(r"^debug/mail/assigned/$", DebugAssignedEmailView.as_view()),
- url(r"^debug/mail/assigned/self/$", DebugSelfAssignedEmailView.as_view()),
- url(r"^debug/mail/assigned/team/$", DebugSelfAssignedTeamEmailView.as_view()),
- url(
- r"^debug/mail/codeowners_auto_sync_failure/$", DebugCodeOwnersAutoSyncFailureView.as_view()
+ re_path("^debug/mail/alert", sentry.web.frontend.debug.mail.alert),
+ re_path("^debug/mail/note", DebugNoteEmailView.as_view()),
+ re_path("^debug/mail/new-release", DebugNewReleaseEmailView.as_view()),
+ re_path("^debug/mail/new-user-feedback", DebugNewUserFeedbackEmailView.as_view()),
+ re_path("^debug/mail/assigned/self", DebugSelfAssignedEmailView.as_view()),
+ re_path("^debug/mail/assigned/team", DebugSelfAssignedTeamEmailView.as_view()),
+ re_path("^debug/mail/assigned", DebugAssignedEmailView.as_view()),
+ re_path(
+ "^debug/mail/codeowners_auto_sync_failure", DebugCodeOwnersAutoSyncFailureView.as_view()
),
- url(r"^debug/mail/digest/$", sentry.web.frontend.debug.mail.digest),
- url(r"^debug/mail/report/$", sentry.web.frontend.debug.mail.report),
- url(r"^debug/mail/regression/$", DebugRegressionEmailView.as_view()),
- url(r"^debug/mail/regression/release/$", DebugRegressionReleaseEmailView.as_view()),
- url(r"^debug/mail/resolved/$", DebugResolvedEmailView.as_view()),
- url(r"^debug/mail/resolved-in-release/$", DebugResolvedInReleaseEmailView.as_view()),
- url(
- r"^debug/mail/resolved-in-release/upcoming/$",
+ re_path("^debug/mail/digest", sentry.web.frontend.debug.mail.digest),
+ re_path("^debug/mail/report", sentry.web.frontend.debug.mail.report),
+ re_path("^debug/mail/regression/release", DebugRegressionReleaseEmailView.as_view()),
+ re_path("^debug/mail/regression", DebugRegressionEmailView.as_view()),
+ re_path(
+ "^debug/mail/resolved-in-release/upcoming",
DebugResolvedInReleaseUpcomingEmailView.as_view(),
),
- url(r"^debug/mail/request-access/$", sentry.web.frontend.debug.mail.request_access),
- url(
- r"^debug/mail/request-access-for-another-member/$",
+ re_path("^debug/mail/resolved-in-release", DebugResolvedInReleaseEmailView.as_view()),
+ re_path("^debug/mail/resolved", DebugResolvedEmailView.as_view()),
+ re_path(
+ "^debug/mail/request-access-for-another-member",
sentry.web.frontend.debug.mail.request_access_for_another_member,
),
- url(r"^debug/mail/join-request/$", DebugOrganizationJoinRequestEmailView.as_view()),
- url(r"^debug/mail/invite-request/$", DebugOrganizationInviteRequestEmailView.as_view()),
- url(r"^debug/mail/access-approved/$", sentry.web.frontend.debug.mail.access_approved),
- url(r"^debug/mail/invitation/$", sentry.web.frontend.debug.mail.invitation),
- url(r"^debug/mail/invalid-identity/$", DebugInvalidIdentityEmailView.as_view()),
- url(r"^debug/mail/codeowners-request/$", DebugCodeOwnersRequestView.as_view()),
- url(r"^debug/mail/confirm-email/$", sentry.web.frontend.debug.mail.confirm_email),
- url(r"^debug/mail/recover-account/$", sentry.web.frontend.debug.mail.recover_account),
- url(r"^debug/mail/unable-to-delete-repo/$", DebugUnableToDeleteRepository.as_view()),
- url(r"^debug/mail/unable-to-fetch-commits/$", DebugUnableToFetchCommitsEmailView.as_view()),
- url(r"^debug/mail/unassigned/$", DebugUnassignedEmailView.as_view()),
- url(r"^debug/mail/org-delete-confirm/$", sentry.web.frontend.debug.mail.org_delete_confirm),
- url(r"^debug/mail/mfa-removed/$", DebugMfaRemovedEmailView.as_view()),
- url(r"^debug/mail/mfa-added/$", DebugMfaAddedEmailView.as_view()),
- url(
- r"^debug/mail/recovery-codes-regenerated/$",
+ re_path("^debug/mail/request-access", sentry.web.frontend.debug.mail.request_access),
+ re_path("^debug/mail/join-request", DebugOrganizationJoinRequestEmailView.as_view()),
+ re_path("^debug/mail/invite-request", DebugOrganizationInviteRequestEmailView.as_view()),
+ re_path("^debug/mail/access-approved", sentry.web.frontend.debug.mail.access_approved),
+ re_path("^debug/mail/invitation", sentry.web.frontend.debug.mail.invitation),
+ re_path("^debug/mail/invalid-identity", DebugInvalidIdentityEmailView.as_view()),
+ re_path("^debug/mail/codeowners-request", DebugCodeOwnersRequestView.as_view()),
+ re_path("^debug/mail/confirm-email", sentry.web.frontend.debug.mail.confirm_email),
+ re_path("^debug/mail/recover-account", sentry.web.frontend.debug.mail.recover_account),
+ re_path("^debug/mail/unable-to-delete-repo", DebugUnableToDeleteRepository.as_view()),
+ re_path("^debug/mail/unable-to-fetch-commits", DebugUnableToFetchCommitsEmailView.as_view()),
+ re_path("^debug/mail/unassigned", DebugUnassignedEmailView.as_view()),
+ re_path("^debug/mail/org-delete-confirm", sentry.web.frontend.debug.mail.org_delete_confirm),
+ re_path("^debug/mail/mfa-removed", DebugMfaRemovedEmailView.as_view()),
+ re_path("^debug/mail/mfa-added", DebugMfaAddedEmailView.as_view()),
+ re_path(
+ "^debug/mail/recovery-codes-regenerated",
DebugRecoveryCodesRegeneratedEmailView.as_view(),
),
- url(r"^debug/mail/password-changed/$", DebugPasswordChangedEmailView.as_view()),
- url(r"^debug/mail/new-processing-issues/$", DebugNewProcessingIssuesEmailView.as_view()),
- url(
- r"^debug/mail/new-processing-issues-no-reprocessing/$",
+ re_path("^debug/mail/password-changed", DebugPasswordChangedEmailView.as_view()),
+ re_path(
+ "^debug/mail/new-processing-issues-no-reprocessing",
DebugNewProcessingIssuesNoReprocessingEmailView.as_view(),
),
- url(r"^debug/mail/sso-linked/$", DebugSsoLinkedEmailView.as_view()),
- url(r"^debug/mail/sso-unlinked/$", DebugSsoUnlinkedEmailView.as_view()),
- url(r"^debug/mail/sso-unlinked/no-password$", DebugSsoUnlinkedNoPasswordEmailView.as_view()),
- url(r"^debug/mail/incident-activity$", DebugIncidentActivityEmailView.as_view()),
- url(r"^debug/mail/incident-trigger$", DebugIncidentTriggerEmailView.as_view()),
- url(r"^debug/mail/setup-2fa/$", DebugSetup2faEmailView.as_view()),
- url(r"^debug/embed/error-page/$", DebugErrorPageEmbedView.as_view()),
- url(r"^debug/trigger-error/$", DebugTriggerErrorView.as_view()),
- url(r"^debug/auth-confirm-identity/$", debug_auth_views.DebugAuthConfirmIdentity.as_view()),
- url(r"^debug/auth-confirm-link/$", debug_auth_views.DebugAuthConfirmLink.as_view()),
- url(r"^debug/sudo/$", TemplateView.as_view(template_name="sentry/account/sudo.html")),
- url(r"^debug/oauth/authorize/$", DebugOAuthAuthorizeView.as_view()),
- url(r"^debug/oauth/authorize/error/$", DebugOAuthAuthorizeErrorView.as_view()),
- url(r"^debug/chart-renderer/$", DebugChartRendererView.as_view()),
+ re_path("^debug/mail/new-processing-issues", DebugNewProcessingIssuesEmailView.as_view()),
+ re_path("^debug/mail/sso-linked", DebugSsoLinkedEmailView.as_view()),
+ re_path("^debug/mail/sso-unlinked/no-password", DebugSsoUnlinkedNoPasswordEmailView.as_view()),
+ re_path("^debug/mail/sso-unlinked", DebugSsoUnlinkedEmailView.as_view()),
+ re_path("^debug/mail/incident-activity", DebugIncidentActivityEmailView.as_view()),
+ re_path("^debug/mail/incident-trigger", DebugIncidentTriggerEmailView.as_view()),
+ re_path("^debug/mail/setup-2fa", DebugSetup2faEmailView.as_view()),
+ re_path("^debug/embed/error-page", DebugErrorPageEmbedView.as_view()),
+ re_path("^debug/trigger-error", DebugTriggerErrorView.as_view()),
+ re_path("^debug/auth-confirm-identity", debug_auth_views.DebugAuthConfirmIdentity.as_view()),
+ re_path("^debug/auth-confirm-link", debug_auth_views.DebugAuthConfirmLink.as_view()),
+ re_path("^debug/sudo", TemplateView.as_view(template_name="sentry/account/sudo.html")),
+ re_path("^debug/oauth/authorize/error", DebugOAuthAuthorizeErrorView.as_view()),
+ re_path("^debug/oauth/authorize", DebugOAuthAuthorizeView.as_view()),
+ re_path("^debug/chart-renderer", DebugChartRendererView.as_view()),
]
diff --git a/src/sentry/web/urls.py b/src/sentry/web/urls.py
index 6baeb3fb9e4f0..a13734776966d 100644
--- a/src/sentry/web/urls.py
+++ b/src/sentry/web/urls.py
@@ -700,6 +700,5 @@
name="sentry-project-event-redirect",
),
# Legacy
- # This triggers a false positive for the urls.W002 Django warning
- url(r"/$", react_page_view),
+ url(r"", react_page_view),
]
|
#sync-getsentry
Update Django version from 2.2.24 to 2.2.27 to resolve dependabot identified vulnerabilities.
Django 2.2.25 included some breaking changes for us in terms of URL route matching.
> Changed in Django 2.2.25:
> In older versions, a full-match wasn’t required for a route which ends with $.
Some route changes were necessary:
- The _legacy_ catch-all route no longer behaved as we expected, so it's been adjusted.
- For example, in 2.2.24 an HTTP request to `/<org.slug>/<project.slug>/settings/plugins/<plugin_slug>/` would match on the intended catch-all, be routed to React land, and then redirected to `/settings/<org.slug>/projects/<project.slug>/plugins/<plugin_slug>/`.
- In >=2.2.25, a full match is now required. The above HTTP request fails to match even on the original catch-all, resulting in an HTTP 404 response.
- Routes in the debug_urls were not behaving as expected and have been adjusted.
- I also opted to use `re_path` (from `django.urls`) rather than `url` (from `django.conf.urls`). The latter is an alias to `re_path` in 2.2.x, but is [deprecated starting from Django 3.1.x](https://docs.djangoproject.com/en/3.2/ref/urls/#url).
This PR supersedes:
- #31720
- #31098
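A minimal sketch of the route style this migrates to (the view here is a placeholder); `re_path` from `django.urls` is the non-deprecated equivalent of `url`, and dropping the trailing `/$` is how the stricter full-match behaviour described above was worked around:
```python
# Hypothetical urls.py fragment, for illustration only
from django.urls import re_path
from django.views.generic import TemplateView

urlpatterns = [
    # previously: url(r"^debug/sudo/$", ...) -- no longer matched as expected on >=2.2.25
    re_path("^debug/sudo", TemplateView.as_view(template_name="sentry/account/sudo.html")),
]
```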
|
https://api.github.com/repos/getsentry/sentry/pulls/33079
|
2022-03-29T20:50:51Z
|
2022-04-05T19:07:46Z
|
2022-04-05T19:07:46Z
|
2022-05-05T16:53:03Z
| 2,462
|
getsentry/sentry
| 44,018
|
Fix warning: Expected type 'bool', got 'int' instead
|
diff --git a/scrapy/pipelines/media.py b/scrapy/pipelines/media.py
index 0a12f3e2c14..0c2ee685688 100644
--- a/scrapy/pipelines/media.py
+++ b/scrapy/pipelines/media.py
@@ -86,7 +86,7 @@ def process_item(self, item, spider):
info = self.spiderinfo
requests = arg_to_iter(self.get_media_requests(item, info))
dlist = [self._process_request(r, info, item) for r in requests]
- dfd = DeferredList(dlist, consumeErrors=1)
+ dfd = DeferredList(dlist, consumeErrors=True)
return dfd.addCallback(self.item_completed, item, info)
def _process_request(self, request, info, item):
diff --git a/scrapy/utils/defer.py b/scrapy/utils/defer.py
index 21ba02a0b5d..6db9cc1177b 100644
--- a/scrapy/utils/defer.py
+++ b/scrapy/utils/defer.py
@@ -105,7 +105,7 @@ def process_parallel(callbacks, input, *a, **kw):
callbacks
"""
dfds = [defer.succeed(input).addCallback(x, *a, **kw) for x in callbacks]
- d = defer.DeferredList(dfds, fireOnOneErrback=1, consumeErrors=1)
+ d = defer.DeferredList(dfds, fireOnOneErrback=True, consumeErrors=True)
d.addCallbacks(lambda r: [x[1] for x in r], lambda f: f.value.subFailure)
return d
|
Minor fix: the **twisted.internet.defer.DeferredList** parameter **consumeErrors** expects a _boolean_, but an _integer_ was being passed instead.
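A tiny self-contained sketch of the corrected call (the deferreds are trivial placeholders):
```python
from twisted.internet import defer

d1 = defer.succeed("first")
d2 = defer.succeed("second")

# Both flags are documented as booleans; passing 1 only worked through truthiness.
dlist = defer.DeferredList([d1, d2], fireOnOneErrback=True, consumeErrors=True)
dlist.addCallback(print)  # [(True, 'first'), (True, 'second')]
```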
|
https://api.github.com/repos/scrapy/scrapy/pulls/4940
|
2020-12-28T17:31:13Z
|
2020-12-30T14:22:27Z
|
2020-12-30T14:22:27Z
|
2020-12-30T14:23:07Z
| 364
|
scrapy/scrapy
| 35,143
|
fix for syntax measure
|
diff --git a/CHANGELOG.md b/CHANGELOG.md
index c21439d94..e7b459dd9 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -25,6 +25,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
- Fix missing `mode` property on file wrapper breaking uploads via `requests` https://github.com/Textualize/rich/pull/2495
- Fix mismatching default value of parameter `ensure_ascii` https://github.com/Textualize/rich/pull/2538
- Remove unused height parameter in `Layout` class https://github.com/Textualize/rich/pull/2540
+- Fixed exception in Syntax.__rich_measure__ for empty files
### Changed
diff --git a/rich/syntax.py b/rich/syntax.py
index 51f890ccb..aed041f34 100644
--- a/rich/syntax.py
+++ b/rich/syntax.py
@@ -593,10 +593,11 @@ def __rich_measure__(
if self.code_width is not None:
width = self.code_width + self._numbers_column_width + padding + 1
return Measurement(self._numbers_column_width, width)
+ lines = self.code.splitlines()
width = (
self._numbers_column_width
+ padding
- + max(cell_len(line) for line in self.code.splitlines())
+ + (max(cell_len(line) for line in lines) if lines else 0)
)
if self.line_numbers:
width += 1
diff --git a/tests/test_syntax.py b/tests/test_syntax.py
index 6b8ce9ec1..5eff05eea 100644
--- a/tests/test_syntax.py
+++ b/tests/test_syntax.py
@@ -392,6 +392,9 @@ def test_syntax_measure():
code = Syntax("Hello, World", "python", code_width=20, line_numbers=True)
assert code.__rich_measure__(console, console.options) == Measurement(3, 24)
+ code = Syntax("", "python", code_width=20, line_numbers=True)
+ assert code.__rich_measure__(console, console.options) == Measurement(3, 24)
+
if __name__ == "__main__":
syntax = Panel.fit(
|
## Type of changes
- [ ] Bug fix
- [ ] New feature
- [ ] Documentation / docstrings
- [ ] Tests
- [ ] Other
## Checklist
- [ ] I've run the latest [black](https://github.com/psf/black) with default args on new code.
- [ ] I've updated CHANGELOG.md and CONTRIBUTORS.md where appropriate.
- [ ] I've added tests for new code.
- [ ] I accept that @willmcgugan may be pedantic in the code review.
## Description
Please describe your changes here. If this fixes a bug, please link to the issue, if possible.
|
https://api.github.com/repos/Textualize/rich/pulls/2558
|
2022-10-01T14:21:58Z
|
2022-10-01T14:29:56Z
|
2022-10-01T14:29:56Z
|
2022-10-01T14:29:57Z
| 515
|
Textualize/rich
| 47,959
|
Add support for video.helsinki.fi archives
|
diff --git a/youtube_dl/extractor/__init__.py b/youtube_dl/extractor/__init__.py
index 3a8cd8a589b..9490df0d84b 100644
--- a/youtube_dl/extractor/__init__.py
+++ b/youtube_dl/extractor/__init__.py
@@ -91,6 +91,7 @@
from .googleplus import GooglePlusIE
from .googlesearch import GoogleSearchIE
from .hark import HarkIE
+from .helsinki import HelsinkiIE
from .hotnewhiphop import HotNewHipHopIE
from .howcast import HowcastIE
from .huffpost import HuffPostIE
diff --git a/youtube_dl/extractor/helsinki.py b/youtube_dl/extractor/helsinki.py
new file mode 100644
index 00000000000..2a54f3cca89
--- /dev/null
+++ b/youtube_dl/extractor/helsinki.py
@@ -0,0 +1,51 @@
+# -*- coding: utf-8 -*-
+
+from __future__ import unicode_literals
+
+import re
+
+from .common import InfoExtractor
+
+
+class HelsinkiIE(InfoExtractor):
+ _VALID_URL = r'https?://video\.helsinki\.fi/Arkisto/flash\.php\?id=(?P<id>\d+)'
+ _TEST = {
+ 'url': 'http://video.helsinki.fi/Arkisto/flash.php?id=20258',
+ 'md5': 'cd829201b890905682eb194cbdea55d7',
+ 'info_dict': {
+ 'id': '20258',
+ 'ext': 'mp4',
+ 'title': 'Tietotekniikkafoorumi-iltapäivä',
+ }
+ }
+
+ def _real_extract(self, url):
+ mobj = re.match(self._VALID_URL, url)
+ vid = mobj.group('id')
+ webpage = self._download_webpage(url, vid)
+ formats = []
+ mobj = re.search('file=((\w+):[^&]+)', webpage)
+ if mobj: formats.append({
+ 'ext': mobj.group(2),
+ 'play_path': mobj.group(1),
+ 'url': 'rtmp://flashvideo.it.helsinki.fi/vod/',
+ 'player_url': 'http://video.helsinki.fi/player.swf',
+ 'format_note': 'sd'
+ })
+
+ mobj = re.search('hd\.file=((\w+):[^&]+)', webpage)
+ if mobj: formats.append({
+ 'ext': mobj.group(2),
+ 'play_path': mobj.group(1),
+ 'url': 'rtmp://flashvideo.it.helsinki.fi/vod/',
+ 'player_url': 'http://video.helsinki.fi/player.swf',
+ 'format_note': 'hd'
+ })
+
+ return {
+ 'id': vid,
+ 'title': self._og_search_title(webpage).replace('Video: ', ''),
+ 'description': self._og_search_description(webpage),
+ 'thumbnail': self._og_search_thumbnail(webpage),
+ 'formats': formats
+ }
|
https://api.github.com/repos/ytdl-org/youtube-dl/pulls/2394
|
2014-02-16T11:06:42Z
|
2014-02-17T10:33:24Z
|
2014-02-17T10:33:24Z
|
2014-07-08T01:59:43Z
| 728
|
ytdl-org/youtube-dl
| 50,154
|
|
fix lowvram
|
diff --git a/fooocus_extras/ip_adapter.py b/fooocus_extras/ip_adapter.py
index 453afa322..adb554f6a 100644
--- a/fooocus_extras/ip_adapter.py
+++ b/fooocus_extras/ip_adapter.py
@@ -179,7 +179,7 @@ def preprocess(img):
global ip_unconds
inputs = clip_vision.processor(images=img, return_tensors="pt")
- comfy.model_management.load_models_gpu([clip_vision.patcher, image_proj_model, ip_layers])
+ comfy.model_management.load_model_gpu(clip_vision.patcher)
pixel_values = inputs['pixel_values'].to(clip_vision.load_device)
if clip_vision.dtype != torch.float32:
@@ -195,11 +195,15 @@ def preprocess(img):
else:
cond = outputs.image_embeds.to(ip_adapter.dtype)
+ comfy.model_management.load_model_gpu(image_proj_model)
+ cond = image_proj_model.model(cond).to(device=ip_adapter.load_device, dtype=ip_adapter.dtype)
+
+ comfy.model_management.load_model_gpu(ip_layers)
+
if ip_unconds is None:
uncond = ip_negative.to(device=ip_adapter.load_device, dtype=ip_adapter.dtype)
ip_unconds = [m(uncond).cpu() for m in ip_layers.model.to_kvs]
- cond = image_proj_model.model(cond).to(device=ip_adapter.load_device, dtype=ip_adapter.dtype)
ip_conds = [m(cond).cpu() for m in ip_layers.model.to_kvs]
return ip_conds
diff --git a/fooocus_version.py b/fooocus_version.py
index 7b8301ffa..70f6a08b4 100644
--- a/fooocus_version.py
+++ b/fooocus_version.py
@@ -1 +1 @@
-version = '2.1.3'
+version = '2.1.4'
|
https://api.github.com/repos/lllyasviel/Fooocus/pulls/564
|
2023-10-08T08:15:49Z
|
2023-10-08T08:15:54Z
|
2023-10-08T08:15:54Z
|
2023-10-08T08:15:57Z
| 410
|
lllyasviel/Fooocus
| 7,068
|
|
Add CodeQL workflow
|
diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml
new file mode 100644
index 0000000000..05000db7af
--- /dev/null
+++ b/.github/workflows/codeql.yml
@@ -0,0 +1,66 @@
+name: "CodeQL"
+
+on:
+ push:
+ branches: [ 'main' ]
+ pull_request:
+ # The branches below must be a subset of the branches above
+ branches: [ 'main' ]
+ schedule:
+ - cron: '26 2 * * 6'
+
+jobs:
+ analyze:
+ name: Analyze
+ runs-on: ${{ (matrix.language == 'swift' && 'macos-latest') || 'ubuntu-latest' }}
+ timeout-minutes: ${{ (matrix.language == 'swift' && 120) || 360 }}
+ permissions:
+ actions: read
+ contents: read
+ security-events: write
+
+ strategy:
+ fail-fast: false
+ matrix:
+ language: [ 'python' ]
+ # CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python', 'ruby' ]
+ # Use only 'java' to analyze code written in Java, Kotlin or both
+ # Use only 'javascript' to analyze code written in JavaScript, TypeScript or both
+ # Learn more about CodeQL language support at https://aka.ms/codeql-docs/language-support
+
+ steps:
+ - name: Checkout repository
+ uses: actions/checkout@v3
+
+ # Initializes the CodeQL tools for scanning.
+ - name: Initialize CodeQL
+ uses: github/codeql-action/init@v2
+ with:
+ languages: ${{ matrix.language }}
+ # If you wish to specify custom queries, you can do so here or in a config file.
+ # By default, queries listed here will override any specified in a config file.
+ # Prefix the list here with "+" to use these queries and those in the config file.
+
+ # For more details on CodeQL's query packs, refer to: https://docs.github.com/en/code-security/code-scanning/automatically-scanning-your-code-for-vulnerabilities-and-errors/configuring-code-scanning#using-queries-in-ql-packs
+ # queries: security-extended,security-and-quality
+
+
+ # Autobuild attempts to build any compiled languages (C/C++, C#, Go, Java, or Swift).
+ # If this step fails, then you should remove it and run the build manually (see below)
+ - name: Autobuild
+ uses: github/codeql-action/autobuild@v2
+
+ # ℹ️ Command-line programs to run using the OS shell.
+ # 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun
+
+ # If the Autobuild fails above, remove it and uncomment the following three lines.
+ # modify them (or add more) to build your code if your project, please refer to the EXAMPLE below for guidance.
+
+ # - run: |
+ # echo "Run, Build Application using script"
+ # ./location_of_script_within_repo/buildscript.sh
+
+ - name: Perform CodeQL Analysis
+ uses: github/codeql-action/analyze@v2
+ with:
+ category: "/language:${{matrix.language}}"
|
Hello from [GitHub Security Lab](https://securitylab.github.com/)!
Your repository is critical to the security of the Open Source Software (OSS) ecosystem and as part of our mission to make OSS safer, we are contributing a [CodeQL configuration for code scanning](https://docs.github.com/en/code-security/code-scanning/automatically-scanning-your-code-for-vulnerabilities-and-errors/configuring-code-scanning-for-a-repository#setting-up-code-scanning-manually) to your repository. By enabling code scanning with CodeQL, you will be able to continuously analyze your code and surface potential vulnerabilities [before they can even reach your codebase](https://docs.github.com/en/code-security/code-scanning/automatically-scanning-your-code-for-vulnerabilities-and-errors/triaging-code-scanning-alerts-in-pull-requests#about-code-scanning-results-on-pull-requests). In fact, you may have seen some alerts already appearing on this pull request!
We’ve tested the configuration manually before opening this pull request and adjusted it to the needs of your particular repository, but feel free to tweak it further! Check [this page](https://docs.github.com/en/code-security/code-scanning/automatically-scanning-your-code-for-vulnerabilities-and-errors/configuring-code-scanning#editing-a-code-scanning-workflow) for detailed documentation.
Questions? Check out the FAQ below!
### FAQ
<details>
<summary>Click here to expand the FAQ section</summary>
#### How often will the code scanning analysis run?
By default, code scanning will trigger a scan with the CodeQL engine on the following events:
* On every pull request — to flag up potential security problems for you to investigate before merging a PR.
* On every push to your default branch and other protected branches — this keeps the analysis results on your repository’s *Security* tab up to date.
* Once a week at a fixed time — to make sure you benefit from the latest updated security analysis even when no code was committed or PRs were opened.
#### What will this cost?
Nothing! The CodeQL engine will run inside GitHub Actions, making use of your [unlimited free compute minutes for public repositories](https://docs.github.com/en/actions/learn-github-actions/usage-limits-billing-and-administration#about-billing-for-github-actions).
#### Where can I see the results of the analysis?
The results of the analysis will be available on the *Security* tab of your repository. You can find more information about the results [here](https://docs.github.com/en/code-security/code-scanning/automatically-scanning-your-code-for-vulnerabilities-and-errors/managing-code-scanning-alerts-for-your-repository#viewing-the-alerts-for-a-repository).
#### What types of problems does CodeQL find?
By default, code scanning runs the [`default` query suite](https://docs.github.com/en/code-security/code-scanning/automatically-scanning-your-code-for-vulnerabilities-and-errors/built-in-codeql-query-suites#default-query-suite).
#### How do I upgrade my CodeQL engine?
No need! New versions of the CodeQL analysis are constantly deployed on GitHub.com; your repository will automatically benefit from the most recently released version.
#### The analysis doesn’t seem to be working
If you get an error in GitHub Actions that indicates that CodeQL wasn’t able to analyze your code, please [follow the instructions here](https://docs.github.com/en/code-security/code-scanning/automatically-scanning-your-code-for-vulnerabilities-and-errors/troubleshooting-the-codeql-workflow) to debug the analysis.
#### Which source code hosting platforms does code scanning support?
GitHub code scanning is deeply integrated within GitHub itself. If you’d like to scan source code that is hosted elsewhere, we suggest that you create a mirror of that code on GitHub.
</details>
|
https://api.github.com/repos/gpt-engineer-org/gpt-engineer/pulls/422
|
2023-06-26T14:57:25Z
|
2023-06-26T18:07:43Z
|
2023-06-26T18:07:43Z
|
2023-06-26T18:07:44Z
| 789
|
gpt-engineer-org/gpt-engineer
| 33,109
|
fix(youtube): update extractor handling consent
|
diff --git a/youtube_dl/extractor/youtube.py b/youtube_dl/extractor/youtube.py
index 9c419c00222..3bf483c1c8a 100644
--- a/youtube_dl/extractor/youtube.py
+++ b/youtube_dl/extractor/youtube.py
@@ -260,16 +260,10 @@ def _initialize_consent(self):
cookies = self._get_cookies('https://www.youtube.com/')
if cookies.get('__Secure-3PSID'):
return
- consent_id = None
- consent = cookies.get('CONSENT')
- if consent:
- if 'YES' in consent.value:
- return
- consent_id = self._search_regex(
- r'PENDING\+(\d+)', consent.value, 'consent', default=None)
- if not consent_id:
- consent_id = random.randint(100, 999)
- self._set_cookie('.youtube.com', 'CONSENT', 'YES+cb.20210328-17-p0.en+FX+%s' % consent_id)
+ socs = cookies.get('SOCS')
+ if socs and not socs.value.startswith('CAA'): # not consented
+ return
+ self._set_cookie('.youtube.com', 'SOCS', 'CAI', secure=True) # accept all (required for mixes)
def _real_initialize(self):
self._initialize_consent()
|
## Please follow the guide below
- You will be asked some questions, please read them **carefully** and answer honestly
- Put an `x` into all the boxes [ ] relevant to your *pull request* (like that [x])
- Use *Preview* tab to see how your *pull request* will actually look like
---
### Before submitting a *pull request* make sure you have:
- [x] [Searched](https://github.com/ytdl-org/youtube-dl/search?q=is%3Apr&type=Issues) the bugtracker for similar pull requests
- [ ] Read [adding new extractor tutorial](https://github.com/ytdl-org/youtube-dl#adding-support-for-a-new-site)
- [x] Read [youtube-dl coding conventions](https://github.com/ytdl-org/youtube-dl#youtube-dl-coding-conventions) and adjusted the code to meet them
- [ ] Covered the code with tests (note that PRs without tests will be REJECTED)
- [x] Checked the code with [flake8](https://pypi.python.org/pypi/flake8)
### In order to be accepted and merged into youtube-dl each piece of code must be in public domain or released under [Unlicense](http://unlicense.org/). Check one of the following options:
- [ ] I am the original author of this code and I am willing to release it under [Unlicense](http://unlicense.org/)
- [x] I am not the original author of this code but it is in public domain or released under [Unlicense](http://unlicense.org/) (provide reliable evidence)
This change is part of yt-dlp (https://github.com/yt-dlp/yt-dlp/commit/378ae9f9fb8e8c86e6ac89c4c5b815b48ce93620)
### What is the purpose of your *pull request*?
- [x] Bug fix
- [ ] Improvement
- [ ] New extractor
- [ ] New feature
---
### Description of your *pull request* and other information
Explanation of your *pull request* in arbitrary form goes here. Please make sure the description explains the purpose and effect of your *pull request* and is worded well enough to be understood. Provide as much context and examples as possible.
This fixes the issue where videos/playlists could not be extracted in certain situations (EU users).
Thought I would throw up a PR to help out. https://github.com/ytdl-org/youtube-dl/issues/32499#issuecomment-1807269769
- Fixes #32499
- Fixes #32500
- Fixes #32501
- Fixes #32531
- Fixes #32533
- Fixes #32538
- Fixes #32540
- Fixes #32542
- Fixes #32543
- Fixes #32546
- Fixes #32550
- Fixes #32552
- Fixes #32554
- Fixes #32555
- Fixes #32561
- Fixes #32575
- Fixes #32583
- Fixes #32582
- Fixes #32586
- Fixes #32601
- Fixes #32630
|
https://api.github.com/repos/ytdl-org/youtube-dl/pulls/32638
|
2023-11-16T00:09:21Z
|
2023-11-22T09:49:32Z
|
2023-11-22T09:49:32Z
|
2024-01-02T20:52:41Z
| 318
|
ytdl-org/youtube-dl
| 49,808
|
Add routes command
|
diff --git a/CHANGES b/CHANGES
index 11ac64307e..ddab541fb2 100644
--- a/CHANGES
+++ b/CHANGES
@@ -32,6 +32,8 @@ Major release, unreleased
- ``Flask.make_response`` raises ``TypeError`` instead of ``ValueError`` for
bad response types. The error messages have been improved to describe why the
type is invalid. (`#2256`_)
+- Add ``routes`` CLI command to output routes registered on the application.
+ (`#2259`_)
.. _#1489: https://github.com/pallets/flask/pull/1489
.. _#1898: https://github.com/pallets/flask/pull/1898
@@ -40,6 +42,7 @@ Major release, unreleased
.. _#2223: https://github.com/pallets/flask/pull/2223
.. _#2254: https://github.com/pallets/flask/pull/2254
.. _#2256: https://github.com/pallets/flask/pull/2256
+.. _#2259: https://github.com/pallets/flask/pull/2259
Version 0.12.1
--------------
diff --git a/flask/cli.py b/flask/cli.py
index 0cc240a2ef..3d361be870 100644
--- a/flask/cli.py
+++ b/flask/cli.py
@@ -12,14 +12,17 @@
import os
import sys
import traceback
-from threading import Lock, Thread
from functools import update_wrapper
+from operator import attrgetter
+from threading import Lock, Thread
import click
+from . import __version__
from ._compat import iteritems, reraise
+from .globals import current_app
from .helpers import get_debug_flag
-from . import __version__
+
class NoAppException(click.UsageError):
"""Raised if an application cannot be found or loaded."""
@@ -319,6 +322,7 @@ def __init__(self, add_default_commands=True, create_app=None,
if add_default_commands:
self.add_command(run_command)
self.add_command(shell_command)
+ self.add_command(routes_command)
self._loaded_plugin_commands = False
@@ -484,6 +488,53 @@ def shell_command():
code.interact(banner=banner, local=ctx)
+@click.command('routes', short_help='Show the routes for the app.')
+@click.option(
+ '--sort', '-s',
+ type=click.Choice(('endpoint', 'methods', 'rule', 'match')),
+ default='endpoint',
+ help=(
+ 'Method to sort routes by. "match" is the order that Flask will match '
+ 'routes when dispatching a request.'
+ )
+)
+@click.option(
+ '--all-methods',
+ is_flag=True,
+ help="Show HEAD and OPTIONS methods."
+)
+@with_appcontext
+def routes_command(sort, all_methods):
+ """Show all registered routes with endpoints and methods."""
+
+ rules = list(current_app.url_map.iter_rules())
+ ignored_methods = set(() if all_methods else ('HEAD', 'OPTIONS'))
+
+ if sort in ('endpoint', 'rule'):
+ rules = sorted(rules, key=attrgetter(sort))
+ elif sort == 'methods':
+ rules = sorted(rules, key=lambda rule: sorted(rule.methods))
+
+ rule_methods = [
+ ', '.join(sorted(rule.methods - ignored_methods)) for rule in rules
+ ]
+
+ headers = ('Endpoint', 'Methods', 'Rule')
+ widths = (
+ max(len(rule.endpoint) for rule in rules),
+ max(len(methods) for methods in rule_methods),
+ max(len(rule.rule) for rule in rules),
+ )
+ widths = [max(len(h), w) for h, w in zip(headers, widths)]
+ row = '{{0:<{0}}} {{1:<{1}}} {{2:<{2}}}'.format(*widths)
+
+ click.echo(row.format(*headers).strip())
+ click.echo(row.format(*('-' * width for width in widths)))
+
+ for rule, methods in zip(rules, rule_methods):
+ click.echo(row.format(rule.endpoint, methods, rule.rule).rstrip())
+
+
cli = FlaskGroup(help="""\
This shell command acts as general utility script for Flask applications.
diff --git a/tests/test_cli.py b/tests/test_cli.py
index 8b291a6376..ab875cef3b 100644
--- a/tests/test_cli.py
+++ b/tests/test_cli.py
@@ -14,17 +14,23 @@
from __future__ import absolute_import, print_function
import os
import sys
+from functools import partial
import click
import pytest
from click.testing import CliRunner
from flask import Flask, current_app
-from flask.cli import AppGroup, FlaskGroup, NoAppException, ScriptInfo, \
+from flask.cli import cli, AppGroup, FlaskGroup, NoAppException, ScriptInfo, \
find_best_app, locate_app, with_appcontext, prepare_exec_for_file, \
find_default_import_path, get_version
+@pytest.fixture
+def runner():
+ return CliRunner()
+
+
def test_cli_name(test_apps):
"""Make sure the CLI object's name is the app's name and not the app itself"""
from cliapp.app import testapp
@@ -129,7 +135,7 @@ def create_app(info):
assert obj.load_app() == app
-def test_with_appcontext():
+def test_with_appcontext(runner):
"""Test of with_appcontext."""
@click.command()
@with_appcontext
@@ -138,13 +144,12 @@ def testcmd():
obj = ScriptInfo(create_app=lambda info: Flask("testapp"))
- runner = CliRunner()
result = runner.invoke(testcmd, obj=obj)
assert result.exit_code == 0
assert result.output == 'testapp\n'
-def test_appgroup():
+def test_appgroup(runner):
"""Test of with_appcontext."""
@click.group(cls=AppGroup)
def cli():
@@ -164,7 +169,6 @@ def test2():
obj = ScriptInfo(create_app=lambda info: Flask("testappgroup"))
- runner = CliRunner()
result = runner.invoke(cli, ['test'], obj=obj)
assert result.exit_code == 0
assert result.output == 'testappgroup\n'
@@ -174,7 +178,7 @@ def test2():
assert result.output == 'testappgroup\n'
-def test_flaskgroup():
+def test_flaskgroup(runner):
"""Test FlaskGroup."""
def create_app(info):
return Flask("flaskgroup")
@@ -187,13 +191,12 @@ def cli(**params):
def test():
click.echo(current_app.name)
- runner = CliRunner()
result = runner.invoke(cli, ['test'])
assert result.exit_code == 0
assert result.output == 'flaskgroup\n'
-def test_print_exceptions():
+def test_print_exceptions(runner):
"""Print the stacktrace if the CLI."""
def create_app(info):
raise Exception("oh no")
@@ -203,8 +206,65 @@ def create_app(info):
def cli(**params):
pass
- runner = CliRunner()
result = runner.invoke(cli, ['--help'])
assert result.exit_code == 0
assert 'Exception: oh no' in result.output
assert 'Traceback' in result.output
+
+
+class TestRoutes:
+ @pytest.fixture
+ def invoke(self, runner):
+ def create_app(info):
+ app = Flask(__name__)
+ app.testing = True
+
+ @app.route('/get_post/<int:x>/<int:y>', methods=['GET', 'POST'])
+ def yyy_get_post(x, y):
+ pass
+
+ @app.route('/zzz_post', methods=['POST'])
+ def aaa_post():
+ pass
+
+ return app
+
+ cli = FlaskGroup(create_app=create_app)
+ return partial(runner.invoke, cli)
+
+ def expect_order(self, order, output):
+ # skip the header and match the start of each row
+ for expect, line in zip(order, output.splitlines()[2:]):
+ # do this instead of startswith for nicer pytest output
+ assert line[:len(expect)] == expect
+
+ def test_simple(self, invoke):
+ result = invoke(['routes'])
+ assert result.exit_code == 0
+ self.expect_order(
+ ['aaa_post', 'static', 'yyy_get_post'],
+ result.output
+ )
+
+ def test_sort(self, invoke):
+ default_output = invoke(['routes']).output
+ endpoint_output = invoke(['routes', '-s', 'endpoint']).output
+ assert default_output == endpoint_output
+ self.expect_order(
+ ['static', 'yyy_get_post', 'aaa_post'],
+ invoke(['routes', '-s', 'methods']).output
+ )
+ self.expect_order(
+ ['yyy_get_post', 'static', 'aaa_post'],
+ invoke(['routes', '-s', 'rule']).output
+ )
+ self.expect_order(
+ ['aaa_post', 'yyy_get_post', 'static'],
+ invoke(['routes', '-s', 'match']).output
+ )
+
+ def test_all_methods(self, invoke):
+ output = invoke(['routes']).output
+ assert 'GET, HEAD, OPTIONS, POST' not in output
+ output = invoke(['routes', '--all-methods']).output
+ assert 'GET, HEAD, OPTIONS, POST' in output
|
Based on #1446, with some ideas from #2092. The whole command is basically reworked, but the most important change is that there is a `match` sort order to show the order Flask will dispatch in (which is really just not sorting).
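A rough usage sketch modelled on the tests in this diff (app and endpoint are made up); with `-s match` the rules come out in the order the URL map would try them:
```python
from click.testing import CliRunner
from flask import Flask
from flask.cli import FlaskGroup


def create_app(info=None):
    app = Flask(__name__)

    @app.route("/items/<int:item_id>")
    def get_item(item_id):
        return "item"

    return app


cli = FlaskGroup(create_app=create_app)
result = CliRunner().invoke(cli, ["routes", "-s", "match"])
print(result.output)  # table with Endpoint / Methods / Rule columns
```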
|
https://api.github.com/repos/pallets/flask/pulls/2259
|
2017-04-26T17:30:22Z
|
2017-04-26T17:43:18Z
|
2017-04-26T17:43:18Z
|
2020-11-14T03:52:49Z
| 2,148
|
pallets/flask
| 20,419
|
Fix AWS::SecretsManager::Secret ARN/Physical ID handling and tests
|
diff --git a/localstack/services/cloudformation/cloudformation_starter.py b/localstack/services/cloudformation/cloudformation_starter.py
index c2976a97bbf9d..1e71fb11938b3 100644
--- a/localstack/services/cloudformation/cloudformation_starter.py
+++ b/localstack/services/cloudformation/cloudformation_starter.py
@@ -15,7 +15,7 @@
from localstack.services.cloudformation import service_models
from localstack.utils.aws import aws_stack
-from .models import elasticsearch, events, kinesisfirehose, logs, secretsmanager
+from .models import elasticsearch, events, kinesisfirehose, logs
LOG = logging.getLogger(__name__)
@@ -76,20 +76,12 @@ def update_physical_resource_id(resource):
elif isinstance(resource, kinesisfirehose.FirehoseDeliveryStream):
resource.physical_resource_id = resource.params.get("DeliveryStreamName")
- elif isinstance(resource, secretsmanager.SecretsManagerSecret):
- resource.physical_resource_id = resource.params.get("Name")
-
elif isinstance(resource, events.EventsRule):
resource.physical_resource_id = resource.params.get("Name")
elif isinstance(resource, elasticsearch.ElasticsearchDomain):
resource.physical_resource_id = resource.params.get("DomainName")
- elif isinstance(resource, secretsmanager.SecretsManagerSecret):
- secret = secretsmanager.SecretsManagerSecret.fetch_details(resource.props["Name"])
- if secret:
- resource.props["ARN"] = resource.physical_resource_id = secret["ARN"]
-
elif isinstance(resource, dynamodb_models.Table):
resource.physical_resource_id = resource.name
diff --git a/localstack/services/cloudformation/models/secretsmanager.py b/localstack/services/cloudformation/models/secretsmanager.py
index c34ff2705c9c9..3cac2a5d41f5e 100644
--- a/localstack/services/cloudformation/models/secretsmanager.py
+++ b/localstack/services/cloudformation/models/secretsmanager.py
@@ -21,9 +21,7 @@ def cloudformation_type():
return "AWS::SecretsManager::Secret"
def get_physical_resource_id(self, attribute, **kwargs):
- props = self.props
- result = props.get("ARN") or aws_stack.secretsmanager_secret_arn(props["Name"])
- return result
+ return self.props.get("ARN")
def get_cfn_attribute(self, attribute_name):
if attribute_name in (REF_ARN_ATTRS + REF_ID_ATTRS):
@@ -77,8 +75,8 @@ def generate_secret_value(
@staticmethod
def add_defaults(resource, stack_name: str):
- role_name = resource.get("Properties", {}).get("Name")
- if not role_name:
+ name = resource.get("Properties", {}).get("Name")
+ if not name:
resource["Properties"]["Name"] = generate_default_name(
stack_name, resource["LogicalResourceId"]
)
diff --git a/localstack/utils/aws/aws_stack.py b/localstack/utils/aws/aws_stack.py
index ba52f5a7c523b..cae8a87ae7d7d 100644
--- a/localstack/utils/aws/aws_stack.py
+++ b/localstack/utils/aws/aws_stack.py
@@ -535,6 +535,7 @@ def get_iam_role(resource, env=None):
return "role-%s" % resource
+# TODO: remove this (can't statically define secret ARN because it includes a random suffix)
def secretsmanager_secret_arn(secret_id, account_id=None, region_name=None):
if ":" in (secret_id or ""):
return secret_id
diff --git a/localstack/utils/cloudformation/template_deployer.py b/localstack/utils/cloudformation/template_deployer.py
index ab7b813fdbbbf..02b29f4ddb9a7 100644
--- a/localstack/utils/cloudformation/template_deployer.py
+++ b/localstack/utils/cloudformation/template_deployer.py
@@ -84,15 +84,6 @@ def lambda_get_params():
# ----------------
-def get_secret_arn(secret_name, account_id=None):
- # TODO: create logic to create static without lookup table!
- from localstack.services.secretsmanager import secretsmanager_starter
-
- storage = secretsmanager_starter.SECRET_ARN_STORAGE
- key = "%s_%s" % (aws_stack.get_region(), secret_name)
- return storage.get(key) or storage.get(secret_name)
-
-
def find_stack(stack_name):
from localstack.services.cloudformation.cloudformation_api import find_stack as api_find_stack
@@ -1024,11 +1015,6 @@ def determine_resource_physical_id(
if attribute == "Arn":
return aws_stack.role_arn(resource_props.get("RoleName"))
return resource_props.get("RoleName")
- elif resource_type == "SecretsManager::Secret":
- arn = get_secret_arn(resource_props.get("Name")) or ""
- if attribute == "Arn":
- return arn
- return arn.split(":")[-1]
elif resource_type == "IAM::Policy":
if attribute == "Arn":
return aws_stack.policy_arn(resource_props.get("PolicyName"))
diff --git a/tests/integration/cloudformation/test_cloudformation_secretsmanager.py b/tests/integration/cloudformation/test_cloudformation_secretsmanager.py
index ddff456cad024..18a6c9c49b489 100644
--- a/tests/integration/cloudformation/test_cloudformation_secretsmanager.py
+++ b/tests/integration/cloudformation/test_cloudformation_secretsmanager.py
@@ -15,7 +15,7 @@
Description: Aurora Password
Name: %s
GenerateSecretString:
- SecretStringTemplate: !Sub '{"username": "${Username}"}'
+ SecretStringTemplate: '{"username": "localstack-user"}'
GenerateStringKey: "password"
PasswordLength: 30
IncludeSpace: false
@@ -47,17 +47,21 @@ def test_cfn_secretsmanager_gen_secret(
assert "/dev/db/pass" == secret["Name"]
assert "secret:/dev/db/pass" in secret["ARN"]
- # assert that secret has ben generated and added to the result template JSON
- value = secretsmanager_client.get_secret_value(SecretId="/dev/db/pass")
- secret = value.get("SecretString")
- secret = json.loads(secret)
- assert "password" in secret
- assert len(secret["password"]) == 30
+ # assert that secret has been generated and added to the result template JSON
+ secret_value = secretsmanager_client.get_secret_value(SecretId="/dev/db/pass")[
+ "SecretString"
+ ]
+ secret_json = json.loads(secret_value)
+ assert "password" in secret_json
+ assert len(secret_json["password"]) == 30
# assert that the Ref properly returns the secret ARN
result = cfn_client.describe_stacks(StackName=stack_name)["Stacks"][0]
assert len(result["Outputs"]) == 1
+
assert result["Outputs"][0]["OutputKey"] == "SecretARN"
- assert re.match(r".*%s-[a-zA-Z0-9]+" % SECRET_NAME, result["Outputs"][0]["OutputValue"])
+ output_secret_arn = result["Outputs"][0]["OutputValue"]
+ assert output_secret_arn == secret["ARN"]
+ assert re.match(r".*%s-[a-zA-Z0-9]+" % SECRET_NAME, output_secret_arn)
finally:
cleanup_stacks([stack_id])
|
There were some issues with the AWS::SecretsManager::Secret resource and the related CloudFormation test.
Removes the Username reference from the test template, since it isn't actually defined and the template correctly fails when deployed on AWS. The test now works both against AWS and LocalStack.
Also removes some presumably legacy code and unnecessary physical-id-generating fallbacks that led to some issues/race conditions when deploying the secret where the output resolved to the wrong ARN in up to 50% of test runs.
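For reference, a small sketch of why the secret ARN cannot be derived statically (secret name arbitrary; against LocalStack you would typically add `endpoint_url="http://localhost:4566"`):
```python
import boto3

client = boto3.client("secretsmanager", region_name="us-east-1")

response = client.create_secret(Name="/dev/db/pass", SecretString="{}")
# e.g. arn:aws:secretsmanager:us-east-1:000000000000:secret:/dev/db/pass-AbC123
# The trailing suffix is random, so the ARN is only known after creation.
print(response["ARN"])
```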
|
https://api.github.com/repos/localstack/localstack/pulls/4845
|
2021-11-02T23:35:55Z
|
2021-11-03T19:19:29Z
|
2021-11-03T19:19:29Z
|
2021-11-03T19:19:32Z
| 1,639
|
localstack/localstack
| 28,421
|
[AWAAN] Extract uploader
|
diff --git a/youtube_dl/extractor/awaan.py b/youtube_dl/extractor/awaan.py
index a2603bbffef..3a7700cd43f 100644
--- a/youtube_dl/extractor/awaan.py
+++ b/youtube_dl/extractor/awaan.py
@@ -48,6 +48,7 @@ def _parse_video_data(self, video_data, video_id, is_live):
'duration': int_or_none(video_data.get('duration')),
'timestamp': parse_iso8601(video_data.get('create_time'), ' '),
'is_live': is_live,
+ 'uploader_id': video_data.get('user_id'),
}
@@ -107,6 +108,7 @@ class AWAANLiveIE(AWAANBaseIE):
'title': 're:Dubai Al Oula [0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}$',
'upload_date': '20150107',
'timestamp': 1420588800,
+ 'uploader_id': '71',
},
'params': {
# m3u8 download
|
### Before submitting a *pull request* make sure you have:
- [x] [Searched](https://github.com/ytdl-org/youtube-dl/search?q=is%3Apr&type=Issues) the bugtracker for similar pull requests
- [x] Read [adding new extractor tutorial](https://github.com/ytdl-org/youtube-dl#adding-support-for-a-new-site)
- [x] Read [youtube-dl coding conventions](https://github.com/ytdl-org/youtube-dl#youtube-dl-coding-conventions) and adjusted the code to meet them
- [x] Covered the code with tests (note that PRs without tests will be REJECTED)
- [x] Checked the code with [flake8](https://pypi.python.org/pypi/flake8)
### In order to be accepted and merged into youtube-dl each piece of code must be in public domain or released under [Unlicense](http://unlicense.org/). Check one of the following options:
- [x] I am the original author of this code and I am willing to release it under [Unlicense](http://unlicense.org/)
- [ ] I am not the original author of this code but it is in public domain or released under [Unlicense](http://unlicense.org/) (provide reliable evidence)
### What is the purpose of your *pull request*?
- [ ] Bug fix
- [x] Improvement
- [ ] New extractor
- [ ] New feature
---
### Description of your *pull request* and other information
This fixes the test for me. See also 93933c9819fa1282081a5f0761cbeabc9fbea336 and #10773.
|
https://api.github.com/repos/ytdl-org/youtube-dl/pulls/27963
|
2021-01-25T12:49:34Z
|
2021-01-27T19:06:13Z
|
2021-01-27T19:06:13Z
|
2021-01-27T19:12:33Z
| 267
|
ytdl-org/youtube-dl
| 50,249
|
Fix typos
|
diff --git a/examples/mnist_acgan.py b/examples/mnist_acgan.py
index 8f7ebddc502..5d7584fbb78 100644
--- a/examples/mnist_acgan.py
+++ b/examples/mnist_acgan.py
@@ -217,10 +217,10 @@ def build_discriminator():
aux_y = np.concatenate((label_batch, sampled_labels), axis=0)
# we don't want the discriminator to also maximize the classification
- # accuracy of the auxilary classifier on generated images, so we
+ # accuracy of the auxiliary classifier on generated images, so we
# don't train discriminator to produce class labels for generated
# images (see https://openreview.net/forum?id=rJXTf9Bxg).
- # To preserve sum of sample weights for the auxilary classifier,
+ # To preserve sum of sample weights for the auxiliary classifier,
# we assign sample weight of 2 to the real images.
disc_sample_weight = [np.ones(2 * len(image_batch)),
np.concatenate((np.ones(len(image_batch)) * 2,
|
This PR fixes a typo: `auxilary` -> `auxiliary`.
|
https://api.github.com/repos/keras-team/keras/pulls/11340
|
2018-10-09T11:14:26Z
|
2018-10-09T12:35:20Z
|
2018-10-09T12:35:20Z
|
2018-10-09T12:42:18Z
| 244
|
keras-team/keras
| 47,430
|
Update Hub Path inputs
|
diff --git a/hubconf.py b/hubconf.py
index 7ef512655ae..93ea84d69dd 100644
--- a/hubconf.py
+++ b/hubconf.py
@@ -115,9 +115,11 @@ def yolov5x6(pretrained=True, channels=3, classes=80, autoshape=True, verbose=Tr
import cv2
import numpy as np
from PIL import Image
+ from pathlib import Path
imgs = ['data/images/zidane.jpg', # filename
- 'https://github.com/ultralytics/yolov5/releases/download/v1.0/zidane.jpg', # URI
+ Path('data/images/zidane.jpg'), # Path
+ 'https://ultralytics.com/images/zidane.jpg', # URI
cv2.imread('data/images/bus.jpg')[:, :, ::-1], # OpenCV
Image.open('data/images/bus.jpg'), # PIL
np.zeros((320, 640, 3))] # numpy
diff --git a/models/common.py b/models/common.py
index 901648b693a..fc085e22b16 100644
--- a/models/common.py
+++ b/models/common.py
@@ -2,7 +2,7 @@
import logging
from copy import copy
-from pathlib import Path, PosixPath
+from pathlib import Path
import math
import numpy as np
@@ -248,7 +248,7 @@ def forward(self, imgs, size=640, augment=False, profile=False):
shape0, shape1, files = [], [], [] # image and inference shapes, filenames
for i, im in enumerate(imgs):
f = f'image{i}' # filename
- if isinstance(im, (str, PosixPath)): # filename or uri
+ if isinstance(im, (str, Path)): # filename or uri
im, f = Image.open(requests.get(im, stream=True).raw if str(im).startswith('http') else im), im
im = np.asarray(exif_transpose(im))
elif isinstance(im, Image.Image): # PIL Image
|
## 🛠️ PR Summary
<sub>Made with ❤️ by [Ultralytics Actions](https://github.com/ultralytics/actions)<sub>
### 🌟 Summary
Enhanced support for image file path handling and simplified code by using a unified path import.
### 📊 Key Changes
- 🛠 Updated image loading examples in `hubconf.py` to use `Path` from `pathlib`.
- 🌐 Changed a test image URL to point to the updated `ultralytics.com` domain.
- 🧹 Refactored `models/common.py` by removing `PosixPath` to unify path handling under `Path`.
### 🎯 Purpose & Impact
- ✔️ **Usability Improvement**: By using `Path`, developers can now pass image locations as path objects, making the code more consistent and cross-platform friendly.
- 📡 **URL Update**: The change in URL ensures users are accessing the latest resources from an official domain.
- 📁 **Code Maintenance**: Simplifying the imports in `common.py` results in cleaner code, which is easier to read and maintain. The consolidation also prevents potential bugs from inconsistent path handling across different operating system environments.
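As a rough illustration of the user-facing effect (a hedged sketch, assuming the standard `ultralytics/yolov5` Torch Hub entrypoint and a local copy of `data/images/zidane.jpg`), a `pathlib.Path` can now be passed anywhere a filename string was accepted:

```python
from pathlib import Path

import torch

# Load the small pretrained model from Torch Hub (entrypoint name assumed).
model = torch.hub.load('ultralytics/yolov5', 'yolov5s')

# With the unified Path handling, a pathlib.Path behaves like a filename string.
results = model(Path('data/images/zidane.jpg'))
results.print()
```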
|
https://api.github.com/repos/ultralytics/yolov5/pulls/4200
|
2021-07-28T14:44:41Z
|
2021-07-28T14:55:39Z
|
2021-07-28T14:55:39Z
|
2024-01-19T16:38:39Z
| 478
|
ultralytics/yolov5
| 25,670
|
BUG: crosstab with duplicate column or index labels
|
diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst
index 049ccc0e6c4df..b16888338cda5 100644
--- a/doc/source/whatsnew/v1.2.0.rst
+++ b/doc/source/whatsnew/v1.2.0.rst
@@ -729,6 +729,7 @@ Groupby/resample/rolling
Reshaping
^^^^^^^^^
+- Bug in :meth:`DataFrame.crosstab` was returning incorrect results on inputs with duplicate row names, duplicate column names or duplicate names between row and column labels (:issue:`22529`)
- Bug in :meth:`DataFrame.pivot_table` with ``aggfunc='count'`` or ``aggfunc='sum'`` returning ``NaN`` for missing categories when pivoted on a ``Categorical``. Now returning ``0`` (:issue:`31422`)
- Bug in :func:`concat` and :class:`DataFrame` constructor where input index names are not preserved in some cases (:issue:`13475`)
- Bug in func :meth:`crosstab` when using multiple columns with ``margins=True`` and ``normalize=True`` (:issue:`35144`)
diff --git a/pandas/core/reshape/pivot.py b/pandas/core/reshape/pivot.py
index c1198cdfcda81..22887cede51ed 100644
--- a/pandas/core/reshape/pivot.py
+++ b/pandas/core/reshape/pivot.py
@@ -5,6 +5,7 @@
List,
Optional,
Sequence,
+ Set,
Tuple,
Union,
cast,
@@ -578,29 +579,37 @@ def crosstab(
b 0 1 0
c 0 0 0
"""
+ if values is None and aggfunc is not None:
+ raise ValueError("aggfunc cannot be used without values.")
+
+ if values is not None and aggfunc is None:
+ raise ValueError("values cannot be used without an aggfunc.")
+
index = com.maybe_make_list(index)
columns = com.maybe_make_list(columns)
- rownames = _get_names(index, rownames, prefix="row")
- colnames = _get_names(columns, colnames, prefix="col")
-
common_idx = None
pass_objs = [x for x in index + columns if isinstance(x, (ABCSeries, ABCDataFrame))]
if pass_objs:
common_idx = get_objs_combined_axis(pass_objs, intersect=True, sort=False)
- data: Dict = {}
- data.update(zip(rownames, index))
- data.update(zip(colnames, columns))
-
- if values is None and aggfunc is not None:
- raise ValueError("aggfunc cannot be used without values.")
+ rownames = _get_names(index, rownames, prefix="row")
+ colnames = _get_names(columns, colnames, prefix="col")
- if values is not None and aggfunc is None:
- raise ValueError("values cannot be used without an aggfunc.")
+ # duplicate names mapped to unique names for pivot op
+ (
+ rownames_mapper,
+ unique_rownames,
+ colnames_mapper,
+ unique_colnames,
+ ) = _build_names_mapper(rownames, colnames)
from pandas import DataFrame
+ data = {
+ **dict(zip(unique_rownames, index)),
+ **dict(zip(unique_colnames, columns)),
+ }
df = DataFrame(data, index=common_idx)
original_df_cols = df.columns
@@ -613,8 +622,8 @@ def crosstab(
table = df.pivot_table(
["__dummy__"],
- index=rownames,
- columns=colnames,
+ index=unique_rownames,
+ columns=unique_colnames,
margins=margins,
margins_name=margins_name,
dropna=dropna,
@@ -633,6 +642,9 @@ def crosstab(
table, normalize=normalize, margins=margins, margins_name=margins_name
)
+ table = table.rename_axis(index=rownames_mapper, axis=0)
+ table = table.rename_axis(columns=colnames_mapper, axis=1)
+
return table
@@ -731,3 +743,57 @@ def _get_names(arrs, names, prefix: str = "row"):
names = list(names)
return names
+
+
+def _build_names_mapper(
+ rownames: List[str], colnames: List[str]
+) -> Tuple[Dict[str, str], List[str], Dict[str, str], List[str]]:
+ """
+ Given the names of a DataFrame's rows and columns, returns a set of unique row
+ and column names and mappers that convert to original names.
+
+ A row or column name is replaced if it is duplicate among the rows of the inputs,
+ among the columns of the inputs or between the rows and the columns.
+
+ Parameters
+ ----------
+ rownames: list[str]
+ colnames: list[str]
+
+ Returns
+ -------
+ Tuple(Dict[str, str], List[str], Dict[str, str], List[str])
+
+ rownames_mapper: dict[str, str]
+ a dictionary with new row names as keys and original rownames as values
+ unique_rownames: list[str]
+ a list of rownames with duplicate names replaced by dummy names
+ colnames_mapper: dict[str, str]
+ a dictionary with new column names as keys and original column names as values
+ unique_colnames: list[str]
+ a list of column names with duplicate names replaced by dummy names
+
+ """
+
+ def get_duplicates(names):
+ seen: Set = set()
+ return {name for name in names if name not in seen}
+
+ shared_names = set(rownames).intersection(set(colnames))
+ dup_names = get_duplicates(rownames) | get_duplicates(colnames) | shared_names
+
+ rownames_mapper = {
+ f"row_{i}": name for i, name in enumerate(rownames) if name in dup_names
+ }
+ unique_rownames = [
+ f"row_{i}" if name in dup_names else name for i, name in enumerate(rownames)
+ ]
+
+ colnames_mapper = {
+ f"col_{i}": name for i, name in enumerate(colnames) if name in dup_names
+ }
+ unique_colnames = [
+ f"col_{i}" if name in dup_names else name for i, name in enumerate(colnames)
+ ]
+
+ return rownames_mapper, unique_rownames, colnames_mapper, unique_colnames
diff --git a/pandas/tests/reshape/test_crosstab.py b/pandas/tests/reshape/test_crosstab.py
index 5f6037276b31c..6faf64789c687 100644
--- a/pandas/tests/reshape/test_crosstab.py
+++ b/pandas/tests/reshape/test_crosstab.py
@@ -535,15 +535,32 @@ def test_crosstab_with_numpy_size(self):
)
tm.assert_frame_equal(result, expected)
- def test_crosstab_dup_index_names(self):
- # GH 13279
- s = Series(range(3), name="foo")
+ def test_crosstab_duplicate_names(self):
+ # GH 13279 / 22529
+
+ s1 = Series(range(3), name="foo")
+ s2_foo = Series(range(1, 4), name="foo")
+ s2_bar = Series(range(1, 4), name="bar")
+ s3 = Series(range(3), name="waldo")
+
+ # check result computed with duplicate labels against
+ # result computed with unique labels, then relabelled
+ mapper = {"bar": "foo"}
+
+ # duplicate row, column labels
+ result = crosstab(s1, s2_foo)
+ expected = crosstab(s1, s2_bar).rename_axis(columns=mapper, axis=1)
+ tm.assert_frame_equal(result, expected)
+
+ # duplicate row, unique column labels
+ result = crosstab([s1, s2_foo], s3)
+ expected = crosstab([s1, s2_bar], s3).rename_axis(index=mapper, axis=0)
+ tm.assert_frame_equal(result, expected)
+
+ # unique row, duplicate column labels
+ result = crosstab(s3, [s1, s2_foo])
+ expected = crosstab(s3, [s1, s2_bar]).rename_axis(columns=mapper, axis=1)
- result = crosstab(s, s)
- expected_index = Index(range(3), name="foo")
- expected = DataFrame(
- np.eye(3, dtype=np.int64), index=expected_index, columns=expected_index
- )
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("names", [["a", ("b", "c")], [("a", "b"), "c"]])
|
- [x] closes #22529
- [x] tests added / passed
- [x] passes black pandas
- [x] passes git diff upstream/master -u -- "*.py" | flake8 --diff
- [x] whatsnew entry
Picking up from #28474
cc @jreback in case this can go in in time for 1.2
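A minimal sketch of the behaviour being fixed, mirroring the new `test_crosstab_duplicate_names` test (the duplicate label is deliberate, to trigger the old bug):

```python
import pandas as pd

s1 = pd.Series(range(3), name="foo")
s2 = pd.Series(range(1, 4), name="foo")  # deliberately reuses the name "foo"

# With this change, duplicate names are remapped to unique dummy names for the
# internal pivot and restored on the result's axes afterwards.
result = pd.crosstab(s1, s2)
print(result)
```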
|
https://api.github.com/repos/pandas-dev/pandas/pulls/37997
|
2020-11-22T05:12:56Z
|
2020-11-28T17:29:04Z
|
2020-11-28T17:29:04Z
|
2020-11-28T17:29:11Z
| 2,050
|
pandas-dev/pandas
| 45,580
|
Additional intro blockchain doc
|
diff --git a/blockchain/README.md b/blockchain/README.md
index 5ae7f95ec981..b5fab7b36eaa 100644
--- a/blockchain/README.md
+++ b/blockchain/README.md
@@ -1,7 +1,44 @@
# Blockchain
-A Blockchain is a type of distributed ledger technology (DLT) that consists of growing list of records, called blocks, that are securely linked together using cryptography.
+A Blockchain is a type of **distributed ledger** technology (DLT) that consists of a growing list of records, called **blocks**, that are securely linked together using **cryptography**.
+Let's break down the terminology in the above definition. We find the following terms:
+
+- Digital Ledger Technology (DLT)
+- Blocks
+- Cryptography
+
+## Digital Ledger Technology
+
+ It is otherwise known as distributed ledger technology, and it is simply the opposite of a centralized database. Firstly, what is a **ledger**? A ledger is a book or collection of accounts that records account transactions.
+
+ *Why is Blockchain addressed as digital ledger if it can record more than account transactions? What other transaction details and information can it hold?*
+
+Digital Ledger Technology is just a ledger that is shared among multiple nodes. This way there is no need for a central authority to hold the information. Okay, how does it differ from a centralized database, and what are its benefits?
+
+Consider an organization which has 4 branches whose data are stored in a centralized database. Even if one branch needs data from the ledger, it needs approval from whoever is in charge of the database. And if someone hacks the central database, they get to tamper with and control all the data.
+
+Now let's assume every branch has a copy of the ledger; once anything is added to the ledger by any branch, it is automatically reflected in the ledgers held by all the other branches. This is done using a peer-to-peer network.
+
+So this means that even if information is tampered with in one branch, we can find out. If one branch is hacked, we can be alerted and safeguard the other branches. Now, think of these branches as computers or nodes, and of the ledger as a transaction record or digital receipt. If a ledger is tampered with on one node, we can detect it, since there will be a mismatch with the information held by the other nodes. So this is the concept of Digital Ledger Technology.
+
+*Is it required for all nodes to have access to all information in other nodes? Wouldn't this require enormous storage space in each node?*
+
+## Blocks
+
+In short, a block is nothing but a collection of records with a labelled header. Blocks are connected cryptographically. Once a new block is added to the chain, the previous block is linked to it, or more precisely locked, and hence will remain unaltered. We can understand this concept better once we get a clear understanding of the working mechanism of blockchain.
+
+## Cryptography
+
+It is the practice and study of secure communication techniques in the midst of adversarial behavior. More broadly, cryptography is the creation and analysis of protocols that prevent third parties or the general public from accessing private messages.
+
+*Which cryptography technology is most widely used in blockchain and why?*
+
+So, in general, blockchain technology is a distributed record holder which records the information about ownership of an asset. To define precisely,
+> Blockchain is a distributed, immutable ledger that makes it easier to record transactions and track assets in a corporate network.
+An asset could be tangible (such as a house, car, cash, or land) or intangible (such as intellectual property, patents, copyrights, or branding). A blockchain network can track and sell almost anything of value, lowering risk and costs for everyone involved.
+
+So this is all for the introduction to blockchain technology. To learn more about the topic, refer to the links below.
* <https://en.wikipedia.org/wiki/Blockchain>
* <https://en.wikipedia.org/wiki/Chinese_remainder_theorem>
* <https://en.wikipedia.org/wiki/Diophantine_equation>
|
**With respect to *Blockchain* File**
### Describe your change:
* [ ] Add an algorithm?
* [ ] Fix a bug or typo in an existing algorithm?
* [x] Documentation change?
### Checklist:
* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [ ] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [ ] All new Python files are placed inside an existing directory.
* [ ] All filenames are in all lowercase characters with no spaces or dashes.
* [ ] All functions and variable names follow Python naming conventions.
* [ ] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [ ] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [ ] All new algorithms include at least one URL that points to Wikipedia or another similar explanation.
* [ ] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{6642}`.
|
https://api.github.com/repos/TheAlgorithms/Python/pulls/7974
|
2022-11-06T18:37:26Z
|
2022-11-15T13:38:00Z
|
2022-11-15T13:38:00Z
|
2022-11-15T13:38:00Z
| 840
|
TheAlgorithms/Python
| 30,322
|
Use _BaseAutoModelClass's register method
|
diff --git a/src/transformers/models/auto/auto_factory.py b/src/transformers/models/auto/auto_factory.py
index d322f83668ea1..5519d82e7aabf 100644
--- a/src/transformers/models/auto/auto_factory.py
+++ b/src/transformers/models/auto/auto_factory.py
@@ -418,7 +418,7 @@ def from_config(cls, config, **kwargs):
else:
repo_id = config.name_or_path
model_class = get_class_from_dynamic_module(class_ref, repo_id, **kwargs)
- cls._model_mapping.register(config.__class__, model_class, exist_ok=True)
+ cls.register(config.__class__, model_class, exist_ok=True)
_ = kwargs.pop("code_revision", None)
return model_class._from_config(config, **kwargs)
elif type(config) in cls._model_mapping.keys():
@@ -477,7 +477,7 @@ def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
class_ref, pretrained_model_name_or_path, **hub_kwargs, **kwargs
)
_ = hub_kwargs.pop("code_revision", None)
- cls._model_mapping.register(config.__class__, model_class, exist_ok=True)
+ cls.register(config.__class__, model_class, exist_ok=True)
return model_class.from_pretrained(
pretrained_model_name_or_path, *model_args, config=config, **hub_kwargs, **kwargs
)
|
# What does this PR do?
Switching `_BaseAutoModelClass`'s `from_pretrained` and `from_config` to use the register classmethod that it defines rather than using the `_LazyAutoMapping` register method directly. This makes use of the additional consistency check within `_BaseAutoModelClass`'s register method.
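For context, a hedged sketch of the check this buys (class names below are illustrative, not from this PR): `_BaseAutoModelClass.register` verifies that the model class declares the matching `config_class` before delegating to the mapping, a check that the direct `_model_mapping.register` call skipped.

```python
from transformers import AutoConfig, AutoModel, PretrainedConfig, PreTrainedModel

class MyConfig(PretrainedConfig):
    model_type = "my-model"

class MyModel(PreTrainedModel):
    config_class = MyConfig  # must match the config being registered

AutoConfig.register("my-model", MyConfig)
AutoModel.register(MyConfig, MyModel)  # raises if MyModel.config_class is not MyConfig
```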
<!--
Congratulations! You've made it this far! You're not quite done yet though.
Once merged, your PR is going to appear in the release notes with the title you set, so make sure it's a great title that fully reflects the extent of your awesome contribution.
Then, please replace this with a description of the change and which issue is fixed (if applicable). Please also include relevant motivation and context. List any dependencies (if any) that are required for this change.
Once you're done, someone will review your PR shortly (see the section "Who can review?" below to tag some potential reviewers). They may suggest changes to make the code even better. If no one reviewed your PR after a week has passed, don't hesitate to post a new comment @-mentioning the same persons---sometimes notifications get lost.
-->
## Before submitting
- [ ] Did you read the [contributor guideline](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md#start-contributing-pull-requests),
Pull Request section? Yes
- [ ] Was this discussed/approved via a Github issue or the [forum](https://discuss.huggingface.co/)? Discussed briefly in [#24737 ](https://github.com/huggingface/transformers/issues/24737)
- [ ] Did you make sure to update the documentation with your changes? No public methods/classes changed
- [ ] Did you write any new necessary tests? None necessary
## Who can review?
Anyone in the community is free to review the PR once the tests have passed. @sgugger
<!-- Your PR will be replied to more quickly if you can figure out the right person to tag with @
If you know how to use git blame, that is the easiest way, otherwise, here is a rough guide of **who to tag**.
Please tag fewer than 3 people.
Models:
- text models: @ArthurZucker and @younesbelkada
- vision models: @amyeroberts
- speech models: @sanchit-gandhi
- graph models: @clefourrier
Library:
- flax: @sanchit-gandhi
- generate: @gante
- pipelines: @Narsil
- tensorflow: @gante and @Rocketknight1
- tokenizers: @ArthurZucker
- trainer: @sgugger
Integrations:
- deepspeed: HF Trainer/Accelerate: @pacman100
- ray/raytune: @richardliaw, @amogkam
Documentation: @sgugger, @stevhliu and @MKhalusova
HF projects:
- accelerate: [different repo](https://github.com/huggingface/accelerate)
- datasets: [different repo](https://github.com/huggingface/datasets)
- diffusers: [different repo](https://github.com/huggingface/diffusers)
- rust tokenizers: [different repo](https://github.com/huggingface/tokenizers)
Maintained examples (not research project or legacy):
- Flax: @sanchit-gandhi
- PyTorch: @sgugger
- TensorFlow: @Rocketknight1
-->
|
https://api.github.com/repos/huggingface/transformers/pulls/24810
|
2023-07-13T17:56:11Z
|
2023-07-13T19:24:52Z
|
2023-07-13T19:24:52Z
|
2023-07-13T19:49:57Z
| 312
|
huggingface/transformers
| 12,022
|
Update PyPI links
|
diff --git a/certbot/docs/packaging.rst b/certbot/docs/packaging.rst
index 75349ad14e9..89e22bfc1d9 100644
--- a/certbot/docs/packaging.rst
+++ b/certbot/docs/packaging.rst
@@ -7,21 +7,21 @@ Releases
We release packages and upload them to PyPI (wheels and source tarballs).
-- https://pypi.python.org/pypi/acme
-- https://pypi.python.org/pypi/certbot
-- https://pypi.python.org/pypi/certbot-apache
-- https://pypi.python.org/pypi/certbot-nginx
-- https://pypi.python.org/pypi/certbot-dns-cloudflare
-- https://pypi.python.org/pypi/certbot-dns-digitalocean
-- https://pypi.python.org/pypi/certbot-dns-dnsimple
-- https://pypi.python.org/pypi/certbot-dns-dnsmadeeasy
-- https://pypi.python.org/pypi/certbot-dns-google
-- https://pypi.python.org/pypi/certbot-dns-linode
-- https://pypi.python.org/pypi/certbot-dns-luadns
-- https://pypi.python.org/pypi/certbot-dns-nsone
-- https://pypi.python.org/pypi/certbot-dns-ovh
-- https://pypi.python.org/pypi/certbot-dns-rfc2136
-- https://pypi.python.org/pypi/certbot-dns-route53
+- https://pypi.org/project/acme/
+- https://pypi.org/project/certbot/
+- https://pypi.org/project/certbot-apache/
+- https://pypi.org/project/certbot-nginx/
+- https://pypi.org/project/certbot-dns-cloudflare/
+- https://pypi.org/project/certbot-dns-digitalocean/
+- https://pypi.org/project/certbot-dns-dnsimple/
+- https://pypi.org/project/certbot-dns-dnsmadeeasy/
+- https://pypi.org/project/certbot-dns-google/
+- https://pypi.org/project/certbot-dns-linode/
+- https://pypi.org/project/certbot-dns-luadns/
+- https://pypi.org/project/certbot-dns-nsone/
+- https://pypi.org/project/certbot-dns-ovh/
+- https://pypi.org/project/certbot-dns-rfc2136/
+- https://pypi.org/project/certbot-dns-route53/
The following scripts are used in the process:
|
Switch from the legacy pypi.python.org/pypi/ to the canonical pypi.org/project/; the former redirects to the latter.
|
https://api.github.com/repos/certbot/certbot/pulls/9733
|
2023-07-14T16:26:52Z
|
2023-07-15T22:58:00Z
|
2023-07-15T22:58:00Z
|
2023-07-15T23:10:15Z
| 587
|
certbot/certbot
| 2,554
|
Bump reolink-aio to 0.5.16
|
diff --git a/homeassistant/components/reolink/manifest.json b/homeassistant/components/reolink/manifest.json
index 6a4ae98a1546e7..413c106b53ea51 100644
--- a/homeassistant/components/reolink/manifest.json
+++ b/homeassistant/components/reolink/manifest.json
@@ -18,5 +18,5 @@
"documentation": "https://www.home-assistant.io/integrations/reolink",
"iot_class": "local_push",
"loggers": ["reolink_aio"],
- "requirements": ["reolink-aio==0.5.15"]
+ "requirements": ["reolink-aio==0.5.16"]
}
diff --git a/requirements_all.txt b/requirements_all.txt
index 2d713b42c39676..156d0b512ec19a 100644
--- a/requirements_all.txt
+++ b/requirements_all.txt
@@ -2254,7 +2254,7 @@ regenmaschine==2023.05.1
renault-api==0.1.13
# homeassistant.components.reolink
-reolink-aio==0.5.15
+reolink-aio==0.5.16
# homeassistant.components.python_script
restrictedpython==6.0
diff --git a/requirements_test_all.txt b/requirements_test_all.txt
index 6b4723510ec5f3..20032fe06c2fdd 100644
--- a/requirements_test_all.txt
+++ b/requirements_test_all.txt
@@ -1635,7 +1635,7 @@ regenmaschine==2023.05.1
renault-api==0.1.13
# homeassistant.components.reolink
-reolink-aio==0.5.15
+reolink-aio==0.5.16
# homeassistant.components.python_script
restrictedpython==6.0
|
<!--
You are amazing! Thanks for contributing to our project!
Please, DO NOT DELETE ANY TEXT from this template! (unless instructed).
-->
## Breaking change
<!--
If your PR contains a breaking change for existing users, it is important
to tell them what breaks, how to make it work again and why we did this.
This piece of text is published with the release notes, so it helps if you
write it towards our users, not us.
Note: Remove this section if this PR is NOT a breaking change.
-->
## Proposed change
<!--
Describe the big picture of your changes here to communicate to the
maintainers why we should accept this pull request. If it fixes a bug
or resolves a feature request, be sure to link to that issue in the
additional information section.
-->
Bump reolink-aio to 0.5.16:
https://github.com/starkillerOG/reolink_aio/compare/0.5.15...0.5.16
- Do not send empty requests when no channels connected
## Type of change
<!--
What type of change does your PR introduce to Home Assistant?
NOTE: Please, check only 1! box!
If your PR requires multiple boxes to be checked, you'll most likely need to
split it into multiple PRs. This makes things easier and faster to code review.
-->
- [x] Dependency upgrade
- [ ] Bugfix (non-breaking change which fixes an issue)
- [ ] New integration (thank you!)
- [ ] New feature (which adds functionality to an existing integration)
- [ ] Deprecation (breaking change to happen in the future)
- [ ] Breaking change (fix/feature causing existing functionality to break)
- [ ] Code quality improvements to existing code or addition of tests
## Additional information
<!--
Details are important, and help maintainers processing your PR.
Please be sure to fill out additional details, if applicable.
-->
- This PR fixes or closes issue: fixes https://github.com/home-assistant/core/issues/93288
- This PR is related to issue:
- Link to documentation pull request:
## Checklist
<!--
Put an `x` in the boxes that apply. You can also fill these out after
creating the PR. If you're unsure about any of them, don't hesitate to ask.
We're here to help! This is simply a reminder of what we are going to look
for before merging your code.
-->
- [ ] The code change is tested and works locally.
- [ ] Local tests pass. **Your PR cannot be merged unless tests pass**
- [ ] There is no commented out code in this PR.
- [ ] I have followed the [development checklist][dev-checklist]
- [ ] I have followed the [perfect PR recommendations][perfect-pr]
- [ ] The code has been formatted using Black (`black --fast homeassistant tests`)
- [ ] Tests have been added to verify that the new code works.
If user exposed functionality or configuration variables are added/changed:
- [ ] Documentation added/updated for [www.home-assistant.io][docs-repository]
If the code communicates with devices, web services, or third-party tools:
- [ ] The [manifest file][manifest-docs] has all fields filled out correctly.
Updated and included derived files by running: `python3 -m script.hassfest`.
- [ ] New or updated dependencies have been added to `requirements_all.txt`.
Updated by running `python3 -m script.gen_requirements_all`.
- [ ] For the updated dependencies - a link to the changelog, or at minimum a diff between library versions is added to the PR description.
- [ ] Untested files have been added to `.coveragerc`.
<!--
This project is very active and we have a high turnover of pull requests.
Unfortunately, the number of incoming pull requests is higher than what our
reviewers can review and merge so there is a long backlog of pull requests
waiting for review. You can help here!
By reviewing another pull request, you will help raise the code quality of
that pull request and the final review will be faster. This way the general
pace of pull request reviews will go up and your wait time will go down.
When picking a pull request to review, try to choose one that hasn't yet
been reviewed.
Thanks for helping out!
-->
To help with the load of incoming pull requests:
- [ ] I have reviewed two other [open pull requests][prs] in this repository.
[prs]: https://github.com/home-assistant/core/pulls?q=is%3Aopen+is%3Apr+-author%3A%40me+-draft%3Atrue+-label%3Awaiting-for-upstream+sort%3Acreated-desc+review%3Anone+-status%3Afailure
<!--
Thank you for contributing <3
Below, some useful links you could explore:
-->
[dev-checklist]: https://developers.home-assistant.io/docs/development_checklist/
[manifest-docs]: https://developers.home-assistant.io/docs/creating_integration_manifest/
[quality-scale]: https://developers.home-assistant.io/docs/integration_quality_scale_index/
[docs-repository]: https://github.com/home-assistant/home-assistant.io
[perfect-pr]: https://developers.home-assistant.io/docs/review-process/#creating-the-perfect-pr
|
https://api.github.com/repos/home-assistant/core/pulls/93540
|
2023-05-25T15:13:59Z
|
2023-05-25T20:15:33Z
|
2023-05-25T20:15:33Z
|
2023-05-26T21:02:03Z
| 429
|
home-assistant/core
| 38,968
|
Add Optimus to Data Analysis section
|
diff --git a/README.md b/README.md
index 5883f2272..bfe784f92 100644
--- a/README.md
+++ b/README.md
@@ -342,6 +342,7 @@ Inspired by [awesome-php](https://github.com/ziadoz/awesome-php).
* [Open Mining](https://github.com/mining/mining) - Business Intelligence (BI) in Pandas interface.
* [Orange](https://orange.biolab.si/) - Data mining, data visualization, analysis and machine learning through visual programming or scripts.
* [Pandas](http://pandas.pydata.org/) - A library providing high-performance, easy-to-use data structures and data analysis tools.
+* [Optimus](https://github.com/ironmussa/Optimus) - Cleansing, pre-processing, feature engineering, exploratory data analysis and easy Machine Learning with a PySpark backend.
## Data Validation
|
## What is this Python project?
Cleansing, pre-processing, feature engineering, exploratory data analysis and easy ML with PySpark backend.
## What's the difference between this Python project and similar ones?
It implements several handy tools for data wrangling and munging that will make your life much easier. The first obvious advantage over any other public data cleaning library or framework is that it will work on your laptop or your big cluster, and second, it is amazingly easy to install, use and understand.
## Webpage
https://hioptimus.com
## Docs
https://docs.hioptimus.com
--
Anyone who agrees with this pull request could vote for it by adding a :+1: to it, and usually, the maintainer will merge it when votes reach **20**.
|
https://api.github.com/repos/vinta/awesome-python/pulls/1050
|
2018-04-09T22:43:04Z
|
2018-04-10T03:59:14Z
|
2018-04-10T03:59:14Z
|
2018-04-10T04:02:50Z
| 202
|
vinta/awesome-python
| 27,174
|
feat: added support for open source models, added instructions to docs
|
diff --git a/README.md b/README.md
index 9d7325e548..ddc8087c50 100644
--- a/README.md
+++ b/README.md
@@ -81,6 +81,16 @@ Editing the `preprompts`, and evolving how you write the project prompt, is how
Each step in `steps.py` will have its communication history with GPT4 stored in the logs folder, and can be rerun with `scripts/rerun_edited_message_logs.py`.
+### Running with open source models
+
+You can use gpt-engineer with open source models by using an OpenAI-compatible API, such as the one offered by the [text-generation-webui `openai` extension](https://github.com/oobabooga/text-generation-webui/blob/main/extensions/openai/README.md). This can easily be set up with [TheBloke's Runpod template](https://www.runpod.io/console/gpu-secure-cloud?template=f1pf20op0z).
+
+To do so, first set up the API according to the instructions linked above. Then go into the text-generation-webui, open settings, check the `openai` extension, and save. You then need to expose TCP port 5001 in your Runpod config, which will give you an exposed TCP port, something like 40125. Then restart your Runpod and check that the API is live by browsing: http://<public ip>:<port>/v1/models
+
+Then, as an example we can now run it with WizardCoder-Python-34B hosted on Runpod: `OPENAI_API_BASE=http://<host>:<port>/v1 python -m gpt_engineer.main benchmark/pomodoro_timer --steps benchmark TheBloke_WizardCoder-Python-34B-V1.0-GPTQ`
+
+Check your Runpod dashboard for the host and (exposed TCP) port; mine was something like 40125.
+
## Vision
The gpt-engineer community is building the **open platform for devs to tinker with and build their personal code-generation toolbox**.
diff --git a/gpt_engineer/ai.py b/gpt_engineer/ai.py
index e801f4d3ec..e60e11eba0 100644
--- a/gpt_engineer/ai.py
+++ b/gpt_engineer/ai.py
@@ -365,22 +365,19 @@ def create_chat_model(self, model: str, temperature) -> BaseChatModel:
streaming=True,
# tiktoken_model_name
)
- if model == "gpt-4":
+ # Fetch available models from OpenAI API
+ supported = [model["id"] for model in openai.Model.list()["data"]]
+ if model in supported:
return ChatOpenAI(
- model="gpt-4",
- temperature=temperature,
- streaming=True,
- client=openai.ChatCompletion,
- )
- elif model == "gpt-3.5-turbo":
- return ChatOpenAI(
- model="gpt-3.5-turbo",
+ model=model,
temperature=temperature,
streaming=True,
client=openai.ChatCompletion,
)
else:
- raise ValueError(f"Model {model} is not supported.")
+ raise ValueError(
+ f"Model {model} is not supported, supported models are: {supported}"
+ )
def get_tokenizer(model: str):
|
Hi @AntonOsika,
Thought I'd give the new WizardCoder models a go; they are pretty impressive!
I was going to try and run the benchmarks, but it was a bit finicky, so I did this instead :)
|
https://api.github.com/repos/gpt-engineer-org/gpt-engineer/pulls/639
|
2023-08-28T12:13:46Z
|
2023-08-30T12:27:45Z
|
2023-08-30T12:27:45Z
|
2023-08-30T12:37:49Z
| 750
|
gpt-engineer-org/gpt-engineer
| 33,056
|
BUG: (GH10408, GH10412) in vectorised setting of timestamp columns
|
diff --git a/doc/source/whatsnew/v0.17.0.txt b/doc/source/whatsnew/v0.17.0.txt
index 8e03fe02bcc97..447a42c43d24e 100644
--- a/doc/source/whatsnew/v0.17.0.txt
+++ b/doc/source/whatsnew/v0.17.0.txt
@@ -393,5 +393,6 @@ Bug Fixes
- Bug in operator equal on Index not being consistent with Series (:issue:`9947`)
- Reading "famafrench" data via ``DataReader`` results in HTTP 404 error because of the website url is changed (:issue:`10591`).
- Bug in `read_msgpack` where DataFrame to decode has duplicate column names (:issue:`9618`)
-
- Bug in ``io.common.get_filepath_or_buffer`` which caused reading of valid S3 files to fail if the bucket also contained keys for which the user does not have read permission (:issue:`10604`)
+- Bug in vectorised setting of timestamp columns with python ``datetime.date`` and numpy ``datetime64`` (:issue:`10408`, :issue:`10412`)
+
diff --git a/pandas/core/internals.py b/pandas/core/internals.py
index 6a87f5a0b08e0..5953e783f6c4d 100644
--- a/pandas/core/internals.py
+++ b/pandas/core/internals.py
@@ -2,7 +2,7 @@
import itertools
import re
import operator
-from datetime import datetime, timedelta
+from datetime import datetime, timedelta, date
from collections import defaultdict
import numpy as np
@@ -1839,7 +1839,7 @@ def _try_coerce_args(self, values, other):
if is_null_datelike_scalar(other):
other = tslib.iNaT
- elif isinstance(other, datetime):
+ elif isinstance(other, (datetime, np.datetime64, date)):
other = lib.Timestamp(other).asm8.view('i8')
elif hasattr(other, 'dtype') and com.is_integer_dtype(other):
other = other.view('i8')
diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py
index aeb28524e5cc1..4e78e1549fb0e 100644
--- a/pandas/tests/test_frame.py
+++ b/pandas/tests/test_frame.py
@@ -3,7 +3,7 @@
from __future__ import print_function
# pylint: disable-msg=W0612,E1101
from copy import deepcopy
-from datetime import datetime, timedelta, time
+from datetime import datetime, timedelta, time, date
import sys
import operator
import re
@@ -4248,6 +4248,16 @@ def test_datetimelike_setitem_with_inference(self):
expected = Series([np.dtype('timedelta64[ns]')]*6+[np.dtype('datetime64[ns]')]*2,index=list('ABCDEFGH'))
assert_series_equal(result,expected)
+ def test_setitem_datetime_coercion(self):
+ # GH 1048
+ df = pd.DataFrame({'c': [pd.Timestamp('2010-10-01')]*3})
+ df.loc[0:1, 'c'] = np.datetime64('2008-08-08')
+ self.assertEqual(pd.Timestamp('2008-08-08'), df.loc[0, 'c'])
+ self.assertEqual(pd.Timestamp('2008-08-08'), df.loc[1, 'c'])
+ df.loc[2, 'c'] = date(2005, 5, 5)
+ self.assertEqual(pd.Timestamp('2005-05-05'), df.loc[2, 'c'])
+
+
def test_new_empty_index(self):
df1 = DataFrame(randn(0, 3))
df2 = DataFrame(randn(0, 3))
diff --git a/pandas/tests/test_internals.py b/pandas/tests/test_internals.py
index 6d2c87a187995..5a1eb719270c4 100644
--- a/pandas/tests/test_internals.py
+++ b/pandas/tests/test_internals.py
@@ -1,6 +1,8 @@
# -*- coding: utf-8 -*-
# pylint: disable=W0102
+from datetime import datetime, date
+
import nose
import numpy as np
@@ -286,6 +288,26 @@ def test_repr(self):
pass
+class TestDatetimeBlock(tm.TestCase):
+ _multiprocess_can_split_ = True
+
+ def test_try_coerce_arg(self):
+ block = create_block('datetime', [0])
+
+ # coerce None
+ none_coerced = block._try_coerce_args(block.values, None)[1]
+ self.assertTrue(pd.Timestamp(none_coerced) is pd.NaT)
+
+ # coerce different types of date objects
+ vals = (np.datetime64('2010-10-10'),
+ datetime(2010, 10, 10),
+ date(2010, 10, 10))
+ for val in vals:
+ coerced = block._try_coerce_args(block.values, val)[1]
+ self.assertEqual(np.int64, type(coerced))
+ self.assertEqual(pd.Timestamp('2010-10-10'), pd.Timestamp(coerced))
+
+
class TestBlockManager(tm.TestCase):
_multiprocess_can_split_ = True
|
closes #10408
closes #10412
Fix setting values with python datetime.date and numpy datetime64.
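A short example of the assignments this enables, taken from the new `test_setitem_datetime_coercion` test; both values are now coerced and stored as Timestamps:

```python
from datetime import date

import numpy as np
import pandas as pd

df = pd.DataFrame({'c': [pd.Timestamp('2010-10-01')] * 3})

# Previously these assignments were not coerced correctly into the datetime column.
df.loc[0:1, 'c'] = np.datetime64('2008-08-08')
df.loc[2, 'c'] = date(2005, 5, 5)
print(df['c'])
```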
|
https://api.github.com/repos/pandas-dev/pandas/pulls/10644
|
2015-07-21T07:26:53Z
|
2015-07-24T13:50:18Z
|
2015-07-24T13:50:18Z
|
2015-07-24T13:50:22Z
| 1,191
|
pandas-dev/pandas
| 45,256
|
Update default to ACMEv2 server
|
diff --git a/certbot/constants.py b/certbot/constants.py
index d31faa71c26..9760f773931 100644
--- a/certbot/constants.py
+++ b/certbot/constants.py
@@ -88,7 +88,7 @@
config_dir="/etc/letsencrypt",
work_dir="/var/lib/letsencrypt",
logs_dir="/var/log/letsencrypt",
- server="https://acme-v01.api.letsencrypt.org/directory",
+ server="https://acme-v02.api.letsencrypt.org/directory",
# Plugins parsers
configurator=None,
|
Fixes #5369.
|
https://api.github.com/repos/certbot/certbot/pulls/6152
|
2018-06-27T21:27:30Z
|
2018-07-11T18:20:37Z
|
2018-07-11T18:20:37Z
|
2018-07-11T18:44:12Z
| 137
|
certbot/certbot
| 263
|
Refs CVE-2022-34265 -- Unified DatabaseOperations._convert_*_to_tz() hook names.
|
diff --git a/django/db/backends/mysql/operations.py b/django/db/backends/mysql/operations.py
index 6c0c6ebd2b15d..76ca8157656c5 100644
--- a/django/db/backends/mysql/operations.py
+++ b/django/db/backends/mysql/operations.py
@@ -66,7 +66,7 @@ def date_extract_sql(self, lookup_type, sql, params):
return f"EXTRACT({lookup_type} FROM {sql})", params
def date_trunc_sql(self, lookup_type, sql, params, tzname=None):
- sql, params = self._convert_field_to_tz(sql, params, tzname)
+ sql, params = self._convert_sql_to_tz(sql, params, tzname)
fields = {
"year": "%Y-01-01",
"month": "%Y-%m-01",
@@ -89,7 +89,7 @@ def _prepare_tzname_delta(self, tzname):
tzname, sign, offset = split_tzname_delta(tzname)
return f"{sign}{offset}" if offset else tzname
- def _convert_field_to_tz(self, sql, params, tzname):
+ def _convert_sql_to_tz(self, sql, params, tzname):
if tzname and settings.USE_TZ and self.connection.timezone_name != tzname:
return f"CONVERT_TZ({sql}, %s, %s)", (
*params,
@@ -99,19 +99,19 @@ def _convert_field_to_tz(self, sql, params, tzname):
return sql, params
def datetime_cast_date_sql(self, sql, params, tzname):
- sql, params = self._convert_field_to_tz(sql, params, tzname)
+ sql, params = self._convert_sql_to_tz(sql, params, tzname)
return f"DATE({sql})", params
def datetime_cast_time_sql(self, sql, params, tzname):
- sql, params = self._convert_field_to_tz(sql, params, tzname)
+ sql, params = self._convert_sql_to_tz(sql, params, tzname)
return f"TIME({sql})", params
def datetime_extract_sql(self, lookup_type, sql, params, tzname):
- sql, params = self._convert_field_to_tz(sql, params, tzname)
+ sql, params = self._convert_sql_to_tz(sql, params, tzname)
return self.date_extract_sql(lookup_type, sql, params)
def datetime_trunc_sql(self, lookup_type, sql, params, tzname):
- sql, params = self._convert_field_to_tz(sql, params, tzname)
+ sql, params = self._convert_sql_to_tz(sql, params, tzname)
fields = ["year", "month", "day", "hour", "minute", "second"]
format = ("%Y-", "%m", "-%d", " %H:", "%i", ":%s")
format_def = ("0000-", "01", "-01", " 00:", "00", ":00")
@@ -136,7 +136,7 @@ def datetime_trunc_sql(self, lookup_type, sql, params, tzname):
return sql, params
def time_trunc_sql(self, lookup_type, sql, params, tzname=None):
- sql, params = self._convert_field_to_tz(sql, params, tzname)
+ sql, params = self._convert_sql_to_tz(sql, params, tzname)
fields = {
"hour": "%H:00:00",
"minute": "%H:%i:00",
diff --git a/django/db/backends/oracle/operations.py b/django/db/backends/oracle/operations.py
index 70548e358fb9c..78f998183ea6a 100644
--- a/django/db/backends/oracle/operations.py
+++ b/django/db/backends/oracle/operations.py
@@ -105,7 +105,7 @@ def date_extract_sql(self, lookup_type, sql, params):
return extract_sql, (*params, extract_param)
def date_trunc_sql(self, lookup_type, sql, params, tzname=None):
- sql, params = self._convert_field_to_tz(sql, params, tzname)
+ sql, params = self._convert_sql_to_tz(sql, params, tzname)
# https://docs.oracle.com/en/database/oracle/oracle-database/21/sqlrf/ROUND-and-TRUNC-Date-Functions.html
trunc_param = None
if lookup_type in ("year", "month"):
@@ -128,7 +128,7 @@ def _prepare_tzname_delta(self, tzname):
tzname, sign, offset = split_tzname_delta(tzname)
return f"{sign}{offset}" if offset else tzname
- def _convert_field_to_tz(self, sql, params, tzname):
+ def _convert_sql_to_tz(self, sql, params, tzname):
if not (settings.USE_TZ and tzname):
return sql, params
if not self._tzname_re.match(tzname):
@@ -147,13 +147,13 @@ def _convert_field_to_tz(self, sql, params, tzname):
return sql, params
def datetime_cast_date_sql(self, sql, params, tzname):
- sql, params = self._convert_field_to_tz(sql, params, tzname)
+ sql, params = self._convert_sql_to_tz(sql, params, tzname)
return f"TRUNC({sql})", params
def datetime_cast_time_sql(self, sql, params, tzname):
# Since `TimeField` values are stored as TIMESTAMP change to the
# default date and convert the field to the specified timezone.
- sql, params = self._convert_field_to_tz(sql, params, tzname)
+ sql, params = self._convert_sql_to_tz(sql, params, tzname)
convert_datetime_sql = (
f"TO_TIMESTAMP(CONCAT('1900-01-01 ', TO_CHAR({sql}, 'HH24:MI:SS.FF')), "
f"'YYYY-MM-DD HH24:MI:SS.FF')"
@@ -164,11 +164,11 @@ def datetime_cast_time_sql(self, sql, params, tzname):
)
def datetime_extract_sql(self, lookup_type, sql, params, tzname):
- sql, params = self._convert_field_to_tz(sql, params, tzname)
+ sql, params = self._convert_sql_to_tz(sql, params, tzname)
return self.date_extract_sql(lookup_type, sql, params)
def datetime_trunc_sql(self, lookup_type, sql, params, tzname):
- sql, params = self._convert_field_to_tz(sql, params, tzname)
+ sql, params = self._convert_sql_to_tz(sql, params, tzname)
# https://docs.oracle.com/en/database/oracle/oracle-database/21/sqlrf/ROUND-and-TRUNC-Date-Functions.html
trunc_param = None
if lookup_type in ("year", "month"):
@@ -192,7 +192,7 @@ def time_trunc_sql(self, lookup_type, sql, params, tzname=None):
# The implementation is similar to `datetime_trunc_sql` as both
# `DateTimeField` and `TimeField` are stored as TIMESTAMP where
# the date part of the later is ignored.
- sql, params = self._convert_field_to_tz(sql, params, tzname)
+ sql, params = self._convert_sql_to_tz(sql, params, tzname)
trunc_param = None
if lookup_type == "hour":
trunc_param = "HH24"
diff --git a/docs/releases/4.1.txt b/docs/releases/4.1.txt
index 49bbf2dec22ce..e8cd11349bc8f 100644
--- a/docs/releases/4.1.txt
+++ b/docs/releases/4.1.txt
@@ -459,6 +459,10 @@ backends.
``DatabaseOperations.insert_statement()`` method is replaced by
``on_conflict`` that accepts ``django.db.models.constants.OnConflict``.
+* ``DatabaseOperations._convert_field_to_tz()`` is replaced by
+ ``DatabaseOperations._convert_sql_to_tz()`` that accepts the ``sql``,
+ ``params``, and ``tzname`` arguments.
+
* Several date and time methods on ``DatabaseOperations`` now take ``sql`` and
``params`` arguments instead of ``field_name`` and return 2-tuple containing
some SQL and the parameters to be interpolated into that SQL. The changed
|
See https://github.com/django/django/pull/15820#issuecomment-1179094287.
|
https://api.github.com/repos/django/django/pulls/15832
|
2022-07-08T17:28:52Z
|
2022-07-09T11:02:07Z
|
2022-07-09T11:02:07Z
|
2022-07-09T11:02:46Z
| 1,925
|
django/django
| 50,874
|
fix repeat save_res
|
diff --git a/tools/infer_det.py b/tools/infer_det.py
index 1c679e0faf..9d2daf13ad 100755
--- a/tools/infer_det.py
+++ b/tools/infer_det.py
@@ -126,9 +126,6 @@ def main():
otstr = file + "\t" + json.dumps(dt_boxes_json) + "\n"
fout.write(otstr.encode())
- save_det_path = os.path.dirname(config['Global'][
- 'save_res_path']) + "/det_results/"
- draw_det_res(boxes, config, src_img, file, save_det_path)
logger.info("success!")
|
https://api.github.com/repos/PaddlePaddle/PaddleOCR/pulls/4979
|
2021-12-20T14:02:48Z
|
2021-12-20T14:56:04Z
|
2021-12-20T14:56:04Z
|
2021-12-20T14:56:05Z
| 146
|
PaddlePaddle/PaddleOCR
| 42,143
|
|
Fixed Import Error type
|
diff --git a/libs/langchain/langchain/llms/pipelineai.py b/libs/langchain/langchain/llms/pipelineai.py
index 0d257e336471fa..cff6c0f5b18cde 100644
--- a/libs/langchain/langchain/llms/pipelineai.py
+++ b/libs/langchain/langchain/llms/pipelineai.py
@@ -91,7 +91,7 @@ def _call(
try:
from pipeline import PipelineCloud
except ImportError:
- raise ValueError(
+ raise ImportError(
"Could not import pipeline-ai python package. "
"Please install it with `pip install pipeline-ai`."
)
diff --git a/libs/langchain/langchain/utilities/redis.py b/libs/langchain/langchain/utilities/redis.py
index e6c5cb138833c6..a45391c8bcdae6 100644
--- a/libs/langchain/langchain/utilities/redis.py
+++ b/libs/langchain/langchain/utilities/redis.py
@@ -108,7 +108,7 @@ def get_client(redis_url: str, **kwargs: Any) -> RedisType:
try:
import redis
except ImportError:
- raise ValueError(
+ raise ImportError(
"Could not import redis python package. "
"Please install it with `pip install redis>=4.1.0`."
)
diff --git a/libs/langchain/langchain/vectorstores/weaviate.py b/libs/langchain/langchain/vectorstores/weaviate.py
index 200ffedc6ef91d..3f965212241df1 100644
--- a/libs/langchain/langchain/vectorstores/weaviate.py
+++ b/libs/langchain/langchain/vectorstores/weaviate.py
@@ -99,7 +99,7 @@ def __init__(
try:
import weaviate
except ImportError:
- raise ValueError(
+ raise ImportError(
"Could not import weaviate python package. "
"Please install it with `pip install weaviate-client`."
)
|
I have restructured the code to ensure uniform handling of import failures. In place of the previously used ValueError, I've adopted the standard practice of raising ImportError with an explanatory message. This modification enhances code readability and clarifies that any problems stem from module importation.
@baskaryan, @eyurtsev, @rlancemartin.
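A small sketch of the user-visible effect (module path taken from the diff above; the Redis URL is illustrative): callers can now catch the conventional exception type when the optional dependency is missing.

```python
try:
    from langchain.utilities.redis import get_client

    client = get_client("redis://localhost:6379")
except ImportError as exc:
    # Raised when the optional `redis` package is not installed.
    print(f"Missing optional dependency: {exc}")
```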
|
https://api.github.com/repos/langchain-ai/langchain/pulls/10168
|
2023-09-04T07:34:25Z
|
2023-09-04T15:43:29Z
|
2023-09-04T15:43:28Z
|
2023-09-04T15:43:29Z
| 448
|
langchain-ai/langchain
| 43,034
|
[data] Always convert arrow batches to pandas batches when user specifies batch_format="native"
|
diff --git a/python/ray/data/block.py b/python/ray/data/block.py
index 7cf54d4219794..0a83e17dd38f4 100644
--- a/python/ray/data/block.py
+++ b/python/ray/data/block.py
@@ -279,7 +279,7 @@ def for_block(block: Block) -> "BlockAccessor[T]":
return SimpleBlockAccessor(block)
else:
- raise TypeError("Not a block type: {}".format(block))
+ raise TypeError("Not a block type: {} ({})".format(block, type(block)))
def sample(self, n_samples: int, key: Any) -> "Block[T]":
"""Return a random sample of items from this block."""
diff --git a/python/ray/data/dataset.py b/python/ray/data/dataset.py
index 07bfabb34cee6..84bb57b43ecd8 100644
--- a/python/ray/data/dataset.py
+++ b/python/ray/data/dataset.py
@@ -221,8 +221,9 @@ def map_batches(
blocks as batches. Defaults to a system-chosen batch size.
compute: The compute strategy, either "tasks" (default) to use Ray
tasks, or "actors" to use an autoscaling Ray actor pool.
- batch_format: Specify "native" to use the native block format,
- "pandas" to select ``pandas.DataFrame`` as the batch format,
+ batch_format: Specify "native" to use the native block format
+ (promotes Arrow to pandas), "pandas" to select
+ ``pandas.DataFrame`` as the batch format,
or "pyarrow" to select ``pyarrow.Table``.
ray_remote_args: Additional resource requirements to request from
ray (e.g., num_gpus=1 to request GPUs for the map tasks).
@@ -252,7 +253,9 @@ def transform(block: Block) -> Iterable[Block]:
# bug where we include the entire base view on serialization.
view = block.slice(start, end, copy=batch_size is not None)
if batch_format == "native":
- pass
+ # Always promote Arrow blocks to pandas for consistency.
+ if isinstance(view, pa.Table) or isinstance(view, bytes):
+ view = BlockAccessor.for_block(view).to_pandas()
elif batch_format == "pandas":
view = BlockAccessor.for_block(view).to_pandas()
elif batch_format == "pyarrow":
@@ -1831,7 +1834,8 @@ def iter_batches(
current block during the scan.
batch_size: Record batch size, or None to let the system pick.
batch_format: The format in which to return each batch.
- Specify "native" to use the current block format, "pandas" to
+ Specify "native" to use the current block format (promoting
+ Arrow to pandas automatically), "pandas" to
select ``pandas.DataFrame`` or "pyarrow" to select
``pyarrow.Table``. Default is "native".
drop_last: Whether to drop the last batch if it's incomplete.
@@ -1840,10 +1844,17 @@ def iter_batches(
A list of iterators over record batches.
"""
+ import pyarrow as pa
+
time_start = time.perf_counter()
def format_batch(batch: Block, format: str) -> BatchType:
if batch_format == "native":
+ # Always promote Arrow blocks to pandas for consistency, since
+ # we lazily convert pandas->Arrow internally for efficiency.
+ if isinstance(batch, pa.Table) or isinstance(batch, bytes):
+ batch = BlockAccessor.for_block(batch)
+ batch = batch.to_pandas()
return batch
elif batch_format == "pandas":
batch = BlockAccessor.for_block(batch)
diff --git a/python/ray/data/impl/pandas_block.py b/python/ray/data/impl/pandas_block.py
index 915dd7fd04819..76279df05053c 100644
--- a/python/ray/data/impl/pandas_block.py
+++ b/python/ray/data/impl/pandas_block.py
@@ -32,7 +32,6 @@ def as_pydict(self) -> dict:
return {k: v[0] for k, v in self._row.to_dict("list").items()}
def __getitem__(self, key: str) -> Any:
- assert isinstance(key, str)
col = self._row[key]
if len(col) == 0:
return None
diff --git a/python/ray/data/tests/test_dataset.py b/python/ray/data/tests/test_dataset.py
index 9e468b98fbc0f..d3532c680a262 100644
--- a/python/ray/data/tests/test_dataset.py
+++ b/python/ray/data/tests/test_dataset.py
@@ -1230,10 +1230,8 @@ def test_from_numpy(ray_start_regular_shared):
arr1 = np.expand_dims(np.arange(0, 4), axis=1)
arr2 = np.expand_dims(np.arange(4, 8), axis=1)
ds = ray.data.from_numpy([ray.put(arr1), ray.put(arr2)])
- values = np.array(ds.take(8))
- np.testing.assert_array_equal(
- values, np.expand_dims(np.concatenate((arr1, arr2)), axis=1)
- )
+ values = np.stack([x["value"] for x in ds.take(8)])
+ np.testing.assert_array_equal(values, np.concatenate((arr1, arr2)))
def test_from_arrow(ray_start_regular_shared):
|
<!-- Thank you for your contribution! Please review https://github.com/ray-project/ray/blob/master/CONTRIBUTING.rst before opening a pull request. -->
<!-- Please add a reviewer to the assignee section when you create a PR. If you don't have the access to it, we will shortly find a reviewer and assign them to your PR. -->
## Why are these changes needed?
With the addition of https://github.com/ray-project/ray/pull/20988, the native format becomes ambiguous. This PR proposes to auto-promote Arrow blocks to pandas when the user specifies the "native" batch format, to avoid that ambiguity.
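A hedged sketch of the resulting behaviour (the dataset-creation call is illustrative; any Arrow-backed dataset behaves the same way): with `batch_format="native"`, the UDF now receives `pandas.DataFrame` batches instead of `pyarrow.Table` ones.

```python
import pyarrow as pa
import ray

ds = ray.data.from_arrow(pa.table({"value": list(range(8))}))

def add_one(batch):
    # After this change, `batch` is a pandas.DataFrame even though the
    # underlying blocks are stored as Arrow tables.
    batch["value"] = batch["value"] + 1
    return batch

print(ds.map_batches(add_one, batch_format="native").take(3))
```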
|
https://api.github.com/repos/ray-project/ray/pulls/21566
|
2022-01-12T23:28:02Z
|
2022-02-02T05:26:37Z
|
2022-02-02T05:26:37Z
|
2022-02-02T05:26:37Z
| 1,229
|
ray-project/ray
| 19,049
|
Add Poetry DB under Books
|
diff --git a/README.md b/README.md
index e94efc83e7..fcf63ecf0f 100644
--- a/README.md
+++ b/README.md
@@ -239,6 +239,7 @@ API | Description | Auth | HTTPS | CORS |
| [LibGen](https://garbage.world/posts/libgen/) | Library Genesis search engine | No | No | Unknown |
| [Open Library](https://openlibrary.org/developers/api) | Books, book covers and related data | No | Yes | No |
| [Penguin Publishing](http://www.penguinrandomhouse.biz/webservices/rest/) | Books, book covers and related data | No | Yes | Yes |
+| [PoetryDB](https://github.com/thundercomb/poetrydb#readme) | Enables you to get instant data from our vast poetry collection | No | Yes | Yes |
| [Quran](https://quran.api-docs.io/) | RESTful Quran API with multiple languages | No | Yes | Yes |
| [Quran Cloud](https://alquran.cloud/api) | A RESTful Quran API to retrieve an Ayah, Surah, Juz or the entire Holy Quran | No | Yes | Yes |
| [Quran-api](https://github.com/fawazahmed0/quran-api#readme) | Free Quran API Service with 90+ different languages and 400+ translations | No | Yes | Yes |
|
<!-- Thank you for taking the time to work on a Pull Request for this project! -->
<!-- To ensure your PR is dealt with swiftly please check the following: -->
- [x] My submission is formatted according to the guidelines in the [contributing guide](/CONTRIBUTING.md)
- [x] My addition is ordered alphabetically
- [x] My submission has a useful description
- [x] The description does not have more than 100 characters
- [x] The description does not end with punctuation
- [x] Each table column is padded with one space on either side
- [x] I have searched the repository for any relevant issues or pull requests
- [x] Any category I am creating has the minimum requirement of 3 items
- [x] All changes have been [squashed][squash-link] into a single commit
[squash-link]: <https://github.com/todotxt/todo.txt-android/wiki/Squash-All-Commits-Related-to-a-Single-Issue-into-a-Single-Commit>
The PoetryDB API enables you to get instant data from its vast poetry collection.
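A quick illustrative query (a sketch assuming the endpoint shape described in the PoetryDB README linked in the table row):

```python
import requests

# Fetch poems matching an author; PoetryDB returns a JSON list of poem objects.
poems = requests.get("https://poetrydb.org/author/Shakespeare", timeout=10).json()
print(poems[0]["title"], "-", poems[0]["author"])
```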
|
https://api.github.com/repos/public-apis/public-apis/pulls/2863
|
2021-10-31T19:10:40Z
|
2021-11-01T23:16:51Z
|
2021-11-01T23:16:51Z
|
2021-11-02T06:33:42Z
| 313
|
public-apis/public-apis
| 35,947
|
lykke: remove method
|
diff --git a/ts/src/lykke.ts b/ts/src/lykke.ts
index 83eff4840b3d..928e2d0aa23b 100644
--- a/ts/src/lykke.ts
+++ b/ts/src/lykke.ts
@@ -439,7 +439,12 @@ export default class lykke extends Exchange {
};
// publicGetTickers or publicGetPrices
const method = this.safeString (this.options, 'fetchTickerMethod', 'publicGetTickers');
- const response = await this[method] (this.extend (request, params));
+ let response = undefined;
+ if (method === 'publicGetPrices') {
+ response = await this.publicGetPrices (this.extend (request, params));
+ } else {
+ response = await this.publicGetTickers (this.extend (request, params));
+ }
const ticker = this.safeValue (response, 'payload', []);
//
// publicGetTickers
@@ -802,8 +807,12 @@ export default class lykke extends Exchange {
if (type === 'limit') {
query['price'] = parseFloat (this.priceToPrecision (market['symbol'], price));
}
- const method = 'privatePostOrders' + this.capitalize (type);
- const result = await this[method] (this.extend (query, params));
+ let result = undefined;
+ if (this.capitalize (type) === 'Market') {
+ result = await this.privatePostOrdersMarket (this.extend (query, params));
+ } else {
+ result = await this.privatePostOrdersLimit (this.extend (query, params));
+ }
//
// market
//
diff --git a/ts/src/test/static/currencies/lykke.json b/ts/src/test/static/currencies/lykke.json
new file mode 100644
index 000000000000..08ea049e2e35
--- /dev/null
+++ b/ts/src/test/static/currencies/lykke.json
@@ -0,0 +1,84 @@
+{
+ "USDT": {
+ "id": "c3e8b0bd-072c-42a0-b4fe-4af9f7c72214",
+ "code": "USDT",
+ "info": {
+ "assetId": "c3e8b0bd-072c-42a0-b4fe-4af9f7c72214",
+ "name": "USDT",
+ "symbol": "USDT",
+ "accuracy": "6",
+ "multiplierPower": "6",
+ "assetAddress": null,
+ "blockchainIntegrationLayerId": "",
+ "blockchain": "ethereum",
+ "type": "erc20Token",
+ "isTradable": true,
+ "isTrusted": true,
+ "kycNeeded": true,
+ "blockchainWithdrawal": true,
+ "cashoutMinimalAmount": "50.0",
+ "lowVolumeAmount": null,
+ "lykkeEntityId": "LYKKE UK",
+ "siriusAssetId": "300000052",
+ "siriusBlockchainId": "ethereum",
+ "blockchainIntegrationType": "sirius",
+ "blockchainDepositEnabled": true,
+ "isDisabled": false
+ },
+ "type": "crypto",
+ "name": "USDT",
+ "active": true,
+ "deposit": true,
+ "withdraw": true,
+ "precision": 0.000001,
+ "limits": {
+ "withdraw": {
+ "min": "50.0"
+ },
+ "amount": {}
+ },
+ "networks": {}
+ },
+ "BTC": {
+ "id": "BTC",
+ "code": "BTC",
+ "info": {
+ "assetId": "BTC",
+ "name": "BTC",
+ "symbol": "BTC",
+ "accuracy": "8",
+ "multiplierPower": "8",
+ "assetAddress": null,
+ "blockchainIntegrationLayerId": "Bitcoin",
+ "blockchain": "ethereum",
+ "type": "erc20Token",
+ "isTradable": true,
+ "isTrusted": true,
+ "kycNeeded": true,
+ "blockchainWithdrawal": true,
+ "cashoutMinimalAmount": "0.0007",
+ "lowVolumeAmount": "0.0055",
+ "lykkeEntityId": "LYKKE UK",
+ "siriusAssetId": "300000037",
+ "siriusBlockchainId": "bitcoin",
+ "blockchainIntegrationType": "sirius",
+ "blockchainDepositEnabled": true,
+ "isDisabled": false
+ },
+ "type": "crypto",
+ "name": "BTC",
+ "active": true,
+ "deposit": true,
+ "withdraw": true,
+ "precision": 1e-8,
+ "limits": {
+ "withdraw": {
+ "min": "0.0007"
+ },
+ "amount": {
+ "min": "0.0055"
+ }
+ },
+ "networks": {}
+ }
+}
diff --git a/ts/src/test/static/markets/lykke.json b/ts/src/test/static/markets/lykke.json
new file mode 100644
index 000000000000..4dc92ed3fe7e
--- /dev/null
+++ b/ts/src/test/static/markets/lykke.json
@@ -0,0 +1,43 @@
+{
+ "BTC/USDT": {
+ "id": "BTCUSDT",
+ "symbol": "BTC/USDT",
+ "base": "BTC",
+ "quote": "USDT",
+ "baseId": "BTC",
+ "quoteId": "c3e8b0bd-072c-42a0-b4fe-4af9f7c72214",
+ "type": "spot",
+ "spot": true,
+ "margin": false,
+ "swap": false,
+ "future": false,
+ "option": false,
+ "contract": false,
+ "active": true,
+ "precision": {
+ "amount": 1e-8,
+ "price": 0.00001
+ },
+ "limits": {
+ "amount": {
+ "min": 0.0001
+ },
+ "price": {},
+ "cost": {
+ "min": 1
+ },
+ "leverage": {}
+ },
+ "info": {
+ "assetPairId": "BTCUSDT",
+ "baseAssetId": "BTC",
+ "quoteAssetId": "c3e8b0bd-072c-42a0-b4fe-4af9f7c72214",
+ "name": "BTC/USDT",
+ "priceAccuracy": "5",
+ "baseAssetAccuracy": "8",
+ "quoteAssetAccuracy": "6",
+ "minVolume": "0.0001",
+ "minOppositeVolume": "1.0"
+ }
+ }
+}
diff --git a/ts/src/test/static/request/lykke.json b/ts/src/test/static/request/lykke.json
new file mode 100644
index 000000000000..b204921e72c0
--- /dev/null
+++ b/ts/src/test/static/request/lykke.json
@@ -0,0 +1,44 @@
+{
+ "exchange": "lykke",
+ "skipKeys": [],
+ "outputType": "json",
+ "methods": {
+ "fetchTicker": [
+ {
+ "description": "fetchTicker",
+ "method": "fetchTicker",
+ "url": "https://hft-apiv2.lykke.com/api/tickers?assetPairIds=BTCUSDT",
+ "input": [
+ "BTC/USDT"
+ ]
+ }
+ ],
+ "createOrder": [
+ {
+ "description": "createOrder market",
+ "method": "createOrder",
+ "url": "https://hft-apiv2.lykke.com/api/orders/market",
+ "input": [
+ "BTC/USDT",
+ "market",
+ "buy",
+ 1
+ ],
+ "output": "{\"assetPairId\":\"BTCUSDT\",\"side\":\"Buy\",\"volume\":1}"
+ },
+ {
+ "description": "createOrder limit",
+ "method": "createOrder",
+ "url": "https://hft-apiv2.lykke.com/api/orders/limit",
+ "input": [
+ "BTC/USDT",
+ "limit",
+ "buy",
+ 1,
+ 40000
+ ],
+ "output": "{\"assetPairId\":\"BTCUSDT\",\"side\":\"Buy\",\"volume\":1,\"price\":40000}"
+ }
+ ]
+ }
+}
|
https://api.github.com/repos/ccxt/ccxt/pulls/20588
|
2023-12-29T08:44:40Z
|
2023-12-30T10:52:22Z
|
2023-12-30T10:52:22Z
|
2023-12-30T10:52:22Z
| 2,054
|
ccxt/ccxt
| 13,654
|
|
Update go client gen command
|
diff --git a/clients/gen/go.sh b/clients/gen/go.sh
index a7a820e3f6802..e8e0f26b4822b 100755
--- a/clients/gen/go.sh
+++ b/clients/gen/go.sh
@@ -31,6 +31,7 @@ readonly VERSION
go_config=(
"packageVersion=${VERSION}"
"enumClassPrefix=true"
+ "structPrefix=true"
)
validate_input "$@"
|
When we have multiple tags on an endpoint in the spec, client generation succeeds but produces invalid Go code (a struct is redefined). We have such a case [here](https://github.com/apache/airflow/blob/main/airflow/api_connexion/openapi/v1.yaml#L912).
Cf:
https://github.com/OpenAPITools/openapi-generator/issues/741
This updates the Go client generation to add the required option to avoid name conflicts in internal functions.
|
https://api.github.com/repos/apache/airflow/pulls/28967
|
2023-01-16T09:38:10Z
|
2023-01-16T10:37:13Z
|
2023-01-16T10:37:13Z
|
2023-01-16T10:37:21Z
| 101
|
apache/airflow
| 14,244
|
🌐 Update Chinese translation for `docs/zh/docs/tutorial/body.md`
|
diff --git a/docs/zh/docs/tutorial/body.md b/docs/zh/docs/tutorial/body.md
index 3d615be399d86..fa8b54d0245ee 100644
--- a/docs/zh/docs/tutorial/body.md
+++ b/docs/zh/docs/tutorial/body.md
@@ -1,21 +1,24 @@
# 请求体
-当你需要将数据从客户端(例如浏览器)发送给 API 时,你将其作为「请求体」发送。
+FastAPI 使用**请求体**从客户端(例如浏览器)向 API 发送数据。
-**请求**体是客户端发送给 API 的数据。**响应**体是 API 发送给客户端的数据。
+**请求体**是客户端发送给 API 的数据。**响应体**是 API 发送给客户端的数据。
-你的 API 几乎总是要发送**响应**体。但是客户端并不总是需要发送**请求**体。
+API 基本上肯定要发送**响应体**,但是客户端不一定发送**请求体**。
-我们使用 <a href="https://docs.pydantic.dev/" class="external-link" target="_blank">Pydantic</a> 模型来声明**请求**体,并能够获得它们所具有的所有能力和优点。
+使用 <a href="https://docs.pydantic.dev/" class="external-link" target="_blank">Pydantic</a> 模型声明**请求体**,能充分利用它的功能和优点。
-!!! info
- 你不能使用 `GET` 操作(HTTP 方法)发送请求体。
+!!! info "说明"
- 要发送数据,你必须使用下列方法之一:`POST`(较常见)、`PUT`、`DELETE` 或 `PATCH`。
+ 发送数据使用 `POST`(最常用)、`PUT`、`DELETE`、`PATCH` 等操作。
+
+ 规范中没有定义使用 `GET` 发送请求体的操作,但不管怎样,FastAPI 也支持这种方式,只不过仅用于非常复杂或极端的用例。
+
+ 我们不建议使用 `GET`,因此,在 Swagger UI 交互文档中不会显示有关 `GET` 的内容,而且代理协议也不一定支持 `GET`。
## 导入 Pydantic 的 `BaseModel`
-首先,你需要从 `pydantic` 中导入 `BaseModel`:
+从 `pydantic` 中导入 `BaseModel`:
=== "Python 3.10+"
@@ -31,9 +34,9 @@
## 创建数据模型
-然后,将你的数据模型声明为继承自 `BaseModel` 的类。
+把数据模型声明为继承 `BaseModel` 的类。
-使用标准的 Python 类型来声明所有属性:
+使用 Python 标准类型声明所有属性:
=== "Python 3.10+"
@@ -47,9 +50,9 @@
{!> ../../../docs_src/body/tutorial001.py!}
```
-和声明查询参数时一样,当一个模型属性具有默认值时,它不是必需的。否则它是一个必需属性。将默认值设为 `None` 可使其成为可选属性。
+与声明查询参数一样,包含默认值的模型属性是可选的,否则就是必选的。默认值为 `None` 的模型属性也是可选的。
-例如,上面的模型声明了一个这样的 JSON「`object`」(或 Python `dict`):
+例如,上述模型声明如下 JSON **对象**(即 Python **字典**):
```JSON
{
@@ -60,7 +63,7 @@
}
```
-...由于 `description` 和 `tax` 是可选的(它们的默认值为 `None`),下面的 JSON「`object`」也将是有效的:
+……由于 `description` 和 `tax` 是可选的(默认值为 `None`),下面的 JSON **对象**也有效:
```JSON
{
@@ -69,9 +72,9 @@
}
```
-## 声明为参数
+## 声明请求体参数
-使用与声明路径和查询参数的相同方式声明请求体,即可将其添加到「路径操作」中:
+使用与声明路径和查询参数相同的方式声明请求体,把请求体添加至*路径操作*:
=== "Python 3.10+"
@@ -85,56 +88,68 @@
{!> ../../../docs_src/body/tutorial001.py!}
```
-...并且将它的类型声明为你创建的 `Item` 模型。
+……此处,请求体参数的类型为 `Item` 模型。
-## 结果
+## 结论
-仅仅使用了 Python 类型声明,**FastAPI** 将会:
+仅使用 Python 类型声明,**FastAPI** 就可以:
-* 将请求体作为 JSON 读取。
-* 转换为相应的类型(在需要时)。
-* 校验数据。
- * 如果数据无效,将返回一条清晰易读的错误信息,指出不正确数据的确切位置和内容。
-* 将接收的数据赋值到参数 `item` 中。
- * 由于你已经在函数中将它声明为 `Item` 类型,你还将获得对于所有属性及其类型的一切编辑器支持(代码补全等)。
-* 为你的模型生成 <a href="https://json-schema.org" class="external-link" target="_blank">JSON 模式</a> 定义,你还可以在其他任何对你的项目有意义的地方使用它们。
-* 这些模式将成为生成的 OpenAPI 模式的一部分,并且被自动化文档 <abbr title="用户界面">UI</abbr> 所使用。
+* 以 JSON 形式读取请求体
+* (在必要时)把请求体转换为对应的类型
+* 校验数据:
+ * 数据无效时返回错误信息,并指出错误数据的确切位置和内容
+* 把接收的数据赋值给参数 `item`
+ * 把函数中请求体参数的类型声明为 `Item`,还能获得代码补全等编辑器支持
+* 为模型生成 <a href="https://json-schema.org" class="external-link" target="_blank">JSON Schema</a>,在项目中所需的位置使用
+* 这些概图是 OpenAPI 概图的部件,用于 API 文档 <abbr title="用户界面">UI</abbr>
-## 自动化文档
+## API 文档
-你所定义模型的 JSON 模式将成为生成的 OpenAPI 模式的一部分,并且在交互式 API 文档中展示:
+Pydantic 模型的 JSON 概图是 OpenAPI 生成的概图部件,可在 API 文档中显示:
-<img src="https://fastapi.tiangolo.com/img/tutorial/body/image01.png">
+<img src="/img/tutorial/body/image01.png">
-而且还将在每一个需要它们的*路径操作*的 API 文档中使用:
+而且,还会用于 API 文档中使用了概图的*路径操作*:
-<img src="https://fastapi.tiangolo.com/img/tutorial/body/image02.png">
+<img src="/img/tutorial/body/image02.png">
## 编辑器支持
-在你的编辑器中,你会在函数内部的任意地方得到类型提示和代码补全(如果你接收的是一个 `dict` 而不是 Pydantic 模型,则不会发生这种情况):
+在编辑器中,函数内部均可使用类型提示、代码补全(如果接收的不是 Pydantic 模型,而是**字典**,就没有这样的支持):
+
+<img src="/img/tutorial/body/image03.png">
+
+还支持检查错误的类型操作:
+
+<img src="/img/tutorial/body/image04.png">
-<img src="https://fastapi.tiangolo.com/img/tutorial/body/image03.png">
+这并非偶然,整个 **FastAPI** 框架都是围绕这种思路精心设计的。
-你还会获得对不正确的类型操作的错误检查:
+并且,在 FastAPI 的设计阶段,我们就已经进行了全面测试,以确保 FastAPI 可以获得所有编辑器的支持。
-<img src="https://fastapi.tiangolo.com/img/tutorial/body/image04.png">
+我们还改进了 Pydantic,让它也支持这些功能。
-这并非偶然,整个框架都是围绕该设计而构建。
+虽然上面的截图取自 <a href="https://code.visualstudio.com" class="external-link" target="_blank">Visual Studio Code</a>。
-并且在进行任何实现之前,已经在设计阶段经过了全面测试,以确保它可以在所有的编辑器中生效。
+但 <a href="https://www.jetbrains.com/pycharm/" class="external-link" target="_blank">PyCharm</a> 和大多数 Python 编辑器也支持同样的功能:
-Pydantic 本身甚至也进行了一些更改以支持此功能。
+<img src="/img/tutorial/body/image05.png">
-上面的截图取自 <a href="https://code.visualstudio.com" class="external-link" target="_blank">Visual Studio Code</a>。
+!!! tip "提示"
-但是在 <a href="https://www.jetbrains.com/pycharm/" class="external-link" target="_blank">PyCharm</a> 和绝大多数其他 Python 编辑器中你也会获得同样的编辑器支持:
+ 使用 <a href="https://www.jetbrains.com/pycharm/" class="external-link" target="_blank">PyCharm</a> 编辑器时,推荐安装 <a href="https://github.com/koxudaxi/pydantic-pycharm-plugin/" class="external-link" target="_blank">Pydantic PyCharm 插件</a>。
-<img src="https://fastapi.tiangolo.com/img/tutorial/body/image05.png">
+ 该插件用于完善 PyCharm 对 Pydantic 模型的支持,优化的功能如下:
+
+ * 自动补全
+ * 类型检查
+ * 代码重构
+ * 查找
+ * 代码审查
## 使用模型
-在函数内部,你可以直接访问模型对象的所有属性:
+在*路径操作*函数内部直接访问模型对象的属性:
=== "Python 3.10+"
@@ -150,9 +165,9 @@ Pydantic 本身甚至也进行了一些更改以支持此功能。
## 请求体 + 路径参数
-你可以同时声明路径参数和请求体。
+**FastAPI** 支持同时声明路径参数和请求体。
-**FastAPI** 将识别出与路径参数匹配的函数参数应**从路径中获取**,而声明为 Pydantic 模型的函数参数应**从请求体中获取**。
+**FastAPI** 能识别与**路径参数**匹配的函数参数,还能识别从**请求体**中获取的类型为 Pydantic 模型的函数参数。
=== "Python 3.10+"
@@ -168,9 +183,9 @@ Pydantic 本身甚至也进行了一些更改以支持此功能。
## 请求体 + 路径参数 + 查询参数
-你还可以同时声明**请求体**、**路径参数**和**查询参数**。
+**FastAPI** 支持同时声明**请求体**、**路径参数**和**查询参数**。
-**FastAPI** 会识别它们中的每一个,并从正确的位置获取数据。
+**FastAPI** 能够正确识别这三种参数,并从正确的位置获取数据。
=== "Python 3.10+"
@@ -184,12 +199,18 @@ Pydantic 本身甚至也进行了一些更改以支持此功能。
{!> ../../../docs_src/body/tutorial004.py!}
```
-函数参数将依次按如下规则进行识别:
+函数参数按如下规则进行识别:
+
+- **路径**中声明了相同参数的参数,是路径参数
+- 类型是(`int`、`float`、`str`、`bool` 等)**单类型**的参数,是**查询**参数
+- 类型是 **Pydantic 模型**的参数,是**请求体**
+
+!!! note "笔记"
+
+ 因为默认值是 `None`, FastAPI 会把 `q` 当作可选参数。
-* 如果在**路径**中也声明了该参数,它将被用作路径参数。
-* 如果参数属于**单一类型**(比如 `int`、`float`、`str`、`bool` 等)它将被解释为**查询**参数。
-* 如果参数的类型被声明为一个 **Pydantic 模型**,它将被解释为**请求体**。
+ FastAPI 不使用 `Optional[str]` 中的 `Optional`, 但 `Optional` 可以让编辑器提供更好的支持,并检测错误。
## 不使用 Pydantic
-如果你不想使用 Pydantic 模型,你还可以使用 **Body** 参数。请参阅文档 [请求体 - 多个参数:请求体中的单一值](body-multiple-params.md#singular-values-in-body){.internal-link target=_blank}。
+即便不使用 Pydantic 模型也能使用 **Body** 参数。详见[请求体 - 多参数:请求体中的单值](body-multiple-params.md#singular-values-in-body){.internal-link target=\_blank}。
|
Also fixes code highlighting for body/tutorial001.py, tutorial002.py, tutorial003.py, tutorial004.py.
|
https://api.github.com/repos/tiangolo/fastapi/pulls/3481
|
2021-07-07T08:08:28Z
|
2024-04-01T05:36:16Z
|
2024-04-01T05:36:16Z
|
2024-04-01T05:36:16Z
| 3,108
|
tiangolo/fastapi
| 23,623
|
Added PrexView service
|
diff --git a/README.md b/README.md
index 0a6af2bc41..40e13c221d 100644
--- a/README.md
+++ b/README.md
@@ -188,6 +188,7 @@ For information on contributing to this project, please see the [contributing gu
|---|---|---|---|---|
| File.io | File Sharing | No | Yes | [Go!](https://www.file.io) |
| pdflayer API | HTML/URL to PDF | No | Yes | [Go!](https://pdflayer.com) |
+| PrexView | Data from XML or JSON to PDF, HTML or Image | `apiKey` | Yes | [Go!](https://prexview.com) |
| Todoist | Todo Lists | `OAuth` | Yes | [Go!](https://developer.todoist.com) |
| Wunderlist | Todo Lists | `OAuth` | Yes | [Go!](https://developer.wunderlist.com/documentation) |
|
PrexView service API to convert data from XML or JSON to PDF, HTML and Image.
|
https://api.github.com/repos/public-apis/public-apis/pulls/323
|
2017-04-03T04:31:52Z
|
2017-04-03T15:25:03Z
|
2017-04-03T15:25:03Z
|
2017-04-03T15:25:03Z
| 223
|
public-apis/public-apis
| 35,607
|
Add Intel AMX/AVX512 support to accelerate inference
|
diff --git a/README.md b/README.md
index 4b51eb32fc..6d04eb6ae7 100644
--- a/README.md
+++ b/README.md
@@ -132,6 +132,11 @@ This runs on the CPU only and does not require GPU. It requires around 30GB of C
python3 -m fastchat.serve.cli --model-path lmsys/vicuna-7b-v1.3 --device cpu
```
+Use Intel AI Accelerator AVX512_BF16/AMX to accelerate CPU inference.
+```
+CPU_ISA=amx python3 -m fastchat.serve.cli --model-path lmsys/vicuna-7b-v1.3 --device cpu
+```
+
#### Metal Backend (Mac Computers with Apple Silicon or AMD GPUs)
Use `--device mps` to enable GPU acceleration on Mac computers (requires torch >= 2.0).
Use `--load-8bit` to turn on 8-bit compression.
diff --git a/fastchat/constants.py b/fastchat/constants.py
index 8cab6a3f1b..0eb7af3717 100644
--- a/fastchat/constants.py
+++ b/fastchat/constants.py
@@ -22,6 +22,8 @@
SESSION_EXPIRATION_TIME = 3600
# The output dir of log files
LOGDIR = os.getenv("LOGDIR", ".")
+# CPU Instruction Set Architecture
+CPU_ISA = os.getenv("CPU_ISA")
##### For the controller and workers (could be overwritten through ENV variables.)
diff --git a/fastchat/model/model_adapter.py b/fastchat/model/model_adapter.py
index c575eb1055..d40f57a80a 100644
--- a/fastchat/model/model_adapter.py
+++ b/fastchat/model/model_adapter.py
@@ -6,6 +6,9 @@
from typing import Dict, List, Optional
import warnings
+from fastchat.constants import CPU_ISA
+
+
if sys.version_info >= (3, 9):
from functools import cache
else:
@@ -167,6 +170,15 @@ def load_model(
)
if device == "cpu":
kwargs = {"torch_dtype": torch.float32}
+ if CPU_ISA in ["avx512_bf16", "amx"]:
+ try:
+ import intel_extension_for_pytorch as ipex
+
+ kwargs = {"torch_dtype": torch.bfloat16}
+ except ImportError:
+ warnings.warn(
+ "Intel Extension for PyTorch is not installed, it can be installed to accelerate cpu inference"
+ )
elif device == "cuda":
kwargs = {"torch_dtype": torch.float16}
if num_gpus != 1:
@@ -267,6 +279,13 @@ def load_model(
# Load model
model, tokenizer = adapter.load_model(model_path, kwargs)
+ if (
+ device == "cpu"
+ and kwargs["torch_dtype"] is torch.bfloat16
+ and CPU_ISA is not None
+ ):
+ model = ipex.optimize(model, dtype=kwargs["torch_dtype"])
+
if (device == "cuda" and num_gpus == 1 and not cpu_offloading) or device in (
"mps",
"xpu",
|
<!-- Thank you for your contribution! -->
<!-- Please add a reviewer to the assignee section when you create a PR. If you don't have the access to it, we will shortly find a reviewer and assign them to your PR. -->
## Why are these changes needed?
Currently, CPU-only inference mode does not use any accelerator. This change uses Intel Extension for PyTorch to accelerate inference when the CPU has an Intel AI accelerator (AMX/AVX512_BF16); see the sketch below.
RFC: https://intel.github.io/intel-extension-for-pytorch/cpu/latest/tutorials/examples.html#bfloat16
<!-- Please give a short summary of the change and the problem this solves. -->
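Below is a minimal, hypothetical sketch (not the FastChat code itself) of the pattern the diff applies: keep `float32` on plain CPUs, but when the `CPU_ISA` environment variable requests `amx` or `avx512_bf16`, load the model in `bfloat16` and pass it through `ipex.optimize`. The checkpoint name and the `transformers` loading calls are illustrative assumptions.
```python
import os
import warnings

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer  # assumed loader, for illustration

model_path = "lmsys/vicuna-7b-v1.3"  # example checkpoint from the README snippet above

kwargs = {"torch_dtype": torch.float32}
ipex = None
if os.getenv("CPU_ISA") in ("avx512_bf16", "amx"):
    try:
        import intel_extension_for_pytorch as ipex  # provides AMX/AVX512_BF16 kernels
        kwargs = {"torch_dtype": torch.bfloat16}
    except ImportError:
        warnings.warn("Intel Extension for PyTorch is not installed; falling back to float32")

tokenizer = AutoTokenizer.from_pretrained(model_path)
model = AutoModelForCausalLM.from_pretrained(model_path, **kwargs).eval()

if ipex is not None and kwargs["torch_dtype"] is torch.bfloat16:
    # Operator fusion plus bfloat16 kernels on CPUs with the Intel AI accelerator
    model = ipex.optimize(model, dtype=kwargs["torch_dtype"])
```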
## Related issue number (if applicable)
<!-- For example: "Closes #1234" -->
## Checks
- [x] I've run `format.sh` to lint the changes in this PR.
- [x] I've included any doc changes needed.
- [x] I've made sure the relevant tests are passing (if applicable).
|
https://api.github.com/repos/lm-sys/FastChat/pulls/2247
|
2023-08-17T03:45:06Z
|
2023-08-21T04:49:02Z
|
2023-08-21T04:49:02Z
|
2023-08-21T04:49:02Z
| 716
|
lm-sys/FastChat
| 41,470
|
Update network integration auth timeout for connection local
|
diff --git a/test/integration/network-integration.cfg b/test/integration/network-integration.cfg
index dd02334e82f4bb..00764bcdadb636 100644
--- a/test/integration/network-integration.cfg
+++ b/test/integration/network-integration.cfg
@@ -3,6 +3,7 @@
[defaults]
host_key_checking = False
+timeout = 90
[ssh_connection]
ssh_args = '-o UserKnownHostsFile=/dev/null'
|
##### SUMMARY
<!--- Describe the change below, including rationale and design decisions -->
* In the network integration tests for connection local, the paramiko_ssh auth_timeout takes the value of `timeout` from the defaults section, which is 10 seconds.
* For slower connections the 10-second timeout results in authentication timeout errors, hence the timeout value is increased to 90 seconds.
<!--- HINT: Include "Fixes #nnn" if you are fixing an existing issue -->
##### ISSUE TYPE
<!--- Pick one below and delete the rest -->
- Test Pull Request
##### COMPONENT NAME
<!--- Write the short name of the module, plugin, task or feature below -->
test/integration/network-integration.cfg
##### ADDITIONAL INFORMATION
<!--- Include additional information to help people understand the change here -->
<!--- A step-by-step reproduction of the problem is helpful if there is no related issue -->
<!--- Paste verbatim command output below, e.g. before and after your change -->
```paste below
```
|
https://api.github.com/repos/ansible/ansible/pulls/71057
|
2020-08-03T11:30:19Z
|
2020-08-03T13:58:31Z
|
2020-08-03T13:58:31Z
|
2020-08-31T13:00:19Z
| 105
|
ansible/ansible
| 48,817
|
Fix typo in lowvram patcher
|
diff --git a/comfy/model_patcher.py b/comfy/model_patcher.py
index 8dda84cfd8..708f32aa9a 100644
--- a/comfy/model_patcher.py
+++ b/comfy/model_patcher.py
@@ -278,7 +278,7 @@ def __call__(self, weight):
if weight_key in self.patches:
m.weight_function = LowVramPatch(weight_key, self)
if bias_key in self.patches:
- m.bias_function = LowVramPatch(weight_key, self)
+ m.bias_function = LowVramPatch(bias_key, self)
m.prev_comfy_cast_weights = m.comfy_cast_weights
m.comfy_cast_weights = True
|
Fixes the WARNING SHAPE MISMATCH warnings I encountered while merging large fp32 models; may resolve the complaints in #3125, #3122 and #3094.
|
https://api.github.com/repos/comfyanonymous/ComfyUI/pulls/3209
|
2024-04-05T03:08:20Z
|
2024-04-05T16:02:13Z
|
2024-04-05T16:02:13Z
|
2024-04-05T16:02:13Z
| 161
|
comfyanonymous/ComfyUI
| 17,743
|
Add 'Inpainting strength' to the 'generation_params' of 'infotext' (params.txt or png chunks)
|
diff --git a/modules/processing.py b/modules/processing.py
index 03c9143dc77..2fc9fe13e24 100644
--- a/modules/processing.py
+++ b/modules/processing.py
@@ -399,6 +399,7 @@ def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments, iteration
"Variation seed strength": (None if p.subseed_strength == 0 else p.subseed_strength),
"Seed resize from": (None if p.seed_resize_from_w == 0 or p.seed_resize_from_h == 0 else f"{p.seed_resize_from_w}x{p.seed_resize_from_h}"),
"Denoising strength": getattr(p, 'denoising_strength', None),
+ "Inpainting strength": (None if getattr(p, 'denoising_strength', None) is None else getattr(p, "inpainting_mask_weight", shared.opts.inpainting_mask_weight)),
"Eta": (None if p.sampler is None or p.sampler.eta == p.sampler.default_eta else p.sampler.eta),
"Clip skip": None if clip_skip <= 1 else clip_skip,
"ENSD": None if opts.eta_noise_seed_delta == 0 else opts.eta_noise_seed_delta,
|
Adds 'Inpainting strength' to the `generation_params` dictionary of `infotext`, which is saved into 'params.txt' or the PNG chunks.
The value appears only if 'Denoising strength' appears too.
The *new* 'Inpainting strength' option allows you to apply/transfer a style to an image while preserving its composition and details. To achieve the best results you should match the 'Denoising strength' and 'Inpainting strength' parameters to each other.
So you often need to remember which value you used to achieve that result.
|
https://api.github.com/repos/AUTOMATIC1111/stable-diffusion-webui/pulls/4679
|
2022-11-13T14:34:08Z
|
2022-11-19T09:24:44Z
|
2022-11-19T09:24:44Z
|
2022-11-19T09:24:44Z
| 282
|
AUTOMATIC1111/stable-diffusion-webui
| 39,815
|
simplify save_webpages
|
diff --git a/metagpt/tools/libs/gpt_v_generator.py b/metagpt/tools/libs/gpt_v_generator.py
index 3b17fc596..0e9f34770 100644
--- a/metagpt/tools/libs/gpt_v_generator.py
+++ b/metagpt/tools/libs/gpt_v_generator.py
@@ -5,13 +5,13 @@
@Author : mannaandpoem
@File : gpt_v_generator.py
"""
-import os
+import re
from pathlib import Path
from metagpt.const import DEFAULT_WORKSPACE_ROOT
+from metagpt.logs import logger
from metagpt.tools.tool_registry import register_tool
-from metagpt.tools.tool_type import ToolType
-from metagpt.utils.common import encode_image
+from metagpt.utils.common import CodeParser, encode_image
ANALYZE_LAYOUT_PROMPT = """You are now a UI/UX designer, please generate layout information for this image:
@@ -28,11 +28,9 @@
Now, please generate the corresponding webpage code including HTML, CSS and JavaScript:"""
-@register_tool(
- tool_type=ToolType.IMAGE2WEBPAGE.type_name, include_functions=["__init__", "generate_webpages", "save_webpages"]
-)
+@register_tool(include_functions=["__init__", "generate_webpages", "save_webpages"])
class GPTvGenerator:
- """Class for generating webpages at once.
+ """Class for generating webpage code from a given webpage screenshot.
This class provides methods to generate webpages including all code (HTML, CSS, and JavaScript) based on an image.
It utilizes a vision model to analyze the layout from an image and generate webpage codes accordingly.
@@ -75,50 +73,34 @@ async def generate_webpages(self, image_path: str) -> str:
return await self.llm.aask(msg=prompt, images=[encode_image(image_path)])
@staticmethod
- def save_webpages(image_path: str, webpages: str) -> Path:
+ def save_webpages(webpages: str, save_folder_name: str = "example") -> Path:
"""Save webpages including all code (HTML, CSS, and JavaScript) at once.
Args:
- image_path (str): The path of the image file.
webpages (str): The generated webpages content.
+ save_folder_name (str, optional): The name of the folder to save the webpages. Defaults to 'example'.
Returns:
Path: The path of the saved webpages.
"""
# Create a folder called webpages in the workspace directory to store HTML, CSS, and JavaScript files
- webpages_path = DEFAULT_WORKSPACE_ROOT / "webpages" / Path(image_path).stem
- os.makedirs(webpages_path, exist_ok=True)
+ webpages_path = DEFAULT_WORKSPACE_ROOT / "webpages" / save_folder_name
+ logger.info(f"code will be saved at {webpages_path}")
+ webpages_path.mkdir(parents=True, exist_ok=True)
index_path = webpages_path / "index.html"
- try:
- index = webpages.split("```html")[1].split("```")[0]
- style_path = None
- if "styles.css" in index:
- style_path = webpages_path / "styles.css"
- elif "style.css" in index:
- style_path = webpages_path / "style.css"
- style = webpages.split("```css")[1].split("```")[0] if style_path else ""
-
- js_path = None
- if "scripts.js" in index:
- js_path = webpages_path / "scripts.js"
- elif "script.js" in index:
- js_path = webpages_path / "script.js"
-
- js = webpages.split("```javascript")[1].split("```")[0] if js_path else ""
- except IndexError:
- raise ValueError(f"No html or css or js code found in the result. \nWebpages: {webpages}")
-
- try:
- with open(index_path, "w", encoding="utf-8") as f:
- f.write(index)
- if style_path:
- with open(style_path, "w", encoding="utf-8") as f:
- f.write(style)
- if js_path:
- with open(js_path, "w", encoding="utf-8") as f:
- f.write(js)
- except FileNotFoundError as e:
- raise FileNotFoundError(f"Cannot save the webpages to {str(webpages_path)}") from e
+ index_path.write_text(CodeParser.parse_code(block=None, text=webpages, lang="html"))
+
+ extract_and_save_code(folder=webpages_path, text=webpages, pattern="styles?.css", language="css")
+
+ extract_and_save_code(folder=webpages_path, text=webpages, pattern="scripts?.js", language="javascript")
return webpages_path
+
+
+def extract_and_save_code(folder, text, pattern, language):
+ word = re.search(pattern, text)
+ if word:
+ path = folder / word.group(0)
+ code = CodeParser.parse_code(block=None, text=text, lang=language)
+ path.write_text(code, encoding="utf-8")
diff --git a/tests/metagpt/tools/libs/test_gpt_v_generator.py b/tests/metagpt/tools/libs/test_gpt_v_generator.py
index 907006765..4a2e68682 100644
--- a/tests/metagpt/tools/libs/test_gpt_v_generator.py
+++ b/tests/metagpt/tools/libs/test_gpt_v_generator.py
@@ -60,18 +60,24 @@ async def test_generate_webpages(mock_webpage_filename_with_styles_and_scripts,
async def test_save_webpages_with_styles_and_scripts(mock_webpage_filename_with_styles_and_scripts, image_path):
generator = GPTvGenerator()
webpages = await generator.generate_webpages(image_path)
- webpages_dir = generator.save_webpages(image_path=image_path, webpages=webpages)
+ webpages_dir = generator.save_webpages(webpages=webpages, save_folder_name="test_1")
logs.logger.info(webpages_dir)
assert webpages_dir.exists()
+ assert (webpages_dir / "index.html").exists()
+ assert (webpages_dir / "styles.css").exists()
+ assert (webpages_dir / "scripts.js").exists()
@pytest.mark.asyncio
async def test_save_webpages_with_style_and_script(mock_webpage_filename_with_style_and_script, image_path):
generator = GPTvGenerator()
webpages = await generator.generate_webpages(image_path)
- webpages_dir = generator.save_webpages(image_path=image_path, webpages=webpages)
+ webpages_dir = generator.save_webpages(webpages=webpages, save_folder_name="test_2")
logs.logger.info(webpages_dir)
assert webpages_dir.exists()
+ assert (webpages_dir / "index.html").exists()
+ assert (webpages_dir / "style.css").exists()
+ assert (webpages_dir / "script.js").exists()
@pytest.mark.asyncio
|
**Features**
simplify save_webpages
|
https://api.github.com/repos/geekan/MetaGPT/pulls/981
|
2024-03-11T08:31:04Z
|
2024-03-11T12:05:12Z
|
2024-03-11T12:05:12Z
|
2024-03-11T12:05:12Z
| 1,562
|
geekan/MetaGPT
| 16,501
|
test: adding more tests to missing number algorithm
|
diff --git a/bit_manipulation/missing_number.py b/bit_manipulation/missing_number.py
index 32b949daa717..554887b17562 100644
--- a/bit_manipulation/missing_number.py
+++ b/bit_manipulation/missing_number.py
@@ -11,6 +11,12 @@ def find_missing_number(nums: list[int]) -> int:
Example:
>>> find_missing_number([0, 1, 3, 4])
2
+ >>> find_missing_number([4, 3, 1, 0])
+ 2
+ >>> find_missing_number([-4, -3, -1, 0])
+ -2
+ >>> find_missing_number([-2, 2, 1, 3, 0])
+ -1
>>> find_missing_number([1, 3, 4, 5, 6])
2
>>> find_missing_number([6, 5, 4, 2, 1])
@@ -26,3 +32,9 @@ def find_missing_number(nums: list[int]) -> int:
missing_number ^= i ^ nums[i - low]
return missing_number
+
+
+if __name__ == "__main__":
+ import doctest
+
+ doctest.testmod()
|
### Describe your change:
Adding more tests to missing number algorithm
Fixes #9943
* [ ] Add an algorithm?
* [ ] Fix a bug or typo in an existing algorithm?
* [ ] Documentation change?
* [X] Add or change doctests? -- Note: Please avoid changing both code and tests in a single pull request.
### Checklist:
* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [x] All new Python files are placed inside an existing directory.
* [x] All filenames are in all lowercase characters with no spaces or dashes.
* [x] All functions and variable names follow Python naming conventions.
* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [x] All new algorithms include at least one URL that points to Wikipedia or another similar explanation.
* [x] If this pull request resolves one or more open issues then the description above includes the issue number(s) with a [closing keyword](https://docs.github.com/en/issues/tracking-your-work-with-issues/linking-a-pull-request-to-an-issue): "Fixes #ISSUE-NUMBER".
|
https://api.github.com/repos/TheAlgorithms/Python/pulls/10394
|
2023-10-13T22:39:52Z
|
2023-10-29T21:43:32Z
|
2023-10-29T21:43:32Z
|
2023-10-29T21:43:34Z
| 289
|
TheAlgorithms/Python
| 30,262
|
#353 - Add OasstApiClient interaction with task
|
diff --git a/website/cypress/contract/oasst_api_contract_tests.cy.ts b/website/cypress/contract/oasst_api_contract_tests.cy.ts
index ff5bb1567a..1ba408d643 100644
--- a/website/cypress/contract/oasst_api_contract_tests.cy.ts
+++ b/website/cypress/contract/oasst_api_contract_tests.cy.ts
@@ -1,4 +1,4 @@
-import OasstApiClient from "src/lib/oasst_api_client";
+import { OasstApiClient } from "src/lib/oasst_api_client";
describe("Contract test for Oasst API", function () {
// Assumes this is running the mock server.
@@ -23,6 +23,27 @@ describe("Contract test for Oasst API", function () {
expect(await oasstApiClient.ackTask(task.id, "321")).to.be.null;
});
+ it("can record a taskInteraction", async () => {
+ const task = await oasstApiClient.fetchTask("random", {
+ sub: "test",
+ name: "test",
+ email: "test",
+ });
+ expect(
+ await oasstApiClient.interactTask(
+ "text_reply_to_message",
+ task.id,
+ "1",
+ { text: "Test" },
+ {
+ sub: "test",
+ name: "test",
+ email: "test",
+ }
+ )
+ ).to.be.not.null;
+ });
+
// TODO(#354): Add test for 204
// TODO(#354): Add test for parsing >=300, throwing an OasstError
// TODO(#354): Add test for parsing >=300, throwing a generic error
diff --git a/website/src/lib/oasst_api_client.ts b/website/src/lib/oasst_api_client.ts
index 45a0859e15..7e22544ab5 100644
--- a/website/src/lib/oasst_api_client.ts
+++ b/website/src/lib/oasst_api_client.ts
@@ -1,5 +1,10 @@
import { JWT } from "next-auth/jwt";
+declare global {
+ // eslint-disable-next-line no-var
+ var oasstApiClient: OasstApiClient | undefined;
+}
+
class OasstError {
message: string;
errorCode: number;
@@ -12,7 +17,7 @@ class OasstError {
}
}
-export default class OasstApiClient {
+export class OasstApiClient {
constructor(private readonly oasstApiUrl: string, private readonly oasstApiKey: string) {}
private async post(path: string, body: any): Promise<any> {
@@ -61,4 +66,33 @@ export default class OasstApiClient {
message_id: messageId,
});
}
+
+ // TODO return a strongly typed Task?
+ // This method is used to record interaction with task while fetching next task.
+ // This is a raw Json type, so we can't use it to strongly type the task.
+ async interactTask(
+ updateType: string,
+ messageId: string,
+ userMessageId: string,
+ content: object,
+ userToken: JWT
+ ): Promise<any> {
+ return this.post("/api/v1/tasks/interaction", {
+ type: updateType,
+ user: {
+ id: userToken.sub,
+ display_name: userToken.name || userToken.email,
+ auth_method: "local",
+ },
+ message_id: messageId,
+ user_message_id: userMessageId,
+ ...content,
+ });
+ }
+}
+
+export const oasstApiClient =
+ globalThis.oasstApiClient || new OasstApiClient(process.env.FASTAPI_URL, process.env.FASTAPI_KEY);
+if (process.env.NODE_ENV !== "production") {
+ globalThis.oasstApiClient = oasstApiClient;
}
diff --git a/website/src/pages/api/new_task/[task_type].ts b/website/src/pages/api/new_task/[task_type].ts
index bbe31bef38..addcf3d8bc 100644
--- a/website/src/pages/api/new_task/[task_type].ts
+++ b/website/src/pages/api/new_task/[task_type].ts
@@ -1,5 +1,5 @@
import { getToken } from "next-auth/jwt";
-import OasstApiClient from "src/lib/oasst_api_client";
+import { oasstApiClient } from "src/lib/oasst_api_client";
import prisma from "src/lib/prismadb";
/**
@@ -21,8 +21,6 @@ const handler = async (req, res) => {
return;
}
- const oasstApiClient = new OasstApiClient(process.env.FASTAPI_URL, process.env.FASTAPI_KEY);
-
// Fetch the new task.
const task = await oasstApiClient.fetchTask(task_type, token);
diff --git a/website/src/pages/api/update_task.ts b/website/src/pages/api/update_task.ts
index 2d371354e5..4eea8c1e1f 100644
--- a/website/src/pages/api/update_task.ts
+++ b/website/src/pages/api/update_task.ts
@@ -1,4 +1,5 @@
import { getToken } from "next-auth/jwt";
+import { oasstApiClient } from "src/lib/oasst_api_client";
import prisma from "src/lib/prismadb";
/**
@@ -34,28 +35,7 @@ const handler = async (req, res) => {
},
});
- // Send the interaction to the Task Backend. This automatically fetches the
- // next task in the sequence (or the done task).
- // TODO(#353): Move this into OasstApiClient.
- const interactionRes = await fetch(`${process.env.FASTAPI_URL}/api/v1/tasks/interaction`, {
- method: "POST",
- headers: {
- "X-API-Key": process.env.FASTAPI_KEY,
- "Content-Type": "application/json",
- },
- body: JSON.stringify({
- type: update_type,
- user: {
- id: token.sub,
- display_name: token.name || token.email,
- auth_method: "local",
- },
- message_id: id,
- user_message_id: interaction.id,
- ...content,
- }),
- });
- const newTask = await interactionRes.json();
+ const newTask = await oasstApiClient.interactTask(update_type, id, interaction.id, content, token);
// Stores the new task with our database.
const newRegisteredTask = await prisma.registeredTask.create({
|
#353
My attempt at adding a `taskInteraction` method with its use in `api/update_task.ts` and contract test.
This is my first time working with cypress, but I hope I understood the idea.
|
https://api.github.com/repos/LAION-AI/Open-Assistant/pulls/421
|
2023-01-05T20:50:26Z
|
2023-01-06T11:22:56Z
|
2023-01-06T11:22:56Z
|
2023-01-06T11:29:49Z
| 1,493
|
LAION-AI/Open-Assistant
| 37,832
|
Fix misspelling
|
diff --git a/README.md b/README.md
index 03b7e8a1f4..dd264ad9ec 100644
--- a/README.md
+++ b/README.md
@@ -55,7 +55,7 @@ u'{"type":"User"...'
Requests allows you to send HTTP/1.1 requests extremely easily. There’s no need to manually add query strings to your URLs, or to form-encode your `PUT` & `POST` data — but nowadays, just use the `json` method!
-Requests is **the most downloaded Python package today**, pulling in around `14M downloads / week`— according to GitHub, Requests is currently [depended upon](https://github.com/psf/requests/network/dependents?package_id=UGFja2FnZS01NzA4OTExNg%3D%3D) by `367_296` repositories. You may certianly put your trust in this code.
+Requests is **the most downloaded Python package today**, pulling in around `14M downloads / week`— according to GitHub, Requests is currently [depended upon](https://github.com/psf/requests/network/dependents?package_id=UGFja2FnZS01NzA4OTExNg%3D%3D) by `367_296` repositories. You may certainly put your trust in this code.
<p> </p>
|
Just noticed a small typo!
|
https://api.github.com/repos/psf/requests/pulls/5197
|
2019-09-18T11:09:41Z
|
2019-09-19T15:29:30Z
|
2019-09-19T15:29:30Z
|
2021-08-30T00:06:40Z
| 310
|
psf/requests
| 32,452
|
Fix bug with blink auth flow
|
diff --git a/homeassistant/components/blink/config_flow.py b/homeassistant/components/blink/config_flow.py
index d244c3164830..5c77add31185 100644
--- a/homeassistant/components/blink/config_flow.py
+++ b/homeassistant/components/blink/config_flow.py
@@ -36,6 +36,7 @@ def _send_blink_2fa_pin(auth, pin):
"""Send 2FA pin to blink servers."""
blink = Blink()
blink.auth = auth
+ blink.setup_login_ids()
blink.setup_urls()
return auth.send_auth_key(blink, pin)
|
<!--
You are amazing! Thanks for contributing to our project!
Please, DO NOT DELETE ANY TEXT from this template! (unless instructed).
-->
## Proposed change
<!--
Describe the big picture of your changes here to communicate to the
maintainers why we should accept this pull request. If it fixes a bug
or resolves a feature request, be sure to link to that issue in the
additional information section.
-->
Just installed a couple blink cameras in my house and wanted to add the blink integration into Home Assistant. After entering my email/password, it asks for a 2FA code that is emailed. However, every time I entered this code, I got "invalid access token".
First I tried googling for the problem: I found some old posts from a few years ago that all mention it's "fixed". Nothing really recent.
Next, I tried updating HA, which necessitated updating python for me (was still on 3.7). I'm now running a completely fresh install of python 3.8 and a fresh, up-to-date install of HA - problem persisted, so I did some more digging.
From the command line, following the directions for [blinkpy](https://github.com/fronzbot/blinkpy) and utilizing the version that was installed in HA, I could authenticate without issue.
Digging into HA's code, I can see that it's calling `blink.setup_urls()` instead of `blink.start()` as per blinkpy's instructions. Changing this line fixes the issue. Digging into it further, I can see that `start()` does a couple things: specifically, it calls `setup_login_ids()` before `setup_urls()` - from my testing on the command line, this is the key to the solution. So, if, for some reason, calling `start()` is undesirable, we could instead run:
```python
blink.setup_login_ids()
blink.setup_urls()
```
...and that _should_ also solve the problem.
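For reference, here is a sketch of the corrected helper, consolidating the one-line patch above; the calls come from the diff and the description, while the blinkpy import path is an assumption on my part. The key point is simply calling `setup_login_ids()` before `setup_urls()`.
```python
from blinkpy.blinkpy import Blink  # assumed import path for the blinkpy package


def _send_blink_2fa_pin(auth, pin):
    """Send the 2FA pin to Blink's servers (sketch mirroring the patched helper)."""
    blink = Blink()
    blink.auth = auth
    blink.setup_login_ids()  # the missing step: populate the login ids first
    blink.setup_urls()       # now the region-specific URLs can be built
    return auth.send_auth_key(blink, pin)
```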
## Type of change
<!--
What type of change does your PR introduce to Home Assistant?
NOTE: Please, check only 1! box!
If your PR requires multiple boxes to be checked, you'll most likely need to
split it into multiple PRs. This makes things easier and faster to code review.
-->
- [ ] Dependency upgrade
- [x] Bugfix (non-breaking change which fixes an issue)
- [ ] New integration (thank you!)
- [ ] New feature (which adds functionality to an existing integration)
- [ ] Breaking change (fix/feature causing existing functionality to break)
- [ ] Code quality improvements to existing code or addition of tests
## Additional information
<!--
Details are important, and help maintainers processing your PR.
Please be sure to fill out additional details, if applicable.
-->
- This PR fixes or closes issue: [fixes #44465](https://github.com/home-assistant/core/issues/44465)
- This PR is related to issue:
- Link to documentation pull request:
## Checklist
<!--
Put an `x` in the boxes that apply. You can also fill these out after
creating the PR. If you're unsure about any of them, don't hesitate to ask.
We're here to help! This is simply a reminder of what we are going to look
for before merging your code.
-->
- [x] The code change is tested and works locally.
- [ ] Local tests pass. **Your PR cannot be merged unless tests pass**
- [x] There is no commented out code in this PR.
- [x] I have followed the [development checklist][dev-checklist]
- [ ] The code has been formatted using Black (`black --fast homeassistant tests`)
- [ ] Tests have been added to verify that the new code works.
If user exposed functionality or configuration variables are added/changed:
- [ ] Documentation added/updated for [www.home-assistant.io][docs-repository]
If the code communicates with devices, web services, or third-party tools:
- [ ] The [manifest file][manifest-docs] has all fields filled out correctly.
Updated and included derived files by running: `python3 -m script.hassfest`.
- [ ] New or updated dependencies have been added to `requirements_all.txt`.
Updated by running `python3 -m script.gen_requirements_all`.
- [ ] Untested files have been added to `.coveragerc`.
The integration reached or maintains the following [Integration Quality Scale][quality-scale]:
<!--
The Integration Quality Scale scores an integration on the code quality
and user experience. Each level of the quality scale consists of a list
of requirements. We highly recommend getting your integration scored!
-->
- [ ] No score or internal
- [ ] 🥈 Silver
- [ ] 🥇 Gold
- [ ] 🏆 Platinum
<!--
This project is very active and we have a high turnover of pull requests.
Unfortunately, the number of incoming pull requests is higher than what our
reviewers can review and merge so there is a long backlog of pull requests
waiting for review. You can help here!
By reviewing another pull request, you will help raise the code quality of
that pull request and the final review will be faster. This way the general
pace of pull request reviews will go up and your wait time will go down.
When picking a pull request to review, try to choose one that hasn't yet
been reviewed.
Thanks for helping out!
-->
To help with the load of incoming pull requests:
- [ ] I have reviewed two other [open pull requests][prs] in this repository.
[prs]: https://github.com/home-assistant/core/pulls?q=is%3Aopen+is%3Apr+-author%3A%40me+-draft%3Atrue+-label%3Awaiting-for-upstream+sort%3Acreated-desc+review%3Anone
<!--
Thank you for contributing <3
Below, some useful links you could explore:
-->
[dev-checklist]: https://developers.home-assistant.io/docs/en/development_checklist.html
[manifest-docs]: https://developers.home-assistant.io/docs/en/creating_integration_manifest.html
[quality-scale]: https://developers.home-assistant.io/docs/en/next/integration_quality_scale_index.html
[docs-repository]: https://github.com/home-assistant/home-assistant.io
|
https://api.github.com/repos/home-assistant/core/pulls/44769
|
2021-01-02T17:59:34Z
|
2021-01-04T11:33:35Z
|
2021-01-04T11:33:35Z
|
2021-01-05T13:25:20Z
| 132
|
home-assistant/core
| 39,287
|
Interactive Mobjects Performance Improvements
|
diff --git a/example_scenes.py b/example_scenes.py
index d8b7d4b247..f2a95727e4 100644
--- a/example_scenes.py
+++ b/example_scenes.py
@@ -497,7 +497,8 @@ def text_updater(old_text):
self.add(MotionMobject(text))
self.textbox.set_value("Manim")
- self.embed()
+ # self.wait(60)
+ # self.embed()
# See https://github.com/3b1b/videos for many, many more
diff --git a/manimlib/mobject/interactive.py b/manimlib/mobject/interactive.py
index 7e72d8160e..bb27c99ef7 100644
--- a/manimlib/mobject/interactive.py
+++ b/manimlib/mobject/interactive.py
@@ -22,21 +22,19 @@ class MotionMobject(Mobject):
You could hold and drag this object to any position
"""
- CONFIG = {
- "listen_to_events": True
- }
-
def __init__(self, mobject, **kwargs):
super().__init__(**kwargs)
+ assert(isinstance(mobject, Mobject))
self.mobject = mobject
+ self.mobject.listen_to_events = True
+ self.mobject.on_mouse_drag = self.mob_on_mouse_drag
# To avoid locking it as static mobject
self.mobject.add_updater(lambda mob: None)
self.add(mobject)
- def on_mouse_drag(self, point, d_point, buttons, modifiers):
- if self.mobject.is_point_touching(point):
- self.mobject.move_to(point)
- return False
+ def mob_on_mouse_drag(self, point, d_point, buttons, modifiers):
+ self.mobject.move_to(point)
+ return False
class Button(Mobject):
@@ -44,24 +42,22 @@ class Button(Mobject):
Pass any mobject and register an on_click method
"""
- CONFIG = {
- "listen_to_events": True
- }
-
def __init__(self, mobject, on_click, **kwargs):
super().__init__(**kwargs)
- self.mobject, self.on_click = mobject, on_click
+ self.on_click = on_click
+ self.mobject = mobject
+ self.mobject.listen_to_events = True
+ self.mobject.on_mouse_press = self.mob_on_mouse_press
self.add(self.mobject)
- def on_mouse_press(self, point, button, mods):
- if self.mobject.is_point_touching(point):
- self.on_click()
- return False
+ def mob_on_mouse_press(self, point, button, mods):
+ self.on_click()
+ return False
# Controls
-class ContolMobject(ValueTracker):
+class ControlMobject(ValueTracker):
CONFIG = {
"listen_to_events": True
}
@@ -88,7 +84,7 @@ def set_value_anim(self, value):
pass
-class EnableDisableButton(ContolMobject):
+class EnableDisableButton(ControlMobject):
CONFIG = {
"value_type": np.dtype(bool),
"rect_kwargs": {
@@ -122,7 +118,7 @@ def on_mouse_press(self, point, button, mods):
return False
-class Checkbox(ContolMobject):
+class Checkbox(ControlMobject):
CONFIG = {
"value_type": np.dtype(bool),
"rect_kwargs": {
@@ -191,8 +187,11 @@ def get_cross(self):
return cross
-class LinearNumberSlider(ContolMobject):
+class LinearNumberSlider(ControlMobject):
CONFIG = {
+ # Since, only slider circle listnes to drag event
+ "listen_to_events": False,
+
"value_type": np.float64,
"min_value": -10.0,
"max_value": 10.0,
@@ -222,6 +221,9 @@ def __init__(self, value=0, **kwargs):
self.slider_axis.set_opacity(0.0)
self.slider.move_to(self.slider_axis)
+ self.slider.listen_to_events = True
+ self.slider.on_mouse_drag = self.slider_on_mouse_drag
+
super().__init__(value, self.bar, self.slider, self.slider_axis, ** kwargs)
def assert_value(self, value):
@@ -231,10 +233,9 @@ def set_value_anim(self, value):
prop = (value - self.min_value) / (self.max_value - self.min_value)
self.slider.move_to(self.slider_axis.point_from_proportion(prop))
- def on_mouse_drag(self, point, d_point, buttons, modifiers):
- if self.slider.is_point_touching(point):
- self.set_value(self.get_value_from_point(point))
- return False
+ def slider_on_mouse_drag(self, point, d_point, buttons, modifiers):
+ self.set_value(self.get_value_from_point(point))
+ return False
# Helper Methods
@@ -348,7 +349,7 @@ def get_picked_opacity(self):
return rgba[3]
-class Textbox(ContolMobject):
+class Textbox(ControlMobject):
CONFIG = {
"value_type": np.dtype(object),
@@ -371,6 +372,8 @@ def __init__(self, value="", **kwargs):
digest_config(self, kwargs)
self.isActive = self.isInitiallyActive
self.box = Rectangle(**self.box_kwargs)
+ self.box.listen_to_events = True
+ self.box.on_mouse_press = self.box_on_mouse_press
self.text = Text(value, **self.text_kwargs)
super().__init__(value, self.box, self.text, **kwargs)
self.update_text(value)
@@ -397,11 +400,10 @@ def active_anim(self, isActive):
else:
self.box.set_stroke(self.deactive_color)
- def on_mouse_press(self, point, button, mods):
- if self.box.is_point_touching(point):
- self.isActive = not self.isActive
- self.active_anim(self.isActive)
- return False
+ def box_on_mouse_press(self, point, button, mods):
+ self.isActive = not self.isActive
+ self.active_anim(self.isActive)
+ return False
def on_key_press(self, symbol, modifiers):
char = chr(symbol)
@@ -425,7 +427,6 @@ def on_key_press(self, symbol, modifiers):
class ControlPanel(Group):
CONFIG = {
- "listen_to_events": True,
"panel_kwargs": {
"width": FRAME_WIDTH / 4,
"height": MED_SMALL_BUFF + FRAME_HEIGHT,
@@ -451,6 +452,8 @@ def __init__(self, *controls, **kwargs):
self.panel = Rectangle(**self.panel_kwargs)
self.panel.to_corner(UP + LEFT, buff=0)
self.panel.shift(self.panel.get_height() * UP)
+ self.panel.listen_to_events = True
+ self.panel.on_mouse_scroll = self.panel_on_mouse_scroll
self.panel_opener_rect = Rectangle(**self.opener_kwargs)
self.panel_info_text = Text(**self.opener_text_kwargs)
@@ -458,6 +461,8 @@ def __init__(self, *controls, **kwargs):
self.panel_opener = Group(self.panel_opener_rect, self.panel_info_text)
self.panel_opener.next_to(self.panel, DOWN, aligned_edge=DOWN)
+ self.panel_opener.listen_to_events = True
+ self.panel_opener.on_mouse_drag = self.panel_opener_on_mouse_drag
self.controls = Group(*controls)
self.controls.arrange(DOWN, center=False, aligned_edge=ORIGIN)
@@ -510,14 +515,12 @@ def close_panel(self):
self.move_panel_and_controls_to_panel_opener()
return self
- def on_mouse_drag(self, point, d_point, buttons, modifiers):
- if self.panel_opener.is_point_touching(point):
- self.panel_opener.match_y(Dot(point))
- self.move_panel_and_controls_to_panel_opener()
- return False
+ def panel_opener_on_mouse_drag(self, point, d_point, buttons, modifiers):
+ self.panel_opener.match_y(Dot(point))
+ self.move_panel_and_controls_to_panel_opener()
+ return False
- def on_mouse_scroll(self, point, offset):
- if self.panel.is_point_touching(point):
- factor = 10 * offset[1]
- self.controls.set_y(self.controls.get_y() + factor)
- return False
+ def panel_on_mouse_scroll(self, point, offset):
+ factor = 10 * offset[1]
+ self.controls.set_y(self.controls.get_y() + factor)
+ return False
diff --git a/manimlib/scene/scene.py b/manimlib/scene/scene.py
index 293b7a7195..f15aaa3a85 100644
--- a/manimlib/scene/scene.py
+++ b/manimlib/scene/scene.py
@@ -58,6 +58,9 @@ def __init__(self, **kwargs):
self.mouse_point = Point()
self.mouse_drag_point = Point()
+ self.mob_listners = []
+ self.mobjects_to_drag = []
+
# Much nicer to work with deterministic scenes
if self.random_seed is not None:
random.seed(self.random_seed)
@@ -205,6 +208,9 @@ def add(self, *new_mobjects):
"""
self.remove(*new_mobjects)
self.mobjects += new_mobjects
+ for new_mob in new_mobjects:
+ for mob_listner in filter(lambda mob: mob.listen_to_events, reversed(new_mob.get_family())):
+ self.mob_listners.insert(0, mob_listner)
return self
def add_mobjects_among(self, values):
@@ -232,6 +238,9 @@ def bring_to_front(self, *mobjects):
def bring_to_back(self, *mobjects):
self.remove(*mobjects)
self.mobjects = list(mobjects) + self.mobjects
+ for new_mob in reversed(mobjects):
+ for mob_listner in filter(lambda mob: mob.listen_to_events, reversed(new_mob.get_family())):
+ self.mob_listners.append(mob_listner)
return self
def clear(self):
@@ -519,10 +528,7 @@ def get_event_listeners_mobjects(self):
in reversed order. So the top most mobject's event is called first.
This helps in event bubbling.
"""
- return filter(
- lambda mob: mob.listen_to_events,
- reversed(self.get_mobject_family_members())
- )
+ return self.mob_listners
def on_mouse_motion(self, point, d_point):
self.mouse_point.move_to(point)
@@ -548,13 +554,16 @@ def on_mouse_motion(self, point, d_point):
def on_mouse_drag(self, point, d_point, buttons, modifiers):
self.mouse_drag_point.move_to(point)
+ for mob_listener in self.mobjects_to_drag:
+ propagate_event = mob_listener.on_mouse_drag(point, d_point, buttons, modifiers)
+ if propagate_event is not None and propagate_event is False:
+ return
+
+ def on_mouse_press(self, point, button, mods):
for mob_listener in self.get_event_listeners_mobjects():
if mob_listener.is_point_touching(point):
- propagate_event = mob_listener.on_mouse_drag(point, d_point, buttons, modifiers)
- if propagate_event is not None and propagate_event is False:
- return
+ self.mobjects_to_drag.append(mob_listener)
- def on_mouse_press(self, point, button, mods):
for mob_listener in self.get_event_listeners_mobjects():
if mob_listener.is_point_touching(point):
propagate_event = mob_listener.on_mouse_press(point, button, mods)
@@ -562,6 +571,8 @@ def on_mouse_press(self, point, button, mods):
return
def on_mouse_release(self, point, button, mods):
+ self.mobjects_to_drag = []
+
for mob_listener in self.get_event_listeners_mobjects():
if mob_listener.is_point_touching(point):
propagate_event = mob_listener.on_mouse_release(point, button, mods)
|
# Motivation
As mentioned in https://github.com/3b1b/manim/pull/1323#issuecomment-769350572, interactive mobjects were a bit laggy.
So, to improve interactive mobjects' performance, the following changes are made.
1. Maintain a separate list of mobject listeners instead of filtering mobjects on every frame.
> This significantly improved the performance on all events except the on_mouse_drag event. So to improve it the 2nd change was made.
2. Changed the way drag events are handled.
> This improved the performance of on_mouse_drag event.
# Files Changed
1. manimlib/scene/scene.py
> Stores a separate list of mob_listners rather than filtering mobjects on every frame, and changes how the on_mouse_drag event is handled.
2. manimlib/mobject/interactive.py
> To adapt to the changes made in the way event handling works
3. example_scenes.py
> Commented out "self.embed()"
# Change in how on_mouse_drag works
If the mouse is being dragged, the following events are triggered:
1. **on_mouse_press** once
2. Then, while the mouse is being dragged, **on_mouse_drag** is called
3. Finally, **on_mouse_release** once
Previously, a mobject's on_mouse_drag event was called on every frame only if the point was touching the mobject.
So if, in the middle of dragging, the mouse point left the mobject's radius, the mobject's on_mouse_drag event stopped firing.
Now, when on_mouse_press is called, all the mobjects near the mouse point are stored in an array. While the mouse is being dragged,
on_mouse_drag is called on all the mobjects stored earlier, regardless of whether the mouse point is still touching the mobject or not.
And once on_mouse_release is called, that array is cleared (see the sketch below).
This has significantly improved the interactivity involving dragging.
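A simplified, hypothetical sketch of that flow (not the actual `scene.py` code): the press handler records which listening mobjects were hit, the drag handler forwards events to exactly those mobjects without re-testing the hit, and the release handler clears the list.
```python
class DragHandlingSketch:
    """Illustration of the press/drag/release bookkeeping described above."""

    def __init__(self):
        self.mob_listners = []      # mobjects with listen_to_events = True (added by Scene.add)
        self.mobjects_to_drag = []  # filled on mouse press, cleared on mouse release

    def on_mouse_press(self, point, button, mods):
        for mob in self.mob_listners:
            if mob.is_point_touching(point):
                self.mobjects_to_drag.append(mob)
                if mob.on_mouse_press(point, button, mods) is False:
                    return  # stop event bubbling

    def on_mouse_drag(self, point, d_point, buttons, modifiers):
        # No hit test here: dragging continues even if the cursor leaves the mobject.
        for mob in self.mobjects_to_drag:
            if mob.on_mouse_drag(point, d_point, buttons, modifiers) is False:
                return

    def on_mouse_release(self, point, button, mods):
        self.mobjects_to_drag = []
        for mob in self.mob_listners:
            if mob.is_point_touching(point):
                if mob.on_mouse_release(point, button, mods) is False:
                    return
```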
# Results

Now, all the interactive mobjects are responding very quickly to events.
|
https://api.github.com/repos/3b1b/manim/pulls/1326
|
2021-01-31T10:37:33Z
|
2021-02-01T17:30:05Z
|
2021-02-01T17:30:05Z
|
2021-02-02T00:48:37Z
| 2,693
|
3b1b/manim
| 18,404
|
Fixed Broken link of paper jozefowicz15 et al
|
diff --git a/keras/layers/rnn/lstm.py b/keras/layers/rnn/lstm.py
index fb25d029166..c3661f44752 100644
--- a/keras/layers/rnn/lstm.py
+++ b/keras/layers/rnn/lstm.py
@@ -89,7 +89,7 @@ class LSTMCell(DropoutRNNCellMixin, base_layer.BaseRandomLayer):
unit_forget_bias: Boolean (default `True`). If True, add 1 to the bias of
the forget gate at initialization. Setting it to true will also force
`bias_initializer="zeros"`. This is recommended in [Jozefowicz et
- al.](http://www.jmlr.org/proceedings/papers/v37/jozefowicz15.pdf)
+ al.](https://github.com/mlresearch/v37/blob/gh-pages/jozefowicz15.pdf)
kernel_regularizer: Regularizer function applied to the `kernel` weights
matrix. Default: `None`.
recurrent_regularizer: Regularizer function applied to
|
Fixed the broken link to the jozefowicz15 et al. paper at line 92.
|
https://api.github.com/repos/keras-team/keras/pulls/17038
|
2022-09-16T05:47:24Z
|
2022-09-21T18:13:31Z
|
2022-09-21T18:13:31Z
|
2022-09-30T00:30:07Z
| 240
|
keras-team/keras
| 47,572
|
Bump bleach from 3.1.2 to 3.1.4
|
diff --git a/Pipfile.lock b/Pipfile.lock
index 2c39f48f2f..9e84e13c0f 100644
--- a/Pipfile.lock
+++ b/Pipfile.lock
@@ -302,11 +302,11 @@
},
"bleach": {
"hashes": [
- "sha256:2ffa40dfa80b141ff58eee538bce0d2a09c7d456018d00f7a2cb0ebf967c524d",
- "sha256:f0b1ee0315062e60afa6b7cc39b1c3718b591e1d552a8841044dc49a68465659"
+ "sha256:cc8da25076a1fe56c3ac63671e2194458e0c4d9c7becfd52ca251650d517903c",
+ "sha256:e78e426105ac07026ba098f04de8abe9b6e3e98b5befbf89b51a5ef0a4292b03"
],
"index": "pypi",
- "version": "==3.1.2"
+ "version": "==3.1.4"
},
"certifi": {
"hashes": [
@@ -871,10 +871,10 @@
},
"tqdm": {
"hashes": [
- "sha256:0d8b5afb66e23d80433102e9bd8b5c8b65d34c2a2255b2de58d97bd2ea8170fd",
- "sha256:f35fb121bafa030bd94e74fcfd44f3c2830039a2ddef7fc87ef1c2d205237b24"
+ "sha256:03d2366c64d44c7f61e74c700d9b202d57e9efe355ea5c28814c52bfe7a50b8c",
+ "sha256:be5ddeec77d78ba781ea41eacb2358a77f74cc2407f54b82222d7ee7dc8c8ccf"
],
- "version": "==4.43.0"
+ "version": "==4.44.1"
},
"twine": {
"hashes": [
@@ -928,10 +928,10 @@
},
"virtualenv": {
"hashes": [
- "sha256:87831f1070534b636fea2241dd66f3afe37ac9041bcca6d0af3215cdcfbf7d82",
- "sha256:f3128d882383c503003130389bf892856341c1da12c881ae24d6358c82561b55"
+ "sha256:4e399f48c6b71228bf79f5febd27e3bbb753d9d5905776a86667bc61ab628a25",
+ "sha256:9e81279f4a9d16d1c0654a127c2c86e5bca2073585341691882c1e66e31ef8a5"
],
- "version": "==20.0.13"
+ "version": "==20.0.15"
},
"webencodings": {
"hashes": [
|
Bumps [bleach](https://github.com/mozilla/bleach) from 3.1.2 to 3.1.4.
<details>
<summary>Changelog</summary>
<p><em>Sourced from <a href="https://github.com/mozilla/bleach/blob/master/CHANGES">bleach's changelog</a>.</em></p>
<blockquote>
<h2>Version 3.1.4 (March 24th, 2020)</h2>
<p><strong>Security fixes</strong></p>
<ul>
<li>
<p><code>bleach.clean</code> behavior parsing style attributes could result in a
regular expression denial of service (ReDoS).</p>
<p>Calls to <code>bleach.clean</code> with an allowed tag with an allowed
<code>style</code> attribute were vulnerable to ReDoS. For example,
<code>bleach.clean(..., attributes={'a': ['style']})</code>.</p>
<p>This issue was confirmed in Bleach versions v3.1.3, v3.1.2, v3.1.1,
v3.1.0, v3.0.0, v2.1.4, and v2.1.3. Earlier versions used a similar
regular expression and should be considered vulnerable too.</p>
<p>Anyone using Bleach <=v3.1.3 is encouraged to upgrade.</p>
<p><a href="https://bugzilla.mozilla.org/show_bug.cgi?id=1623633">https://bugzilla.mozilla.org/show_bug.cgi?id=1623633</a></p>
</li>
</ul>
<p><strong>Backwards incompatible changes</strong></p>
<ul>
<li>Style attributes with dashes, or single or double quoted values are
cleaned instead of passed through.</li>
</ul>
<p><strong>Features</strong></p>
<p>None</p>
<p><strong>Bug fixes</strong></p>
<p>None</p>
<h2>Version 3.1.3 (March 17th, 2020)</h2>
<p><strong>Security fixes</strong></p>
<p>None</p>
<p><strong>Backwards incompatible changes</strong></p>
<p>None</p>
<p><strong>Features</strong></p>
<ul>
<li>
<p>Add relative link to code of conduct. (<a href="https://github-redirect.dependabot.com/mozilla/bleach/issues/442">#442</a>)</p>
</li>
<li>
<p>Drop deprecated 'setup.py test' support. (<a href="https://github-redirect.dependabot.com/mozilla/bleach/issues/507">#507</a>)</p>
</li>
</ul>
</tr></table> ... (truncated)
</blockquote>
</details>
<details>
<summary>Commits</summary>
<ul>
<li><a href="https://github.com/mozilla/bleach/commit/6e74a5027b57055cdaeb040343d32934121392a7"><code>6e74a50</code></a> Update for v3.1.4 release</li>
<li><a href="https://github.com/mozilla/bleach/commit/d6018f2539d271963c3e7f54f36ef11900363c69"><code>d6018f2</code></a> fix bug 1623633</li>
<li><a href="https://github.com/mozilla/bleach/commit/fc77027e67cc04aff6f4d4885358705f98ad20f4"><code>fc77027</code></a> Merge branch 'v3.1.0-branch'</li>
<li><a href="https://github.com/mozilla/bleach/commit/e4b1c50e098c33f82c862a34bb2a40f9c4458f46"><code>e4b1c50</code></a> Update for v3.1.3 release</li>
<li><a href="https://github.com/mozilla/bleach/commit/59cc502cee44bd18adc78619e6baed7a108c3ba1"><code>59cc502</code></a> Update for v3.1.2 release</li>
<li><a href="https://github.com/mozilla/bleach/commit/3f39d489ab7a1b38df8c245e9bd66217c1698369"><code>3f39d48</code></a> add wheel to requirements-dev</li>
<li><a href="https://github.com/mozilla/bleach/commit/175f67740e7951e1d80cefb7831e6c3e4efeb986"><code>175f677</code></a> fix bug 1621692</li>
<li><a href="https://github.com/mozilla/bleach/commit/e0ad450828832e9548d256f0938823a366337368"><code>e0ad450</code></a> Update for v3.1.1 release</li>
<li><a href="https://github.com/mozilla/bleach/commit/f77e0f6392177a06e46a49abd61a4d9f035e57fd"><code>f77e0f6</code></a> fix bug 1615315</li>
<li><a href="https://github.com/mozilla/bleach/commit/8d416c5ee780efe5daa237368bd9bd1b1af75c12"><code>8d416c5</code></a> Drop support for EOL Python 3.4</li>
<li>Additional commits viewable in <a href="https://github.com/mozilla/bleach/compare/v3.1.2...v3.1.4">compare view</a></li>
</ul>
</details>
<br />
[](https://help.github.com/articles/configuring-automated-security-fixes)
Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`.
[//]: # (dependabot-automerge-start)
[//]: # (dependabot-automerge-end)
---
<details>
<summary>Dependabot commands and options</summary>
<br />
You can trigger Dependabot actions by commenting on this PR:
- `@dependabot rebase` will rebase this PR
- `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it
- `@dependabot merge` will merge this PR after your CI passes on it
- `@dependabot squash and merge` will squash and merge this PR after your CI passes on it
- `@dependabot cancel merge` will cancel a previously requested merge and block automerging
- `@dependabot reopen` will reopen this PR if it is closed
- `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually
- `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself)
- `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself)
- `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
- `@dependabot use these labels` will set the current labels as the default for future PRs for this repo and language
- `@dependabot use these reviewers` will set the current reviewers as the default for future PRs for this repo and language
- `@dependabot use these assignees` will set the current assignees as the default for future PRs for this repo and language
- `@dependabot use this milestone` will set the current milestone as the default for future PRs for this repo and language
You can disable automated security fix PRs for this repo from the [Security Alerts page](https://github.com/psf/black/network/alerts).
</details>
|
https://api.github.com/repos/psf/black/pulls/1326
|
2020-03-30T20:27:58Z
|
2020-05-08T11:31:47Z
|
2020-05-08T11:31:47Z
|
2020-05-08T11:31:56Z
| 780
|
psf/black
| 24,044
|
Add XP progress
|
diff --git a/website/public/locales/en/leaderboard.json b/website/public/locales/en/leaderboard.json
index 68805e0426..8262746241 100644
--- a/website/public/locales/en/leaderboard.json
+++ b/website/public/locales/en/leaderboard.json
@@ -9,6 +9,7 @@
"labels_simple": "Labels (simple)",
"last_updated_at": "Last updated at: {{val, datetime}}",
"leaderboard": "Leaderboard",
+ "level_progress_message": "With a total of {{score}} points, you have reached level {{level,number,integer}}!",
"month": "Month",
"monthly": "Monthly",
"next": "Next",
@@ -19,6 +20,7 @@
"prompt": "Prompts",
"rank": "Rank",
"rankings": "Rankings",
+ "reached_max_level": "You have reached max level, thank you for your hard work!",
"replies_assistant": "Replies as Assistant",
"replies_prompter": "Replies as Prompter",
"reply": "Replies",
@@ -31,6 +33,7 @@
"view_all": "View all",
"week": "Week",
"weekly": "Weekly",
+ "xp_progress_message": "You need {{need, number, integer}} more points to reach the next level!",
"your_account": "Your account",
"your_stats": "Your statistics"
}
diff --git a/website/src/components/Account/XPBar.tsx b/website/src/components/Account/XPBar.tsx
new file mode 100644
index 0000000000..8be6dba463
--- /dev/null
+++ b/website/src/components/Account/XPBar.tsx
@@ -0,0 +1,51 @@
+import { Grid, GridItem, keyframes, Progress, Text } from "@chakra-ui/react";
+import { Icon } from "@chakra-ui/react";
+import { Star } from "lucide-react";
+import { useTranslation } from "next-i18next";
+import { useMemo } from "react";
+import { useUserScore } from "src/hooks/ui/useUserScore";
+
+const spin = keyframes`
+0% {transform: rotate(0deg);}
+100% {transform: rotate(360deg);}
+`;
+const starAnimation = `${spin} infinite 2.5s cubic-bezier(0.52, -0.43, 0, 1.04)`;
+
+export const XPBar = () => {
+ const { t } = useTranslation("leaderboard");
+ const { level, score, scoreUntilNextLevel, reachedMaxLevel } = useUserScore();
+
+ const nextLevelText = useMemo(() => {
+ if (reachedMaxLevel) {
+ return t("reached_max_level");
+ }
+ return t("xp_progress_message", { need: scoreUntilNextLevel });
+ }, [reachedMaxLevel, scoreUntilNextLevel, t]);
+
+ return (
+ <Grid
+ rowGap={1}
+ columnGap={6}
+ templateAreas={`
+ "star row1"
+ "star row2"
+ "star progress"
+ `}
+ gridTemplateColumns="auto 1fr"
+ alignItems="center"
+ >
+ <GridItem area="star" justifySelf="center">
+ <Icon as={Star} boxSize={20} fill="gold" color="gold" animation={starAnimation} />
+ </GridItem>
+ <GridItem area="row1">
+ <Text>{t("level_progress_message", { score, level })}</Text>
+ </GridItem>
+ <GridItem area="row2">
+ <Text>{nextLevelText}</Text>
+ </GridItem>
+ <GridItem area="progress">
+ <Progress hasStripe colorScheme="yellow" value={level} />
+ </GridItem>
+ </Grid>
+ );
+};
diff --git a/website/src/components/Header/UserScore.tsx b/website/src/components/Header/UserScore.tsx
index b7994bf97a..0b7dc70393 100644
--- a/website/src/components/Header/UserScore.tsx
+++ b/website/src/components/Header/UserScore.tsx
@@ -5,7 +5,7 @@ import { useTranslation } from "next-i18next";
import { useUserScore } from "src/hooks/ui/useUserScore";
export const UserScore = () => {
- const score = useUserScore();
+ const { score } = useUserScore();
const { t } = useTranslation("leaderboard");
if (!Number.isFinite(score)) {
diff --git a/website/src/hooks/ui/useUserScore.ts b/website/src/hooks/ui/useUserScore.ts
index 53ecf28594..999544eab2 100644
--- a/website/src/hooks/ui/useUserScore.ts
+++ b/website/src/hooks/ui/useUserScore.ts
@@ -3,6 +3,18 @@ import { get } from "src/lib/api";
import { LeaderboardEntity, LeaderboardTimeFrame } from "src/types/Leaderboard";
import uswSWRImmutable from "swr/immutable";
+// https://github.com/LAION-AI/Open-Assistant/issues/1957
+function* generateThresholds(baseline = 3, alpha = 1.1521, maxLevel = 100) {
+ let sum = 0;
+ yield sum;
+ for (let i = 1; i < maxLevel; i++) {
+ sum += i * alpha + baseline;
+ yield Math.round(sum);
+ }
+}
+
+const thresholds = Array.from(generateThresholds());
+
export const useUserScore = () => {
const { status } = useSession();
const isLoggedIn = status === "authenticated";
@@ -14,6 +26,13 @@ export const useUserScore = () => {
keepPreviousData: true,
}
);
- const score: number | undefined = entries?.total?.leader_score;
- return score;
+ const score = entries?.total?.leader_score ?? 0;
+ const level = entries?.total?.level ?? 0;
+
+ const currentLevelScore = thresholds[level];
+ const nextLevelScore = thresholds[level + 1] ?? Infinity;
+ const scoreUntilNextLevel = nextLevelScore - score;
+ const reachedMaxLevel = level === 100;
+
+ return { score, level, currentLevelScore, nextLevelScore, scoreUntilNextLevel, reachedMaxLevel };
};
diff --git a/website/src/pages/account/index.tsx b/website/src/pages/account/index.tsx
index 59f40e17a2..ff4e94ffee 100644
--- a/website/src/pages/account/index.tsx
+++ b/website/src/pages/account/index.tsx
@@ -6,6 +6,7 @@ import React from "react";
export { getDefaultStaticProps as getStaticProps } from "src/lib/default_static_props";
import { Pencil } from "lucide-react";
import { useTranslation } from "next-i18next";
+import { XPBar } from "src/components/Account/XPBar";
import { SurveyCard } from "src/components/Survey/SurveyCard";
import { get } from "src/lib/api";
import { getTypeSafei18nKey } from "src/lib/i18n";
@@ -51,6 +52,8 @@ export default function Account() {
<Text as="b">Email</Text>
<Text>{session.user.email ?? t("no_email")}</Text>
</Grid>
+ <Divider my={4} />
+ <XPBar />
</SurveyCard>
<SurveyCard>
<Title>{t("your_stats")}</Title>
diff --git a/website/src/pages/dashboard.tsx b/website/src/pages/dashboard.tsx
index c3ea14f8ea..10a49a8b83 100644
--- a/website/src/pages/dashboard.tsx
+++ b/website/src/pages/dashboard.tsx
@@ -1,4 +1,4 @@
-import { Flex } from "@chakra-ui/react";
+import { Card, CardBody, Flex } from "@chakra-ui/react";
import Head from "next/head";
import { useTranslation } from "next-i18next";
import { useMemo } from "react";
@@ -7,6 +7,7 @@ import { getDashboardLayout } from "src/components/Layout";
import { get } from "src/lib/api";
import { AvailableTasks, TaskCategory } from "src/types/Task";
export { getDefaultStaticProps as getStaticProps } from "src/lib/default_static_props";
+import { XPBar } from "src/components/Account/XPBar";
import { TaskCategoryItem } from "src/components/Dashboard/TaskOption";
import { useCurrentLocale } from "src/hooks/locale/useCurrentLocale";
import { API_ROUTES } from "src/lib/routes";
@@ -33,6 +34,11 @@ const Dashboard = () => {
<Flex direction="column" gap="10">
<WelcomeCard />
<TaskOption content={availableTaskTypes} />
+ <Card>
+ <CardBody>
+ <XPBar />
+ </CardBody>
+ </Card>
<LeaderboardWidget />
</Flex>
</>
diff --git a/website/src/types/Leaderboard.ts b/website/src/types/Leaderboard.ts
index d1a1218f6d..453cbb3e41 100644
--- a/website/src/types/Leaderboard.ts
+++ b/website/src/types/Leaderboard.ts
@@ -23,6 +23,7 @@ export interface LeaderboardEntity {
auth_method: string;
display_name: string;
leader_score: number;
+ level: number; // between 0 and 100
base_date: string;
image?: string;
modified_date: string;
|
Closes #1957

The star is animated; we may want to consider [prefers-reduced-motion](https://developer.mozilla.org/en-US/docs/Web/CSS/@media/prefers-reduced-motion) at some point (one possible approach is sketched below).
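For reference, here is a minimal sketch (not part of this PR) of how the star could honor that preference, assuming Chakra UI's `usePrefersReducedMotion` hook and a hypothetical `AnimatedStar` component extracted from `XPBar`:

```tsx
import { Icon, keyframes, usePrefersReducedMotion } from "@chakra-ui/react";
import { Star } from "lucide-react";

const spin = keyframes`
0% {transform: rotate(0deg);}
100% {transform: rotate(360deg);}
`;

// Hypothetical variant of the XPBar star: only spin when the user has not
// requested reduced motion (prefers-reduced-motion: reduce).
export const AnimatedStar = () => {
  const prefersReducedMotion = usePrefersReducedMotion();
  const animation = prefersReducedMotion
    ? undefined
    : `${spin} infinite 2.5s cubic-bezier(0.52, -0.43, 0, 1.04)`;

  return <Icon as={Star} boxSize={20} fill="gold" color="gold" animation={animation} />;
};
```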
|
https://api.github.com/repos/LAION-AI/Open-Assistant/pulls/2093
|
2023-03-17T08:09:36Z
|
2023-03-17T17:38:58Z
|
2023-03-17T17:38:58Z
|
2023-03-17T17:39:00Z
| 2,190
|
LAION-AI/Open-Assistant
| 37,249
|
Better workaround for Navi1, removing --pre for Navi3
|
diff --git a/webui.sh b/webui.sh
index 361255f697a..b348c387eba 100755
--- a/webui.sh
+++ b/webui.sh
@@ -130,12 +130,18 @@ case "$gpu_info" in
if [[ -z "${TORCH_COMMAND}" ]]
then
pyv="$(${python_cmd} -c 'import sys; print(".".join(map(str, sys.version_info[0:2])))')"
- if [[ $(bc <<< "$pyv <= 3.10") -eq 1 ]]
+ # Using an old nightly compiled against rocm 5.2 for Navi1, see https://github.com/pytorch/pytorch/issues/106728#issuecomment-1749511711
+ if [[ $pyv == "3.8" ]]
then
- # Navi users will still use torch 1.13 because 2.0 does not seem to work.
- export TORCH_COMMAND="pip install --pre torch torchvision --index-url https://download.pytorch.org/whl/nightly/rocm5.6"
+ export TORCH_COMMAND="pip install https://download.pytorch.org/whl/nightly/rocm5.2/torch-2.0.0.dev20230209%2Brocm5.2-cp38-cp38-linux_x86_64.whl https://download.pytorch.org/whl/nightly/rocm5.2/torchvision-0.15.0.dev20230209%2Brocm5.2-cp38-cp38-linux_x86_64.whl"
+ elif [[ $pyv == "3.9" ]]
+ then
+ export TORCH_COMMAND="pip install https://download.pytorch.org/whl/nightly/rocm5.2/torch-2.0.0.dev20230209%2Brocm5.2-cp39-cp39-linux_x86_64.whl https://download.pytorch.org/whl/nightly/rocm5.2/torchvision-0.15.0.dev20230209%2Brocm5.2-cp39-cp39-linux_x86_64.whl"
+ elif [[ $pyv == "3.10" ]]
+ then
+ export TORCH_COMMAND="pip install https://download.pytorch.org/whl/nightly/rocm5.2/torch-2.0.0.dev20230209%2Brocm5.2-cp310-cp310-linux_x86_64.whl https://download.pytorch.org/whl/nightly/rocm5.2/torchvision-0.15.0.dev20230209%2Brocm5.2-cp310-cp310-linux_x86_64.whl"
else
- printf "\e[1m\e[31mERROR: RX 5000 series GPUs must be using at max python 3.10, aborting...\e[0m"
+ printf "\e[1m\e[31mERROR: RX 5000 series GPUs python version must be between 3.8 and 3.10, aborting...\e[0m"
exit 1
fi
fi
@@ -143,7 +149,7 @@ case "$gpu_info" in
*"Navi 2"*) export HSA_OVERRIDE_GFX_VERSION=10.3.0
;;
*"Navi 3"*) [[ -z "${TORCH_COMMAND}" ]] && \
- export TORCH_COMMAND="pip install --pre torch torchvision --index-url https://download.pytorch.org/whl/nightly/rocm5.7"
+ export TORCH_COMMAND="pip install torch torchvision --index-url https://download.pytorch.org/whl/nightly/rocm5.7"
;;
*"Renoir"*) export HSA_OVERRIDE_GFX_VERSION=9.0.0
printf "\n%s\n" "${delimiter}"
|
This is a follow-up to my old PR https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/11048 and to https://github.com/AUTOMATIC1111/stable-diffusion-webui/commit/bda86f0fd9653657c146f7c1128f92771d16ad4e
Right now it cannot really work on older Navi1 cards because they need pytorch compiled with an older version of ROCm (5.2 or lower).
https://github.com/ROCm/Tensile/pull/1862 will probably make those workarounds unnecessary on those cards, but right now they are still needed (the PR is merged but not yet available in the stable ROCm builds).
I also found some time ago an old pytorch 2.0 nightly build, which is at least a bit better than relying on the old 1.13.1.
I also removed that horrible line which relied on bc, and removed the --pre flag from Navi3's line (it is not needed).
|
https://api.github.com/repos/AUTOMATIC1111/stable-diffusion-webui/pulls/15224
|
2024-03-11T23:24:29Z
|
2024-03-16T05:45:03Z
|
2024-03-16T05:45:03Z
|
2024-03-18T12:33:41Z
| 870
|
AUTOMATIC1111/stable-diffusion-webui
| 40,434
|