message (string, lengths 13-484) | diff (string, lengths 38-4.63k)
---|---|
Stop trying to test Elasticsearch 6.8.0 on ARM
* Stop trying to test 6.8.0 on ARM
This will allow developers with Apple Silicon hardware to still run
integration tests.
* Bump metrics store version for Apple Silicon hardware | @@ -19,6 +19,7 @@ import errno
import functools
import json
import os
+import platform
import random
import socket
import subprocess
@@ -30,7 +31,10 @@ from esrally import client, config, version
from esrally.utils import process
CONFIG_NAMES = ["in-memory-it", "es-it"]
-DISTRIBUTIONS = ["6.8.0", "8.4.0"]
+DISTRIBUTIONS = ["8.4.0"]
+# There are no ARM distribution artefacts for 6.8.0, which can't be tested on Apple Silicon
+if platform.machine() != "arm64":
+ DISTRIBUTIONS.insert(0, "6.8.0")
TRACKS = ["geonames", "nyc_taxis", "http_logs", "nested"]
ROOT_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
@@ -194,7 +198,7 @@ class TestCluster:
class EsMetricsStore:
- VERSION = "7.6.0"
+ VERSION = "7.17.0"
def __init__(self):
self.cluster = TestCluster("in-memory-it")
|
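A minimal standalone sketch of the guard used in the commit above; the distribution versions come from the diff, and the assumption is that a natively running CPython on an Apple Silicon Mac reports `arm64` from `platform.machine()`.

```
import platform

# Newest distribution is always tested; 6.8.0 has no ARM artefacts,
# so it is only added when the interpreter is not running on Apple Silicon.
DISTRIBUTIONS = ["8.4.0"]
if platform.machine() != "arm64":
    DISTRIBUTIONS.insert(0, "6.8.0")

if __name__ == "__main__":
    # On an Apple Silicon Mac with native CPython this prints ['8.4.0'];
    # on x86_64 Linux or macOS it prints ['6.8.0', '8.4.0'].
    print(platform.machine(), DISTRIBUTIONS)
```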
Serialize Notification now also returns sender email address
sent_by_email_address field was added because sometimes two
people at one institution have the same name, so the email
address, which is unique, is more useful. | @@ -1413,6 +1413,12 @@ class Notification(db.Model):
else:
return None
+ def get_created_by_email_address(self):
+ if self.created_by:
+ return self.created_by.email_address
+ else:
+ return None
+
def serialize_for_csv(self):
created_at_in_bst = convert_utc_to_bst(self.created_at)
serialized = {
@@ -1424,6 +1430,7 @@ class Notification(db.Model):
"status": self.formatted_status,
"created_at": time.strftime('%A %d %B %Y at %H:%M', created_at_in_bst.timetuple()),
"created_by_name": self.get_created_by_name(),
+ "created_by_email_address": self.get_created_by_email_address(),
}
return serialized
@@ -1454,6 +1461,7 @@ class Notification(db.Model):
"subject": self.subject,
"created_at": self.created_at.strftime(DATETIME_FORMAT),
"created_by_name": self.get_created_by_name(),
+ "created_by_email_address": self.get_created_by_email_address(),
"sent_at": self.sent_at.strftime(DATETIME_FORMAT) if self.sent_at else None,
"completed_at": self.completed_at(),
"scheduled_for": (
|
update tests/test_poly_spaces.py for Bernstein basis
update _gen_common_data(), test_partition_of_unity(), test_continuity() | @@ -89,7 +89,8 @@ def _gen_common_data(orders, gels, report):
from sfepy.discrete.common.global_interp import get_ref_coors
bases = ([ii for ii in combine([['2_4', '3_8'],
- ['lagrange', 'serendipity', 'lobatto']])]
+ ['lagrange', 'serendipity', 'bernstein',
+ 'lobatto']])]
+ [ii for ii in combine([['2_3', '3_4'],
['lagrange']])])
for geom, poly_space_base in bases:
@@ -202,7 +203,7 @@ class Test(TestCommon):
orders = {'2_3' : 5, '2_4' : 5, '3_4' : 5, '3_8' : 5}
bases = (
[ii for ii in combine(
- [['2_4', '3_8'], ['lagrange', 'serendipity']]
+ [['2_4', '3_8'], ['lagrange', 'serendipity', 'bernstein']]
)]
+ [ii for ii in combine([['2_3', '3_4'], ['lagrange']])]
)
@@ -237,7 +238,7 @@ class Test(TestCommon):
field, ps, rrc, rcell, crc, ccell, vec,
edofs, fdofs) in _gen_common_data(orders, self.gels, self.report):
- if poly_space_base in ('lagrange', 'serendipity'):
+ if poly_space_base in ('lagrange', 'serendipity', 'bernstein'):
rbf = ps.eval_base(rrc)
cbf = ps.eval_base(crc)
|
Use same Travis build badge for Linux and MacOS
Before, it looked like Linux was untested. Only a very careful reading revealed that the build
badge for MacOS also applied to Linux. | @@ -61,8 +61,9 @@ For example, pyfakefs will not work with [`lxml`](http://lxml.de/). In this cas
pyfakefs is currently automatically tested:
* On Linux, with Python 2.7, and 3.4 to 3.7, using [Travis](https://travis-ci.org/jmcgeheeiv/pyfakefs)
-* On MacOS, with Python 2.7, 3.6 and 3.7, also using [Travis](https://travis-ci.org/jmcgeheeiv/pyfakefs).
- The Linux/MacOS build is currently [](https://travis-ci.org/jmcgeheeiv/pyfakefs).
+ [](https://travis-ci.org/jmcgeheeiv/pyfakefs).
+* On MacOS, with Python 2.7, 3.6 and 3.7, also using [Travis](https://travis-ci.org/jmcgeheeiv/pyfakefs)
+ [](https://travis-ci.org/jmcgeheeiv/pyfakefs).
* On Windows, with Python 2.7, and 3.4 to 3.7 using [Appveyor](https://ci.appveyor.com/project/jmcgeheeiv/pyfakefs).
The Windows build is currently [](https://ci.appveyor.com/project/jmcgeheeiv/pyfakefs/branch/master).
|
Set run model default timestamp to 0
Without setting to 0, the finished at field could be null if the argo workflow is already evicted from the cluster.
This results in errors when parsing the table.
Alternatively we can use sql.NullInt64 type to parse the sql but that's less elegant. | @@ -20,10 +20,10 @@ type Run struct {
Name string `gorm:"column:Name; not null;"` /* The name of the K8s resource. Follow regex '[a-z0-9]([-a-z0-9]*[a-z0-9])?'*/
StorageState string `gorm:"column:StorageState; not null;"`
Namespace string `gorm:"column:Namespace; not null;"`
- Description string `gorm:"column:Description; not null"`
- CreatedAtInSec int64 `gorm:"column:CreatedAtInSec; not null"`
- ScheduledAtInSec int64 `gorm:"column:ScheduledAtInSec;"`
- FinishedAtInSec int64 `gorm:"column:FinishedAtInSec;"`
+ Description string `gorm:"column:Description; not null;"`
+ CreatedAtInSec int64 `gorm:"column:CreatedAtInSec; not null;"`
+ ScheduledAtInSec int64 `gorm:"column:ScheduledAtInSec; default:0;"`
+ FinishedAtInSec int64 `gorm:"column:FinishedAtInSec; default:0;"`
Conditions string `gorm:"column:Conditions; not null"`
Metrics []*RunMetric
ResourceReferences []*ResourceReference
|
Update 'title' and 'managedby' entries
Update yaml file to better reflect recent developments regarding A2D2. | -Name: "A2D2: AEV Autonomous Driving Dataset"
+Name: "A2D2: Audi Autonomous Driving Dataset"
Description:
An open multi-sensor dataset for autonomous driving research.
This dataset comprises semantically segmented images, semantic
@@ -8,8 +8,8 @@ Description:
active research and development in AI, computer vision, and
robotics for autonomous driving.
Contact: [email protected]
-Documentation: https://www.audi-electronics-venture.de/aev/web/en/driving-dataset.html
-ManagedBy: Audi Electronics Ventures
+Documentation: http://a2d2.audi
+ManagedBy: Audi AG
UpdateFrequency:
The dataset may be updated with additional or corrected data
on a need-to-update basis.
@@ -24,7 +24,7 @@ Tags:
- aws-pds
License: https://creativecommons.org/licenses/by-nd/4.0/
Resources:
- - Description: https://www.audi-electronics-venture.de/aev/web/en/driving-dataset.html
+ - Description: http://a2d2.audi
ARN: arn:aws:s3:::aev-autonomous-driving-dataset
Region: eu-central-1
Type: S3 Bucket
|
Sometimes gfy fails.
KeyError
'gfyItem' | @@ -121,12 +121,15 @@ def get_url(submission, mp4_instead_gif=True):
elif 'gfycat.com' in urlparse(url).netloc:
client = GfycatClient()
rname = re.findall(r'gfycat.com\/(?:detail\/)?(\w*)', url)[0]
+ try:
urls = client.query_gfy(rname)['gfyItem']
logging.warning('Gfy url!')
if mp4_instead_gif:
return TYPE_GIF, urls['mp4Url'], 'mp4'
else:
return TYPE_GIF, urls['max5mbGif'], 'gif'
+ except KeyError:
+ return TYPE_OTHER, url, None
else:
return TYPE_OTHER, url, None
|
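A minimal sketch of the failure mode the commit above guards against: indexing a response dict that is missing `gfyItem` raises `KeyError`, so the lookup is wrapped and a fallback value is returned. The response dicts below are stand-ins, not real Gfycat API payloads.

```
TYPE_GIF, TYPE_OTHER = "gif", "other"

def classify(response: dict, url: str, mp4_instead_gif: bool = True):
    try:
        urls = response["gfyItem"]  # raises KeyError when gfy "fails"
        if mp4_instead_gif:
            return TYPE_GIF, urls["mp4Url"], "mp4"
        return TYPE_GIF, urls["max5mbGif"], "gif"
    except KeyError:
        # Same fallback as the unmatched-host branch: treat it as a plain link.
        return TYPE_OTHER, url, None

if __name__ == "__main__":
    ok = {"gfyItem": {"mp4Url": "https://example.com/x.mp4", "max5mbGif": "https://example.com/x.gif"}}
    print(classify(ok, "https://gfycat.com/x"))                        # ('gif', '...x.mp4', 'mp4')
    print(classify({"error": "not found"}, "https://gfycat.com/x"))    # ('other', '...', None)
```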
Make boost compile again with emscripten 1.38.29
We had to move the "hack" of transforming the .bc files into .a files
again, this time into the packaging step. I think b2 changed how it packages the files,
so now that is possible only in the packaging step. | @@ -440,26 +440,6 @@ class BoostConan(ConanFile):
# self.run("%s --show-libraries" % b2_exe)
self.run(full_command)
- arch = self.settings.get_safe('arch')
- if arch.startswith("asm.js"):
- self._create_emscripten_libs()
-
- def _create_emscripten_libs(self):
- # Boost Build doesn't create the libraries, but it gets close,
- # leaving .bc files where the libraries would be.
- staged_libs = os.path.join(
- self.source_folder, self._boost_dir, "stage", "lib"
- )
- for bc_file in os.listdir(staged_libs):
- if bc_file.startswith("lib") and bc_file.endswith(".bc"):
- a_file = bc_file[:-3] + ".a"
- cmd = "emar q {dst} {src}".format(
- dst=os.path.join(staged_libs, a_file),
- src=os.path.join(staged_libs, bc_file),
- )
- self.output.info(cmd)
- self.run(cmd)
-
@property
def _b2_os(self):
return {"Windows": "windows",
@@ -839,6 +819,26 @@ class BoostConan(ConanFile):
if self.options.header_only:
self.copy(pattern="*", dst="include/boost", src="%s/boost" % self._boost_dir)
+ arch = self.settings.get_safe('arch')
+ if arch.startswith("asm.js"):
+ self._create_emscripten_libs()
+
+ def _create_emscripten_libs(self):
+ # Boost Build doesn't create the libraries, but it gets close,
+ # leaving .bc files where the libraries would be.
+ staged_libs = os.path.join(
+ self.package_folder, "lib"
+ )
+ for bc_file in os.listdir(staged_libs):
+ if bc_file.startswith("lib") and bc_file.endswith(".bc"):
+ a_file = bc_file[:-3] + ".a"
+ cmd = "emar q {dst} {src}".format(
+ dst=os.path.join(staged_libs, a_file),
+ src=os.path.join(staged_libs, bc_file),
+ )
+ self.output.info(cmd)
+ self.run(cmd)
+
def package_info(self):
gen_libs = [] if self.options.header_only else tools.collect_libs(self)
|
fix(doc): update gnome-settings-daemon to org.gnome.SettingsDaemon
Since Gnome 3.23.2, gnome-settings-daemon was split into helper daemons (source: https://gitlab.gnome.org/GNOME/gnome-settings-daemon/blob/master/NEWS).
The old config led to an immediate logout after login in Qtile. | @@ -42,6 +42,16 @@ This adds a new entry "Qtile GNOME" to GDM's login screen.
The custom session for gnome-session.
+For Gnome >= 3.23.2 (Ubuntu >= 17.04, Fedora >= 26, etc.)
+::
+
+ $ cat /usr/share/gnome-session/sessions/qtile.session
+ [GNOME Session]
+ Name=Qtile session
+ RequiredComponents=qtile;org.gnome.SettingsDaemon.A11ySettings;org.gnome.SettingsDaemon.Clipboard;org.gnome.SettingsDaemon.Color;org.gnome.SettingsDaemon.Datetime;org.gnome.SettingsDaemon.Housekeeping;org.gnome.SettingsDaemon.Keyboard;org.gnome.SettingsDaemon.MediaKeys;org.gnome.SettingsDaemon.Mouse;org.gnome.SettingsDaemon.Power;org.gnome.SettingsDaemon.PrintNotifications;org.gnome.SettingsDaemon.Rfkill;org.gnome.SettingsDaemon.ScreensaverProxy;org.gnome.SettingsDaemon.Sharing;org.gnome.SettingsDaemon.Smartcard;org.gnome.SettingsDaemon.Sound;org.gnome.SettingsDaemon.Wacom;org.gnome.SettingsDaemon.XSettings;
+
+Or for older Gnome versions
+
::
$ cat /usr/share/gnome-session/sessions/qtile.session
|
utils: don't assume all exceptions have .strerror
Now that we're calling the actual sensors code, there's a bug :). Not all
Python exceptions have strerror. | @@ -152,8 +152,8 @@ def catch_exception_and_warn(warning=Warning, return_on_exception=None,
try:
return_value = func(*args, **kwargs)
except excepts as err:
- logger.warning(err.strerror)
- warnings.warn(err.strerror, warning)
+ logger.warning(str(err))
+ warnings.warn(str(err), warning)
return return_value
return wrapper
return decorator
|
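A small sketch of why the commit above switches to `str(err)`: only `OSError` and its subclasses carry `.strerror`, so logging a generic exception's `.strerror` would itself raise `AttributeError`.

```
def describe(err: Exception) -> str:
    # The old code used `err.strerror`, which only exists on OSError-like errors.
    return str(err)

if __name__ == "__main__":
    os_err = FileNotFoundError(2, "No such file or directory")
    val_err = ValueError("sensor returned garbage")

    print(os_err.strerror)                   # 'No such file or directory'
    print(hasattr(val_err, "strerror"))      # False -> err.strerror would raise AttributeError
    print(describe(os_err), "|", describe(val_err))  # str() works for both
```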
BUG FIX in scenario.py: Utility object is not JSON serializable
celery workers cannot pass Python objects between tasks. Because the Utility object used to be in dfm.available_techs it was not noticed that dfm.util was not getting deleted until I posted a job to a running API instance (locally). | @@ -240,7 +240,7 @@ def setup_scenario(self, run_uuid, data, raw_post):
dfm_dict = vars(dfm) # serialize for celery
# delete python objects, which are not serializable
- for k in ['storage', 'site', 'elec_tariff', 'pvs', 'pvnms', 'load'] + dfm.available_techs:
+ for k in ['storage', 'site', 'elec_tariff', 'pvs', 'pvnms', 'load', 'util'] + dfm.available_techs:
if dfm_dict.get(k) is not None:
del dfm_dict[k]
|
Fix links in README for java client
Fix links in README for java client | @@ -71,8 +71,8 @@ ListArtifacts.Response listArtifacts(String runUuid, String path)
### Java Usage
-For a simple example see [QuickStartDriver.java](src/main/java/org/mlflow/client/samples/QuickStartDriver.java).
-For full examples of API coverage see the [tests](src/test/java/org/mlflow/client) such as [ApiClientTest.java](src/test/java/org/mlflow/client/ApiClientTest.java).
+For a simple example see [QuickStartDriver.java](src/main/java/org/mlflow/tracking/samples/QuickStartDriver.java).
+For full examples of API coverage see the [tests](src/test/java/org/mlflow/tracking) such as [MlflowClientTest.java](src/test/java/org/mlflow/tracking/MlflowClientTest.java).
```
package org.mlflow.tracking.samples;
|
yaml: Update load methods to use Text rather than str
Yaml loading accepts bytes and unicode, either directly or via IO.
For Python 3, bytes and str work fine, but for Python 2 code this is redundant and limited.
Text instead of str should make type checks more accurate. | @@ -26,14 +26,14 @@ def scan(stream, Loader=...): ...
def parse(stream, Loader=...): ...
def compose(stream, Loader=...): ...
def compose_all(stream, Loader=...): ...
-def load(stream: Union[bytes, IO[bytes], str, IO[str]], Loader=...) -> Any: ...
-def load_all(stream: Union[bytes, IO[bytes], str, IO[str]], Loader=...) -> Iterator[Any]: ...
-def full_load(stream: Union[bytes, IO[bytes], str, IO[str]]) -> Any: ...
-def full_load_all(stream: Union[bytes, IO[bytes], str, IO[str]]) -> Iterator[Any]: ...
-def safe_load(stream: Union[bytes, IO[bytes], str, IO[str]]) -> Any: ...
-def safe_load_all(stream: Union[bytes, IO[bytes], str, IO[str]]) -> Iterator[Any]: ...
-def unsafe_load(stream: Union[bytes, IO[bytes], str, IO[str]]) -> Any: ...
-def unsafe_load_all(stream: Union[bytes, IO[bytes], str, IO[str]]) -> Iterator[Any]: ...
+def load(stream: Union[bytes, IO[bytes], Text, IO[Text]], Loader=...) -> Any: ...
+def load_all(stream: Union[bytes, IO[bytes], Text, IO[Text]], Loader=...) -> Iterator[Any]: ...
+def full_load(stream: Union[bytes, IO[bytes], Text, IO[Text]]) -> Any: ...
+def full_load_all(stream: Union[bytes, IO[bytes], Text, IO[Text]]) -> Iterator[Any]: ...
+def safe_load(stream: Union[bytes, IO[bytes], Text, IO[Text]]) -> Any: ...
+def safe_load_all(stream: Union[bytes, IO[bytes], Text, IO[Text]]) -> Iterator[Any]: ...
+def unsafe_load(stream: Union[bytes, IO[bytes], Text, IO[Text]]) -> Any: ...
+def unsafe_load_all(stream: Union[bytes, IO[bytes], Text, IO[Text]]) -> Iterator[Any]: ...
def emit(events, stream=..., Dumper=..., canonical=..., indent=..., width=..., allow_unicode=..., line_break=...): ...
@overload
def serialize_all(
|
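A brief sketch of what the stub change above buys: `typing.Text` is an alias for `str` on Python 3 but stands for `unicode` in Python 2 type checking, so the union accepts decoded text on both. The `YamlStream` alias and `load_source` helper below are hypothetical, only illustrating the shape of the annotation.

```
from typing import IO, Text, Union

# Hypothetical alias mirroring the stream type used in the yaml stubs.
YamlStream = Union[bytes, IO[bytes], Text, IO[Text]]

def load_source(stream: YamlStream) -> str:
    """Normalise any accepted stream type to text (illustration only)."""
    if isinstance(stream, (bytes, bytearray)):
        return stream.decode("utf-8")
    if isinstance(stream, str):
        return stream
    data = stream.read()  # file-like: IO[bytes] or IO[Text]
    return data.decode("utf-8") if isinstance(data, bytes) else data

if __name__ == "__main__":
    print(Text is str)                  # True on Python 3
    print(load_source(b"key: value"))   # 'key: value'
    print(load_source("key: value"))    # 'key: value'
```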
osd: fix automatic prepare when auto_discover
Use `devices` variable instead of `ansible_devices`, otherwise it means
we are not using the devices which have been 'auto discovered' | docker run --net=host \
--pid=host \
--privileged=true \
- --name=ceph-osd-prepare-{{ ansible_hostname }}-{{ item.key }} \
+ --name=ceph-osd-prepare-{{ ansible_hostname }}-{{ item.split('/')[-1] }} \
-v /etc/ceph:/etc/ceph \
-v /var/lib/ceph/:/var/lib/ceph/ \
-v /dev:/dev \
-e DEBUG=verbose \
-e CLUSTER={{ cluster }} \
-e CEPH_DAEMON=OSD_CEPH_DISK_PREPARE \
- -e OSD_DEVICE=/dev/{{ item.key }} \
+ -e OSD_DEVICE={{ item }} \
{{ docker_env_args }} \
{{ ceph_osd_docker_prepare_env }} \
{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}
- with_dict: "{{ ansible_devices }}"
+ with_items: "{{ devices }}"
when:
- osd_auto_discovery
- containerized_deployment
- - ansible_devices is defined
- - item.value.removable == "0"
- - item.value.partitions|count == 0
- - item.value.holders|count == 0
+ - devices is defined
- name: manually prepare ceph "{{ osd_objectstore }}" non-containerized osd disk(s) with collocated osd data and journal
command: "ceph-disk prepare {{ ceph_disk_cli_options }} {{ item.1 }}"
|
Add interaction check to command tree
In some cases, it's desirable for our command tree to only process a
subset of incoming interactions, such as in a multi-process deployment. | @@ -971,6 +971,17 @@ class CommandTree(Generic[ClientT]):
await ctx_menu.on_error(interaction, e)
await self.on_error(interaction, ctx_menu, e)
+ async def interaction_check(self, interaction: Interaction, /) -> bool:
+ """|coro|
+
+ A global check to determine if an :class:`~discord.Interaction` should
+ be processed by the tree.
+
+ The default implementation returns True (all interactions are processed),
+ but can be overridden if custom behaviour is desired.
+ """
+ return True
+
async def call(self, interaction: Interaction) -> None:
"""|coro|
@@ -994,6 +1005,9 @@ class CommandTree(Generic[ClientT]):
AppCommandError
An error occurred while calling the command.
"""
+ if not await self.interaction_check(interaction):
+ return
+
data: ApplicationCommandInteractionData = interaction.data # type: ignore
type = data.get('type', 1)
if type != 1:
|
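A hedged sketch of how the new hook might be used, assuming discord.py's `app_commands.CommandTree` as shown in the diff; the guild allow-list below is made up for illustration.

```
import discord
from discord import app_commands

ALLOWED_GUILD_IDS = {1234567890}  # hypothetical allow-list owned by this process


class FilteredCommandTree(app_commands.CommandTree):
    async def interaction_check(self, interaction: discord.Interaction) -> bool:
        # Only process interactions from guilds this process owns;
        # returning False makes call() drop the interaction early.
        return interaction.guild_id in ALLOWED_GUILD_IDS


# Usage sketch: attach the tree to a client as usual.
# client = discord.Client(intents=discord.Intents.default())
# tree = FilteredCommandTree(client)
```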
Update tests
We want to ensure that targeting invalid minions does *not* allow us to
run any sort of shenanigans on the minions in question! This ensures
that valid commands (i.e. file.touch in this case) cannot inadvertently
be sent to minions that we don't have ACL for. | @@ -112,12 +112,14 @@ publisher_acl:
bob:
- '*1':
- test.*
+ - file.touch
external_auth:
pam:
bob:
- '*1':
- test.*
+ - file.touch
nodegroups:
second_string: "minion_*2"
@@ -139,12 +141,14 @@ publisher_acl:
bob:
- '*1':
- test.*
+ - file.touch
external_auth:
pam:
bob:
- '*1':
- test.*
+ - file.touch
"""
)
@@ -491,6 +495,23 @@ def eauth_blocked_minions(request):
yield request.param
[email protected]
+def docker_minions(
+ docker_minion,
+ docker_minion_a1,
+ docker_minion_a2,
+ docker_minion_b1,
+ docker_minion_b2,
+):
+ yield [
+ docker_minion,
+ docker_minion_a1,
+ docker_minion_a2,
+ docker_minion_b1,
+ docker_minion_b2,
+ ]
+
+
@pytest.fixture(
params=[
"test.arg good_argument",
@@ -678,14 +699,15 @@ def test_eauth_user_should_be_able_to_target_valid_minions_with_valid_command(
def test_eauth_user_should_not_be_able_to_target_invalid_minions(
- eauth_blocked_minions, docker_master
+ eauth_blocked_minions, docker_master, docker_minions
):
res = docker_master.run(
- f"salt -a pam --username bob --password '' {eauth_blocked_minions} test.arg hello -t 20 --out=json",
+ f"salt -a pam --username bob --password '' {eauth_blocked_minions} file.touch /tmp/fun.txt -t 20 --out=json",
)
- results = json_output_to_dict(res.stdout)
- assert "Authorization error occurred" in res.stdout
- assert sorted(results) == []
+ assert "Authorization error occurred." == res.data or res.data is None
+ for minion in docker_minions:
+ res = minion.run("ls /tmp/fun.txt")
+ assert "ls: /tmp/fun.txt: No such file or directory" == res.stderr.strip()
@pytest.mark.skip(reason="Not sure about blocklist")
|
[GH 3611] `YOUR_ENV_VAR` is leaking from helm defaults into user clusters
Summary:
[helm] `YOUR_ENV_VAR` is leaking from helm defaults into user clusters
Test Plan: integration
Reviewers: nate | @@ -274,8 +274,7 @@ pipelineRun:
# env:
# ENV_ONE: one
# ENV_TWO: two
- env:
- YOUR_ENV_VAR: ""
+ env: {}
####################################################################################################
# Scheduler: Configuration for the scheduler
|
tippy: Transfer subs-sort tooltip to tippyjs.
As zulip is transferring its tooltips to tippy, the
tooltips for subs sort options are transferred to
use tippy instead of title. Placement is bottom.
Refer | import $ from "jquery";
import _ from "lodash";
+import tippy from "tippy.js";
import render_subscription from "../templates/subscription.hbs";
import render_subscription_settings from "../templates/subscription_settings.hbs";
@@ -564,19 +565,19 @@ export function setup_page(callback) {
const sort_toggler = components.toggle({
values: [
{
- label_html: `<i class="fa fa-sort-alpha-asc" title="${i18n.t(
+ label_html: `<i class="fa fa-sort-alpha-asc tippy-bottom tippy-zulip-tooltip" data-tippy-content="${i18n.t(
"Sort by name",
)}"></i>`,
key: "by-stream-name",
},
{
- label_html: `<i class="fa fa-user-o" title="${i18n.t(
+ label_html: `<i class="fa fa-user-o tippy-bottom tippy-zulip-tooltip" data-tippy-content="${i18n.t(
"Sort by number of subscribers",
)}"></i>`,
key: "by-subscriber-count",
},
{
- label_html: `<i class="fa fa-bar-chart" title="${i18n.t(
+ label_html: `<i class="fa fa-bar-chart tippy-bottom tippy-zulip-tooltip" data-tippy-content="${i18n.t(
"Sort by estimated weekly traffic",
)}"></i>`,
key: "by-weekly-traffic",
@@ -589,6 +590,11 @@ export function setup_page(callback) {
});
$("#subscriptions_table .search-container").prepend(sort_toggler.get());
+ // place subs tooltips at bottom
+ tippy(".tippy-bottom", {
+ placement: "bottom",
+ });
+
// Reset our internal state to reflect that we're initially in
// the "Subscribed" tab if we're reopening "Manage streams".
subscribed_only = true;
|
Removed the "providing_args" argument - this is deprecated in Django 3.1
and was purely documentational, so no replacement is needed | @@ -10,5 +10,5 @@ run_weekly_jobs = Signal()
run_monthly_jobs = Signal()
run_yearly_jobs = Signal()
-pre_command = Signal(providing_args=["args", "kwargs"])
-post_command = Signal(providing_args=["args", "kwargs", "outcome"])
+pre_command = Signal()
+post_command = Signal()
|
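A small sketch showing why dropping `providing_args` needs no replacement, assuming Django is installed: the argument was never enforced, and receivers still get whatever keyword arguments `send()` passes.

```
from django.dispatch import Signal

pre_command = Signal()  # no providing_args: it was documentation only


def log_command(sender, **kwargs):
    # Receivers still see args/kwargs exactly as before the change.
    print("about to run", sender, kwargs.get("args"), kwargs.get("kwargs"))


pre_command.connect(log_command)

if __name__ == "__main__":
    pre_command.send(sender="my_command", args=(1, 2), kwargs={"verbose": True})
```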
Fixed errors in time-service startup with local Redis
Periods were trying to get configured before Redis is started | @@ -71,11 +71,6 @@ sed -i 's@/{S3_URL}@'$S3_URL'@g' /etc/onearth/config/layers/*/*.yaml # in case t
sed -i 's@{S3_URL}@'$S3_URL'@g' /etc/onearth/config/layers/*/*/*.yaml
sed -i 's@{S3_URL}@'$S3_URL'@g' /etc/onearth/config/layers/*/*.yaml
-# Load custom time period configurations
-for i in /etc/onearth/config/endpoint/epsg{3031,3413,4326}*.yaml; do
- python3 /usr/bin/oe_periods_configure.py -e "$i" -r $REDIS_HOST
-done
-
# Start Redis and load sample data if running locally
if [ "$REDIS_HOST" = "127.0.0.1" ]; then
echo 'Starting Redis server'
@@ -401,7 +396,17 @@ if [ "$REDIS_HOST" = "127.0.0.1" ]; then
/usr/bin/redis-cli -h $REDIS_HOST -n 0 SET epsg4326:std:layer:SSMI_Cloud_Liquid_Water_Over_Oceans_Ascending:default "2012-09-10"
/usr/bin/redis-cli -h $REDIS_HOST -n 0 SADD epsg4326:std:layer:SSMI_Cloud_Liquid_Water_Over_Oceans_Ascending:periods "2012-09-10/2018-12-31/P1D"
+ # Load custom time period configurations and generate periods
+ for i in /etc/onearth/config/endpoint/epsg{3031,3413,4326}*.yaml; do
+ python3 /usr/bin/oe_periods_configure.py -e "$i" -r $REDIS_HOST -g
+ done
+
else
+ # Load custom time period configurations
+ for i in /etc/onearth/config/endpoint/epsg{3031,3413,4326}*.yaml; do
+ python3 /usr/bin/oe_periods_configure.py -e "$i" -r $REDIS_HOST
+ done
+
# Load time periods by scraping S3 bucket
if [ "$FORCE_TIME_SCRAPE" = true ]; then
python3 /usr/bin/oe_scrape_time.py -r -b $S3_URL $REDIS_HOST >>/var/log/onearth/config.log 2>&1
|
fix Dispatcher.release_key
del bucket['key'] raises KeyError: 'key' | @@ -1367,7 +1367,7 @@ class Dispatcher(DataMixin, ContextInstanceMixin):
bucket = await self.storage.get_bucket(chat=chat_id, user=user_id)
if bucket and key in bucket:
- del bucket['key']
+ del bucket[key]
await self.storage.set_bucket(chat=chat_id, user=user_id, bucket=bucket)
return True
return False
|
Multiple slashes processing modified
Instead of deleting empty keys at the put() stage, multiple consecutive slashes are now replaced with a single slash while running parse(). | """ Config class"""
+import re
class Config:
""" Class for configs that can be represented as nested dicts with easy indexing by slashes """
@@ -141,7 +142,6 @@ class Config:
variable = variable.strip('/')
if '/' in variable:
var = variable.split('/')
- var = list(filter(('').__ne__, var)) #remove empty keys
prefix = var[:-1]
var_name = var[-1]
else:
@@ -190,6 +190,7 @@ class Config:
for key, value in items:
if isinstance(value, dict):
value = self.parse(value)
+ key = re.sub('/{2,}', '/', key) #merge multiple consecutive slashes '/' to one
self.put(key, value, new_config)
return new_config
|
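A one-liner sketch of the normalisation the commit above moves into `parse()`: collapsing runs of slashes instead of filtering out the empty keys they would otherwise produce.

```
import re

def normalise_key(key: str) -> str:
    # 'a//b///c' -> 'a/b/c'; single slashes are left untouched.
    return re.sub('/{2,}', '/', key)

if __name__ == "__main__":
    print(normalise_key('model//layout///units'))   # model/layout/units
    print('model//layout'.split('/'))               # ['model', '', 'layout'] <- the old empty-key problem
```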
Check for what DVLALetterTemplate is called with
Extends the test to make sure that the thing that builds each line of
the file is getting called with the right template, personalisation and
numeric ID. This will become more helpful as the call to the
template gets more complicated. | @@ -971,17 +971,29 @@ def test_build_dvla_file(sample_letter_template, mocker):
create_notification(template=job.template, job=job)
create_notification(template=job.template, job=job)
- mocked = mocker.patch("app.celery.tasks.s3upload")
- mocker.patch("app.celery.tasks.LetterDVLATemplate.__str__", return_value="dvla|string")
+ mocker.patch("app.celery.tasks.random.randint", return_value=999)
+ mocked_upload = mocker.patch("app.celery.tasks.s3upload")
+ mocked_letter_template = mocker.patch("app.celery.tasks.LetterDVLATemplate")
+ mocked_letter_template_instance = mocked_letter_template.return_value
+ mocked_letter_template_instance.__str__.return_value = "dvla|string"
build_dvla_file(job.id)
- file = "dvla|string\ndvla|string\n"
-
- assert mocked.called
- mocked.assert_called_once_with(filedata=file,
+ mocked_upload.assert_called_once_with(
+ filedata="dvla|string\ndvla|string\n",
region=current_app.config['AWS_REGION'],
bucket_name=current_app.config['DVLA_UPLOAD_BUCKET_NAME'],
- file_location="{}-dvla-job.text".format(job.id))
+ file_location="{}-dvla-job.text".format(job.id)
+ )
+
+ # Template
+ assert mocked_letter_template.call_args[0][0]['subject'] == 'Template subject'
+ assert mocked_letter_template.call_args[0][0]['content'] == 'Dear Sir/Madam, Hello. Yours Truly, The Government.'
+
+ # Personalisation
+ assert mocked_letter_template.call_args[0][1] is None
+
+ # Named arguments
+ assert mocked_letter_template.call_args[1]['numeric_id'] == 999
def test_build_dvla_file_retries_if_all_notifications_are_not_created(sample_letter_template, mocker):
|
MappingProjection: instantiate function for MATRIX PState instead of just setting
- Fixes | @@ -694,11 +694,17 @@ class MappingProjection(PathwayProjection_Base):
matrix = get_matrix(self._parameter_states[MATRIX].value)
initial_rate = matrix * 0.0
- self._parameter_states[MATRIX].function = AccumulatorIntegrator(owner=self._parameter_states[MATRIX],
+ # KDM 7/11/19: instead of simply setting the function, we need to reinstantiate to ensure
+ # new defaults get set properly
+ self._parameter_states[MATRIX]._instantiate_function(
+ function=AccumulatorIntegrator(
+ owner=self._parameter_states[MATRIX],
default_variable=matrix,
initializer=matrix,
# rate=initial_rate
)
+ )
+ self._parameter_states[MATRIX]._instantiate_value(context)
# # Assign ParameterState the same Log as the MappingProjection, so that its entries are accessible to Mechanisms
|
[DOCKER] Update lint to reflect the latest state
Pins mypy version. | //
// NOTE: these lines are scanned by docker/dev_common.sh. Please update the regex as needed. -->
-ci_lint = "tlcpack/ci-lint:v0.65"
+ci_lint = "tlcpack/ci-lint:v0.66"
ci_gpu = "tlcpack/ci-gpu:v0.75"
ci_cpu = "tlcpack/ci-cpu:v0.74"
ci_wasm = "tlcpack/ci-wasm:v0.71"
|
ONLY_USER works when cache is disabled
The _get_user_attempts function now checks for AXES_ONLY_USER_FAILURES,
and only includes the IP when AXES_ONLY_USER_FAILURES = False. | @@ -171,11 +171,18 @@ def _get_user_attempts(request):
)
if not attempts:
- params = {'ip_address': ip, 'trusted': False}
+ params = {'trusted': False}
+
+ if AXES_ONLY_USER_FAILURES:
+ params['username'] = username
+ elif LOCK_OUT_BY_COMBINATION_USER_AND_IP:
+ params['username'] = username
+ params['ip_address'] = ip
+ else:
+ params['ip_address'] = ip
+
if USE_USER_AGENT:
params['user_agent'] = ua
- if LOCK_OUT_BY_COMBINATION_USER_AND_IP:
- params['username'] = username
attempts = AccessAttempt.objects.filter(**params)
|
Update develop_guide.md
edit a sentence | @@ -23,7 +23,7 @@ Parameter object is the only way to pass user-define runtime parameters to the d
In order to define a usable parameter object, three steps will be needed.
a. Open a new python file, rename it as xxx_param.py where xxx stands for your module'name, putting it in folder federatedm/param/.
- The class object define it xxx_param.py should inherit the BaseParam class that define in federatedml/param/base_param.py
+ The class object defined in xxx_param.py should inherit the BaseParam class that define in federatedml/param/base_param.py
b. __init__ of your parameter class should specify all parameters that the module use.
|
improve `TestUpscaleDouble`
implement cardinality test
use `assert_close`
fix gradcheck test
- enable fast mode | @@ -200,27 +200,33 @@ class TestUpscaleDouble(BaseTester):
x = self.prepare_data(shape, device, dtype)
assert kornia.geometry.transform.upscale_double(x) is not None
- def test_exception(self, device, dtype):
+ def test_exception(self):
with pytest.raises(TypeError):
- assert kornia.geometry.transform.upscale_double(None)
+ kornia.geometry.transform.upscale_double(None)
- def test_cardinality(self, device, dtype):
- with pytest.raises(TypeError):
- img = torch.rand(10)
- assert kornia.geometry.transform.upscale_double(img)
+ @pytest.mark.parametrize("shape", ((5, 5), (2, 5, 5), (1, 2, 5, 5)))
+ def test_cardinality(self, shape, device, dtype):
+ x = self.prepare_data(shape, device, dtype)
+ actual = kornia.geometry.transform.upscale_double(x)
+
+ h, w = shape[-2:]
+ expected = (*shape[:-2], h * 2, w * 2)
+
+ assert tuple(actual.shape) == expected
@pytest.mark.jit
def test_jit(self, device, dtype):
img = self.prepare_data((1, 2, 5, 5), device, dtype)
op = kornia.geometry.transform.upscale_double
op_jit = torch.jit.script(op)
- assert_close(op(img), op_jit(img))
+ self.assert_close(op(img), op_jit(img))
@pytest.mark.grad
- def test_gradcheck(self, device, dtype):
- x = self.prepare_data((1, 2, 5, 5), device, dtype, requires_grad=True)
+ def test_gradcheck(self, device):
+ x = self.prepare_data((1, 2, 5, 5), device)
+ x = utils.tensor_to_gradcheck_var(x)
assert gradcheck(
- kornia.geometry.transform.upscale_double, (x,), rtol=5e-2, raise_exception=True, fast_mode=False
+ kornia.geometry.transform.upscale_double, (x,), rtol=5e-2, raise_exception=True, fast_mode=True
)
@pytest.mark.skip(reason="not implemented yet")
@@ -268,13 +274,13 @@ class TestUpscaleDouble(BaseTester):
elif len(shape) == 4:
expected = expected[None]
- assert_close(upscaled, expected)
- assert torch.all(upscaled == expected)
+ self.assert_close(upscaled, expected)
+
downscaled_back = upscaled[..., ::2, ::2]
- assert torch.all(x == downscaled_back)
+ self.assert_close(x, downscaled_back)
@staticmethod
- def prepare_data(shape, device, dtype, requires_grad=False):
+ def prepare_data(shape, device, dtype=torch.float32):
xm = torch.tensor(
[[0, 0, 0, 0, 0], [1, 1, 1, 1, 1], [2, 2, 2, 2, 2], [3, 3, 3, 3, 3], [4, 4, 4, 4, 4]],
device=device,
@@ -292,6 +298,5 @@ class TestUpscaleDouble(BaseTester):
else:
x[..., 0, :, :] = xm
x[..., 1, :, :] = ym
- if requires_grad:
- x.requires_grad_()
+
return x
|
[Core] fix test_object_directory_failure flakiness
There could be a race where the task finishes execution before the owner is killed. | @@ -664,12 +664,12 @@ def test_object_directory_failure(ray_start_cluster):
def task(x):
pass
+ cluster.remove_node(node_to_kill, allow_graceful=False)
tasks = []
repeat = 3
for i in range(num_nodes):
for _ in range(repeat):
tasks.append(task.options(resources={str(i): 1}).remote(obj))
- cluster.remove_node(node_to_kill, allow_graceful=False)
for t in tasks:
with pytest.raises(ray.exceptions.RayTaskError):
|
svtplay: kanaler will work again
fixes: | @@ -41,13 +41,8 @@ class Svtplay(Service, MetadataThumbMixin):
urldata = self.get_urldata()
if parse.path[:8] == "/kanaler":
- match = re.search('data-video-id="([\\w-]+)"', urldata)
-
- if not match:
- yield ServiceError("Can't find video info.")
- return
-
- _url = urljoin(URL_VIDEO_API, match.group(1))
+ ch = "ch-{}".format(parse.path[parse.path.rfind("/") + 1 :])
+ _url = urljoin(URL_VIDEO_API, ch)
res = self.http.get(_url)
try:
janson = res.json()
|
Update molecule tag in examples.rst
##### SUMMARY
Updating to the latest version, since the documentation is still using an old version. Also, quay.io/ansible/molecule:latest pulls a 2.20 image, not the new v3.
##### ISSUE TYPE
Docs Pull Request
+label: docsite_pr | @@ -21,7 +21,7 @@ follows.
-v "$(pwd)":/tmp/$(basename "${PWD}"):ro \
-v /var/run/docker.sock:/var/run/docker.sock \
-w /tmp/$(basename "${PWD}") \
- quay.io/ansible/molecule:2.20 \
+ quay.io/ansible/molecule:3.0.8 \
molecule test
.. _`quay.io`: https://quay.io/repository/ansible/molecule
|
nfs: add missing | bool filters
To address this warning:
```
[DEPRECATION WARNING]: evaluating nfs_ganesha_dev as a bare variable, this
behaviour will go away and you might need to add |bool to the expression in the
future
``` | gpgkey: "{{ ceph_stable_key }}"
baseurl: "{{ ceph_mirror }}/nfs-ganesha/rpm-{{ nfs_ganesha_stable_branch }}/{{ ceph_release }}/$basearch"
when:
- - nfs_ganesha_stable
+ - nfs_ganesha_stable | bool
- ceph_repository == 'community'
- name: red hat based systems - dev repo related tasks
group: root
backup: yes
when:
- - nfs_ganesha_dev
+ - nfs_ganesha_dev | bool
- ceph_repository == 'dev'
- name: red hat based systems - install nfs packages
state: "{{ (upgrade_ceph_packages|bool) | ternary('latest','present') }}"
register: result
until: result is succeeded
- when: nfs_obj_gw
+ when: nfs_obj_gw | bool
|
Fix zpsp to have element names as its keys
symbol gives the POTCAR label, e.g. "Ca_pv", while element gives its name, "Ca". | @@ -297,7 +297,7 @@ class Critic2Caller:
if potcar_path:
potcar = Potcar.from_file(potcar_path)
- zpsp = {p.symbol: p.zval for p in potcar}
+ zpsp = {p.element: p.zval for p in potcar}
if not zpsp:
|
Update INSTALLATION.md
Clarify Python versions. | If Python isn't already available on your system, detailed instructions by platform can be found in the [Python Setup and Usage][using python] section of the official documentation.
Real Python also offers a [very helpful guide][helpful guide] to installation on various platforms, including iOS and Android.
-Exercism tests and tooling currently supports `Python 3.8+`.
+Exercism tests and tooling currently supports `Python 3.8` (tests) and `Python 3.9` (tooling).
[helpful guide]: https://realpython.com/installing-python/
[using python]: https://docs.python.org/3/using/index.html
|
Remove NASA APOD official and unofficial
NASA APOD (official API) is already included in NASA
NASA APOD (unofficial API) is a mirror of NASA APOD | @@ -1348,8 +1348,6 @@ API | Description | Auth | HTTPS | CORS |
| [Minor Planet Center](http://www.asterank.com/mpc) | Asterank.com Information | No | No | Unknown |
| [NASA](https://api.nasa.gov) | NASA data, including imagery | No | Yes | No |
| [NASA ADS](https://ui.adsabs.harvard.edu/help/api/api-docs.html) | NASA Astrophysics Data System | `OAuth` | Yes | Yes |
-| [NASA APOD (official API)](https://api.nasa.gov/planetary/apod?api_key=DEMO_KEY) | Official API for getting APOD (Astronomy Image of the Day) images along with metadata | `apiKey` | Yes | No |
-| [NASA APOD (unofficial API)](https://apodapi.herokuapp.com/) | API for getting APOD (Astronomy Image of the Day) images along with metadata | No | Yes | No |
| [Newton](https://newton.vercel.app) | Symbolic and Arithmetic Math Calculator | No | Yes | No |
| [Noctua](https://api.noctuasky.com/api/v1/swaggerdoc/) | REST API used to access NoctuaSky features | No | Yes | Unknown |
| [Numbers](https://math.tools/api/numbers/) | Number of the day, random number, number facts and anything else you want to do with numbers | `apiKey` | Yes | No |
|
Allow for skipping custom env.d file checks
Currently if the env.d check fails and halts the upgrade, re-running
the run-upgrade.sh script doesn't pass the skip-tags flag to
Ansible. This prompts the user to set the env variable if they want
to further skip checks and then passes the skip-tags as necessary. | layout in {{ repo_root_dir }}/inventory/env.d. The difference between these files
should be carefully reviewed to understand whether the changes are still necessary
and applicable to the environment. If all the user-space env.d files are necessary,
- then please re-run this playbook with the CLI option '--skip-tags custom-envd-file-check'.
+ then please export SKIP_CUSTOM_ENVD_CHECK=true and re-run the playbook or
+ run-upgrade.sh script.
when:
- _envd_dir_contents.matched > 0
+ - not(lookup('env', 'SKIP_CUSTOM_ENVD_CHECK') | bool)
tags:
- custom-envd-file-check
|
Integrating MaskedAdagrad
Summary: Pull Request resolved:
Test Plan: unit test | @@ -519,7 +519,8 @@ class AdagradOptimizer(Optimizer):
def __init__(self, alpha=0.01, epsilon=1e-4, decay=1, policy="fixed",
sparse_dedup_aggregator=None, rowWise=False, engine='',
lars=None, output_effective_lr=False,
- output_effective_lr_and_update=False, **kwargs):
+ output_effective_lr_and_update=False,
+ mask_tensor=None, **kwargs):
super(AdagradOptimizer, self).__init__()
self.alpha = alpha
self.epsilon = epsilon
@@ -531,8 +532,14 @@ class AdagradOptimizer(Optimizer):
self.lars = lars
self.output_effective_lr = output_effective_lr
self.output_effective_lr_and_update = output_effective_lr_and_update
+ self.mask_tensor = mask_tensor
self.init_kwargs = kwargs
+ self.use_mask = False
+ if self.mask_tensor is not None:
+ assert type(mask_tensor) is np.ndarray, "mask_tensor must be a numpy array!"
+ self.use_mask = True
+
def _run(self, net, param_init_net, param_info):
param = param_info.blob
grad = param_info.grad
@@ -614,6 +621,11 @@ class AdagradOptimizer(Optimizer):
value=0.0
)
+ # Adding masked Adagrad for dense parameter for now
+ if not isinstance(grad, core.GradientSlice) and self.mask_tensor is not None:
+ # Assuming np array for the mask tensor
+ mask_blob = param_init_net.GivenTensorFill([], [str(param) + "_mask"], values=self.mask_tensor, shape=self.mask_tensor.shape)
+
self._aux_params.local.append(param_squared_sum)
if self.rowWise:
@@ -638,11 +650,24 @@ class AdagradOptimizer(Optimizer):
else:
output_args = [param, param_squared_sum]
if self.output_effective_lr_and_update:
+ assert self.use_mask is False, \
+ "MaskedAdagrad doesn't support outputting effective_lr_and_update"
output_args.append(str(param) + '_effective_lr')
output_args.append(str(param) + '_update')
elif self.output_effective_lr:
+ assert self.use_mask is False, \
+ "MaskedAdagrad doesn't support outputting effective_lr"
output_args.append(str(param) + '_effective_lr')
+ if self.use_mask:
+ net.MaskedAdagrad(
+ [param, param_squared_sum, grad, lr, mask_blob],
+ output_args,
+ epsilon=self.epsilon,
+ decay=float(self.decay),
+ engine=self.engine
+ )
+ else:
net.Adagrad(
[param, param_squared_sum, grad, lr],
output_args,
|
utilities: Add fastpath when checking if object is compatible with itself
This is a common situation in tests that assign the same object to e.g.
the default variable, and then to the variable for execution. | @@ -459,6 +459,12 @@ def iscompatible(candidate, reference=None, **kargs):
# ValueError: The truth value of an array with more than one element is ambiguous. Use a.any() or a.all()
pass
+ # If the two are the same thing, can settle it right here
+ # This is a common pattern for tests that use the same structure
+ # as default variable and variable
+ if reference is not None and candidate is reference:
+ return True
+
# If args not provided, assign to default values
# if not specified in args, use these:
# args[kwCompatibilityType] = list
|
Use default Redis port in RedisStore constructor
Summary: TSIA | @@ -25,7 +25,7 @@ namespace rendezvous {
class RedisStore : public Store {
public:
- RedisStore(const std::string& host, int port);
+ explicit RedisStore(const std::string& host, int port = 6379);
virtual ~RedisStore();
virtual void set(const std::string& key, const std::vector<char>& data)
|
Fixed docstring errors in function
* Renamed function parameters to avoid name conflicts in documentation
build | @@ -358,14 +358,14 @@ RHEL6 = "Red Hat Enterprise Linux Server release 6.5 (Santiago)"
RHEL7 = "Red Hat Enterprise Linux Server release 7.0 (Maipo)"
-def redhat_release(major, minor=""):
+def redhat_release(rel_major, rel_minor=""):
"""
Helper function to construct a redhat-release string for a specific RHEL
major and minor version. Only constructs redhat-releases for RHEL major
releases 4, 5, 6 & 7
- :param major major: RHEL major number. Accepts str, int or float (as major.minor)
- :param minor minor: RHEL minor number. Optional and accepts str or int
+ :param rel_major rel_major: RHEL major number. Accepts str, int or float (as major.minor)
+ :param rel_minor rel_minor: RHEL minor number. Optional and accepts str or int
For example, to construct a redhat-release for:
RHEL4U9: redhat_release('4.9') or (4.9) or (4, 9)
@@ -375,29 +375,29 @@ def redhat_release(major, minor=""):
Limitation with float args: (x.10) will be parsed as minor = 1
"""
- if isinstance(major, str) and '.' in major:
- major, minor = major.split('.')
- elif isinstance(major, float):
- major, minor = str(major).split('.')
- elif isinstance(major, int):
- major = str(major)
- if isinstance(minor, int):
- minor = str(minor)
-
- if major == '4':
- if minor:
- minor = "" if minor == '0' else " Update %s" % minor
- return "Red Hat Enterprise Linux AS release %s (Nahant%s)" % (major, minor)
+ if isinstance(rel_major, str) and '.' in rel_major:
+ rel_major, rel_minor = rel_major.split('.')
+ elif isinstance(rel_major, float):
+ rel_major, rel_minor = str(rel_major).split('.')
+ elif isinstance(rel_major, int):
+ rel_major = str(rel_major)
+ if isinstance(rel_minor, int):
+ rel_minor = str(rel_minor)
+
+ if rel_major == '4':
+ if rel_minor:
+ rel_minor = "" if rel_minor == '0' else " Update %s" % rel_minor
+ return "Red Hat Enterprise Linux AS release %s (Nahant%s)" % (rel_major, rel_minor)
template = "Red Hat Enterprise Linux Server release %s%s (%s)"
- if major == '5':
- if minor:
- minor = "" if minor == '0' else "." + minor
- return template % (major, minor, "Tikanga")
- elif major == '6' or major == '7':
- if not minor:
- minor = "0"
- name = "Santiago" if major == '6' else "Maipo"
- return template % (major, "." + minor, name)
+ if rel_major == '5':
+ if rel_minor:
+ rel_minor = "" if rel_minor == '0' else "." + rel_minor
+ return template % (rel_major, rel_minor, "Tikanga")
+ elif rel_major == '6' or rel_major == '7':
+ if not rel_minor:
+ rel_minor = "0"
+ name = "Santiago" if rel_major == '6' else "Maipo"
+ return template % (rel_major, "." + rel_minor, name)
else:
- raise Exception("invalid major version: %s" % major)
+ raise Exception("invalid major version: %s" % rel_major)
|
Several fixes. More informative logging at the beginning of training.
Now buckets never exceed max_batch_len and ordering is maintained when shuffle is false | @@ -12,7 +12,6 @@ import logging
from operator import itemgetter
from torch.utils.data import RandomSampler, DistributedSampler, Sampler
import numpy as np
-import math
from typing import List
from speechbrain.dataio.dataset import DynamicItemDataset
@@ -334,7 +333,7 @@ class DynamicBatchSampler(Sampler):
):
self._dataset = dataset
self._ex_lengths = {}
- ex_ids = list(self._dataset.data.keys())
+ ex_ids = self._dataset.data_ids
if lengths_list is not None:
# take length of examples from this argument and bypass length_key
@@ -376,7 +375,7 @@ class DynamicBatchSampler(Sampler):
self._drop_last = drop_last
# Calculate bucket lengths
self._bucket_lens = [
- max(1, math.ceil(max_batch_length / self._bucket_boundaries[i]))
+ max(1, int(max_batch_length / self._bucket_boundaries[i]))
for i in range(len(self._bucket_boundaries))
] + [1]
self._epoch = epoch
@@ -402,9 +401,10 @@ class DynamicBatchSampler(Sampler):
bucket_boundary = float(left_bucket_length)
while True:
bucket_boundary *= bucket_length_multiplier
- bucket_boundaries.add(bucket_boundary)
if bucket_boundary >= max_batch_length:
break
+ bucket_boundaries.add(bucket_boundary)
+
return list(sorted(bucket_boundaries))
def _generate_batches(self):
@@ -440,10 +440,15 @@ class DynamicBatchSampler(Sampler):
len(self._batches), len(self._bucket_boundaries)
)
)
- for i in range(len(self._bucket_lens)):
+ boundaries = [0] + self._bucket_boundaries.tolist()
+ for i in range(len(self._bucket_boundaries)):
logger.info(
- "DynamicBatchSampler: Bucket {} has {} examples.".format(
- i, bucket_stats[i]
+ "DynamicBatchSampler: Bucket {} with boundary {}-{} and batch_size {} has {} examples.".format(
+ i,
+ np.around(boundaries[i], 2),
+ np.around(boundaries[i + 1], 2),
+ self._bucket_lens[i],
+ bucket_stats[i],
)
)
|
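A short numeric sketch of why the commit above swaps `math.ceil` for `int` when sizing buckets: rounding up lets a bucket's batch exceed `max_batch_length`, while truncation keeps it underneath (the numbers are illustrative, not taken from the code).

```
import math

max_batch_length = 100.0
boundary = 30.0  # illustrative bucket boundary (max example length in this bucket)

batch_size_ceil = max(1, math.ceil(max_batch_length / boundary))  # 4 -> worst case 4 * 30 = 120 > 100
batch_size_int = max(1, int(max_batch_length / boundary))         # 3 -> worst case 3 * 30 =  90 <= 100

print(batch_size_ceil, batch_size_ceil * boundary)  # 4 120.0
print(batch_size_int, batch_size_int * boundary)    # 3 90.0
```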
refactor: test_ui_tools: Move reactions_view outputs into parameters.
Add expected_text and expected_attributes to test parameters, from test
body. | @@ -2736,8 +2736,9 @@ class TestMessageBox:
# FIXME This is the same parametrize as MsgInfoView:test_height_reactions
@pytest.mark.parametrize(
- "to_vary_in_each_message",
+ "to_vary_in_each_message, expected_text, expected_attributes",
[
+ case(
{
"reactions": [
{
@@ -2780,11 +2781,26 @@ class TestMessageBox:
},
"reaction_type": "unicode_emoji",
},
- ]
- }
+ ],
+ },
+ " :thumbs_up: 1 :zulip: 2 :heart: 1 ",
+ [
+ ("reaction", 15),
+ (None, 1),
+ ("reaction_mine", 11),
+ (None, 1),
+ ("reaction", 11),
+ ],
+ ),
],
)
- def test_reactions_view(self, message_fixture, to_vary_in_each_message):
+ def test_reactions_view(
+ self,
+ message_fixture,
+ to_vary_in_each_message,
+ expected_text,
+ expected_attributes,
+ ):
self.model.user_id = 1
varied_message = dict(message_fixture, **to_vary_in_each_message)
msg_box = MessageBox(varied_message, self.model, None)
@@ -2792,16 +2808,8 @@ class TestMessageBox:
reactions_view = msg_box.reactions_view(reactions)
- assert reactions_view.original_widget.text == (
- " :thumbs_up: 1 :zulip: 2 :heart: 1 "
- )
- assert reactions_view.original_widget.attrib == [
- ("reaction", 15),
- (None, 1),
- ("reaction_mine", 11),
- (None, 1),
- ("reaction", 11),
- ]
+ assert reactions_view.original_widget.text == expected_text
+ assert reactions_view.original_widget.attrib == expected_attributes
@pytest.mark.parametrize(
"message_links, expected_text, expected_attrib, expected_footlinks_width",
|
Reset circle ci cache keys.
Testing Circle CI before submitting PR. | @@ -71,27 +71,27 @@ jobs:
- restore_cache:
name: Restore /opt/conda from cache
keys:
- - v0-opt-conda-{{ checksum "~/python_version.md5" }}
+ - v11-opt-conda-{{ checksum "~/python_version.md5" }}
- restore_cache:
name: Restore virtualenv from cache
keys:
- - v0-python-venv-{{ checksum "~/python_version.md5" }}
+ - v11-python-venv-{{ checksum "~/python_version.md5" }}
- restore_cache:
name: Restore nvm and node_modules from cache
keys:
- - v1-nvm_node_modules-{{ checksum "~/package-lock.md5" }}
+ - v11-nvm_node_modules-{{ checksum "~/package-lock.md5" }}
- restore_cache:
name: Restore protobufs from cache
keys:
- - v0-protobuf-{{ checksum "~/protobuf.md5" }}
+ - v11-protobuf-{{ checksum "~/protobuf.md5" }}
- restore_cache:
name: Restore make from cache
keys:
- - v0_make.bin-{{ checksum "~/make.md5" }}
+ - v11_make.bin-{{ checksum "~/make.md5" }}
#################################################################
# Pre Make commands
@@ -111,7 +111,7 @@ jobs:
- save_cache:
name: Save make to cache
- key: v0_make.bin-{{ checksum "~/make.md5" }}
+ key: v11_make.bin-{{ checksum "~/make.md5" }}
paths:
- make.bin
@@ -203,26 +203,26 @@ jobs:
#################################################################
- save_cache:
name: Save /opt/conda to cache
- key: v0-opt-conda-{{ checksum "~/python_version.md5" }}
+ key: v11-opt-conda-{{ checksum "~/python_version.md5" }}
paths:
- /opt/conda
- save_cache:
name: Save virtualenv to cache
- key: v0-python-venv-{{ checksum "~/python_version.md5" }}
+ key: v11-python-venv-{{ checksum "~/python_version.md5" }}
paths:
- venv
- save_cache:
name: Save nvm and node_modules to cache
- key: v1-nvm_node_modules-{{ checksum "~/package-lock.md5" }}
+ key: v11-nvm_node_modules-{{ checksum "~/package-lock.md5" }}
paths:
- frontend/node_modules
- ~/.nvm
- save_cache:
name: Save generated protobufs to cache
- key: v0-protobuf-{{ checksum "~/protobuf.md5" }}
+ key: v11-protobuf-{{ checksum "~/protobuf.md5" }}
paths:
- frontend/src/protobuf.js
- lib/streamlit/protobuf
|
Add illustration of connecting via hostname, password, etc.
When using this library, I could not find a demonstration of how to connect with a hostname and password until I read the source code. | @@ -86,6 +86,8 @@ Simple consumer:
async def main(loop):
+ # Connect with the givien parameters is also valiable.
+ # aio_pika.connect_robust(host="host", login="login", password="password")
connection = await aio_pika.connect_robust(
"amqp://guest:[email protected]/", loop=loop
)
|
Escape markdown in faulty source commands
Closes | @@ -2,7 +2,7 @@ import inspect
from pathlib import Path
from typing import Optional, Tuple, Union
-from discord import Embed
+from discord import Embed, utils
from discord.ext import commands
from bot.bot import Bot
@@ -36,7 +36,7 @@ class SourceConverter(commands.Converter):
return argument.lower()
raise commands.BadArgument(
- f"Unable to convert `{argument}` to valid command{', tag,' if show_tag else ''} or Cog."
+ f"Unable to convert `{utils.escape_markdown(argument)}` to valid command{', tag,' if show_tag else ''} or Cog."
)
|
TST: pass `solver` kwarg to function `synth.is_realizable`
because `solver` is an argument given to the function
`multiple_env_actions_check`. | @@ -411,7 +411,7 @@ def multiple_env_actions_check(solver='omega'):
moore=False,
plus_one=False,
qinit='\A \E')
- r = synth.is_realizable(solver, specs, sys=sys)
+ r = synth.is_realizable(specs, sys=sys, solver=solver)
assert r
# slightly relax assumption
specs = spec.GRSpec(
@@ -419,7 +419,7 @@ def multiple_env_actions_check(solver='omega'):
moore=False,
plus_one=False,
qinit='\A \E')
- r = synth.is_realizable(solver, specs, sys=sys)
+ r = synth.is_realizable(specs, sys=sys, solver=solver)
assert not r
|
Update __init__.py
fix check for pyarrow | @@ -37,7 +37,7 @@ from .utils import *
from .utils.tqdm_utils import disable_progress_bar
-if int(pyarrow.__version__.split(".")[1]) < 16 or int(pyarrow.__version__.split(".")[0]) > 0:
+if int(pyarrow.__version__.split(".")[1]) < 16 and int(pyarrow.__version__.split(".")[0]) == 0:
raise ImportWarning(
"To use `nlp`, the module `pyarrow>=0.16.0` is required, and the current version of `pyarrow` doesn't match this condition.\n"
"If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`."
|
Update hyperspectral_tutorial.md
update to apply_mask param | @@ -207,10 +207,10 @@ Binary mask after [filtering objects by the region of interest](roi_objects.md)
# Apply the mask of the leaf to the entire datacube, and store it where the datacube is stored.
# Inputs:
- # rgb_img - RGB image data or hyperspectral image data
+ # img - RGB image data or hyperspectral image data
# mask - Binary mask image data
# mask_color - 'white' or 'black'
- spectral_array.array_data = pcv.apply_mask(rgb_img=spectral_array.array_data, mask=kept_mask, mask_color="black")
+ spectral_array.array_data = pcv.apply_mask(img=spectral_array.array_data, mask=kept_mask, mask_color="black")
```
|
Node schema: split remote and local
With this change, the schema structure is clear. The following changes
will update node classes. | @@ -587,33 +587,27 @@ class Capability(NodeSpace):
self.node_count = 1
-@dataclass_json()
+@dataclass_json(undefined=Undefined.INCLUDE)
@dataclass
-class LocalNode(TypedSchema):
- type: str = field(
- default=constants.ENVIRONMENTS_NODES_LOCAL,
- metadata=metadata(
- required=True,
- validate=validate.OneOf([constants.ENVIRONMENTS_NODES_LOCAL]),
- ),
- )
+class Node(TypedSchema):
+ type: str
+ capability: Capability = field(default_factory=Capability)
name: str = ""
is_default: bool = field(default=False)
- capability: Capability = field(default_factory=Capability)
+
+ delay_parsed: CatchAll = field(default_factory=dict) # type: ignore
@dataclass_json()
@dataclass
-class RemoteNode(TypedSchema):
- type: str = field(
- default=constants.ENVIRONMENTS_NODES_REMOTE,
- metadata=metadata(
- required=True,
- validate=validate.OneOf([constants.ENVIRONMENTS_NODES_REMOTE]),
- ),
- )
- name: str = ""
- is_default: bool = field(default=False)
+class LocalNode(Node):
+ type: str = constants.ENVIRONMENTS_NODES_LOCAL
+
+
+@dataclass_json()
+@dataclass
+class RemoteNode(Node):
+ type: str = constants.ENVIRONMENTS_NODES_REMOTE
address: str = ""
port: int = field(
default=22, metadata=metadata(validate=validate.Range(min=1, max=65535))
@@ -626,7 +620,6 @@ class RemoteNode(TypedSchema):
username: str = field(default="", metadata=metadata(required=True))
password: str = ""
private_key_file: str = ""
- capability: Capability = field(default_factory=Capability)
def __post_init__(self, *args: Any, **kwargs: Any) -> None:
add_secret(self.address)
|
Fix calls to disconnect after logout
Introduced by | @@ -606,9 +606,6 @@ class TelegramBaseClient(abc.ABC):
# You don't need to use this if you used "with client"
await client.disconnect()
"""
- if self.session is None:
- return # already logged out and disconnected
-
if self.loop.is_running():
# Disconnect may be called from an event handler, which would
# cancel itself during itself and never actually complete the
@@ -656,6 +653,9 @@ class TelegramBaseClient(abc.ABC):
connection._proxy = proxy
async def _disconnect_coro(self: 'TelegramClient'):
+ if self.session is None:
+ return # already logged out and disconnected
+
await self._disconnect()
# Also clean-up all exported senders because we're done with them
|
Add TODOs for all pending review feedback
Should be incorporated before merging to master. | @@ -23,6 +23,9 @@ class RemoteState(object):
key = self._cache_key(resource)
if key in self._cache:
return self._cache[key]
+ # TODO: This code will likely be refactored and pulled into
+ # per-resource classes so the RemoteState object doesn't need
+ # to know about every type of resource.
if isinstance(resource, models.ManagedIAMRole):
result = self._resource_exists_iam_role(resource)
elif isinstance(resource, models.LambdaFunction):
@@ -46,6 +49,8 @@ class RemoteState(object):
# type: (models.ManagedIAMRole) -> Optional[models.ManagedModel]
# We only need ManagedIAMRole support for now, but this will
# need to grow as needed.
+ # TODO: revisit adding caching. We don't need to make 2 API calls
+ # here.
if not self.resource_exists(resource):
return None
role = self._client.get_role(resource.role_name)
@@ -72,6 +77,10 @@ class PlanStage(object):
plan.extend(result)
return plan
+ # TODO: This code will likely be refactored and pulled into
+ # per-resource classes so the PlanStage object doesn't need
+ # to know about every type of resource.
+
def plan_lambdafunction(self, resource):
# type: (models.LambdaFunction) -> List[models.APICall]
role_arn = self._get_role_arn(resource.role)
@@ -98,6 +107,8 @@ class PlanStage(object):
resource=resource,
)
]
+ # TODO: Consider a smarter diff where we check if we even need
+ # to do an update() API call.
params = {
'function_name': resource.function_name,
'role_arn': resource.role.role_arn,
|
Don't crash when receiving updates prior to login
Fixes and enables | @@ -397,6 +397,9 @@ class UpdateMethods:
# Some updates require our own ID, so we must make sure
# that the event builder has offline access to it. Calling
# `get_me()` will cache it under `self._self_input_peer`.
+ #
+ # It will return `None` if we haven't logged in yet which is
+ # fine, we will just retry next time anyway.
await self.get_me(input_peer=True)
built = EventBuilderDict(self, update, others)
@@ -566,8 +569,15 @@ class EventBuilderDict:
try:
return self.__dict__[builder]
except KeyError:
+ # Updates may arrive before login (like updateLoginToken) and we
+ # won't have our self ID yet (anyway only new messages need it).
+ self_id = (
+ self.client._self_input_peer.user_id
+ if self.client._self_input_peer
+ else None
+ )
event = self.__dict__[builder] = builder.build(
- self.update, self.others, self.client._self_input_peer.user_id)
+ self.update, self.others, self_id)
if isinstance(event, EventCommon):
event.original_update = self.update
|
fix printing a node header (a kind wasn't being printed)
Summary: Pull Request resolved: | @@ -244,22 +244,20 @@ std::ostream &Node::print(std::ostream &out, size_t level,
auto* pyOp = static_cast<const ::torch::jit::PythonOp*>(this);
out << "^" << pyOp->name();
pyOp->writeScalars(out);
- } else if (print_attributes) {
- if (hasAttribute(attr::Subgraph) && groups) {
+ } else if (hasAttribute(attr::Subgraph) && groups) {
out << kind().toQualString() << "_" << groups->size();
- if (numAttributes() > 1 && kind() != prim::DifferentiableGraph) {
+ if (print_attributes && numAttributes() > 1 &&
+ kind() != prim::DifferentiableGraph) {
printAttributes(out, /*ignore_subgraph=*/true);
}
groups->push_back(this);
} else {
out << kind().toQualString();
- if (hasAttributes()) {
+ if (print_attributes && hasAttributes()) {
printAttributes(out);
}
}
- }
-
out << "(" << inputs() << ")";
if (print_scopes) {
|
Fixed i386 build
TypeError: longs are not supported for this option | @@ -383,7 +383,7 @@ class HttpCurlTimeoutLoaderTestCase(DummyAsyncHttpClientTestCase):
config = Config()
config.HTTP_LOADER_CURL_ASYNC_HTTP_CLIENT = True
config.HTTP_LOADER_CURL_LOW_SPEED_TIME = 1
- config.HTTP_LOADER_CURL_LOW_SPEED_LIMIT = 1000000000000
+ config.HTTP_LOADER_CURL_LOW_SPEED_LIMIT = 1000000000
ctx = Context(None, config, None)
loader.load(ctx, url, self.stop)
@@ -406,7 +406,7 @@ class HttpTimeoutLoaderTestCase(DummyAsyncHttpClientTestCase):
url = self.get_url('/')
config = Config()
config.HTTP_LOADER_CURL_LOW_SPEED_TIME = 1
- config.HTTP_LOADER_CURL_LOW_SPEED_LIMIT = 1000000000000
+ config.HTTP_LOADER_CURL_LOW_SPEED_LIMIT = 1000000000
ctx = Context(None, config, None)
loader.load(ctx, url, self.stop)
|
Reset filters if a new search term is searched for.
Disable filter selection when nothing to filter by. | :label="$tr('resourceType')"
:options="contentKindFilterOptions"
:inline="true"
+ :disabled="!contentKindFilterOptions.length"
class="filter"
v-model="contentKindFilterSelection"
/>
:label="$tr('channels')"
:options="channelFilterOptions"
:inline="true"
+ :disabled="!channelFilterOptions.length"
class="filter"
v-model="channelFilterSelection"
/>
return { label: this.$tr('all'), value: ALL_FILTER };
},
contentKindFilterOptions() {
+ if (this.content_kinds.length) {
const options = Object.keys(kindFilterToLabelMap)
- .filter(kind => !this.content_kinds.length || this.content_kinds.includes(kind))
+ .filter(kind => this.content_kinds.includes(kind))
.map(kind => ({
label: this.$tr(kindFilterToLabelMap[kind]),
value: kind,
}));
return [this.allFilter, ...options];
+ }
+ return [];
},
channelFilterOptions() {
+ if (this.channel_ids.length) {
const options = this.channels
.filter(channel => this.channel_ids.includes(channel.id))
.map(channel => ({
value: channel.id,
}));
return [this.allFilter, ...options];
+ }
+ return [];
},
filterUpdate() {
return (
this.searchQuery = val || '';
},
filterUpdate() {
- this.search();
+ this.search(true);
},
},
beforeMount() {
this.searchQuery = '';
}
},
- search() {
+ search(filterUpdate = false) {
if (this.searchQuery !== '') {
const query = {
searchTerm: this.searchQuery,
};
+ if (filterUpdate === true) {
if (this.contentKindFilterSelection.value) {
query.kind = this.contentKindFilterSelection.value;
}
if (this.channelFilterSelection.value) {
query.channel_id = this.channelFilterSelection.value;
}
+ }
this.$router.push({
name: PageNames.SEARCH,
query,
|
[bugfix] Remove test_bluwiki test_T235768_failure
Neither site exists (any longer). Remove this test.
"""Test detection of MediaWiki sites for en.wikifur.com."""
self.assertSite('https://en.wikifur.com/wiki/$1')
- def test_bluwiki(self):
- """Test detection of MediaWiki sites for bluwiki.com."""
- self.assertSite('http://bluwiki.com/go/$1')
-
class NonStandardVersionSiteTestCase(SiteDetectionTestCase):
|
Fix bug causing default flag argument to fail.
`",".join` fails when passed a list of integers | @@ -115,7 +115,8 @@ def evaluate_task_on_model(task: str, model: str):
container_cmd.append(json_file)
if FLAGS.json_shots:
- container_cmd.append(f"--json_shots={','.join(FLAGS.json_shots)}")
+ json_shots = [str(shot) for shot in FLAGS.json_shots]
+ container_cmd.append(f"--json_shots={','.join(json_shots)}")
if FLAGS.max_examples:
container_cmd.append(f"--max_examples={FLAGS.max_examples}")
|
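A minimal, hypothetical sketch of the failure mode this commit fixes (flag values made up): `str.join` only accepts strings, so a list of integer shot counts raises `TypeError` until each element is converted explicitly.

```python
# ",".join() fails on integers; converting each shot count to str first
# produces the expected --json_shots flag.
json_shots = [0, 1, 5]  # hypothetical FLAGS.json_shots values

try:
    arg = f"--json_shots={','.join(json_shots)}"  # TypeError: expected str instance, int found
except TypeError:
    arg = f"--json_shots={','.join(str(shot) for shot in json_shots)}"

print(arg)  # --json_shots=0,1,5
```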
Fix, add Missing Scipy v1.6.0 Implicit Imports to ImplicitImports Plugin
* Adding missing implicit imports used in scipy v1.6.0
Not sure from which version of scipy this is needed, but on my machine it's version v1.6.0 | @@ -770,8 +770,12 @@ class NuitkaPluginPopularImplicitImports(NuitkaPluginBase):
yield "scipy.sparse.csgraph._validation"
elif full_name == "scipy._lib":
yield "scipy._lib.messagestream"
+ elif full_name == "scipy.spatial":
+ yield "scipy.spatial.transform"
+ elif full_name == "scipy.spatial.transform":
+ yield "scipy.spatial.transform._rotation_groups"
- # scipy imports -------------------------------------------------------
+ # statsmodels imports -------------------------------------------------------
elif full_name == "statsmodels.nonparametric":
yield "statsmodels.nonparametric.linbin"
yield "statsmodels.nonparametric._smoothers_lowess"
|
enhancement: add a utility function for memoization
Add a utility function for memoization; it will be used later, for example to cache lists of processor class instances.
import collections
+import functools
import glob
import itertools
import os.path
@@ -448,4 +449,30 @@ def filter_options(keys, options):
"""
return dict((k, options[k]) for k in keys if k in options)
+
+def memoize(fnc):
+ """memoization function.
+
+ >>> import random
+ >>> imax = 100
+ >>> def fnc1(arg=True):
+ ... return arg and random.choice((True, False))
+ >>> fnc2 = memoize(fnc1)
+ >>> (ret1, ret2) = (fnc1(), fnc2())
+ >>> assert any(fnc1() != ret1 for i in range(imax))
+ >>> assert all(fnc2() == ret2 for i in range(imax))
+ """
+ cache = dict()
+
+ @functools.wraps(fnc)
+ def wrapped(*args, **kwargs):
+ """Decorated one"""
+ key = repr(args) + repr(kwargs)
+ if key not in cache:
+ cache[key] = fnc(*args, **kwargs)
+
+ return cache[key]
+
+ return wrapped
+
# vim:sw=4:ts=4:et:
|
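A hedged usage sketch of the memoization pattern above, applied to the kind of expensive listing the message mentions. The helper is repeated here (without `functools.wraps`) so the sketch runs standalone; `list_processors` and its contents are made up.

```python
import time

def memoize(fnc):  # same idea as the helper in the diff, repeated for self-containment
    cache = {}
    def wrapped(*args, **kwargs):
        key = repr(args) + repr(kwargs)
        if key not in cache:
            cache[key] = fnc(*args, **kwargs)
        return cache[key]
    return wrapped

@memoize
def list_processors(prefix="backend"):
    time.sleep(0.1)  # stand-in for expensive discovery of processor classes
    return [f"{prefix}.{name}" for name in ("ini", "json", "yaml")]

assert list_processors() is list_processors()          # second call hits the cache
assert list_processors("other") != list_processors()   # different args -> separate cache entry
```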
changing simple_least_costs to simple_dispatch
The simple_least_costs example is not executable in the latest oemof version, so I suggest changing it to simple_dispatch or similar.
.. code:: console
- oemof_examples simple_least_costs
- oemof_examples simple_least_costs -s glpk
+ oemof_examples simple_dispatch
+ oemof_examples simple_dispatch -s glpk
If you want to run solph examples you need to have a solver installed (recommended: cbc), see the ":ref:`linux_solver_label`" or ":ref:`windows_solver_label`" section. To get more information about the solph examples see the ":ref:`solph_examples_label`" section.
|
docs: Note need to log out and in again on push notifs setup.
This often surprises people, so mention it up front.
(Also it'd probably be good to add some code to make this step
unnecessary.) | @@ -32,6 +32,10 @@ follows:
Note that if you installed Zulip older than 1.6, you'll need to add
the line (it won't be there to uncomment).
+4. If you or your users have already set up the Zulip mobile app,
+ you'll each need to log out and log back in again in order to start
+ getting push notifications.
+
That should be all you need to do!
If you'd like to verify the full pipeline, you can do the following.
|
fix return type of tfds.load in doc
Was referring to `tfds.data.Dataset` instead of `tf.data.Dataset`. | @@ -291,7 +291,7 @@ def load(
Returns:
ds: `tf.data.Dataset`, the dataset requested, or if `split` is None, a
- `dict<key: tfds.Split, value: tfds.data.Dataset>`. If `batch_size=-1`,
+ `dict<key: tfds.Split, value: tf.data.Dataset>`. If `batch_size=-1`,
these will be full datasets as `tf.Tensor`s.
ds_info: `tfds.core.DatasetInfo`, if `with_info` is True, then `tfds.load`
will return a tuple `(ds, ds_info)` containing dataset information
|
Allow running manage.py without arguments
This is useful for listing all available management commands and it is
how Django's bash completion script works | @@ -22,7 +22,7 @@ SITE_NAME = basename(SETTINGS_ROOT)
SITE_ID = 1
# Useful flag for special-casing shell operations
-SHELL = sys.argv[1] in ["shell", "dbshell"]
+SHELL = len(sys.argv) > 1 and sys.argv[1] in ["shell", "dbshell"]
# Add our project to our pythonpath, this way we don't need to type our project
# name in our dotted import paths:
|
Worked in suggested edits for PR
see | @@ -3,15 +3,10 @@ Messaging Basics
--------------
-.. |Main menu icon| image:: ../../images/sidebar_main_menu_icon.png
- :width: 33
- :alt: Main menu
-
**Write messages** using the text input box at the bottom of the screen.
Press ENTER to send a message. Use SHIFT+ENTER to create a new
-line without sending a message. To swap send/new line keyboard behavior
-change the settings under:
-|Main menu icon| > "Account Settings" > "Advanced" > "Send messages on CTRL+ENTER".
+line without sending a message. To send messages on CTRL+ENTER and use ENTER to insert new lines go to **Main Menu** > **Account Settings** > **Advanced** > **Send messages on CTRL+ENTER**.
+
**Reply to messages** by clicking the reply arrow next to the message
text.
|
Remove mox from nova/tests/unit/consoleauth/test_consoleauth.py
Partially-Implements: blueprint remove-mox-pike | @@ -19,7 +19,6 @@ Tests for Consoleauth Code.
"""
import mock
-from mox3 import mox
from oslo_utils import timeutils
import six
@@ -32,6 +31,8 @@ from nova import test
class ConsoleauthTestCase(test.NoDBTestCase):
"""Test Case for consoleauth."""
+ rpcapi = 'nova.compute.rpcapi.ComputeAPI.'
+
def setUp(self):
super(ConsoleauthTestCase, self).setUp()
self.manager_api = self.manager = manager.ConsoleAuthManager()
@@ -79,11 +80,11 @@ class ConsoleauthTestCase(test.NoDBTestCase):
self.instance_uuid)
def _stub_validate_console_port(self, result):
- def fake_validate_console_port(ctxt, instance, port, console_type):
+ def fake_validate_console_port(self, ctxt, instance,
+ port, console_type):
return result
- self.stubs.Set(self.manager.compute_rpcapi,
- 'validate_console_port',
+ self.stub_out(self.rpcapi + 'validate_console_port',
fake_validate_console_port)
@mock.patch('nova.objects.instance.Instance.get_by_uuid')
@@ -193,12 +194,11 @@ class ControlauthMemcacheEncodingTestCase(test.NoDBTestCase):
[mock.call(b'instance', mock.ANY)])
def test_check_token_encoding(self):
- self.mox.StubOutWithMock(self.manager.mc, "get")
- self.manager.mc.get(mox.IsA(six.binary_type)).AndReturn(None)
-
- self.mox.ReplayAll()
-
+ with mock.patch.object(self.manager.mc,
+ "get",
+ return_value=None) as mock_get:
self.manager.check_token(self.context, self.u_token)
+ mock_get.assert_called_once_with(test.MatchType(six.binary_type))
def test_delete_tokens_for_instance_encoding(self):
with test.nested(
@@ -225,16 +225,9 @@ class ControlauthMemcacheEncodingTestCase(test.NoDBTestCase):
class CellsConsoleauthTestCase(ConsoleauthTestCase):
"""Test Case for consoleauth w/ cells enabled."""
+ rpcapi = 'nova.cells.rpcapi.CellsAPI.'
+
def setUp(self):
super(CellsConsoleauthTestCase, self).setUp()
self.flags(enable=True, group='cells')
self.is_cells = True
-
- def _stub_validate_console_port(self, result):
- def fake_validate_console_port(ctxt, instance_uuid, console_port,
- console_type):
- return result
-
- self.stubs.Set(self.manager.cells_rpcapi,
- 'validate_console_port',
- fake_validate_console_port)
|
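A small sketch of the mox-to-mock translation pattern used in the diff above: the record/replay dance becomes a context-managed `mock.patch.object` that both stubs the call and lets the test assert on how it was invoked. `Service`/`fetch` are placeholder names, not the real nova classes.

```python
from unittest import mock

class Service:
    def fetch(self, key):
        raise RuntimeError("would hit the network")

def check_token(service, token):
    return service.fetch(token) is not None

service = Service()
with mock.patch.object(service, "fetch", return_value=None) as mock_fetch:
    assert check_token(service, b"tok") is False
    mock_fetch.assert_called_once_with(b"tok")
```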
fix args not being passed
In function coherence_function_g2 | @@ -457,6 +457,8 @@ def coherence_function_g2(H, state0, taulist, c_ops, a_op, solver="me", args={},
`me` or `mc`.
a_op : Qobj
operator A.
+ args : dict
+ Dictionary of arguments to be passed to solver.
solver : str
choice of solver (`me` for master-equation and
`es` for exponential series).
@@ -478,7 +480,7 @@ def coherence_function_g2(H, state0, taulist, c_ops, a_op, solver="me", args={},
state0 = steadystate(H, c_ops)
n = np.array([expect(state0, a_op.dag() * a_op)])
else:
- n = mesolve(H, state0, taulist, c_ops, [a_op.dag() * a_op]).expect[0]
+ n = mesolve(H, state0, taulist, c_ops, [a_op.dag() * a_op], args=args).expect[0]
# calculate the correlation function G2 and normalize with n to obtain g2
G2 = correlation_3op_1t(H, state0, taulist, c_ops,
|
Fix autodiff of nll_loss
Summary: Pull Request resolved: | @@ -129,7 +129,7 @@ bool isDifferentiable(Node* n) {
if (n->matches(
"aten::nll_loss(Tensor self, Tensor target, Tensor? weight, int reduction, int ignore_index) -> Tensor")) {
// TODO(asuhan): support weight
- return n->namedInput(attr::weight)->node()->kind() == prim::Undefined;
+ return n->namedInput(attr::weight)->node()->kind() == prim::None;
}
// linear blocks may appear as inputs to graph executors, but they are removed
@@ -717,7 +717,7 @@ class GradientHelper {
"aten::nll_loss(Tensor self, Tensor target, Tensor? weight, int reduction, int ignore_index) -> Tensor")) {
auto graph = node->owningGraph();
auto total_weight = graph->insertNode(graph->createUndefined());
- auto weight = graph->insertNode(graph->createUndefined());
+ auto weight = graph->insertNode(graph->createNone(TensorType::get()));
auto backward_value = graph->insert(
aten::nll_loss_backward,
{grads.at(0).value(),
|
Don't print newline when decrypting
This is just used by humans at the moment so we haven't cared about the
whitespace that much. I've updated a runbook where I tell people they
can "recreate" a secret using this as the stdin. We need to not print
the newline in that case. | @@ -201,10 +201,12 @@ def paasta_secret(args):
print_paasta_helper(secret_path, args.secret_name, args.shared)
elif args.action == "decrypt":
- print(decrypt_secret(
+ print(
+ decrypt_secret(
secret_provider=secret_provider,
secret_name=args.secret_name,
- ))
+ ), end='',
+ )
else:
print("Unknown action")
sys.exit(1)
|
Update ua.txt
Covering this case more reliably. | @@ -1871,11 +1871,12 @@ jndi:iiop
# Reference: https://twitter.com/BillDemirkapi/status/1470055644740923398
# Reference: https://twitter.com/VessOnSecurity/status/1470373438363734026
# Reference: https://twitter.com/gwillem/status/1470395476570746885
+# Reference: https://twitter.com/11xuxx/status/1471236310299906050
# Reference: https://github.com/tangxiaofeng7/CVE-2021-44228-Apache-Log4j-Rce
# Reference: https://github.com/Puliczek/CVE-2021-44228-PoC-log4j-bypass-words
# Reference: https://github.com/SigmaHQ/sigma/blob/master/rules/web/web_cve_2021_44228_log4j_fields.yml
-(?i)\$({|%7B)(:|%3A){1,2}-\w+|(?i)\$({|%7B)\$({|%7B)(:|%3A){1,2}-?|(?i)\$({|%7B)\$({|%7B)\w+(:|%3A)|(?i)\$({|%7B)\w+(:|%3A)|(?i)7Bjndi(:|%3A)?
+(?i)\$({|%7B)(:|%3A){1,2}-\w+|(?i)\$({|%7B)\$({|%7B)(:|%3A){1,2}-?|(?i)\$({|%7B)\w+\$({|%7B)\w+(:|%3A)|(?i)\$({|%7B)\w+(:|%3A)|(?i)7Bjndi(:|%3A)?
# Reference: https://twitter.com/bad_packets/status/1470639403546472449
|
Use GetModelConstraints from ModelConfigFacade
previously it was ClientFacade.GetModelConstraints | @@ -2012,7 +2012,7 @@ class Model:
:returns: A ``dict`` of constraints.
"""
constraints = {}
- client_facade = client.ClientFacade.from_connection(self.connection())
+ client_facade = client.ModelConfigFacade.from_connection(self.connection())
result = await client_facade.GetModelConstraints()
# GetModelConstraints returns GetConstraintsResults which has a
|
hyperparams -> hyperparameters
Thanks | @@ -25,12 +25,12 @@ def _prepare_study_with_trials(no_trials=False, less_than_two=False, with_c_d=Tr
Args:
no_trials: If ``False``, create a study with no trials.
- less_than_two: If ``True``, create a study with two/four hyperparams where
+ less_than_two: If ``True``, create a study with two/four hyperparameters where
'param_a' (and 'param_c') appear(s) only once while 'param_b' (and 'param_b')
appear(s) twice in `study.trials`.
- with_c_d: If ``True``, the study has four hyperparams named 'param_a',
+ with_c_d: If ``True``, the study has four hyperparameters named 'param_a',
'param_b', 'param_c', and 'param_d'. Otherwise, there are only two
- hyperparams ('param_a' and 'param_b').
+ hyperparameters ('param_a' and 'param_b').
Returns:
:class:`~optuna.study.Study`
|
Fix issue labeler
It didn't work yet, this is why | @@ -11,5 +11,5 @@ jobs:
with:
repo-token: "${{ secrets.GITHUB_TOKEN }}"
configuration-path: .github/labeler.yml
- not-before: 2022-08-07T00:00:00Z
+ include-title: 1
enable-versioned-regex: 0
|
Fix rename of H7 RM0455 OCTOSPI peripheral
Previous modification did not work as this peripheral is derivedFrom OCTOSPI in
the original SVD | @@ -12,8 +12,6 @@ _modify:
name: FDCAN2
DAC:
name: DAC1
- OCTOSPI1_CONTROL_REGISTER:
- name: OCTOSPI1
# The SVD is just quite different to the RM for all these registers.
# We'll go with the RM convention even though it is inconsistent too.
@@ -313,6 +311,8 @@ AXI:
# Work around the DMA_STR? interrupt mess in the SVD.
# Some interrupts are on DMA2 instead on DMA1 and/or called DMA_STR? without
# the numeral.
+#
+# Since it is not possible to modify a derivedFrom peripheral, we delete it first
_delete:
- DMA2
@@ -322,6 +322,7 @@ _delete:
- UART8
- USART9
- USART10
+ - OCTOSPI1_CONTROL_REGISTER
_add:
DMA2:
@@ -394,6 +395,13 @@ _add:
USART10:
description: USART10 global interrupt
value: 141
+ OCTOSPI1:
+ derivedFrom: OCTOSPI2
+ baseAddress: 0x52005000
+ interrupts:
+ OCTOSPI1:
+ description: OCTOSPI global interrupt
+ value: 92
DAC2:
derivedFrom: DAC1
baseAddress: 0x58003400
|
AC: fix a divide-by-zero warning in AverageMeter
We're dividing by `increment`, so that's what we should be checking. This was
probably a copy-paste error from `evaluate`. | @@ -41,7 +41,7 @@ class AverageMeter:
loss = float(loss)
else:
loss = loss.astype(float)
- return np.divide(loss, increment, out=np.zeros_like(loss), where=self.total_count != 0)
+ return np.divide(loss, increment, out=np.zeros_like(loss), where=increment != 0)
def evaluate(self):
if self.total_count is None:
|
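A minimal NumPy sketch of the guard in question: `where=` has to test the actual divisor (`increment`), otherwise positions where `increment == 0` still trigger the divide-by-zero warning even if `total_count` happens to be non-zero there. Values are made up.

```python
import numpy as np

loss = np.array([4.0, 9.0, 0.0])
increment = np.array([2.0, 3.0, 0.0])

# Entries with a zero divisor are skipped and keep the 0.0 from `out`.
avg = np.divide(loss, increment, out=np.zeros_like(loss), where=increment != 0)
print(avg)  # [2. 3. 0.]
```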
Add `TRANSITIONING` to possible values in `current_transport_state`
In a loop that async calls get_current_transport_info and prints it during a change from stopped to playing a uri, I noticed that Sonos can also return `TRANSITIONING` as a possible transport state. | @@ -1246,7 +1246,7 @@ class SoCo(_SocoSingletonBase):
Returns:
A dictionary containing the following information about the speakers
playing state
- current_transport_state (PLAYING, PAUSED_PLAYBACK, STOPPED),
+ current_transport_state (PLAYING, TRANSITIONING, PAUSED_PLAYBACK, STOPPED),
current_trasnport_status (OK, ?), current_speed(1,?)
This allows us to know if speaker is playing or not. Don't know other
|
Fix tx receipt `status`
It can be `None` or missing for old transactions | @@ -128,7 +128,7 @@ class EthereumTxManager(models.Manager):
if ethereum_tx.block is None:
ethereum_tx.block = EthereumBlock.objects.get_or_create_from_block(block, current_block_number=current_block_number)
ethereum_tx.gas_used = tx_receipt['gasUsed']
- ethereum_tx.status = tx_receipt['status']
+ ethereum_tx.status = tx_receipt.get('status')
ethereum_tx.transaction_index = tx_receipt['transactionIndex']
ethereum_tx.save(update_fields=['block', 'gas_used', 'status', 'transaction_index'])
ethereum_txs.append(ethereum_tx)
@@ -146,7 +146,7 @@ class EthereumTxManager(models.Manager):
tx_receipt = ethereum_client.get_transaction_receipt(tx_hash)
ethereum_tx.block = EthereumBlock.objects.get_or_create_from_block_number(tx_receipt['blockNumber'])
ethereum_tx.gas_used = tx_receipt['gasUsed']
- ethereum_tx.status = tx_receipt['status']
+ ethereum_tx.status = tx_receipt.get('status')
ethereum_tx.transaction_index = tx_receipt['transactionIndex']
ethereum_tx.save(update_fields=['block', 'gas_used', 'status', 'transaction_index'])
return ethereum_tx
@@ -165,7 +165,7 @@ class EthereumTxManager(models.Manager):
gas=tx['gas'],
gas_price=tx['gasPrice'],
gas_used=tx_receipt and tx_receipt['gasUsed'],
- status=tx_receipt and tx_receipt['status'],
+ status=tx_receipt and tx_receipt.get('status'),
transaction_index=tx_receipt and tx_receipt['transactionIndex'],
data=HexBytes(tx.get('data') or tx.get('input')),
nonce=tx['nonce'],
@@ -180,7 +180,7 @@ class EthereumTx(TimeStampedModel):
related_name='txs') # If mined
tx_hash = Sha3HashField(unique=True, primary_key=True)
gas_used = Uint256Field(null=True, default=None) # If mined
- status = models.IntegerField(null=True, default=None) # If mined
+ status = models.IntegerField(null=True, default=None) # If mined. Old txs don't have `status`
transaction_index = models.PositiveIntegerField(null=True, default=None) # If mined
_from = EthereumAddressField(null=True, db_index=True)
gas = Uint256Field()
|
[internal] Add output to confirm immutable input race condition.
[ci skip-build-wheels] | @@ -44,6 +44,15 @@ impl ImmutableInputs {
let digest_str = digest.hash.to_hex();
let path = self.workdir.path().join(digest_str);
+ if let Ok(meta) = tokio::fs::metadata(&path).await {
+ // TODO: If this error triggers, it indicates that we have previously checked out this
+ // directory, either due to a race condition, or due to a previous failure to
+ // materialize. See https://github.com/pantsbuild/pants/issues/13899
+ return Err(format!(
+ "Destination for immutable digest already exists: {:?}",
+ meta
+ ));
+ }
self
.store
.materialize_directory(path.clone(), digest, Permissions::ReadOnly)
|
CVE number was assigned
As stated. | id: wp-plugin-marmoset-viewer-xss
info:
- name: Wordpress Plugin Marmoset Viewer XSS
+ name: Wordpress Plugin Marmoset Viewer XSS [CVE-2021-24495]
author: johnjhacking
severity: medium
tags: wordpress,xss
|
urplay: sometimes it adds the country code several times
This happens when you download all subtitles.
label = stream["tt"]["language"]
if stream["tt"]["scope"] != "complete":
label = "{}-{}".format(label, stream["tt"]["scope"])
- yield subtitle(copy.copy(self.config), "tt", stream["tt"]["location"], label, output=self.output)
+ yield subtitle(copy.copy(self.config), "tt", stream["tt"]["location"], label, output=copy.copy(self.output))
def find_all_episodes(self, config):
episodes = []
|
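A sketch of the aliasing bug class behind this fix (`SubtitleStub` is a placeholder, not the real svtplay-dl class): when every subtitle shares the same mutable `output` dict, appending the language code for one of them mutates all of them; a shallow copy per subtitle keeps the filenames independent.

```python
import copy

class SubtitleStub:
    def __init__(self, output):
        self.output = output

shared = {"title": "episode-1"}
subs = [SubtitleStub(shared) for _ in ("sv", "en")]
subs[0].output["title"] += "-sv"
print(subs[1].output["title"])  # episode-1-sv  <- country code leaked into the other subtitle

output = {"title": "episode-1"}
subs = [SubtitleStub(copy.copy(output)) for _ in ("sv", "en")]
subs[0].output["title"] += "-sv"
print(subs[1].output["title"])  # episode-1     <- unaffected
```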
[query/service] retry entire partition when we encounter transient errors in compiled code
Ideally, a stream would be able to recover from a transient error by
seeking, but until we have that functionality, this avoids having
one failure out of 5000 (which I have now seen twice).
Example: | @@ -138,7 +138,9 @@ object Worker {
var result: Array[Byte] = null
var userError: HailException = null
try {
+ retryTransientErrors {
result = f(context, htc, theHailClassLoader, fs)
+ }
} catch {
case err: HailException => userError = err
}
|
Typo fix in text sentiment tutorial
Trivial typo fix in docs | @@ -101,7 +101,7 @@ label_pipeline = lambda x: int(x) - 1
#
# Before sending to the model, ``collate_fn`` function works on a batch of samples generated from ``DataLoader``. The input to ``collate_fn`` is a batch of data with the batch size in ``DataLoader``, and ``collate_fn`` processes them according to the data processing pipelines declared previously. Pay attention here and make sure that ``collate_fn`` is declared as a top level def. This ensures that the function is available in each worker.
#
-# In this example, the text entries in the original data batch input are packed into a list and concatenated as a single tensor for the input of ``nn.EmbeddingBag``. The offset is a tensor of delimiters to represent the beginning index of the individual sequence in the text tensor. Label is a tensor saving the labels of indidividual text entries.
+# In this example, the text entries in the original data batch input are packed into a list and concatenated as a single tensor for the input of ``nn.EmbeddingBag``. The offset is a tensor of delimiters to represent the beginning index of the individual sequence in the text tensor. Label is a tensor saving the labels of individual text entries.
from torch.utils.data import DataLoader
|
StandardNodeGadget : Remove outdated LRUCache workaround
LRUCache now rethrows the previous exception when `get()` is called again for a second time. | @@ -87,16 +87,7 @@ class StandardNodeGadget::ErrorGadget : public Gadget
void addError( PlugPtr plug, const std::string &error )
{
PlugEntry &entry = m_errors[plug];
- if( entry.error.empty() || !boost::ends_with( error, "Previous attempt to get item failed." ) )
- {
- // Update the error message. Unfortunately the IECore::LRUCache at the
- // heart of Gaffer's caching does not remember the details of exceptions that
- // occurred when the cache entry is in error - instead it throws a different
- // exception saying "Previous attempt to get item failed.". We ignore these less
- // helpful messages in favour of a previous messages if one exists.
- /// \todo Improve LRUCache behaviour and remove this workaround.
entry.error = error;
- }
if( !entry.parentChangedConnection.connected() )
{
entry.parentChangedConnection = plug->parentChangedSignal().connect( boost::bind( &ErrorGadget::parentChanged, this, ::_1 ) );
|
packaging: Use cpu directory by default
Default to using the `cpu` directory since the base one is not reliable | @@ -34,7 +34,7 @@ setup_cuda() {
# First, compute version suffixes. By default, assume no version suffixes
export VERSION_SUFFIX=""
export PYTORCH_VERSION_SUFFIX=""
- export WHEEL_DIR=""
+ export WHEEL_DIR="cpu/"
# Wheel builds need suffixes (but not if they're on OS X, which never has suffix)
if [[ "$BUILD_TYPE" == "wheel" ]] && [[ "$(uname)" != Darwin ]]; then
# The default CUDA has no suffix
|
Added .explain() method to QueryBuilder.
This provides a way to access the query execution plan. | @@ -1600,6 +1600,16 @@ class QueryBuilder(ObservesEvents):
sql = grammar.compile(self._action, qmark=False).to_sql()
return sql
+ def explain(self):
+ """Explains the Query execution plan.
+
+ Returns:
+ Collection
+ """
+ sql = self.to_sql()
+ explanation = self.statement(f'EXPLAIN {sql}')
+ return explanation
+
def run_scopes(self):
for name, scope in self._global_scopes.get(self._action, {}).items():
scope(self)
|
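A self-contained illustration of what `explain()` boils down to: prefix the compiled SQL with `EXPLAIN` and run it as a raw statement. This sketch uses `sqlite3` directly rather than the ORM's QueryBuilder, the table and query are made up, and the plan format depends entirely on the database backend.

```python
import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE users (id INTEGER PRIMARY KEY, age INTEGER)")

sql = "SELECT * FROM users WHERE age > 30"              # what to_sql() would produce
plan = conn.execute(f"EXPLAIN QUERY PLAN {sql}").fetchall()
print(plan)  # e.g. [(2, 0, 0, 'SCAN users')]
```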
update data owner upload dataset notebook to use dataset url
update utils to use dataset url instead of participation number | @@ -58,20 +58,10 @@ def split_into_train_test_val_sets(data, test=0.10, val=0.10):
return data_dict
-def load_data_as_df(
- participation_number, total_participants, file_path="./MedNIST.pkl"
-):
+def load_data_as_df(file_path="./MedNIST.pkl"):
df = pd.read_pickle(file_path)
df.sort_values("patient_id", inplace=True, ignore_index=True)
- # Calculate start and end index based on your participant number
- batch_size = df.shape[0] // total_participants
- start_idx = (participation_number - 1) * batch_size
- end_idx = start_idx + batch_size
-
- # Slice the dataframe according
- df = df[start_idx:end_idx]
-
# Get label mapping
mapping = get_label_mapping()
@@ -99,11 +89,23 @@ def get_data_description(data):
return description
-def download_mednist_dataset():
- if not os.path.exists("./MedNIST.pkl"):
+def get_data_filename(dataset_url):
+ return dataset_url.split("/")[-1]
+
+
+def get_dataset_name(dataset_url):
+ filename = dataset_url.split("/")[-1]
+ return filename.split(".pkl")[0]
+
+
+def download_mednist_dataset(dataset_url):
+ filename = get_data_filename(dataset_url)
+ if not os.path.exists(f"./{filename}"):
os.system(
- 'curl -O "https://media.githubusercontent.com/media/shubham3121/datasets/main/MedNIST/MedNIST.pkl"'
+ f'curl -O "{dataset_url}"'
)
print("MedNIST is successfully downloaded.")
else:
print("MedNIST is already downloaded")
+
+ return filename
|
Fix a variable spelling error
The variable subsampling_factors in class CustomConverterMulEnc was incorrectly written as subsamping_factors, resulting in inconsistency.
"""
- def __init__(self, subsamping_factors=[1, 1], dtype=torch.float32):
+ def __init__(self, subsampling_factors=[1, 1], dtype=torch.float32):
"""Initialize the converter."""
- self.subsamping_factors = subsamping_factors
+ self.subsampling_factors = subsampling_factors
self.ignore_id = -1
self.dtype = dtype
- self.num_encs = len(subsamping_factors)
+ self.num_encs = len(subsampling_factors)
def __call__(self, batch, device=torch.device("cpu")):
"""Transform a batch and send it to a device.
@@ -348,7 +348,7 @@ class CustomConverterMulEnc(object):
ys = batch[0][-1]
# perform subsampling
- if np.sum(self.subsamping_factors) > self.num_encs:
+ if np.sum(self.subsampling_factors) > self.num_encs:
xs_list = [
[x[:: self.subsampling_factors[i], :] for x in xs_list[i]]
for i in range(self.num_encs)
|
update mpca.py docstring
Update the docstring for two additional attributes, shape_in and shape_out.
max_iter (int, optional): max number of iteration. Defaults to 1.
Attributes:
- proj_mats: a list of transposed projection matrices
- idx_order: the ordering index of projected (and vectorised) features in decreasing variance
+ proj_mats (list): a list of transposed projection matrices
+ idx_order (ndarray): the ordering index of projected (and vectorised) features in decreasing variance
+ shape_in (tuple): input tensor shapes, i.e. (P_1, P_2, ..., P_N)
+ shape_out (tuple): output tensor shapes, i.e. (i_1, i_2, ..., i_N)
Examples:
>>> import numpy as np
>>> from kale.embed.mpca import MPCA
@@ -187,6 +189,7 @@ class MPCA(BaseEstimator, TransformerMixin):
self.proj_mats = proj_mats
self.idx_order = idx_order
+ self.shape_out = shape_out
return self
|
Add UUID to batch jobs
This was somehow lost in the monorepo-ization, was merged into batch as | @@ -119,7 +119,8 @@ class Job(object):
metadata = kube.client.V1ObjectMeta(generate_name = 'job-{}-'.format(self.id),
labels = {
'app': 'batch-job',
- 'hail.is/batch-instance': instance_id
+ 'hail.is/batch-instance': instance_id,
+ 'uuid': uuid.uuid4().hex
}),
spec = pod_spec)
|
tests: Speed up transport config tests by avoiding interpreter discovery
Reduced execution time of tests/ansible/integration/transport_config/all.yml
from 11 minutes to 49 seconds. | # integration/transport_config
# Hosts with twiddled configs that need to be checked somehow.
+[transport_config:children]
+transport_config_undiscover
+tc_python_path
-# tansport()
+[transport_config_undiscover:children]
+tc_become
+tc_become_method
+tc_become_pass
+tc_become_user
+tc_password
+tc_port
+tc_remote_addr
+tc_remote_user
+tc_transport
+
+[transport_config_undiscover:vars]
+# If python interpreter path is unset, Ansible tries to connect & discover it.
+# That causes approx 10 seconds timeout per task - there's no host to connect to.
+# This optimisation should not be relied in any test.
+# Note: tc-python-path-* are intentionally not included.
+ansible_python_interpreter = python3000 # Not expected to exist
+
+[tc_transport]
tc-transport-unset
tc-transport-local ansible_connection=local
tc-transport-smart ansible_connection=smart
-# python_path()
+[tc_python_path]
tc-python-path-unset
tc-python-path-hostvar ansible_python_interpreter=/hostvar/path/to/python
tc-python-path-local-unset ansible_connection=local
tc-python-path-local-explicit ansible_connection=local ansible_python_interpreter=/a/b/c
-# remote_addr()
+[tc_remote_addr]
tc-remote-addr-unset # defaults to inventory_hostname
tc-remote-addr-explicit-ssh ansible_ssh_host=ansi.ssh.host
tc-remote-addr-explicit-host ansible_host=ansi.host
tc-remote-addr-explicit-both ansible_ssh_host=a.b.c ansible_host=b.c.d
-# password()
+[tc_password]
tc-password-unset
tc-password-explicit-ssh ansible_ssh_pass=ansi-ssh-pass
tc-password-explicit-pass ansible_password=ansi-pass
tc-password-explicit-both ansible_password=a.b.c ansible_ssh_pass=c.b.a
-# remote_user()
+[tc_remote_user]
tc-remote-user-unset # defaults to C.DEFAULT_REMOTE_USER
tc-remote-user-explicit-ssh ansible_ssh_user=ansi-ssh-user
tc-remote-user-explicit-user ansible_user=ansi-user
tc-remote-user-explicit-both ansible_user=a.b.c ansible_ssh_user=c.b.a
-# become()
+[tc_become]
tc-become-unset
tc-become-set
-# become_method()
+[tc_become_method]
tc-become-method-unset
tc-become-method-su ansible_become_method=su
-# become_user()
+[tc_become_user]
tc-become-user-unset
tc-become-user-set ansible_become_user=ansi-become-user
-# become_pass()
+[tc_become_pass]
tc-become-pass-unset
tc-become-pass-password ansible_become_password=apassword
tc-become-pass-pass ansible_become_pass=apass
tc-become-pass-both ansible_become_pass=bpass ansible_become_password=bpassword
-# port()
+[tc_port]
tc-port-unset
tc-port-explicit-port ansible_port=1234
tc-port-explicit-ssh ansible_ssh_port=4321
|
Reword somewhat confusing "top or bottom" description
Relates to | {% if stats.interesting %}
{% for measure in stats.interesting %}
{% if forloop.first %}
- <p>Over the last three months, we found that this {{ bookmark.org_type }} was in the top or bottom 10% on
+ <p>Over the last three months, we found that this {{ bookmark.org_type }} deviated a long way from the median practice on
<a href="{{ dashboard_uri }}#{{measure.id}}">{{ measure.name }}</a>:<br>
<a href="{{ dashboard_uri }}#{{measure.id}}"><img src="cid:{{ interesting_image }}"></a><br>
{% elif stats.interesting|length == 2 %}
- It was also in the top or bottom 10% on <a href="{{ dashboard_uri }}#{{measure.id}}">{{ measure.name }}</a>.
+ It was also an outlier on <a href="{{ dashboard_uri }}#{{measure.id}}">{{ measure.name }}</a>.
{% else %}
{% if forloop.counter == 2 %}
- It was also in the top or bottom 10% on:
+ It was also an outlier on:
<ul>
{% endif %}
<li><a href="{{ dashboard_uri }}#{{measure.id}}">{{ measure.name }}</a></li>
|
Clarify degree to which DDT is implemented
This commit reflects the fact that `first_derivative` is almost but
not quite a drop-in replacement for DDT. Using red for this instance
seemed too pessimistic, so I opted for the blue indication. | @@ -367,10 +367,10 @@ blue is uncertain of parity, and white is unevaluated.
<tr>
<td class="tg-implemented">DDT(S)</td>
<td class="tg-implemented">Time derivative</td>
- <td class="tg-implemented"><a href="api/generated/generated/metpy.calc.first_derivative.html">metpy.calc.first_derivative</a></td>
- <td></td>
- <td class="tg-no">No</td>
- <td></td>
+ <td class="tg-implemented"><a href="api/generated/generated/metpy.calc.first_derivative.html">metpy.calc.first_derivative</a>, but requires three time points vs. GEMPAK's two</td>
+ <td class="tg-info">Almost</td>
+ <td class="tg-yes">Yes</td>
+ <td class="tg-yes">Yes</td>
</tr>
<tr>
<td class="tg-implemented">DDX(S)</td>
|
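A hedged sketch of using `metpy.calc.first_derivative` as a DDT(S) stand-in, with arbitrary values: unlike GEMPAK's two-point DDT, it needs at least three time points, which is why the table calls it "almost" a drop-in replacement.

```python
import numpy as np
from metpy.calc import first_derivative
from metpy.units import units

temperature = np.array([280.0, 282.0, 281.5]) * units.kelvin  # three time points required
dt = 3600.0 * units.second                                    # uniform time spacing

dTdt = first_derivative(temperature, delta=dt)
print(dTdt)  # kelvin / second, same length as the input
```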
Add BaseException.__getitem__ & __getslice__
Refs python/mypy#4215
Fixes false positive
error: Value of type Exception is not indexable | @@ -874,6 +874,9 @@ class BaseException(object):
args = ... # type: Tuple[Any, ...]
message = ... # type: str
def __init__(self, *args: object, **kwargs: object) -> None: ...
+ def __getitem__(self, i: int) -> Any: ...
+ def __getslice__(self, start: int, stop: int) -> Tuple[Any, ...]: ...
+
class GeneratorExit(BaseException): ...
class KeyboardInterrupt(BaseException): ...
class SystemExit(BaseException):
|
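A Python 2-only snippet illustrating the behaviour the stub change covers: indexing an exception was legal there (it forwarded to `.args`), so without `__getitem__` in the stub mypy reported the false positive "Value of type Exception is not indexable".

```python
# Python 2 only: __getitem__/__getslice__ on exceptions forward to .args.
try:
    raise IOError(2, "No such file or directory")
except EnvironmentError as e:
    errno = e[0]        # same as e.args[0]
    first_two = e[0:2]  # __getslice__, also Python 2 only
```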
Update README_EN.md
update link for figure 3.4 | @@ -75,7 +75,7 @@ The meaning of the expression is the increase or decrease of the document $U_i$
Based on the above inference, the RankNet network structure is constructed, which is composed of several layers of hidden layers and full connected layers. As shown in the figure, the document features are used in the hidden layers, and the all connected layer is transformed by layer by layer,completing the transformation from the underlying feature space to the high-level feature space. The structure of docA and docB is symmetrical and they are input into the final RankCost layer.
-
+
Figure.3 The structure diagram of RankNet network
@@ -174,7 +174,7 @@ Replace the gradient representation in RankNet and get the ranking model called
From the above derivation we can see that the LambdaRank network structure is very similar to the RankNet structure. as the picture shows
-
+
Figure 4. Network structure of LambdaRank
|
packages/dcos-image-deps: remove enum34 dependency
That dependency only makes sense when executing Python code
in a CPython interpreter older than version 3.4. | "url": "https://pypi.python.org/packages/cf/23/ef729d6ef3a9d19732d480eaaf94a72799a99a38ed25eda10f8e68ffd408/azure_storage-0.32.0-py3-none-any.whl",
"sha1": "3cc425a291fe6359f6d786aa040059004082795d"
},
- "enum34": {
- "kind": "url",
- "url": "https://pypi.python.org/packages/af/42/cb9355df32c69b553e72a2e28daee25d1611d2c0d9c272aa1d34204205b2/enum34-1.1.6-py3-none-any.whl",
- "sha1": "d8746baffb8d5f6af9aacc4d563d202f0bb9c334"
- },
"msrest": {
"kind": "url",
"url": "https://pypi.python.org/packages/f5/b6/176a2109be5354bbcb31bf52e32ed91b1fc398f1a30ed12db0b429c64928/msrest-0.4.0-py3-none-any.whl",
|
Fix frontend entrypoint
it's either bash:
```
[[ ! -f "/certs/cert.crt" || ! -f "/certs/key.key" ]]
```
or shell:
```
[ ! -f "/certs/cert.crt" ] || [ ! -f "/certs/key.key" ]
```
but not a combination of both. | @@ -6,7 +6,7 @@ then
exit 0
fi
-if [ ! -f "/certs/cert.crt" || ! -f "/certs/key.key" ]; then
+if [ ! -f "/certs/cert.crt" ] || [ ! -f "/certs/key.key" ]; then
echo "No certificates found. Generating self signed"
openssl req -x509 -nodes -days 365 -newkey rsa:2048 -keyout /certs/key.key -out /certs/cert.crt -subj "/C=US/ST=Oregon/L=Portland/O=Company Name/OU=Org/CN=$RCONWEB_EXTERNAL_ADDRESS"
fi
|
Update README.md
add Checklist in list of attack recipes | @@ -109,6 +109,8 @@ Attacks on classification tasks, like sentiment classification and entailment:
- **alzantot**: Genetic algorithm attack from (["Generating Natural Language Adversarial Examples" (Alzantot et al., 2018)](https://arxiv.org/abs/1804.07998)).
- **bae**: BERT masked language model transformation attack from (["BAE: BERT-based Adversarial Examples for Text Classification" (Garg & Ramakrishnan, 2019)](https://arxiv.org/abs/2004.01970)).
- **bert-attack**: BERT masked language model transformation attack with subword replacements (["BERT-ATTACK: Adversarial Attack Against BERT Using BERT" (Li et al., 2020)](https://arxiv.org/abs/2004.09984)).
+- **checklist**: Invariance testing implemented in CheckList that contract, extend, and substitues name entities. (["Beyond Accuracy: Behavioral
+ Testing of NLP models with CheckList" (Ribeiro et al., 2020)](https://arxiv.org/abs/2005.04118)).
- **faster-alzantot**: modified, faster version of the Alzantot et al. genetic algorithm, from (["Certified Robustness to Adversarial Word Substitutions" (Jia et al., 2019)](https://arxiv.org/abs/1909.00986)).
- **deepwordbug**: Greedy replace-1 scoring and multi-transformation character-swap attack (["Black-box Generation of Adversarial Text Sequences to Evade Deep Learning Classifiers" (Gao et al., 2018)](https://arxiv.org/abs/1801.04354)).
- **hotflip**: Beam search and gradient-based word swap (["HotFlip: White-Box Adversarial Examples for Text Classification" (Ebrahimi et al., 2017)](https://arxiv.org/abs/1712.06751)).
|
RegexBlock error_message typo in docs
In the documentation an example used the singular `error_message` instead of `error_messages`.
.. code-block:: python
- blocks.RegexBlock(regex=r'^[0-9]{3}$', error_message={
+ blocks.RegexBlock(regex=r'^[0-9]{3}$', error_messages={
'invalid': "Not a valid library card number."
})
|
Support 'applications' key in bundles
Fixes | @@ -1658,8 +1658,9 @@ class BundleHandler(object):
apps, args = [], []
default_series = bundle.get('series')
+ apps_dict = bundle.get('applications', bundle.get('services', {}))
for app_name in self.applications:
- app_dict = bundle['services'][app_name]
+ app_dict = apps_dict[app_name]
charm_dir = os.path.abspath(os.path.expanduser(app_dict['charm']))
if not os.path.isdir(charm_dir):
continue
@@ -1688,7 +1689,7 @@ class BundleHandler(object):
], loop=self.model.loop)
# Update the 'charm:' entry for each app with the new 'local:' url.
for app_name, charm_url in zip(apps, charm_urls):
- bundle['services'][app_name]['charm'] = charm_url
+ apps_dict[app_name]['charm'] = charm_url
return bundle
@@ -1714,7 +1715,9 @@ class BundleHandler(object):
@property
def applications(self):
- return list(self.bundle['services'].keys())
+ apps_dict = self.bundle.get('applications',
+ self.bundle.get('services', {}))
+ return list(apps_dict.keys())
def resolve(self, reference):
if reference and reference.startswith('$'):
|
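A minimal sketch of the fallback lookup the handler now performs: new-style bundles use an `applications` key, older ones `services`, and both are accepted. The bundle contents are made up.

```python
new_style = {"applications": {"mysql": {"charm": "./mysql", "num_units": 1}}}
old_style = {"services": {"mysql": {"charm": "cs:mysql-58", "num_units": 1}}}

for bundle in (new_style, old_style):
    # Prefer "applications", fall back to the legacy "services" key.
    apps_dict = bundle.get("applications", bundle.get("services", {}))
    print(list(apps_dict))  # ['mysql'] in both cases
```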
Suppress stderr for expected errors
Messages caused confusion for some users | @@ -77,7 +77,7 @@ function setup_target_org() {
if [ -z "$CF_ORG" ]; then
CF_ORG={{ context.name }}-org
fi
- if ! $CF org $CF_ORG >/dev/null; then
+ if ! $CF org $CF_ORG >/dev/null 2>/dev/null; then
cf create-org $CF_ORG
ignore_failure=`$CF set-quota $CF_ORG runaway`
fi
@@ -88,7 +88,7 @@ function setup_target_space() {
if [ -z "$CF_SPACE" ]; then
CF_SPACE={{ context.name }}-space
fi
- if ! $CF space $CF_SPACE >/dev/null; then
+ if ! $CF space $CF_SPACE >/dev/null 2>/dev/null; then
cf create-space $CF_SPACE
apply_open_security_group "$APPLY_OPEN_SECURITY_GROUP"
fi
@@ -116,7 +116,7 @@ function apply_open_security_group() {
if ! is_true "$1"; then
return
fi
- if ! $CF security-group all_open >/dev/null; then
+ if ! $CF security-group all_open >/dev/null 2>/dev/null; then
cf create-security-group all_open ${PACKAGE_PATH}/cf_cli/all_open.json
fi
cf bind-security-group all_open "$CF_ORG" "$CF_SPACE"
|
Updated vae readme
fix broken link with regards to sigma vae samples | @@ -82,7 +82,7 @@ python run.py -config ./configs/vanilla_vae.yaml
[5]: https://github.com/probml/pyprobml/blob/master/scripts/vae/assets/info_vae_recon.png
[6]: https://github.com/probml/pyprobml/blob/master/scripts/vae/assets/logcosh_vae_recon.png
[7]: https://github.com/probml/pyprobml/blob/master/scripts/vae/assets/two_stage_vae_recon.png
-[8]: https://github.com/AntixK/PyTorch-VAE/blob/master/assets/sigma_vae_recon.png
+[8]: https://github.com/probml/pyprobml/blob/master/scripts/vae/assets/sigma_vae_recon.png
[9]: https://github.com/probml/pyprobml/blob/master/scripts/vae/assets/vq_vae_recon.png
[10]: https://github.com/probml/pyprobml/blob/master/scripts/vae/assets/vanilla_vae_samples.png
[11]: https://github.com/probml/pyprobml/blob/master/scripts/vae/assets/hinge_vae_samples.png
|