message | diff
---|---|
Fix Eltex.MES platform
HG--
branch : feature/microservices | @@ -48,7 +48,9 @@ class Script(BaseScript):
"42": "MES-1024",
"43": "MES-2124",
"52": "MES-1124",
- "54": "MES-5248"
+ "54": "MES-5248",
+ "59": "MES-2124P",
+ "81": "MES-3324F"
}
def execute(self):
|
Fix generating error message
The exception was always thrown.
Woe to you, oh strict evaluation... | @@ -171,13 +171,15 @@ def deserialize_energy_system(cls, path,
return instance
data['buses'] = {
- name: create(typemap.get(bus.get('type', 'bus'),
- raisestatement(
+ name: create(mapping if mapping
+ else raisestatement(
ValueError,
- "Typemap is missing a mapping for 'bus'.")),
+ "Typemap is missing a mapping for '{}'."
+ .format(bus.get('type', 'bus'))),
{'label': name},
bus['parameters'])
- for name, bus in sorted(data['buses'].items())}
+ for name, bus in sorted(data['buses'].items())
+ for mapping in (typemap.get(bus.get('type', 'bus')),)}
data['components'] = {
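The fix relies on a Python idiom: a trailing `for name in (expr,)` clause binds the result of `expr` to a name inside the comprehension, so the raising helper is only called when the lookup actually misses instead of being evaluated eagerly as a default argument. A minimal, self-contained sketch of that idiom (the names below are illustrative, not the project's code):

```python
def raise_value_error(message):
    # Stand-in for the project's raisestatement() helper.
    raise ValueError(message)

typemap = {"bus": dict, "line": dict}
buses = {"b0": {"type": "bus"}, "b1": {"type": "line"}}

# The one-element tuple binds `mapping` once per bus; the ValueError is raised
# only if that lookup came back empty, never up front.
created = {
    name: (mapping if mapping
           else raise_value_error(
               "Typemap is missing a mapping for '{}'.".format(bus.get("type", "bus"))))
    for name, bus in sorted(buses.items())
    for mapping in (typemap.get(bus.get("type", "bus")),)
}
print(created)  # {'b0': <class 'dict'>, 'b1': <class 'dict'>}
```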
|
Corrected user-wide config paths for *NIX
Tested config in `~/.config/manim/manim.cfg` and `~/config/manim/manim.cfg` on Debian.
The config in `config` doesn't work, while the one in `.config` does. | @@ -213,8 +213,8 @@ The user-wide config file lives in a special folder, depending on the operating
system.
* Windows: :code:`UserDirectory`/AppData/Roaming/Manim/manim.cfg
-* MacOS: :code:`UserDirectory`/config/manim/manim.cfg
-* Linux: :code:`UserDirectory`/config/manim/manim.cfg
+* MacOS: :code:`UserDirectory`/.config/manim/manim.cfg
+* Linux: :code:`UserDirectory`/.config/manim/manim.cfg
Here, :code:`UserDirectory` is the user's home folder.
|
Fix example in the documentation
Plain functions, not coroutine functions, should be passed to the executor. | @@ -20,7 +20,7 @@ a separate thread via the ``run_in_executor`` function.
async def io_background_task():
...
- async def cpu_background_task():
+ def cpu_background_task():
...
@app.route('/jobs/', methods=['POST'])
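As a general illustration of the rule this doc fix encodes (a hedged sketch, not taken from the project's documentation): `run_in_executor` expects a plain callable, while coroutine functions are awaited on the event loop directly.

```python
import asyncio

def cpu_background_task():
    # Plain function: safe to hand to an executor thread.
    return sum(i * i for i in range(10_000))

async def io_background_task():
    # Coroutine function: awaited on the event loop, never passed to the executor.
    await asyncio.sleep(0.1)
    return "io done"

async def main():
    loop = asyncio.get_running_loop()
    cpu_result = await loop.run_in_executor(None, cpu_background_task)
    io_result = await io_background_task()
    print(cpu_result, io_result)

asyncio.run(main())
```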
|
Permanently disable systemd-logind.service
We stop it immediately and also use masking to prevent starting at the
next reboot.
Resolves | owner: root
group: root
mode: 0644
+
+- name: Disable systemd-logind permanently by masking
+ systemd:
+ name: systemd-logind.service
+ state: stopped
+ masked: yes
|
Update noaa-goes.yaml
update name | -Name: "NOAA Geostationary Operational Environmental Satellites (GOES) 16 & 17"
+Name: "NOAA Geostationary Operational Environmental Satellites (GOES) 16, 17 & 18"
Description: |
NEW GOES-18 Data!!! GOES-18 is now provisional and data has begun streaming. Data files will be available between Provisional and the Operational Declaration of the satellite; however, data will have the caveat GOES-18 Preliminary, Non-Operational Data. The exception is during the interleave period when ABI Radiances and Cloud and Moisture Imagery data will be shared operationally via the NOAA Open Data Dissemination Program.
<br/>
|
Change polar region message to warning
Due to feedback at the sprint review meeting I have changed the polar region message to a warning so it is more obvious to the user | @@ -89,7 +89,7 @@ class ReferenceGrid(object):
outsideSouthPolar = southPoly.disjoint(inputFeature.projectAs(sr))
if(not outsideNorthPolar or not outsideSouthPolar):
- arcpy.AddMessage("The GRG extent is within a polar region." +
+ arcpy.AddWarning("The GRG extent is within a polar region." +
" Cells that fall within the polar region will not be created.")
out_features = out_features.value
|
fix: Throw actual exception instead of ValidationError
to make tests pass | @@ -43,11 +43,17 @@ def run_server_script_for_doc_event(doc, event):
for script_name in scripts:
try:
frappe.get_doc('Server Script', script_name).execute_doc(doc)
- except Exception:
+ except Exception as e:
message = frappe._('Error executing Server Script {0}. Open Browser Console to see traceback.').format(
frappe.utils.get_link_to_form('Server Script', script_name)
)
- frappe.throw(title=frappe._('Server Script Error'), msg=message)
+ exception = type(e)
+ if getattr(frappe, 'request', None):
+ # all exceptions throw 500 which is internal server error
+ # however server script error is a user error
+ # so we should throw 417 which is expectation failed
+ exception.http_status_code = 417
+ frappe.throw(title=frappe._('Server Script Error'), msg=message, exc=exception)
def get_server_script_map():
# fetch cached server script methods
|
Windows: Prevent scons from scanning MSVC installations when in MinGW
mode
* This also avoids warnings about it not being installed. | @@ -534,8 +534,13 @@ def createEnvironment(tools):
if mingw_mode:
- # Force usage of MinGW.
+ # Force usage of MinGW, disable MSVC tools.
compiler_tools = ["mingw"]
+
+ import SCons.Tool.MSCommon.vc # pylint: disable=import-error
+
+ SCons.Tool.MSCommon.vc.msvc_setup_env = lambda *args: None
+
else:
# Everything else should use default.
compiler_tools = ["default"]
|
fix configs order
don't override user's yaml config with .bzt-rc | @@ -168,7 +168,8 @@ class CLI(object):
if self.options.no_system_configs is None:
self.options.no_system_configs = False
- if not self.options.no_system_configs:
+ load_hidden_configs = not self.options.no_system_configs
+ if load_hidden_configs:
bzt_rc = os.path.expanduser(os.path.join('~', ".bzt-rc"))
if os.path.exists(bzt_rc):
self.log.debug("Using personal config: %s" % bzt_rc)
@@ -177,7 +178,7 @@ class CLI(object):
self.log.info("No personal config found, creating one at %s", bzt_rc)
shutil.copy(os.path.join(RESOURCES_DIR, 'base-bzt-rc.yml'), bzt_rc)
- configs += [bzt_rc]
+ configs.insert(0, bzt_rc)
self.log.info("Starting with configs: %s", configs)
merged_config = self.engine.configure(configs, not self.options.no_system_configs)
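A toy illustration of why the rc file is inserted at position 0 rather than appended (the merge function below is a stand-in, not the real `engine.configure`): with a left-to-right merge, later configs win, so the user's YAML keeps precedence over the `.bzt-rc` defaults.

```python
def merge(configs):
    merged = {}
    for cfg in configs:          # later entries override earlier ones
        merged.update(cfg)
    return merged

bzt_rc = {"reporting": "console", "verbose": False}   # hidden personal defaults
user_yaml = {"verbose": True}                         # config passed on the CLI

configs = [user_yaml]
configs.insert(0, bzt_rc)        # rc first, user config last -> user value wins
print(merge(configs))            # {'reporting': 'console', 'verbose': True}
```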
|
[auth] missing ssl_mode
SQLConfig requires an `ssl_mode`; without it, auth cannot create Developer accounts.
I already deployed this. | @@ -228,7 +228,8 @@ GRANT ALL ON `{name}`.* TO '{name}'@'%';
db=self.name,
ssl_ca='/sql-config/server-ca.pem',
ssl_cert='/sql-config/client-cert.pem',
- ssl_key='/sql-config/client-key.pem')
+ ssl_key='/sql-config/client-key.pem',
+ ssl_mode='VERIFY_CA')
return create_secret_data_from_config(
config, server_ca, client_cert, client_key)
|
Update changelog with custom tracker change
Changelog updated with change for adding event_broker param to custom
tracker store | @@ -39,6 +39,7 @@ Changed
Deserialisation of pickled trackers will be deprecated in version 2.0. For now,
trackers are still loaded from pickle but will be dumped as json in any subsequent
save operations.
+- In custom tracker store instantiation added ``event_broker``.
Removed
-------
|
Improve list VM reliability
wait about 5 minutes, and add more logs | @@ -895,12 +895,16 @@ class AzurePlatform(Platform):
return errors
# the VM may not be queried after deployed. use retry to mitigate it.
- @retry(tries=60, delay=1) # type: ignore
+ @retry(exceptions=LisaException, tries=150, delay=2) # type: ignore
def _load_vms(
self, environment: Environment, log: Logger
) -> Dict[str, VirtualMachine]:
compute_client = get_compute_client(self)
environment_context = get_environment_context(environment=environment)
+ log.debug(
+ f"listing vm in resource group "
+ f"'{environment_context.resource_group_name}'"
+ )
vms_map: Dict[str, VirtualMachine] = dict()
vms = compute_client.virtual_machines.list(
environment_context.resource_group_name
@@ -910,8 +914,8 @@ class AzurePlatform(Platform):
vms_map[vm.name] = vm
if not vms_map:
raise LisaException(
- f"cannot find vm in resource group "
- f"{environment_context.resource_group_name}"
+ f"deployment succeeded, but VM not found in 5 minutes "
+ f"from '{environment_context.resource_group_name}'"
)
return vms_map
|
Add link to audio examples
llvmlite error is unrelated to change, so will submit anyway. | ## Onsets and Frames: Dual-Objective Piano Transcription
For model details, see our paper on arXiv:
-[Onsets and Frames: Dual-Objective Piano Transcription](https://arxiv.org/abs/1710.11153)
+[Onsets and Frames: Dual-Objective Piano Transcription](https://arxiv.org/abs/1710.11153). You can also listen to the [Audio Examples](http://download.magenta.tensorflow.org/models/onsets_frames_transcription/index.html) described in the paper.
## Colab Notebook
|
[runtime_env] Fix ray_constants import in release test
Fixes a typo in an import statement that caused the runtime_env_wheel_urls release test to fail. Unfortunately this test checks wheels that are autobuilt from commits on master, so there isn't a convenient way to test it before merging it to master. | @@ -21,7 +21,7 @@ import time
import requests
import pprint
-import ray._private.runtime_env.constants as ray_constants
+import ray._private.ray_constants as ray_constants
from ray._private.utils import get_master_wheel_url, get_release_wheel_url
|
docker: Update deploy to use Docker Python image
Similar to the last commit, apply this for the deploy image | -FROM centos:centos7
+FROM python:2.7.14-slim-stretch
# need to compile swig
ENV SWIG_FEATURES="-D__x86_64__"
@@ -8,33 +8,40 @@ ENV SWIG_FEATURES="-D__x86_64__"
ENV OLYMPIA_UID=9500
RUN useradd -u ${OLYMPIA_UID} -s /sbin/nologin olympia
-ADD docker/git.gpg.key /etc/pki/rpm-gpg/RPM-GPG-KEY-git
-ADD docker/epel.gpg.key /etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL-7
-ADD docker/nodesource.gpg.key /etc/pki/rpm-gpg/RPM-GPG-KEY-nodesource
+# Add nodesource repository and requirements
+ADD docker/nodesource.gpg.key /etc/pki/gpg/GPG-KEY-nodesource
+RUN apt-get update && apt-get install -y \
+ gnupg2 \
+ && rm -rf /var/lib/apt/lists/*
+# ADD docker/nodesource.repo /etc/yum.repos.d/nodesource.repo
+RUN cat /etc/pki/gpg/GPG-KEY-nodesource | apt-key add -
+ADD docker/debian-stretch-nodesource-repo /etc/apt/sources.list.d/nodesource.list
-ADD docker/epel.repo /etc/yum.repos.d/
-ADD docker/git.repo /etc/yum.repos.d/git.repo
-ADD docker/nodesource.repo /etc/yum.repos.d/nodesource.repo
-
-RUN yum install -y \
- gcc-c++ \
- gettext \
+RUN apt-get update && apt-get install -y \
+ # General (dev-) dependencies
+ bash-completion \
+ build-essential \
+ curl \
+ libjpeg-dev \
+ libsasl2-dev \
+ libxml2-dev \
+ libxslt-dev \
+ locales \
+ zlib1g-dev \
+ libffi-dev \
+ libssl-dev \
+ python-dev \
+ python-pip \
+ nodejs \
+ npm \
# Git, because we're using git-checkout dependencies
git \
- libffi-devel \
- libxml2-devel \
- libxslt-devel \
- make \
- mariadb \
- mariadb-devel \
- nodejs \
- openssl-devel \
- python-devel \
+ # Dependencies for mysql-python
+ mysql-client \
+ default-libmysqlclient-dev \
swig \
- uwsgi-2.0.13.1-2.el7 \
- uwsgi-plugin-python \
- && yum clean all \
- && curl -sSL https://bootstrap.pypa.io/get-pip.py | python
+ gettext \
+ && rm -rf /var/lib/apt/lists/*
# Compile required locale
RUN localedef -i en_US -f UTF-8 en_US.UTF-8
@@ -55,7 +62,7 @@ RUN pip install --no-cache-dir --exists-action=w --no-deps -r requirements/syste
&& pip install --no-cache-dir --exists-action=w --no-deps -r requirements/prod_without_hash.txt\
&& pip install --no-cache-dir --exists-action=w --no-deps -e .
-RUN echo -e "from olympia.lib.settings_base import *\n\
+RUN echo "from olympia.lib.settings_base import *\n\
STYLUS_BIN = 'node_modules/stylus/bin/stylus'\n\
LESS_BIN = 'node_modules/less/bin/lessc'\n\
CLEANCSS_BIN = 'node_modules/clean-css-cli/bin/cleancss'\n\
|
Hungary (HU) Capacity Update
* Hungary (HU) Capacity Update
also added Geothermal and Unknown data
* Updated README
* Error Fix | ]
],
"capacity": {
- "biomass": 274,
+ "biomass": 305,
"coal": 1049,
- "gas": 4114,
- "hydro": 57,
+ "gas": 4028,
+ "geothermal": 3,
+ "hydro": 58,
"hydro storage": 0,
- "nuclear": 1887,
- "oil": 410,
- "solar": 225,
- "wind": 329
+ "nuclear": 1899,
+ "oil": 421,
+ "solar": 944,
+ "wind": 327,
+ "unknown": 74
},
"contributors": [
- "https://github.com/corradio"
+ "https://github.com/corradio",
+ "https://github.com/nessie2013"
],
"parsers": {
"consumption": "ENTSOE.fetch_consumption",
|
docs: Update Facebook scopes allowed without an app review
According to the Facebook [permissions page](https://developers.facebook.com/docs/facebook-login/permissions#reference-user_friends), the `user_friends` scope now requires an app review. | @@ -533,7 +533,7 @@ The following Facebook settings are available:
'facebook': {
'METHOD': 'oauth2',
'SDK_URL': '//connect.facebook.net/{locale}/sdk.js',
- 'SCOPE': ['email', 'public_profile', 'user_friends'],
+ 'SCOPE': ['email', 'public_profile'],
'AUTH_PARAMS': {'auth_type': 'reauthenticate'},
'INIT_PARAMS': {'cookie': True},
'FIELDS': [
@@ -570,7 +570,7 @@ SDK_URL:
SCOPE:
By default, the ``email`` scope is required depending on whether or not
``SOCIALACCOUNT_QUERY_EMAIL`` is enabled.
- Apps using permissions beyond ``email``, ``public_profile`` and ``user_friends``
+ Apps using permissions beyond ``email`` and ``public_profile``
require review by Facebook.
See `Permissions with Facebook Login <https://developers.facebook.com/docs/facebook-login/permissions>`_
for more information.
|
Remove commented out code from factories mock change
[skip ci] | @@ -585,14 +585,7 @@ class PreprintFactory(DjangoModelFactory):
preprint.save()
if license_details:
preprint.set_preprint_license(license_details, auth=auth)
- # create_identifier_patcher = mock.patch("website.identifiers.client.EzidClient.create_identifier")
- # mock_create_identifier = create_identifier_patcher.start()
- # domain = get_top_level_domain(preprint)
- # mock_create_identifier.return_value = {
- # 'success': '{doi}{domain}/{guid} | {ark}{domain}/{guid}'.format(
- # doi=settings.DOI_NAMESPACE, domain=domain, ark=settings.ARK_NAMESPACE, guid=preprint._id
- # )
- # }
+
if is_published:
create_task_patcher = mock.patch('website.preprints.tasks.get_and_set_preprint_identifiers.s')
mock_create_identifier = create_task_patcher.start()
|
Update vasp_check.py
New line | @@ -13,6 +13,7 @@ class VASPCheck(rfm.RunOnlyRegressionTest):
self.valid_prog_environs = ['cpeIntel']
else:
self.valid_prog_environs = ['builtin']
+
self.modules = ['VASP']
force = sn.extractsingle(r'1 F=\s+(?P<result>\S+)',
self.stdout, 'result', float)
|
Fix tags in collectd
ansible-playbook -i hosts install/collectd.yml --tags="undercloud"
ansible-playbook -i hosts install/collectd.yml --tags="controller"
ansible-playbook -i hosts install/collectd.yml --tags="compute" | - hosts: undercloud
roles:
- { role: osp_version }
+ tags: undercloud, controller, compute
tasks:
- name: set fact collectd_container
set_fact:
collectd_container: "{{ (rhosp_major|int > 14)| ternary(true, false) }}"
+ tags: undercloud, controller, compute
|
Fix error in layout for index.py.
Change 'app_github_url' keyword to 'app_name', to match the changes in 'app_wrapper'. | @@ -94,11 +94,6 @@ def demo_app_header_colors(name):
return {}
-def demo_app_github_url(name):
- """ Returns the link with the code for the demo app. """
- return name
-
-
def demo_app_link_id(name):
"""Returns the value of the id of the dcc.Link related to the demo app. """
return 'app-link-id-{}'.format(name.replace("_", "-"))
@@ -145,7 +140,7 @@ def display_app(pathname):
children=app_page_layout(
apps[app_name].layout(),
app_title=demo_app_name(app_name),
- app_github_url=demo_app_github_url(app_name),
+ app_name=app_name,
**demo_app_header_colors(app_name)
))
else:
|
Removes type information checks
This is untested and breaks with single channel images. | @@ -93,22 +93,6 @@ def _validate_tifffile(
if Image.COLOR_SPACE_COMPONENTS[color_space] != tif_color_channels:
raise ValidationError("Image contains invalid amount of channels.")
- # Checks type information
- try:
- if str(tags["SampleFormat"].value[0]) == "IEEEFP":
- if tags["BitsPerSample"].value[0] != 32:
- raise ValidationError(
- "Image data type has an invalid byte size"
- )
-
- elif str(tags["SampleFormat"].value[0]) == "UINT":
- if tags["BitsPerSample"].value[0] not in (8, 16, 32):
- raise ValidationError(
- "Image data type has an invalid byte size"
- )
- except KeyError:
- raise ValidationError("Image lacks sample information")
-
try:
image_width = tags["ImageWidth"].value
image_height = tags["ImageLength"].value
|
Volatile input keys should also consider non-Variable arguments
Additionally, check Variable argument sizes | @@ -166,10 +166,14 @@ class Traceable(object):
Traceable._next_trace_id += 1
def get_input_key(self, args):
- if any(arg.volatile if isinstance(arg, Variable) else False for arg in args):
- return self.VOLATILE
- return tuple(arg.requires_grad if isinstance(arg, Variable) else arg
- for arg in args)
+ is_volatile = any(arg.volatile if isinstance(arg, Variable) else False for arg in args)
+ if is_volatile:
+ def get_var_key(var):
+ return (var.size(), self.VOLATILE)
+ else:
+ def get_var_key(var):
+ return (var.size(), var.requires_grad)
+ return tuple(get_var_key(arg) if isinstance(arg, Variable) else arg for arg in args)
def get_trace_inputs(self, args, extra=()):
return tuple(itertools.chain(self._state_values(), flatten(args), extra))
@@ -191,7 +195,8 @@ class Traceable(object):
return function._unflatten(flat_out, self.proto)
- def record_trace(self, args, is_volatile=False, extra=()):
+ def record_trace(self, args, extra=()):
+ is_volatile = any(arg.volatile if isinstance(arg, Variable) else False for arg in args)
trace_inputs = self.get_trace_inputs(args, extra)
trace = torch._C._tracer_enter(trace_inputs, 0 if is_volatile else self.num_derivatives)
@@ -226,7 +231,7 @@ class Traceable(object):
return self.run_closure(trace_info.closure, args)
# Otherwise, we have to collect a new trace
- trace, out = self.record_trace(args, input_key == self.VOLATILE)
+ trace, out = self.record_trace(args)
trace_info.traces.append(trace)
return out
|
Ignore InvalidVideoListTypeError exception on playback started
when the profile is new and does not have continueWatching list data, we can ignore it | @@ -11,6 +11,7 @@ from __future__ import absolute_import, division, unicode_literals
import resources.lib.common as common
from resources.lib.common.cache_utils import CACHE_BOOKMARKS, CACHE_COMMON
+from resources.lib.common.exceptions import InvalidVideoListTypeError
from resources.lib.globals import G
from resources.lib.services.msl.msl_utils import EVENT_START, EVENT_ENGAGE, EVENT_STOP, EVENT_KEEP_ALIVE
from resources.lib.utils.logging import LOG
@@ -45,6 +46,7 @@ class AMVideoEvents(ActionManager):
def on_playback_started(self, player_state):
# Clear continue watching list data on the cache, to force loading of new data
# but only when the videoid not exists in the continue watching list
+ try:
videoid_exists, list_id = common.make_http_call('get_continuewatching_videoid_exists',
{'video_id': str(self.videoid_parent.value)})
if not videoid_exists:
@@ -53,6 +55,9 @@ class AMVideoEvents(ActionManager):
# When the continueWatching context is invalidated from a refreshListByContext call
# the LoCo need to be updated to obtain the new list id, so we delete the cache to get new data
G.CACHE.delete(CACHE_COMMON, 'loco_list')
+ except InvalidVideoListTypeError:
+ # Ignore possible "No lists with context xxx available" exception due to a new profile without data
+ pass
def on_tick(self, player_state):
if self.lock_events:
|
block bot upgrade
exclude from the block list those who liked me in the recent feed (usually 18 posts) | @@ -29,6 +29,16 @@ your_followers = False
while not your_followers:
your_followers = bot.get_user_followers(bot.user_id)
+your_likers = set()
+if bot.getSelfUserFeed():
+ media_items = [item['pk'] for item in bot.LastJson["items"]]
+ for media in media_items:
+ if bot.getMediaLikers(media):
+ media_likers = bot.LastJson["users"]
+ for item in tqdm(media_likers):
+ your_likers.add(item["username"])
+
+your_followers = list(set(your_followers) - your_likers)
random.shuffle(your_followers)
for user in tqdm(your_followers):
|
Make sure real OS tests are not run by default
code had been commented out accidentally
fixes | @@ -369,8 +369,8 @@ class RealFsTestCase(TestCase, RealFsTestMixin):
self.open = fake_filesystem.FakeFileOpen(self.filesystem)
self.os = fake_filesystem.FakeOsModule(self.filesystem)
self.create_basepath()
- # elif not os.environ.get('TEST_REAL_FS'):
- # self.skip_real_fs()
+ elif not os.environ.get('TEST_REAL_FS'):
+ self.skip_real_fs()
self.setUpFileSystem()
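A generic sketch of the env-var gate being restored (plain `unittest`, not the project's actual test harness): tests that touch the real OS are skipped unless `TEST_REAL_FS` is set.

```python
import os
import unittest

class RealFsSmokeTest(unittest.TestCase):
    def setUp(self):
        # Real-FS tests run only when explicitly requested.
        if not os.environ.get('TEST_REAL_FS'):
            self.skipTest('real-FS tests are disabled by default')

    def test_can_touch_tmp(self):
        path = os.path.join('/tmp', 'real_fs_probe')
        open(path, 'w').close()
        self.assertTrue(os.path.exists(path))
        os.remove(path)

if __name__ == '__main__':
    unittest.main()
```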
|
Change function signature to implement the algorithm in the logout handler instead of extending main get function.
Call the AWS Cognito API as described in
Use native coroutines instead
Fix auth_state is not cleared before logout | @@ -39,6 +39,7 @@ from tornado.auth import OAuth2Mixin
from tornado import gen, web
from tornado.httpclient import HTTPRequest, AsyncHTTPClient
+from tornado.httputil import url_concat
from jupyterhub.handlers import LogoutHandler
from jupyterhub.auth import LocalAuthenticator
@@ -65,22 +66,37 @@ class AWSCognitoLogoutHandler(LogoutHandler):
provider in addition to clearing the session with Jupyterhub, otherwise
only the Jupyterhub session is cleared.
"""
- @gen.coroutine
- def get(self):
+ async def handle_logout(self):
+ http_client = AsyncHTTPClient()
+
+ params = dict(
+ client_id=self.authenticator.client_id
+ )
+ url = url_concat("https://%s/logout" % AWSCOGNITO_DOMAIN, params)
+
+ headers = {
+ "Accept": "application/json",
+ "User-Agent": "JupyterHub"
+ }
+
+ req = HTTPRequest(url,
+ method="GET",
+ headers=headers,
+ validate_cert=True,
+ body=''
+ )
+
+ await http_client.fetch(req)
user = self.get_current_user()
if user:
self.clear_tokens(user)
self.clear_login_cookie()
- if self.authenticator.logout_redirect_url:
- self.redirect(self.authenticator.logout_redirect_url)
- else:
- super().get()
- @gen.coroutine
- def clear_tokens(self, user):
- state = yield user.get_auth_state()
+ async def clear_tokens(self, user):
+ state = await user.get_auth_state()
if state:
- state['tokens'] = ''
+ state['access_token'] = ''
+ state['awscognito_user'] = ''
user.save_auth_state(state)
class AWSCognitoAuthenticator(OAuthenticator):
|
Add Google analytics
Summary: See title. | @@ -27,6 +27,9 @@ const siteConfig = {
organizationName: 'pytorch',
projectName: 'botorch',
+ // Google analytics
+ gaTrackingId: 'UA-139570076-2',
+
// links that will be used in the header navigation bar
headerLinks: [
{doc: 'introduction', label: 'Docs'},
|
Remove EINTR branch from PR; PEP 475 handles it on Python 3.5+
When the PR was submitted, older versions of Python were supported. Now only Python 3.6+ is supported, so we can rely on PEP 475 | @@ -189,9 +189,7 @@ def _open_socket(addrinfo_list, sockopt, timeout):
eConnRefused = (errno.ECONNREFUSED, errno.WSAECONNREFUSED)
except:
eConnRefused = (errno.ECONNREFUSED, )
- if error.errno == errno.EINTR:
- continue
- elif error.errno in eConnRefused:
+ if error.errno in eConnRefused:
err = error
continue
else:
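For background, a hedged sketch (not the project's `_open_socket`): since PEP 475, Python 3.5+ automatically retries system calls interrupted by signals, so EINTR no longer reaches user code and only genuine failures such as a refused connection need handling.

```python
import errno
import socket

sock = socket.socket()
sock.settimeout(1)
try:
    # Interrupted calls are retried inside CPython; no explicit EINTR loop needed.
    sock.connect(("127.0.0.1", 9))
except OSError as error:
    # Only real failures (refusal, timeout, ...) surface here.
    assert error.errno != errno.EINTR
finally:
    sock.close()
```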
|
Accept epoch time offset for alarm-history
monasca-ui sends offset as epoch time in ms
to get alarm state history.
Currently this causes an error (see related story).
Story:
Task: 3928 | @@ -724,8 +724,13 @@ class MetricsRepository(metrics_repository.AbstractMetricsRepository):
raise exceptions.RepositoryException(ex)
def _build_offset_clause(self, offset):
-
if offset:
+ # offset may be given as a timestamp or as epoch time in ms
+ if str(offset).isdigit():
+ # epoch time
+ offset_clause = " and time > {}ms".format(offset)
+ else:
+ # timestamp
offset_clause = " and time > '{}'".format(offset)
else:
offset_clause = ""
|
Remove TODO; now
For | @@ -228,7 +228,6 @@ ctext = r"(?: {HTAB} | {SP} | [\x21-\x27] | [\x2A-\x5b] | \x5D-\x7E | {obs_text}
# comment = "(" *( ctext / quoted-pair / comment ) ")"
comment = r"(?: \( (?: {ctext} | {quoted_pair} )* \) ) ".format(**locals())
-# TODO: handle recursive comments - see <https://pypi.python.org/pypi/regex/>
# Via = 1#( received-protocol RWS received-by [ RWS comment ] )
|
change Patch, PatchBoundary to dataclass
This patch reimplements Patch and PatchBoundary as dataclass for simplicity. | @@ -20,6 +20,7 @@ from .elementseq import References
from .pointsseq import PointsSequence
from .sample import Sample
+from dataclasses import dataclass
from functools import reduce
from os import environ
from typing import Any, FrozenSet, Iterable, Iterator, List, Mapping, Optional, Sequence, Tuple, Union
@@ -2922,29 +2923,25 @@ class HierarchicalTopology(TransformChainsTopology):
return function.PlainBasis(hbasis_coeffs, hbasis_dofs, ndofs, self.f_index, self.f_coords)
-class PatchBoundary(types.Singleton):
+@dataclass(eq=True, frozen=True)
+class PatchBoundary:
- @types.apply_annotations
- def __init__(self, id: types.tuple[types.strictint], dim, side, reverse: types.tuple[bool], transpose: types.tuple[types.strictint]):
- super().__init__()
- self.id = id
- self.dim = dim
- self.side = side
- self.reverse = reverse
- self.transpose = transpose
+ id: Tuple[int, ...]
+ dim: int
+ side: int
+ reverse: Tuple[bool, ...]
+ transpose: Tuple[int, ...]
def apply_transform(self, array):
return array[tuple(slice(None, None, -1) if i else slice(None) for i in self.reverse)].transpose(self.transpose)
-class Patch(types.Singleton):
+@dataclass(eq=True, frozen=True)
+class Patch:
- @types.apply_annotations
- def __init__(self, topo: stricttopology, verts: types.arraydata, boundaries: types.tuple[types.strict[PatchBoundary]]):
- super().__init__()
- self.topo = topo
- self.verts = numpy.asarray(verts)
- self.boundaries = boundaries
+ topo: Topology
+ verts: types.arraydata
+ boundaries: Tuple[PatchBoundary, ...]
class MultipatchTopology(TransformChainsTopology):
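A small sketch of what `@dataclass(eq=True, frozen=True)` buys here (a toy modelled on the diff, not the project's actual `PatchBoundary`): generated `__init__`, `__eq__`, and `__hash__`, so instances compare by value and remain hashable without the hand-written boilerplate.

```python
from dataclasses import dataclass
from typing import Tuple

@dataclass(eq=True, frozen=True)
class Boundary:
    id: Tuple[int, ...]
    dim: int
    side: int

a = Boundary(id=(0, 1), dim=2, side=0)
b = Boundary(id=(0, 1), dim=2, side=0)
assert a == b                 # value-based equality from eq=True
assert hash(a) == hash(b)     # frozen=True keeps instances hashable
```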
|
Update error.py
create custom DeprecationWarning subclass to print deprecation warnings to the console with ANSI formatting | # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
+import os
class AssertionException(Exception):
@@ -31,6 +32,12 @@ class AssertionException(Exception):
return self.reported
-class DeprecationWarning(Exception):
+class UserSyncDeprecationWarning(DeprecationWarning):
def __init__(self, message):
+ super(DeprecationWarning, self).__init__(message)
self.message = message
+
+ def __str__(self):
+ # this line required for ANSI text formatting
+ os.system('')
+ return "\x1b[33;21m" + str(self.message) + "\x1b[0m"
|
keep stderr out of stdout file mirror
Summary: combining the with statements pushed stderr logs into stdout
Test Plan: Streamed spew test, saw split logs
Reviewers: #ft, alangenfeld | @@ -181,7 +181,8 @@ def mirror_step_io(step_context):
ensure_dir(os.path.dirname(outpath))
ensure_dir(os.path.dirname(errpath))
- with mirror_stream(sys.stdout, outpath), mirror_stream(sys.stderr, errpath):
+ with mirror_stream(sys.stderr, errpath):
+ with mirror_stream(sys.stdout, outpath):
yield
# touch the file to signify that compute is complete
|
DRY-up loops to kick off status aggregation tasks
This will make it easier to parallelise by service in the following
commits, since we only have one loop to change. | @@ -91,31 +91,21 @@ def create_nightly_notification_status():
yesterday = convert_utc_to_bst(datetime.utcnow()).date() - timedelta(days=1)
- # email and sms
- for i in range(4):
+ for notification_type in [SMS_TYPE, EMAIL_TYPE, LETTER_TYPE]:
+ days = 10 if notification_type == LETTER_TYPE else 4
+
+ for i in range(days):
process_day = yesterday - timedelta(days=i)
- for notification_type in [SMS_TYPE, EMAIL_TYPE]:
+
create_nightly_notification_status_for_day.apply_async(
kwargs={'process_day': process_day.isoformat(), 'notification_type': notification_type},
queue=QueueNames.REPORTING
)
current_app.logger.info(
- f"create-nightly-notification-status task: create-nightly-notification-status-for-day task created "
+ f"create-nightly-notification-status-for-day task created "
f"for type {notification_type} for {process_day}"
)
- # letters
- for i in range(10):
- process_day = yesterday - timedelta(days=i)
- create_nightly_notification_status_for_day.apply_async(
- kwargs={'process_day': process_day.isoformat(), 'notification_type': LETTER_TYPE},
- queue=QueueNames.REPORTING
- )
- current_app.logger.info(
- f"create-nightly-notification-status task: create-nightly-notification-status-for-day task created "
- f"for type letter for {process_day}"
- )
-
@notify_celery.task(name="create-nightly-notification-status-for-day")
def create_nightly_notification_status_for_day(process_day, notification_type):
|
Fix PyCharm instructions in README
Without this change, PyCharm won't refresh the file in the editor after Black runs. | @@ -658,7 +658,7 @@ $ where black
- Scope: Project Files
- Program: <install_location_from_step_2>
- Arguments: `$FilePath$`
- - Output paths to refresh: `$FilePathRelativeToProjectRoot$`
+ - Output paths to refresh: `$FilePath$`
- Working directory: `$ProjectFileDir$`
- Uncheck "Auto-save edited files to trigger the watcher"
|
CompoundEditor : Handle drags out of the tab close button
Since we added the close button, we can now end up receiving move and
release events that we have no interest in. We should just silently pass
these on. | @@ -941,6 +941,10 @@ class _TabDragBehaviour( QtCore.QObject ) :
if event.button() != QtCore.Qt.LeftButton :
return False
+ if not self.__qTabBar :
+ # We can end up here from drag interactions with the close button
+ return False
+
try :
# We only consume this event if we've been messing with events and
@@ -1006,6 +1010,10 @@ class _TabDragBehaviour( QtCore.QObject ) :
if not event.buttons() & QtCore.Qt.LeftButton :
return False
+ if not self.__qTabBar :
+ # We can end up here from drag interactions with the close button
+ return False
+
# If the user moves the mouse out of the tab bar, we need to make the
# underlying TabBar think the user let go of the mouse so it aborts
# any in-progress move of the tab (constrained to this TabBar) So we
|
Improve error message
By passing in additional parameters to the validation function users can better find where they set the invalid inheritance-break flag. | @@ -165,17 +165,19 @@ class ConfigurationCore(ABC):
pass
@staticmethod
- def validate_break_inheritance_flag(config, level):
+ def validate_break_inheritance_flag(config, section_name, parent_key=""):
for key, value in config.items():
if "inherit" == key:
+ parent_key_description = ' under key "' + parent_key + '"'
fatal(
- f"The inheritance break flag cannot be placed at the {level} level\n"
- f"because {level} level is the highest level in the configuration file.\n",
+ f'The inheritance-break flag set in "{section_name}"{parent_key_description} is invalid\n'
+ f"because it has no higher level setting to inherit from.\n",
exit_code=EXIT_INVALID_INPUT,
)
- break
elif type(value) is CommentedMap:
- ConfigurationCore.validate_break_inheritance_flag(value, level)
+ ConfigurationCore.validate_break_inheritance_flag(
+ value, section_name, key
+ )
@staticmethod
def merge_configs(more_general_config, more_specific_config) -> dict:
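A generic sketch of the recursive walk (plain dicts here; the real code recurses into ruamel.yaml `CommentedMap` nodes and calls `fatal` instead of collecting results): report the section and parent key under which an `inherit` flag was found, which is what makes the new error message locatable.

```python
def find_inherit_flags(config, section_name, parent_key=""):
    hits = []
    for key, value in config.items():
        if key == "inherit":
            hits.append((section_name, parent_key))
        elif isinstance(value, dict):
            # Carry the current key down so the report names the offending subtree.
            hits.extend(find_inherit_flags(value, section_name, key))
    return hits

config = {"credentials": {"inherit": True}, "threshold": 5}
print(find_inherit_flags(config, "defaults"))   # [('defaults', 'credentials')]
```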
|
Update to wagtail-purge==0.2.0
Changelog: | @@ -42,12 +42,12 @@ mistune==2.0.3
more-itertools==8.12.0
phonenumberslite==8.12.39
Pillow==9.0.1
-psycopg2==2.8.6
+psycopg2-binary
reportlab==3.6.3
social_auth_app_django==5.0.0
tomd==0.1.3
wagtail-cache==1.0.2
-wagtail-purge==0.1
+wagtail-purge==0.2
wagtail==3.0.1
whitenoise==5.3.0
xmltodict==0.12.0
|
map popup menu dashboard link fixed
HG--
branch : feature/microservices | @@ -898,9 +898,12 @@ Ext.define("NOC.inv.map.MapPanel", {
},
onNodeMenuDashboard: function() {
- var me = this;
+ var me = this,
+ objectType = me.nodeMenuObjectType;
+
+ if('managedobject' == me.nodeMenuObjectType) objectType = 'mo';
window.open(
- '/ui/grafana/dashboard/script/noc.js?dashboard=' + me.nodeMenuObjectType + '&id=' + me.nodeMenuObject
+ '/ui/grafana/dashboard/script/noc.js?dashboard=' + objectType + '&id=' + me.nodeMenuObject
);
},
|
new attribute in composites `mirror-initial`
initially mirrors a mirrorable composite
lec.mirror-initial = true
lec_43.mirror-initial = true | @@ -76,6 +76,7 @@ class Composite:
self.inter = False
self.noswap = False
self.mirror = False
+ self.mirror_initial = False
self.order = order
def str_title():
@@ -178,6 +179,8 @@ class Composite:
self.noswap = value
elif attr == 'mirror':
self.mirror = value
+ elif attr == 'mirror-initial':
+ self.mirror_initial = value
self.frame[0].original_size = size
self.frame[1].original_size = size
@@ -232,7 +235,8 @@ def add_mirrored_composites(composites):
if c.mirror:
r = c.mirrored()
r.order = len(composites) + len(result)
- result[mirror_name(c_name)] = r
+ # HOTFIX 36c3
+ result[c_name], result[mirror_name(c_name)] = (r,c) if c.mirror_initial else (c,r)
return composites.update(result)
|
ui_report: Do not pass jQuery element to the html method.
Although it works, this feature is not documented. | @@ -61,9 +61,8 @@ export function generic_embed_error(error_html) {
export function generic_row_button_error(xhr, btn) {
if (xhr.status >= 400 && xhr.status < 500) {
- btn.closest("td").html(
- $("<p>").addClass("text-error").text(JSON.parse(xhr.responseText).msg),
- );
+ const $error = $("<p>").addClass("text-error").text(JSON.parse(xhr.responseText).msg);
+ btn.closest("td").empty().append($error);
} else {
btn.text($t({defaultMessage: "Failed!"}));
}
|
Update test_xarray.py
Try new fixture management | import pytest
+import warnings
from argopy import DataFetcher as ArgoDataFetcher
from argopy.errors import InvalidDatasetStructure, ErddapServerError
@@ -21,10 +22,12 @@ def ds_pts():
.region([-75, -55, 30.0, 40.0, 0, 100.0, "2011-01-01", "2011-01-15"])
.to_xarray()
)
- except ErddapServerError: # Test is passed when something goes wrong because of the erddap server, not our fault !
- pass
- except ValueError: # Catches value error for incorrect standard levels as inputs
- pass
+ except Exception as e: # Test is passed when something goes wrong because of the erddap server, not our fault !
+ warnings.warn("Error when fetching tests data: %s" % str(e.args))
+ if "toto" not in data or "standard" not in data:
+ # We don't have what we need for testing, skip this test module:
+ pytest.xfail("failing configuration (but should work)")
+ else:
return data
@@ -77,6 +80,7 @@ class Test_interp_std_levels:
@requires_connected_erddap_phy
class Test_teos10:
def test_teos10_variables_default(self, ds_pts):
+ """Add default new set of TEOS10 variables"""
for key, this in ds_pts.items():
for format in ["point", "profile"]:
that = this.copy() # To avoid modifying the original dataset
@@ -87,6 +91,7 @@ class Test_teos10:
assert "CT" in that.variables
def test_teos10_variables_single(self, ds_pts):
+ """Add a single TEOS10 variables"""
for key, this in ds_pts.items():
for format in ["point", "profile"]:
that = this.copy() # To avoid modifying the original dataset
@@ -96,8 +101,8 @@ class Test_teos10:
assert "PV" in that.variables
def test_teos10_variables_inplace(self, ds_pts):
+ """Compute all default variables to a new dataset"""
for key, this in ds_pts.items():
- ds = this.argo.teos10(inplace=False)
- # So "SA" must be in 'ds' but not in 'this'
+ ds = this.argo.teos10(inplace=False) # So "SA" must be in 'ds' but not in 'this'
assert "SA" in ds.variables
assert "SA" not in this.variables
|
Fix A2A-VC config written toward A2O-VC
Fix path config in A2A-VC, which was written toward A2O-VC.
This will fix a training crash caused by the missing path. | @@ -31,7 +31,7 @@ downstream_expert:
eval_batch_size: 5
trdev_data_root: "./downstream/a2a-vc-vctk/data/VCTK-Corpus/wav48"
- eval_data_root: "./downstream/a2o-vc-vcc2020/data/vcc2020"
+ eval_data_root: "./downstream/a2a-vc-vctk/data/vcc2020"
spk_embs_root: "./downstream/a2a-vc-vctk/data/spk_embs/"
lists_root: "./downstream/a2a-vc-vctk/data/lists"
eval_lists_root: "./downstream/a2o-vc-vcc2020/data/lists"
|
Replaced circular link
Replaced circular link to GitHub README file (where migration recommendations were published) with migration content from the README file history. | @@ -80,7 +80,9 @@ Deploy Mattermost on Docker for production use
Upgrade from ``mattermost-docker``
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-For an in-depth guide to upgrading from the deprecated `mattermost-docker repository <https://github.com/mattermost/mattermost-docker>`__, please refer to `this document <https://github.com/mattermost/docker/blob/main/scripts/UPGRADE.md>`__. For additional help or questions, please refer to `this issue <https://github.com/mattermost/mattermost-docker/issues/489>`__.
+To migrate from the deprecated `mattermost-docker repository <https://github.com/mattermost/mattermost-docker>`__ running with the image ``mattermost/mattermost-prod-app``, we recommend migrating either to ``mattermost/mattermost-enterprise-edition`` or ``mattermost/mattermost-team-edition`` images, which are the official images supported by Mattermost. These images support Postgres 10+ databases, which we know has been a long-running challenge for the community, and you will not lose any features or functionality by moving to these new images.
+
+For additional help or questions, please refer to `this issue <https://github.com/mattermost/mattermost-docker/issues/489>`__.
Installing a different version of Mattermost
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -89,7 +91,7 @@ Installing a different version of Mattermost
2. Run ``git pull`` to fetch any recent changes to the repository, paying attention to any potential ``env.example`` changes.
-3. Adjust the ``MATTERMOST_IMAGE_TAG`` in the ``.env`` file to point your desired `enterprise <(https://hub.docker.com/r/mattermost/mattermost-enterprise-edition/tags?page=1&ordering=last_updated>`__ or `team <https://hub.docker.com/r/mattermost/mattermost-team-edition/tags?page=1&ordering=last_updated>`__ image version.
+3. Adjust the ``MATTERMOST_IMAGE_TAG`` in the ``.env`` file to point your desired `enterprise <https://hub.docker.com/r/mattermost/mattermost-enterprise-edition/tags?page=1&ordering=last_updated>`__ or `team <https://hub.docker.com/r/mattermost/mattermost-team-edition/tags?page=1&ordering=last_updated>`__ image version.
4. Redeploy Mattermost.
|
removed legacy code
removed deprecated timestamp code | @@ -17,7 +17,6 @@ mkdir -p "$LOGPATH"
# excludes sensative parameters
# shellcheck disable=SC2129
echo "*** Start config parameters ****" >> "$LOG"
-echo "Timestamp: [`date`]" >> "$LOG"
echo -e "\tTimestamp: $(date -R)" >> "$LOG"
# shellcheck disable=SC2002
cat "$ARM_CONFIG"|sed '/^[#;].*$/d;/^$/d;/if/d;/^ /d;/^else/d;/^fi/d;/KEY=/d;/PASSWORD/d' >> "$LOG"
|
Remove old "simple_polygons" fields in schemas
These were missed in [1].
[1]: | @@ -15,7 +15,6 @@ create_broadcast_message_schema = {
'finishes_at': {'type': 'string', 'format': 'datetime'},
'areas': {'type': 'object'},
'areas_2': {'type': 'object'},
- 'simple_polygons': {"type": "array", "items": {"type": "array"}},
'content': {'type': 'string', 'minLength': 1},
'reference': {'type': 'string', 'minLength': 1, 'maxLength': 255},
},
@@ -44,7 +43,6 @@ update_broadcast_message_schema = {
'finishes_at': {'type': 'string', 'format': 'datetime'},
'areas': {'type': 'object'},
'areas_2': {'type': 'object'},
- 'simple_polygons': {"type": "array", "items": {"type": "array"}},
},
'required': [],
'additionalProperties': False
|
set 'running' event after observer has started
this prevents 'running' from being set if starting the observer fails | @@ -1903,8 +1903,6 @@ class MaestralMonitor(object):
name="Maestral uploader"
)
- self.running.set()
-
try:
self.local_observer_thread.start()
except OSError as exc:
@@ -1922,6 +1920,8 @@ class MaestralMonitor(object):
else:
raise exc
+ self.running.set()
+
self.connection_thread.start()
self.download_thread.start()
self.upload_thread.start()
@@ -1970,7 +1970,7 @@ class MaestralMonitor(object):
"""Stops syncing and destroys worker threads."""
if not self.running.is_set():
- logger.debug("Syncing was already stopped")
+ logger.debug("Syncing is already stopped")
return
self._auto_resume_on_connect = False
|
ModLog: support self_stream voice state
This feature will be available in discord.py 1.3. | @@ -25,7 +25,11 @@ CHANNEL_CHANGES_SUPPRESSED = ("_overwrites", "position")
MEMBER_CHANGES_SUPPRESSED = ("status", "activities", "_client_status", "nick")
ROLE_CHANGES_UNSUPPORTED = ("colour", "permissions")
-VOICE_STATE_ATTRIBUTES = {"self_video": "Broadcasting", "channel.name": "Channel"}
+VOICE_STATE_ATTRIBUTES = {
+ "channel.name": "Channel",
+ "self_stream": "Streaming",
+ "self_video": "Broadcasting",
+}
class ModLog(Cog, name="ModLog"):
|
message view: Change PM flag to match PM compose flag.
Fixes | @@ -1647,8 +1647,8 @@ blockquote p {
border-top-color: hsla(0, 0%, 0%, 0.0);
border-right-color: hsla(0, 0%, 0%, 0.0);
border-bottom-color: hsla(0, 0%, 0%, 0.0);
- background-color: hsl(0, 0%, 7%);
- border-left-color: hsl(0, 0%, 7%);
+ background-color: hsl(0, 0%, 27%);
+ border-left-color: hsl(0, 0%, 27%);
color: #ffffff;
border-width: 0px;
}
|
XFail test_hinge_loss temporarily
See XFailing right now to unblock CI.
Authors:
- Micka (https://github.com/lowener)
Approvers:
- Dante Gama Dessavre (https://github.com/dantegd)
URL: | @@ -1381,6 +1381,8 @@ def test_sparse_pairwise_distances_output_types(input_type, output_type):
assert isinstance(S, cp.ndarray)
[email protected](reason='Temporarily disabling this test. '
+ 'See rapidsai/cuml#3569')
@pytest.mark.parametrize("nrows, ncols, n_info",
[
unit_param(30, 10, 7),
|
smart_classroom_demo: Fix trivial issues
* the comment is wrong (it was a remnant of an earlier version of the code
that I forgot to change);
* `face_config` isn't used in the body of the `if`, while `fd_model_path`
is, so it makes more sense to check the latter. | @@ -670,8 +670,8 @@ int main(int argc, char* argv[]) {
std::unique_ptr<FaceRecognizer> face_recognizer;
- if (face_config.enabled && !fr_model_path.empty() && !lm_model_path.empty()) {
- // Create face tracker
+ if (!fd_model_path.empty() && !fr_model_path.empty() && !lm_model_path.empty()) {
+ // Create face recognizer
detection::DetectorConfig face_registration_det_config(fd_model_path, fd_weights_path);
face_registration_det_config.deviceName = FLAGS_d_fd;
|
Add create_network method in etcd db.
Add create_network method in etcd db. Now supports two db types,
both etcd and sql, for storing networks.
Related bug: | @@ -1336,3 +1336,17 @@ class EtcdAPI(object):
except Exception as e:
LOG.error('Error occurred while retrieving quota usage: %s',
six.text_type(e))
+
+ @lockutils.synchronized('etcd_network')
+ def create_network(self, context, network_value):
+ if not network_value.get('uuid'):
+ network_value['uuid'] = uuidutils.generate_uuid()
+ if network_value.get('name'):
+ self._validate_unique_container_name(context,
+ network_value['name'])
+ network = models.Network(network_value)
+ try:
+ network.save()
+ except Exception:
+ raise
+ return network
|
Use method dispatch for executing instructions
Removes chain of if/else statements. | @@ -391,15 +391,23 @@ class Executor(object):
def execute(self, api_calls):
# type: (List[models.Instruction]) -> None
for instruction in api_calls:
- if isinstance(instruction, models.APICall):
+ getattr(self, '_do_%s' % instruction.__class__.__name__.lower(),
+ lambda x: None)(instruction)
+
+ def _do_apicall(self, instruction):
+ # type: (models.APICall) -> None
final_kwargs = self._resolve_variables(instruction)
method = getattr(self._client, instruction.method_name)
# TODO: we need proper error handling here.
result = method(**final_kwargs)
self.stack.append(result)
- elif isinstance(instruction, models.StoreValue):
+
+ def _do_storevalue(self, instruction):
+ # type: (models.StoreValue) -> None
self.variables[instruction.name] = self.stack[-1]
- elif isinstance(instruction, models.RecordResourceValue):
+
+ def _do_recordresourcevalue(self, instruction):
+ # type: (models.RecordResourceValue) -> None
d = self.resource_values.setdefault(
instruction.resource_name, {})
d['resource_type'] = instruction.resource_type
@@ -409,11 +417,17 @@ class Executor(object):
else:
value = self.stack[-1]
d[instruction.name] = value
- elif isinstance(instruction, models.Push):
+
+ def _do_push(self, instruction):
+ # type: (models.Push) -> None
self.stack.append(instruction.value)
- elif isinstance(instruction, models.Pop):
+
+ def _do_pop(self, instruction):
+ # type: (models.Pop) -> None
self.stack.pop()
- elif isinstance(instruction, models.JPSearch):
+
+ def _do_jpsearch(self, instruction):
+ # type: (models.JPSearch) -> None
v = self.stack.pop()
result = jmespath.search(instruction.expression, v)
self.stack.append(result)
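A standalone sketch of the dispatch pattern (the instruction classes below are placeholders, not the project's `models` module): each instruction is routed to a `_do_<classname>` method via `getattr`, with a no-op fallback for unknown instruction types.

```python
class Push:
    def __init__(self, value):
        self.value = value

class Pop:
    pass

class MiniExecutor:
    def __init__(self):
        self.stack = []

    def execute(self, instructions):
        for instruction in instructions:
            # Method name derived from the instruction's class name.
            handler = getattr(
                self, '_do_%s' % instruction.__class__.__name__.lower(),
                lambda instr: None)          # unknown instructions are ignored
            handler(instruction)

    def _do_push(self, instruction):
        self.stack.append(instruction.value)

    def _do_pop(self, instruction):
        self.stack.pop()

ex = MiniExecutor()
ex.execute([Push(1), Push(2), Pop()])
print(ex.stack)   # [1]
```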
|
Temporarily disable caching
Requires AllowOverride Index in the Apache config | @@ -11,6 +11,7 @@ Header set Access-Control-Allow-Origin "*"
# Allow brief caching of API responses
<IfModule mod_expires.c>
- ExpiresByType application/json "access plus 2 hours"
+ # ExpiresActive on
+ # ExpiresByType application/json "access plus 2 hours"
</IfModule>
|
Error Handler: Changed way of help command get + send to avoid warning
Only get the coroutine when it is actually going to be awaited. | @@ -159,19 +159,17 @@ class ErrorHandler(Cog):
* ArgumentParsingError: send an error message
* Other: send an error message and the help command
"""
- prepared_help_command = self.get_help_command(ctx)
-
if isinstance(e, errors.MissingRequiredArgument):
await ctx.send(f"Missing required argument `{e.param.name}`.")
- await prepared_help_command
+ await self.get_help_command(ctx)
self.bot.stats.incr("errors.missing_required_argument")
elif isinstance(e, errors.TooManyArguments):
await ctx.send(f"Too many arguments provided.")
- await prepared_help_command
+ await self.get_help_command(ctx)
self.bot.stats.incr("errors.too_many_arguments")
elif isinstance(e, errors.BadArgument):
await ctx.send(f"Bad argument: {e}\n")
- await prepared_help_command
+ await self.get_help_command(ctx)
self.bot.stats.incr("errors.bad_argument")
elif isinstance(e, errors.BadUnionArgument):
await ctx.send(f"Bad argument: {e}\n```{e.errors[-1]}```")
@@ -181,7 +179,7 @@ class ErrorHandler(Cog):
self.bot.stats.incr("errors.argument_parsing_error")
else:
await ctx.send("Something about your input seems off. Check the arguments:")
- await prepared_help_command
+ await self.get_help_command(ctx)
self.bot.stats.incr("errors.other_user_input_error")
@staticmethod
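The pitfall being avoided, as a minimal hedged sketch (not the bot's code): a coroutine object created up front but only sometimes awaited triggers a "coroutine ... was never awaited" warning, so the fix creates the coroutine only at the await site.

```python
import asyncio

async def send_help():
    await asyncio.sleep(0)

async def handle_error(needs_help: bool):
    # prepared = send_help()   # anti-pattern: warns if no branch ever awaits it
    if needs_help:
        await send_help()      # create the coroutine only where it is awaited

asyncio.run(handle_error(False))
```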
|
ci: make jobs interruptible
This will cancel old running pipelines if a new one is created. | @@ -16,12 +16,14 @@ init:
- shell
script:
- schutzbot/update_github_status.sh start
+ interruptible: true
RPM:
stage: rpmbuild
extends: .terraform
script:
- sh "schutzbot/mockbuild.sh"
+ interruptible: true
parallel:
matrix:
- RUNNER:
|
Cleaning locales that are symlinked to other locales.
Fixes | import copy
import json
import logging
+import os
import threading
import progressbar
import texttable
@@ -54,6 +55,17 @@ class Translator(object):
self.instructions = instructions
self._inject = inject
+ @staticmethod
+ def _cleanup_symlinks(locales):
+ """Symlinked locales should be ignored."""
+ clean_locales = []
+ for locale in locales:
+ locale_path = 'translations/{}'.format(str(locale))
+ if os.path.islink(locale_path):
+ continue
+ clean_locales.append(locale)
+ return clean_locales
+
def _download_content(self, stat):
raise NotImplementedError
@@ -157,6 +169,7 @@ class Translator(object):
def update_acl(self, locales=None):
locales = locales or self.pod.catalogs.list_locales()
+ locales = self._cleanup_symlinks(locales)
if not locales:
self.pod.logger.info('No locales to found to update.')
return
@@ -185,6 +198,7 @@ class Translator(object):
def update_meta(self, locales=None):
locales = locales or self.pod.catalogs.list_locales()
+ locales = self._cleanup_symlinks(locales)
if not locales:
self.pod.logger.info('No locales to found to update.')
return
@@ -211,6 +225,7 @@ class Translator(object):
prune=False):
source_lang = self.pod.podspec.default_locale
locales = locales or self.pod.catalogs.list_locales()
+ locales = self._cleanup_symlinks(locales)
stats = []
num_files = len(locales)
if not locales:
|
Port buildgen to py3
### Problem
Porting buildgen to py3. Needs references to str and object.
### Solution
added builtins import for str and object | @@ -8,6 +8,7 @@ import ast
import logging
import re
import sys
+from builtins import object, str
from difflib import unified_diff
from pants.build_graph.address import Address, BuildFileAddress
|
the pip install for TF2 needs updating
TF2 installs GPU by default - need to drop the -gpu modifier to install correctly via pip in colab.
likely the same issue in all TF2 colab examples | },
"source": [
"# We want to use TensorFlow 2.0 in the Eager mode for this demonstration. But this module works as well with the Graph mode.\n",
- "!pip install -U --pre tensorflow-gpu --quiet"
+ "!pip install tensorflow --quiet"
],
"execution_count": 0,
"outputs": []
|
fix images
paypal embed didn't work here | @@ -14,11 +14,6 @@ CWL v1.2.x: https://github.com/common-workflow-language/cwl-v1.2/
[**Support**](#Support) [](https://gitter.im/common-workflow-language/common-workflow-language?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
[](https://github.com/common-workflow-language/common-workflow-language/stargazers)
-<form action="https://www.paypal.com/cgi-bin/webscr" method="post" target="_top">
-<input type="hidden" name="cmd" value="_s-xclick">
-<input type="hidden" name="hosted_button_id" value="Z55VS5LBBSZTJ">
-<input type="image" src="https://www.paypalobjects.com/en_US/i/btn/btn_donate_LG.gif" width="92" heigth="26" name="submit" alt="Donate to Common Workflow Language via PayPal">
-</form>
<a href="https://www.youtube.com/watch?v=86eY8xs-Vo8"><img align="right"
src="https://github.com/common-workflow-language/logo/raw/main/intro_video_screenshot_413x193.png"
@@ -31,7 +26,7 @@ meet the needs of data-intensive science, such as Bioinformatics, Medical
Imaging, Astronomy, Physics, and Chemistry.
<a href="https://open-stand.org/about-us/principles"><img align="left"
-src="https://standards.ieee.org/images/openstand/128x128-blue2.png" alt="Open Stand badge"></a>
+src="https://github.com/common-workflow-language/cwl-website/raw/main/openstand-128x128-blue.png" alt="Open Stand badge"></a>
CWL is developed by a multi-vendor working group consisting of
organizations and individuals aiming to enable scientists to share data
analysis workflows. [The CWL project is maintained on
|
Set c99 to intel icc compiler so numpy will build
Numpy will not build if this is not set because it
has code that follows the c99 standard. icc is set to c89 by
default. Look below at IntelCCompilerW which is the icc equivalent
on Windows and that one already has c99 set. | @@ -58,7 +58,7 @@ def __init__(self, verbose=0, dry_run=0, force=0):
v = self.get_version()
mpopt = 'openmp' if v and v < '15' else 'qopenmp'
- self.cc_exe = ('icc -m64 -fPIC -fp-model strict -O3 '
+ self.cc_exe = ('icc -std=c99 -m64 -fPIC -fp-model strict -O3 '
'-fomit-frame-pointer -{}').format(mpopt)
compiler = self.cc_exe
|
Add DebOps project to eco test pipeline
The 'debops/debops' repository contains a set of Ansible roles and
playbooks focused on Debian and Ubuntu server management. | - name: ansible_collection_system
url: https://github.com/devroles/ansible_collection_system
contact: greg-hellings
+ - name: debops
+ url: https://github.com/debops/debops
+ contact: drybjed
tasks:
- name: Clone repo
|
Fixes for /integrations/ page.
This fixes the hubot text that still stays when you transition to
integration details along with fixing the first animation that is
choppy and previews briefly before fading in.
Fixes | @@ -48,14 +48,14 @@ var integration_events = function () {
if (hashes.indexOf(_hash) > -1) {
$lozenge_icon = $(".integration-lozenges .integration-lozenge.integration-" + _hash).clone(true);
currentblock = $(hash);
- instructionbox.children(".integration-lozenge").replaceWith($lozenge_icon);
+ instructionbox.hide().children(".integration-lozenge").replaceWith($lozenge_icon);
instructionbox.append($lozenge_icon, currentblock);
$(".inner-content").removeClass("show");
setTimeout(function () {
instructionbox.hide();
$(".integration-lozenges").addClass("hide");
- $(".portico-page-header.extra, .portico-large-text.extra, #integration-main-text").hide();
+ $(".extra, #integration-main-text").hide();
instructionbox.append(currentblock);
instructionbox.show();
@@ -77,7 +77,7 @@ var integration_events = function () {
setTimeout(function () {
$("#integration-list-link").css("display", "none");
$(".integration-lozenges").removeClass("hide");
- $(".portico-page-header.extra, .portico-large-text.extra, #integration-main-text").show();
+ $(".extra, #integration-main-text").show();
instructionbox.hide();
$lozenge_icon.remove();
currentblock.appendTo("#integration-instructions-group");
|
DOC: now supports sphinx-autobuild
[NEW] inside the doc directory, `make livehtml` serves
web pages to the browser, allowing doc appearance to be checked
while writing. | @@ -87,6 +87,7 @@ doctest:
@echo "Testing of doctests in the sources finished, look at the " \
"results in _build/doctest/output.txt."
+
.PHONY: livehtml
livehtml:
- sphinx-autobuild -p 5500 -b html $(ALLSPHINXOPTS) "$(SOURCEDIR)" $(BUILDDIR)/html
+ sphinx-autobuild -p 5500 -b html $(ALLSPHINXOPTS) _build/html
|
datasets.fetch_spm_multimodal_fmri() generates events.tsv files; warns in tests
- Added _make_events_filepath_spm_multimodal_fmri().
- Events files are not generated during nosetests, and a warning is presented. | @@ -8,6 +8,7 @@ import glob
import json
import os
import re
+import warnings
from botocore.handlers import disable_signing
import nibabel as nib
@@ -19,6 +20,7 @@ from nilearn.datasets.utils import (_fetch_file,
_uncompress_file,
)
from scipy.io import loadmat
+from scipy.io.matlab.miobase import MatReadError
from sklearn.datasets.base import Bunch
from nistats.utils import _verify_events_file_uses_tab_separators
@@ -547,6 +549,15 @@ def _glob_spm_multimodal_fmri_data(subject_dir):
_subject_data = _get_session_trials_spm_multimodal(subject_dir, session, _subject_data)
if not _subject_data:
return None
+ try:
+ paradigm = _make_events_file_spm_multimodal_fmri(_subject_data, session)
+ except MatReadError as mat_err:
+ warnings.warn('{}. An events.tsv file cannot be generated'.format(str(mat_err)))
+ else:
+ events_filepath = _make_events_filepath_spm_multimodal_fmri(_subject_data, session)
+ paradigm.to_csv(events_filepath, sep='\t', index=False)
+ _subject_data['events{}'.format(session+1)] = events_filepath
+
# glob for anat data
_subject_data = _get_anatomical_data_spm_multimodal(subject_dir, _subject_data)
@@ -582,10 +593,17 @@ def _download_data_spm_multimodal(data_dir, subject_dir, subject_id):
return _glob_spm_multimodal_fmri_data(subject_dir)
-def _make_events_file_spm_multimodal_fmri(subject_data, fmri_img):
+def _make_events_filepath_spm_multimodal_fmri(_subject_data, session):
+ key = 'trials_ses{}'.format(session+1)
+ events_file_location = os.path.dirname(_subject_data[key])
+ events_filename = 'session{}_events.tsv'.format(session+1)
+ events_filepath = os.path.join(events_file_location, events_filename)
+ return events_filepath
+
+
+def _make_events_file_spm_multimodal_fmri(_subject_data, session):
tr = 2.
- for idx in range(len(fmri_img)):
- timing = loadmat(getattr(subject_data, "trials_ses%i" % (idx + 1)),
+ timing = loadmat(_subject_data["trials_ses%i" % (session + 1)],
squeeze_me=True, struct_as_record=False)
faces_onsets = timing['onsets'][0].ravel()
scrambled_onsets = timing['onsets'][1].ravel()
@@ -594,6 +612,7 @@ def _make_events_file_spm_multimodal_fmri(subject_data, fmri_img):
conditions = (['faces'] * len(faces_onsets) +
['scrambled'] * len(scrambled_onsets))
paradigm = pd.DataFrame({'trial_type': conditions, 'onset': onsets})
+ return paradigm
def fetch_spm_multimodal_fmri(data_dir=None, data_name="spm_multimodal_fmri",
|
ASTNodeType: remove ASTNodeType from get_inheritance_chain's result
TN: | @@ -1751,13 +1751,16 @@ class StructType(CompiledType):
@classmethod
def get_inheritance_chain(cls):
"""
- Return a list for all classes from ASTNodeType to `cls` in the
- inheritance chain.
+ Return a list for all classes from ASTNodeType (excluded) to `cls`
+ (included) in the inheritance chain. Root-most classes come first.
:rtype: list[ASTNodeType]
"""
- return reversed([base_class for base_class in cls.mro()
- if getattr(base_class, 'is_ast_node', False)])
+ return reversed([
+ base_class for base_class in cls.mro()
+ if (issubtype(base_class, ASTNodeType)
+ and base_class is not ASTNodeType)
+ ])
@classmethod
def get_properties(cls, predicate=None, include_inherited=True):
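The same idea in a generic, runnable form (placeholder classes, not the project's compiled types): walk `cls.mro()`, keep only subclasses of a chosen root while excluding the root itself, and order the result root-most first.

```python
class Root:
    pass

class A(Root):
    pass

class B(A):
    pass

def inheritance_chain(cls, root=Root):
    # reversed(mro) puts base classes first; filter drops object and the root.
    return [base for base in reversed(cls.mro())
            if issubclass(base, root) and base is not root]

print(inheritance_chain(B))   # root-most first: [A, B]
```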
|
Change default iter/get messages limit
And fix-up previous commit. | @@ -1013,7 +1013,7 @@ class TelegramClient(TelegramBareClient):
else:
return self(messages.DeleteMessagesRequest(message_ids, revoke=revoke))
- def iter_messages(self, entity, limit=20, offset_date=None,
+ def iter_messages(self, entity, limit=None, offset_date=None,
offset_id=0, max_id=0, min_id=0, add_offset=0,
search=None, filter=None, from_user=None,
batch_size=100, wait_time=None, _total=None):
@@ -1100,7 +1100,8 @@ class TelegramClient(TelegramBareClient):
# We can emulate their behaviour locally by setting offset = max_id
# and simply stopping once we hit a message with ID <= min_id.
offset_id = max(offset_id, max_id)
- if offset_id - min_id <= 1: # Both exclusive, so 1 difference = empty
+ if offset_id and min_id:
+ if offset_id - min_id <= 1:
return
entity = self.get_input_entity(entity)
@@ -1211,9 +1212,23 @@ class TelegramClient(TelegramBareClient):
"""
Same as :meth:`iter_messages`, but returns a list instead
with an additional ``.total`` attribute on the list.
+
+ If the `limit` is not set, it will be 1 by default unless both
+ `min_id` **and** `max_id` are set (as *named* arguments), in
+ which case the entire range will be returned.
+
+ This is so because any integer limit would be rather arbitrary and
+ it's common to only want to fetch one message, but if a range is
+ specified it makes sense that it should return the entirety of it.
"""
total = [0]
kwargs['_total'] = total
+ if len(args) == 1 and 'limit' not in kwargs:
+ if 'min_id' in kwargs and 'max_id' in kwargs:
+ kwargs['limit'] = None
+ else:
+ kwargs['limit'] = 1
+
msgs = UserList(self.iter_messages(*args, **kwargs))
msgs.total = total[0]
return msgs
|
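A minimal usage sketch for the default-limit behaviour introduced above. It assumes a connected (synchronous, pre-asyncio) Telethon `TelegramClient` named `client` and a resolvable `chat` entity; both names are placeholders, not part of the diff.

```python
# Assumes `client` is a connected Telethon TelegramClient and `chat` is any
# entity the account can read; both are placeholders for this sketch.

# No limit and no min_id/max_id range: get_messages defaults to limit=1,
# so this returns a list-like result holding the single newest message.
latest = client.get_messages(chat)

# Both min_id and max_id passed as named arguments: limit stays None and the
# whole (exclusive) ID range is returned, per the new default logic.
window = client.get_messages(chat, min_id=100, max_id=200)
print(window.total)  # the returned list carries a .total attribute

# iter_messages with limit=None lazily walks the full history in batches.
for message in client.iter_messages(chat, limit=None):
    print(message.id)
```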
Adds CV_time and CV_current as summary stats, and a helper function to
extract the CV portion of charge. | @@ -796,6 +796,18 @@ class BEEPDatapath(abc.ABC, MSONable):
summary["paused"] = self.raw_data.groupby("cycle_index").apply(
get_max_paused_over_threshold)
+ # Add CV_time and CV_current summary stats
+ CV_time = []
+ CV_current = []
+ for cycle in summary.cycle_index:
+ raw_cycle = self.raw_data.loc[self.raw_data.cycle_index == cycle]
+ charge = raw_cycle.loc[raw_cycle.current > 0]
+ CV = get_CV_segment_from_charge(charge)
+ CV_time.append(CV.test_time.iat[-1] - CV.test_time.iat[0])
+ CV_current.append(CV.current.iat[-1])
+ summary["CV_time"] = CV_time
+ summary["CV_current"] = CV_current
+
summary = self._cast_dtypes(summary, "summary")
last_voltage = self.raw_data.loc[
@@ -988,6 +1000,22 @@ class BEEPDatapath(abc.ABC, MSONable):
diagnostic_available["cycle_type"] * len(starts_at)
)
+ # Add CV_time and CV_current summary stats
+ CV_time = []
+ CV_current = []
+ for cycle in diag_summary.cycle_index:
+ raw_cycle = self.raw_data.loc[self.raw_data.cycle_index == cycle]
+
+ # Charge is the very first step_index
+ CCCV = raw_cycle.loc[raw_cycle.step_index == raw_cycle.step_index.min()]
+ CV = get_CV_segment_from_charge(CCCV)
+
+ CV_time.append(CV.test_time.iat[-1] - CV.test_time.iat[0])
+ CV_current.append(CV.current.iat[-1])
+
+ diag_summary["CV_time"] = CV_time
+ diag_summary["CV_current"] = CV_current
+
diag_summary = self._cast_dtypes(diag_summary, "diagnostic_summary")
return diag_summary
@@ -1385,3 +1413,31 @@ def get_max_paused_over_threshold(group, paused_threshold=3600):
else:
max_paused_duration = 0
return max_paused_duration
+
+
+def get_CV_segment_from_charge(charge):
+ """
+ Extracts the constant voltage segment from charge. Works for both CCCV or
+ CC steps followed by a CV step.
+
+ Args:
+ group (pd.DataFrame): charge dataframe for a single cycle
+
+ Returns:
+ (pd.DataFrame): dataframe containing the CV segment
+
+ """
+ # Compute dI and dV
+ dI = np.diff(charge.current)
+ dV = np.diff(charge.voltage)
+ dt = np.diff(charge.test_time)
+
+ # Find the first index where dt>1 and abs(dV/dt)<tol and abs(dI/dt)>tol
+ i = 0
+ while i < len(dV) and not (dt[i] > 1 and abs(dV[i]/dt[i]) < 5.e-5 and abs(dI[i]/dt[i]) > 1.e-4):
+ i = i+1
+
+ # Filter for CV phase
+ CV = charge.loc[charge.test_time >= charge.test_time.iat[i-1]]
+
+ return(CV)
|
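A self-contained sketch of the CV-detection logic added above, run on synthetic data. The column names (`test_time`, `current`, `voltage`) and the thresholds mirror the diff; the CC-CV profile itself is invented purely for illustration.

```python
import numpy as np
import pandas as pd

# Synthetic CC-CV charge step: constant current up to ~4.2 V, then constant
# voltage with an exponentially decaying current (numbers are illustrative).
t_cc = np.arange(0.0, 600.0, 10.0)
t_cv = np.arange(600.0, 1200.0, 10.0)
charge = pd.DataFrame({
    "test_time": np.concatenate([t_cc, t_cv]),
    "current": np.concatenate([np.full_like(t_cc, 2.0),
                               2.0 * np.exp(-(t_cv - 600.0) / 200.0)]),
    "voltage": np.concatenate([3.6 + 0.001 * t_cc, np.full_like(t_cv, 4.2)]),
})

# Same threshold walk as get_CV_segment_from_charge: find the first sample
# where the voltage is flat (dV/dt ~ 0) while the current is still changing.
dI, dV, dt = (np.diff(charge[c]) for c in ("current", "voltage", "test_time"))
i = 0
while i < len(dV) and not (dt[i] > 1 and abs(dV[i] / dt[i]) < 5e-5
                           and abs(dI[i] / dt[i]) > 1e-4):
    i += 1

cv = charge.loc[charge.test_time >= charge.test_time.iat[i - 1]]
print("CV_time:", cv.test_time.iat[-1] - cv.test_time.iat[0])
print("CV_current:", cv.current.iat[-1])
```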
[Doc] Fixed typo in comment.
VideoDatset -> VideoDataset | @@ -102,7 +102,7 @@ class SampleFrames:
test_mode (bool): Store True when building test or validation dataset.
Default: False.
start_index (None): This argument is deprecated and moved to dataset
- class (``BaseDataset``, ``VideoDatset``, ``RawframeDataset``, etc),
+ class (``BaseDataset``, ``VideoDataset``, ``RawframeDataset``, etc),
see this: https://github.com/open-mmlab/mmaction2/pull/89.
keep_tail_frames (bool): Whether to keep tail frames when sampling.
Default: False.
@@ -282,7 +282,7 @@ class UntrimmedSampleFrames:
frame_interval (int): Temporal interval of adjacent sampled frames.
Default: 16.
start_index (None): This argument is deprecated and moved to dataset
- class (``BaseDataset``, ``VideoDatset``, ``RawframeDataset``, etc),
+ class (``BaseDataset``, ``VideoDataset``, ``RawframeDataset``, etc),
see this: https://github.com/open-mmlab/mmaction2/pull/89.
"""
|
Update scripts/generate_ipfs_hashes.py
Revert only hash | @@ -152,7 +152,7 @@ def ipfs_hashing(
# use ignore patterns somehow
# ignore_patterns = configuration.fingerprint_ignore_patterns]
assert configuration.directory is not None
- result_list = client.add(configuration.directory, only_hash=True)
+ result_list = client.add(configuration.directory)
key = os.path.join(
configuration.author, package_type.to_plural(), configuration.directory.name,
)
|
Changed motion check to look at sorted date list
Previous method using clips resulted in an unsorted array, so the
newest clip wasn't always added during check | @@ -221,7 +221,8 @@ class BlinkCamera():
# Check if the most recent clip is included in the last_record list
# and that the last_record list is populated
try:
- new_clip = self.blink.videos[self.name][0]['clip']
+ records = sorted(self.blink.record_dates[self.name])
+ new_clip = records.pop()
if new_clip not in self.last_record and self.last_record:
self.motion_detected = True
self.last_record.insert(0, new_clip)
@@ -293,6 +294,7 @@ class Blink():
self._video_count = 0
self._all_videos = {}
self._summary = None
+ self.record_dates = list()
@property
def camera_thumbs(self):
@@ -376,6 +378,7 @@ class Blink():
def get_videos(self, start_page=0, end_page=1):
"""Retrieve last recorded videos per camera."""
videos = list()
+ all_dates = dict()
for page_num in range(start_page, end_page + 1):
this_page = self._video_request(page_num)
if not this_page:
@@ -388,6 +391,12 @@ class Blink():
camera_name = entry['camera_name']
clip_addr = entry['address']
thumb_addr = entry['thumbnail']
+ clip_date = clip_addr.split('_')[-6:]
+ clip_date = '_'.join(clip_date)
+ clip_date = clip_date.split('.')[0]
+ if camera_name not in all_dates:
+ all_dates[camera_name] = list()
+ all_dates[camera_name].append(clip_date)
try:
self._all_videos[camera_name].append(
{
@@ -402,6 +411,7 @@ class Blink():
'thumb': thumb_addr,
}
]
+ self.record_dates = all_dates
def get_cameras(self):
"""Find and creates cameras."""
|
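A library-free sketch of the date-based check introduced above: clip dates are parsed out of the clip address, sorted, and the newest one is compared against the previously recorded clips. The example addresses are made up; only the `split('_')[-6:]` parsing mirrors the diff.

```python
# Illustrative clip addresses; the real format comes from the Blink API, so the
# exact layout here is an assumption based on the split('_')[-6:] logic above.
clip_addrs = [
    "/media/production/account/clip_front_door_2017_10_01_12_30_05.mp4",
    "/media/production/account/clip_front_door_2017_10_02_08_15_42.mp4",
]

record_dates = []
for clip_addr in clip_addrs:
    clip_date = "_".join(clip_addr.split("_")[-6:]).split(".")[0]
    record_dates.append(clip_date)

last_record = []
newest = sorted(record_dates).pop()        # zero-padded date strings sort lexically
motion_detected = bool(last_record) and newest not in last_record
last_record.insert(0, newest)
print(newest, motion_detected)
```

Sorting the extracted date strings means the comparison no longer depends on the (unsorted) order in which clips arrive from the API.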
settings: Extend `DATA_UPLOAD_MAX_MEMORY_SIZE` from default value.
Django 1.10 added the `DATA_UPLOAD_MAX_MEMORY_SIZE` parameter,
which controls the maximum size of uploaded files. By default it is 2.5MB.
'S3_SECRET_KEY': '',
'S3_AVATAR_BUCKET': '',
'LOCAL_UPLOADS_DIR': None,
+ 'DATA_UPLOAD_MAX_MEMORY_SIZE': 25 * 1024 * 1024,
'MAX_FILE_UPLOAD_SIZE': 25,
'MAX_AVATAR_FILE_SIZE': 5,
'MAX_ICON_FILE_SIZE': 5,
|
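For context, this is roughly what the resulting Django setting amounts to. The 25 MiB value mirrors the new default in the diff; a deployment like the one above would feed it through its own defaults dict rather than hard-coding it in settings.py.

```python
# settings.py sketch: raise Django's request-body cap above the 2.5 MB default
# so larger file uploads are not rejected before reaching the upload handler.
DATA_UPLOAD_MAX_MEMORY_SIZE = 25 * 1024 * 1024  # 25 MiB, matching the diff
```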
feat: add checkpoint timedelta checker
Simply call `brownie run sidechain/checkpoint get_checkpoint_delta` | -from brownie import Contract, accounts, history
+import datetime
+
+from brownie import Contract, accounts, history, network
# this script is used for bridging CRV rewards to sidechains
# it should be run once per week, just after the start of the epoch week
@@ -105,3 +107,22 @@ def avax():
streamer = Contract(addr)
token = streamer.reward_tokens(0)
streamer.notify_reward_amount(token, {"from": acct})
+
+
+def get_checkpoint_delta():
+ networks = {f"{k.lower()}-main": v for k, v in globals().items() if k.isupper()}
+
+ for network_id, streamers in networks.items():
+ # connect to appropritate network
+ now = datetime.datetime.now()
+ network.disconnect()
+ network.connect(network_id)
+ print(network_id)
+ # reward token 0 is CRV
+ crv_token = Contract(streamers[-1]).reward_tokens(0)
+ for streamer_addr in streamers:
+ streamer = Contract(streamer_addr)
+ period_finish = datetime.datetime.fromtimestamp(
+ int(streamer.reward_data(crv_token)["period_finish"])
+ )
+ print(streamer.address, "dt:", str(period_finish - now))
|
Fixes an issue on localhost deploying to nested lxd
Fixes | @@ -63,6 +63,8 @@ class DeployController:
""" handles deployment
"""
for service in self.applications:
+ if app.current_cloud == "localhost":
+ service.placement_spec = None
juju.deploy_service(service,
app.metadata_controller.series,
utils.info,
|
8443 should be removed since we are defaulting to https
If port 8443 is used, a redirect_uri_mismatch error will occur during the OAuth flow from GitHub, which prevents logging into the OpenShift Console.
```
### GitHub Authentication
-GitHub authentication is the default authentication mechanism used for this reference architecture. GitHub authentication requires an OAuth application to be created. The values should reflect the hosted zone defined in Route53 for example the Homepage URL would be https://openshift-master.sysdeseng.com and Authorization callback URL is https://openshift-master.sysdeseng.com:8443/oauth2callback/github.
+GitHub authentication is the default authentication mechanism used for this reference architecture. GitHub authentication requires an OAuth application to be created. The values should reflect the hosted zone defined in Route53 for example the Homepage URL would be https://openshift-master.sysdeseng.com and Authorization callback URL is https://openshift-master.sysdeseng.com/oauth2callback/github.
### Region
The default region is us-east-1 but can be changed when running the ose-on-aws script by specifying --region=us-west-2 for example. The region must contain at least 3 Availability Zones.
|
Update version 0.8.3 -> 0.8.4
New Features
* `assert_bqm_almost_equal` function for testing
* `ScaleComposite`
* `sample_column` optional keyword argument for `SampleSet.to_pandas_df`
* `sample_dict_cast` optional keyword argument for `SampleSet.data`
Fixes
* `BQM.normalize` now ignores ignored variables/interactions when determining scale
# ================================================================================================
-__version__ = '0.8.3'
+__version__ = '0.8.4'
__author__ = 'D-Wave Systems Inc.'
__authoremail__ = '[email protected]'
__description__ = 'A shared API for binary quadratic model samplers.'
|
fix deepspeed + t5 error
The activation should have the same dtype as the weight; both should be FP16.
variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)
hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
- return self.weight * hidden_states
\ No newline at end of file
+ return self.weight * hidden_states.type_as(self.weight)
|
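A standalone sketch of the same dtype fix in plain PyTorch, so the effect can be checked outside the original module. The class name and the epsilon value below are assumptions, not taken from the repository.

```python
import torch
import torch.nn as nn


class RMSNormSketch(nn.Module):
    """Minimal T5-style layer norm; the eps value is an assumption."""

    def __init__(self, hidden_size, eps=1e-6):
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def forward(self, hidden_states):
        # Compute the variance in fp32 for numerical stability ...
        variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
        # ... then cast back so fp16 weights multiply fp16 activations.
        return self.weight * hidden_states.type_as(self.weight)


layer = RMSNormSketch(8).half()
out = layer(torch.randn(2, 8, dtype=torch.float16))
print(out.dtype)  # torch.float16, no fp16/fp32 mismatch
```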
Preferred url scheme
Added the flask preferred url scheme since we should always be running
behind an HTTPS reverse proxy. This only works when generating urls
using url_for outside of a request context (which is not super common),
but adding it to be sure. | @@ -16,6 +16,7 @@ class Config(object):
# Display Config
RESULTS_PER_PAGE = 100
PREVIEW_LENGTH = 100
+ PREFERRED_URL_SCHEME = 'https'
# Data store config
ELASTICSEARCH_URL = os.environ.get('ELASTICSEARCH_URL') or \
|
Make sure to commit when giving away files
This might help with | @@ -1360,6 +1360,7 @@ class CachingFileStore(AbstractFileStore):
# Record that.
self.cur.execute('UPDATE refs SET state = ? WHERE path = ? AND file_id = ?', ('mutable', localFilePath, fileStoreID))
self.cur.execute('DELETE FROM files WHERE id = ?', (fileStoreID,))
+ self.con.commit()
# Now we're done
return True
|
docs: Fix fluid-soundfont download link.
This fixes a dead download link and adds a link to list of other downloadable soundfonts. | @@ -66,8 +66,8 @@ install one by doing the following:
**Ubuntu:** Use the command `sudo apt-get install fluid-soundfont-gm`.<br />
**Mac:** Download the soundfont from
-http://www.musescore.org/download/fluid-soundfont.tar.gz and unpack the SF2
-file.
+ftp://ftp.osuosl.org/pub/musescore/soundfont/fluid-soundfont.tar.gz and unpack the SF2
+file or choose one from this list https://musescore.org/en/handbook/soundfonts-and-sfz-files#list.
## Set Up
|
Use addClassResourceCleanup in account service test
This patch is to use addClassResourceCleanup for the
account service test. | @@ -44,14 +44,13 @@ class AccountTest(base.BaseObjectTest):
for i in range(ord('a'), ord('f') + 1):
name = data_utils.rand_name(name='%s-' % six.int2byte(i))
cls.container_client.update_container(name)
+ cls.addClassResourceCleanup(base.delete_containers,
+ [name],
+ cls.container_client,
+ cls.object_client)
cls.containers.append(name)
cls.containers_count = len(cls.containers)
- @classmethod
- def resource_cleanup(cls):
- cls.delete_containers()
- super(AccountTest, cls).resource_cleanup()
-
@decorators.attr(type='smoke')
@decorators.idempotent_id('3499406a-ae53-4f8c-b43a-133d4dc6fe3f')
def test_list_containers(self):
|
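The register-cleanup-at-creation pattern above can be sketched with the standard library's `unittest.TestCase.addClassCleanup` (Python 3.8+) as an analogue for tempest's `addClassResourceCleanup`; the fake client below is a stand-in, not the real tempest API.

```python
import unittest


class FakeContainerClient:
    """Stand-in for the tempest container client; not the real API."""
    store = []

    def update_container(self, name):
        self.store.append(name)

    def delete_container(self, name):
        self.store.remove(name)


class AccountListSketch(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        cls.client = FakeContainerClient()
        cls.containers = []
        for name in ("a-container", "b-container"):
            cls.client.update_container(name)
            # Register the cleanup right where the resource is created,
            # instead of overriding a class-level teardown later.
            cls.addClassCleanup(cls.client.delete_container, name)
            cls.containers.append(name)

    def test_containers_created(self):
        self.assertEqual(len(self.containers), 2)


if __name__ == "__main__":
    unittest.main()
```

Registering each cleanup next to the resource it protects avoids the failure mode where an overridden teardown runs (or is skipped) for resources that were never created.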
CI: more aggressive cache invalidation
E.g. if we bump the Python version, we should not reuse the pip cache.
It is easiest to invalidate the cache if any build-specific file changes.
fingerprint_script:
- echo $CIRRUS_TASK_NAME
- find contrib/deterministic-build/*.txt -type f -print0 | sort -z | xargs -0 sha256sum | sha256sum
+ - find contrib/build-wine/ -type f -print0 | sort -z | xargs -0 sha256sum | sha256sum
populate_script: mkdir -p contrib/build-wine/.cache/win32/wine_pip_cache
dll_cache:
folder: contrib/build-wine/.cache/win32/dlls
@@ -186,6 +187,7 @@ task:
folder: packages
fingerprint_script:
- echo $CIRRUS_TASK_NAME && cat contrib/deterministic-build/requirements.txt && cat contrib/make_packages.sh
+ - find contrib/android/ -type f -print0 | sort -z | xargs -0 sha256sum | sha256sum
p4a_cache:
folders:
- ".buildozer/android/platform/build-$APK_ARCH/packages"
@@ -216,6 +218,7 @@ task:
folder: packages
fingerprint_script:
- echo $CIRRUS_TASK_NAME && cat contrib/deterministic-build/requirements.txt && cat contrib/make_packages.sh
+ - find contrib/android/ -type f -print0 | sort -z | xargs -0 sha256sum | sha256sum
p4a_cache:
folders:
- ".buildozer/android/platform/build-$APK_ARCH/packages"
@@ -241,6 +244,7 @@ task:
fingerprint_script:
- echo $CIRRUS_TASK_NAME
- find contrib/deterministic-build/*.txt -type f -print0 | sort -z | xargs -0 shasum -a 256 | shasum -a 256
+ - find contrib/osx/ -type f -print0 | sort -z | xargs -0 sha256sum | sha256sum
populate_script: mkdir -p ~/Library/Caches/pip
install_script:
- git fetch --all --tags
@@ -263,6 +267,7 @@ task:
fingerprint_script:
- echo $CIRRUS_TASK_NAME
- find contrib/deterministic-build/*.txt -type f -print0 | sort -z | xargs -0 sha256sum | sha256sum
+ - find contrib/build-linux/appimage/ -type f -print0 | sort -z | xargs -0 sha256sum | sha256sum
populate_script: mkdir -p contrib/build-linux/appimage/.cache/appimage/pip_cache
build_script:
- ./contrib/build-linux/appimage/make_appimage.sh
@@ -281,6 +286,7 @@ task:
fingerprint_script:
- echo $CIRRUS_TASK_NAME
- find contrib/deterministic-build/*.txt -type f -print0 | sort -z | xargs -0 sha256sum | sha256sum
+ - find contrib/build-linux/sdist/ -type f -print0 | sort -z | xargs -0 sha256sum | sha256sum
populate_script: mkdir -p ~/.cache/pip
build_script:
- ./contrib/build-linux/sdist/make_sdist.sh
|
Run sublime.set_timeout also for docs that are already resolved
For some reason ST does not like it when we show a popup from within
the run method of LspResolveDocs. | @@ -22,7 +22,8 @@ class LspResolveDocsCommand(sublime_plugin.TextCommand):
# don't show the detail in the cooperate AC popup if it is already shown in the AC details filed.
self.is_detail_shown = bool(detail)
minihtml_content = self.get_content(documentation, detail)
- self.show_popup(minihtml_content)
+ # NOTE: For some reason, ST does not like it when we show a popup from within this run method.
+ sublime.set_timeout(lambda: self.show_popup(minihtml_content))
if not detail or not documentation:
# To make sure that the detail or documentation fields doesn't exist we need to resove the completion item.
|
Drop test_dmp_zz_modular_resultant()
As an implicit rule, we don't test private methods. There is an
indirect test for the same f and g, but with p=7, in
test_PolyElement_subresultants().
g = t**2 + (x**2 + 2)/2
assert f.cancel(g) == ((-x**2 - 4)*t, 4*t**2 + 2*x**2 + 4)
-
-
-def test_dmp_zz_modular_resultant():
- R, x, y = ring('x,y', ZZ)
- R1 = R.drop(x)
-
- f = x + y + 2
- g = 2*x*y + x + 3
-
- assert R._modular_resultant(f, g, 5) == -2*R1.y**2 + 1
|
increase wholesale COST to 1,000 from 100
to force excess production into curtailment when wholesale compensation is zero
if sum(negative_wholesale_rate_costs) == 0:
# no export to grid benefit, so force excess energy into curtailment
- negative_wholesale_rate_costs = [100 for x in self.wholesale_rate]
+ negative_wholesale_rate_costs = [1000.0 for x in self.wholesale_rate]
# FuelRate = array(Tech, FuelBin, TimeStep) is the cost of electricity from each Tech, so 0's for PV, PVNM
energy_rates = []
|
Paginator Migration - Emoji and actions
Switched the emoji used to clear the reactions of a paginator from [":x:"]
to [":trashcan:"]. Clicking on this emoji deletes the message
LEFT_EMOJI = "\u2B05" # [:arrow_left:]
RIGHT_EMOJI = "\u27A1" # [:arrow_right:]
LAST_EMOJI = "\u23ED" # [:track_next:]
-DELETE_EMOJI = "\u274c" # [:x:]
+DELETE_EMOJI = "<:trashcan:637136429717389331>" # [:trashcan:]
PAGINATION_EMOJI = [FIRST_EMOJI, LEFT_EMOJI, RIGHT_EMOJI, LAST_EMOJI, DELETE_EMOJI]
@@ -113,7 +113,7 @@ class LinePaginator(Paginator):
# Reaction is on this message
reaction_.message.id == message.id,
# Reaction is one of the pagination emotes
- reaction_.emoji in PAGINATION_EMOJI,
+ str(reaction_.emoji) in PAGINATION_EMOJI, # Note: DELETE_EMOJI is a string and not unicode
# Reaction was not made by the Bot
user_.id != ctx.bot.user.id,
# There were no restrictions
@@ -185,7 +185,7 @@ class LinePaginator(Paginator):
log.debug("Timed out waiting for a reaction")
break # We're done, no reactions for the last 5 minutes
- if reaction.emoji == DELETE_EMOJI:
+ if str(reaction.emoji) == DELETE_EMOJI: # Note: DELETE_EMOJI is a string and not unicode
log.debug("Got delete reaction")
break
@@ -261,8 +261,8 @@ class LinePaginator(Paginator):
await message.edit(embed=embed)
- log.debug("Ending pagination and removing all reactions...")
- await message.clear_reactions()
+ log.debug("Ending pagination and deleting the message")
+ await message.delete()
class ImagePaginator(Paginator):
@@ -323,7 +323,7 @@ class ImagePaginator(Paginator):
# Reaction is on the same message sent
reaction_.message.id == message.id,
# The reaction is part of the navigation menu
- reaction_.emoji in PAGINATION_EMOJI,
+ str(reaction_.emoji) in PAGINATION_EMOJI, # Note: DELETE_EMOJI is a string and not unicode
# The reactor is not a bot
not member.bot
))
@@ -369,8 +369,8 @@ class ImagePaginator(Paginator):
# Deletes the users reaction
await message.remove_reaction(reaction.emoji, user)
- # Delete reaction press - [:x:]
- if reaction.emoji == DELETE_EMOJI:
+ # Delete reaction press - [:trashcan:]
+ if str(reaction.emoji) == DELETE_EMOJI: # Note: DELETE_EMOJI is a string and not unicode
log.debug("Got delete reaction")
break
@@ -424,5 +424,5 @@ class ImagePaginator(Paginator):
await message.edit(embed=embed)
- log.debug("Ending pagination and removing all reactions...")
- await message.clear_reactions()
+ log.debug("Ending pagination and deleting the message")
+ await message.delete()
|
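A library-free sketch of why the `str()` cast matters: custom emoji arrive as emoji objects whose string form is `<:name:id>`, while unicode emoji are already plain strings, so a direct membership test against a list of strings fails for the custom trash-can emoji. The `FakePartialEmoji` class below is a stand-in for discord.py's emoji object.

```python
# Library-free sketch: a custom emoji is an object whose string form is
# "<:name:id>", while unicode emoji are plain strings already.
class FakePartialEmoji:
    def __init__(self, name, emoji_id):
        self.name, self.id = name, emoji_id

    def __str__(self):
        return f"<:{self.name}:{self.id}>"


DELETE_EMOJI = "<:trashcan:637136429717389331>"
PAGINATION_EMOJI = ["\u23EE", "\u2B05", "\u27A1", "\u23ED", DELETE_EMOJI]

reaction_emoji = FakePartialEmoji("trashcan", 637136429717389331)
print(reaction_emoji in PAGINATION_EMOJI)        # False: object vs. strings
print(str(reaction_emoji) in PAGINATION_EMOJI)   # True after casting to str
```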
[Test] Use a flag to separate integration test from sample test
* fork the sample test workflow
* Revert "fork the sample test workflow"
This reverts commit
* update sample-test workflow def
* update
* Unconditionally run int test | @@ -29,6 +29,8 @@ spec:
value: sample-tests
- name: namespace
value: kubeflow
+ - name: is-integration-test
+ value: "false"
templates:
- name: sample-test
inputs:
@@ -37,6 +39,7 @@ spec:
- name: test-results-gcs-dir
- name: sample-tests-image-suffix
- name: namespace
+ - name: is-integration-test
steps:
- - name: build-sample-tests-image
template: build-image-by-dockerfile
@@ -63,29 +66,46 @@ spec:
- name: test-name
value: "{{item}}"
withItems:
- - xgboost_training_cm
- lightweight_component
- dsl_static_type_checking
- pipeline_transformers
- secret
- sidecar
- - dataflow
- execution_order
- imagepullsecrets
- retry
- artifact_location
- preemptible_tpu_gpu
- volume_snapshot_ops
- - kubeflow_tf_serving
- - container_build
- loop_output
- loop_parameter
- loop_static
- resource_ops
- multiple_outputs
+ when: "{{inputs.parameters.is-integration-test}} == false"
+ - name: run-integration-tests-loop
+ template: run-sample-tests
+ arguments:
+ parameters:
+ - name: namespace
+ value: "{{inputs.parameters.namespace}}"
+ - name: sample-tests-image
+ value: "{{inputs.parameters.target-image-prefix}}{{inputs.parameters.sample-tests-image-suffix}}"
+ - name: target-image-prefix
+ value: "{{inputs.parameters.target-image-prefix}}"
+ - name: test-results-gcs-dir
+ value: "{{inputs.parameters.test-results-gcs-dir}}"
+ - name: test-name
+ value: "{{item}}"
+ withItems:
+ - xgboost_training_cm
+ - dataflow
- ai_platform
+ - kubeflow_tf_serving
+ - container_build
- parameterized_tfx_oss
- iris
+ # when: "{{inputs.parameters.is-integration-test}} == true"
# Build and push image
- name: build-image-by-dockerfile
retryStrategy:
|
FAQ update for instructions to fix timeout errors
Fixed formatting with this commit. | @@ -178,7 +178,9 @@ What should I do if validation or grading of a notebook fails with a "Timeout wa
---------------------------------------------------------------------------------------------------------------
This occurs because the validator or autograder is taking too long to validate or autograde your notebook. This
can be fixed by adding the following line to nbgrader_config.py:
+
.. code:: python
+
# increase timeout to 60 seconds
c.ExecutePreprocessor.timeout = 60
|
Add fingerprint for 2019 Honda Civic Hatchback
Honda Civic Hatchback 1.0T Elegance (Europe - Poland) | @@ -414,6 +414,7 @@ FW_VERSIONS = {
b'37805-5AG-Z910\x00\x00',
b'37805-5AJ-A750\x00\x00',
b'37805-5AJ-L750\x00\x00',
+ b'37805-5AK-T530\x00\x00',
b'37805-5AN-A750\x00\x00',
b'37805-5AN-A830\x00\x00',
b'37805-5AN-A840\x00\x00',
@@ -474,6 +475,7 @@ FW_VERSIONS = {
b'28101-5DJ-A710\x00\x00',
b'28101-5DV-E330\x00\x00',
b'28101-5DV-E610\x00\x00',
+ b'28101-5DV-E820\x00\x00',
],
(Ecu.vsa, 0x18da28f1, None): [
b'57114-TBG-A330\x00\x00',
@@ -505,6 +507,7 @@ FW_VERSIONS = {
b'77959-TGG-A020\x00\x00',
b'77959-TGG-A030\x00\x00',
b'77959-TGG-G010\x00\x00',
+ b'77959-TGG-G110\x00\x00',
b'77959-TGG-J320\x00\x00',
b'77959-TGG-Z820\x00\x00',
],
@@ -535,6 +538,7 @@ FW_VERSIONS = {
b'78109-TGL-G120\x00\x00',
b'78109-TGL-G130\x00\x00',
b'78109-TGL-G230\x00\x00',
+ b'78109-TGL-GM10\x00\x00',
],
(Ecu.fwdRadar, 0x18dab0f1, None): [
b'36802-TBA-A150\x00\x00',
@@ -557,6 +561,7 @@ FW_VERSIONS = {
b'36161-TGG-A120\x00\x00',
b'36161-TGG-G050\x00\x00',
b'36161-TGG-G130\x00\x00',
+ b'36161-TGG-G140\x00\x00',
b'36161-TGK-Q120\x00\x00',
b'36161-TGL-G050\x00\x00',
b'36161-TGL-G070\x00\x00',
|
require --cluster-path and create it if it doesn't exist
Raise a ClusterPathNotProvidedError when --cluster-path
is not given on the command line. This will also create
the given --cluster-path if it does not exist. | @@ -12,6 +12,7 @@ import random
import ocs
from ocsci import config as ocsci_config
+from ocsci.exceptions import ClusterPathNotProvidedError
__all__ = [
"pytest_addoption",
@@ -99,6 +100,10 @@ def process_cluster_cli_params(config):
"""
cluster_path = get_cli_param(config, 'cluster_path')
+ if not cluster_path:
+ raise ClusterPathNotProvidedError()
+ if not os.path.exists(cluster_path):
+ os.makedirs(cluster_path)
# Importing here cause once the function is invoked we have already config
# loaded, so this is OK to import once you sure that config is loaded.
from oc.openshift_ops import OCP
|
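A minimal, self-contained sketch of the validation pattern added above: fail fast when the path is missing, otherwise create the directory. The exception name mirrors the diff; the surrounding function is a stand-in for the real `process_cluster_cli_params`.

```python
import os


class ClusterPathNotProvidedError(Exception):
    """Raised when --cluster-path is not given on the command line."""


def process_cluster_path(cluster_path):
    # Fail fast if the path is missing, otherwise make sure the directory
    # exists before the run continues (mirrors the logic in the diff).
    if not cluster_path:
        raise ClusterPathNotProvidedError("--cluster-path is required")
    if not os.path.exists(cluster_path):
        os.makedirs(cluster_path)
    return cluster_path


print(process_cluster_path("/tmp/example-cluster-dir"))
```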
Typo in mathematical expression of Attention in DNA
Replace Q with Theta | @@ -185,9 +185,9 @@ class DNAConv(MessagePassing):
.. math::
\mathbf{x}_{v \leftarrow w}^{(t)} = \textrm{Attention} \left(
\mathbf{x}^{(t-1)}_v \, \mathbf{\Theta}_Q^{(t)}, [\mathbf{x}_w^{(1)},
- \ldots, \mathbf{x}_w^{(t-1)}] \, \mathbf{Q}_K^{(t)}, \,
+ \ldots, \mathbf{x}_w^{(t-1)}] \, \mathbf{\Theta}_K^{(t)}, \,
[\mathbf{x}_w^{(1)}, \ldots, \mathbf{x}_w^{(t-1)}] \,
- \mathbf{Q}_V^{(t)} \right)
+ \mathbf{\Theta}_V^{(t)} \right)
with :math:`\mathbf{\Theta}_Q^{(t)}, \mathbf{\Theta}_K^{(t)},
\mathbf{\Theta}_V^{(t)}` denoting (grouped) projection matrices for query,
|
bug fix: type mismatch
`int` has no attribute `item`
return pp
def train(model, G):
- best_val_acc = 0
- best_test_acc = 0
- train_step = 0
+ best_val_acc = torch.tensor(0)
+ best_test_acc = torch.tensor(0)
+ train_step = torch.tensor(0)
for epoch in np.arange(args.n_epoch) + 1:
model.train()
logits = model(G, 'paper')
|
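A small sketch of the mismatch the fix addresses: downstream code (logging, `.item()`, tensor comparisons) expects the accumulators to be tensors, so initialising them as plain Python ints breaks. Variable names are illustrative.

```python
import torch

best_val_acc = 0                       # plain int accumulator
val_acc = torch.tensor(0.83)

try:
    print(best_val_acc.item())         # logging code that expects a tensor
except AttributeError as err:
    print("fails:", err)               # 'int' object has no attribute 'item'

best_val_acc = torch.tensor(0)         # the fix: start the accumulator as a tensor
if val_acc > best_val_acc:
    best_val_acc = val_acc
print(best_val_acc.item())
```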
verifier: handle SIGINT and SIGTERM correctly
We now ensure that the revocation notifier is started and stopped by the
same process. | SPDX-License-Identifier: Apache-2.0
Copyright 2017 Massachusetts Institute of Technology.
'''
-
+import signal
import traceback
import sys
import functools
@@ -1007,26 +1007,27 @@ def main():
context = cloud_verifier_common.init_mtls()
- # after TLS is up, start revocation notifier
- if config.getboolean('cloud_verifier', 'revocation_notifier'):
- logger.info("Starting service for revocation notifications on port %s", config.getint('cloud_verifier', 'revocation_notifier_port'))
- revocation_notifier.start_broker()
-
sockets = tornado.netutil.bind_sockets(
int(cloudverifier_port), address=cloudverifier_host)
- task_id = tornado.process.fork_processes(config.getint(
- 'cloud_verifier', 'multiprocessing_pool_num_workers'))
- asyncio.set_event_loop(asyncio.new_event_loop())
- # Auto reactivate agent
- if task_id == 0:
- asyncio.ensure_future(activate_agents(cloudverifier_id, cloudverifier_host, cloudverifier_port))
server = tornado.httpserver.HTTPServer(app, ssl_options=context, max_buffer_size=max_upload_size)
server.add_sockets(sockets)
+ signal.signal(signal.SIGTERM, lambda *_: sys.exit(0))
+
try:
- tornado.ioloop.IOLoop.instance().start()
- except KeyboardInterrupt:
- tornado.ioloop.IOLoop.instance().stop()
+ server.start(config.getint('cloud_verifier', 'multiprocessing_pool_num_workers'))
+ if tornado.process.task_id() == 0:
+ # Start the revocation notifier only on one process
if config.getboolean('cloud_verifier', 'revocation_notifier'):
+ logger.info("Starting service for revocation notifications on port %s",
+ config.getint('cloud_verifier', 'revocation_notifier_port'))
+ revocation_notifier.start_broker()
+ # Auto activate agents
+ asyncio.ensure_future(activate_agents(cloudverifier_id, cloudverifier_host, cloudverifier_port))
+
+ tornado.ioloop.IOLoop.current().start()
+ except (KeyboardInterrupt, SystemExit):
+ tornado.ioloop.IOLoop.current().stop()
+ if tornado.process.task_id() == 0 and config.getboolean('cloud_verifier', 'revocation_notifier'):
revocation_notifier.stop_broker()
|
change: rebase test default target env to py310
Rebase the test default target environment to python 3.10. | @@ -7,8 +7,8 @@ python =
3.6: py36
3.7: py37
3.8: py38
- 3.9: py39, type-check, lint, plugins, min
- 3.10: py310
+ 3.9: py39
+ 3.10: py310, type-check, lint, plugins, min
[flake8]
exclude = .git,.tox,dist,*egg,setup.py
|
Image size reduction
There is no need for the DEBIAN_FRONTEND variable; it is declared when the setup_16.x script is executed.
Adding the --no-install-recommends flag when installing apt packages saves about 600 megabytes of disk space!
ADD . /app
-ENV DEBIAN_FRONTEND=noninteractive
RUN curl -sL https://deb.nodesource.com/setup_16.x | bash - && \
# install prequired modules to support install of mlflow and related components
- apt-get install -y nodejs build-essential openjdk-11-jre-headless \
+ apt-get install -y --no-install-recommends nodejs build-essential openjdk-11-jre-headless \
# cmake and protobuf-compiler required for onnx install
cmake protobuf-compiler && \
# install required python packages
pip install --no-cache-dir -r requirements/dev-requirements.txt && \
# install mlflow in editable form
pip install --no-cache-dir -e . && \
- # Build MLflow UI
+ # build MLflow UI
npm install --global yarn && \
cd mlflow/server/js && \
yarn install && \
yarn build && \
- # clear cache
+ # clean cache
apt-get autoremove -yqq --purge && apt-get clean && rm -rf /var/lib/apt/lists/* && \
npm cache clean --force && \
yarn cache clean --all
|
chore: platform specific syntax for pulsar-client
1. use pulsar-client == 2.10.0 for macOS since 2.10.1 for macOS is not published
2. remove markdown since 3.3.5 is yanked | pip>=21
apsw<3.10
importlib_metadata<2.0.0
-markdown==3.3.5
pkginfo==1.7.1
beautifultable==1.0.0
cachetools==3.0.0
@@ -46,7 +45,8 @@ cryptography==3.3.2
sortedcontainers==2.2.2
pytorch-lightning>=1.6.5
filelock==3.3.1
-pulsar-client==2.10.1
+pulsar-client==2.10.1; sys_platform == "linux"
+pulsar-client==2.10.0; sys_platform == "darwin"
fastavro==1.4.1
lightgbm==3.3.1
etaf-crypto
@@ -70,4 +70,3 @@ prettytable>=1.0.0,<2.0.0
setuptools>=50.0,<51.0
sshtunnel>=0.1.5,<0.2.0
packaging<21.0,>=20.4
-
|
Enable menu if two factor or webauthn is enabled
The menu shows links for both features, so it makes sense to show the menu if the settings are enabled.
+{% if security.registerable or security.recoverable or security.confirmable or security.unified_signin or security.two_factor or security.webauthn %}
<hr>
<h2>{{ _fsdomain('Menu') }}</h2>
<ul>
|
fix<tickets>: Force HELPDESK_PUBLIC_TICKET_QUEUE for anon tickets
Before: we set an initial value for the widget and had it hidden, so the user could still change the queue with some HTML knowledge.
Now: we drop the field entirely and assign the queue directly, ignoring the POST request content for the "queue" field.
self.customfield_to_field(field, instanceargs)
+ def _get_queue(self):
+ # this procedure is re-defined for anon submission form
+ return Queue.objects.get(id=int(self.cleaned_data['queue']))
+
def _create_ticket(self):
- queue = Queue.objects.get(id=int(self.cleaned_data['queue']))
+ queue = self._get_queue()
ticket = Ticket(title=self.cleaned_data['title'],
submitter_email=self.cleaned_data['submitter_email'],
@@ -338,15 +342,29 @@ class PublicTicketForm(AbstractTicketForm):
Add any (non-staff) custom fields that are defined to the form
"""
super(PublicTicketForm, self).__init__(*args, **kwargs)
-
if hasattr(settings, 'HELPDESK_PUBLIC_TICKET_QUEUE'):
- self.fields['queue'].widget = forms.HiddenInput()
+ del self.fields['queue']
+ else:
+ self.fields['queue'].choices = [
+ ('', '--------')
+ ] + [
+ (q.id, q.title) for q in Queue.objects.filter(allow_public_submission=True)
+ ]
if hasattr(settings, 'HELPDESK_PUBLIC_TICKET_PRIORITY'):
self.fields['priority'].widget = forms.HiddenInput()
if hasattr(settings, 'HELPDESK_PUBLIC_TICKET_DUE_DATE'):
self.fields['due_date'].widget = forms.HiddenInput()
- self.fields['queue'].choices = [('', '--------')] + [
- (q.id, q.title) for q in Queue.objects.filter(allow_public_submission=True)]
+
+ def _get_queue(self):
+ if getattr(settings, 'HELPDESK_PUBLIC_TICKET_QUEUE', None):
+ # force queue to be the pre-defined one
+ # (only for anon submissions)
+ return Queue.objects.filter(
+ slug=settings.HELPDESK_PUBLIC_TICKET_QUEUE
+ ).first()
+ else:
+ # get the queue user entered
+ return Queue.objects.get(id=int(self.cleaned_data['queue']))
def save(self):
"""
|
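A library-free sketch of the hook pattern used above: the base form resolves the queue from submitted data, while the public form overrides `_get_queue` so anonymous submissions always land in the server-configured queue, regardless of what was POSTed. The class and setting names below are simplified stand-ins for the Django originals.

```python
# Stand-in for settings.HELPDESK_PUBLIC_TICKET_QUEUE (illustrative value).
FORCED_PUBLIC_QUEUE = "support"


class AbstractTicketForm:
    def __init__(self, cleaned_data):
        self.cleaned_data = cleaned_data

    def _get_queue(self):
        # Staff forms trust the submitted queue.
        return self.cleaned_data["queue"]

    def create_ticket(self):
        return {"queue": self._get_queue(), "title": self.cleaned_data["title"]}


class PublicTicketForm(AbstractTicketForm):
    def _get_queue(self):
        # Anonymous submissions always land in the configured queue,
        # no matter what was POSTed for "queue".
        return FORCED_PUBLIC_QUEUE


print(AbstractTicketForm({"queue": "billing", "title": "x"}).create_ticket())
print(PublicTicketForm({"queue": "billing", "title": "x"}).create_ticket())
```

Overriding the hook rather than hiding the widget means the server, not the client, decides the queue for anonymous tickets.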