message | diff
---|---
Change eos BGP_PREFIX_THRESH_EXCEEDED error
The error `BGP_PREFIX_THRESH_EXCEEDED` under `eos` should be
`BGP_PREFIX_LIMIT_EXCEEDED`. This is because the message listed only
occurs after the limit has been exceeded. | @@ -16,7 +16,7 @@ prefix:
messages:
# 'error' should be unique and vendor agnostic. Currently we are using the JUNOS syslog message name as the canonical name.
# This may change if we are able to find a more well defined naming system.
- - error: BGP_PREFIX_THRESH_EXCEEDED
+ - error: BGP_PREFIX_LIMIT_EXCEEDED
tag: BGP-3-NOTIFICATION
values:
peer: (\d+\.\d+\.\d+\.\d+)
|
- push adding comment to instance to Extractor phase
Pyblish allows modifying the comment after the collect phase, e.g. a collector wouldn't pick it up.
Should be pushed back to Collect phase after Pyblish is eradicated. | @@ -73,7 +73,9 @@ class CollectComment(
"""
label = "Collect Instance Comment"
- order = pyblish.api.CollectorOrder + 0.49
+ # TODO change to CollectorOrder after Pyblish is purged
+ # Pyblish allows modifying comment after collect phase
+ order = pyblish.api.ExtractorOrder - 0.49
def process(self, context):
context_comment = self.cleanup_comment(context.data.get("comment"))
|
PR feedback
Fix wrong function name
Add more test for `_broadcast_block` | @@ -368,11 +368,11 @@ async def test_bcc_receive_server_process_received_block(request, event_loop, mo
class OtherException(Exception):
pass
- def import_block_raises_validation_error(block, performa_validation=True):
+ def import_block_raises_other_exception_error(block, performa_validation=True):
raise OtherException
with monkeypatch.context() as m:
- m.setattr(bob_recv_server.chain, 'import_block', import_block_raises_validation_error)
+ m.setattr(bob_recv_server.chain, 'import_block', import_block_raises_other_exception_error)
with pytest.raises(OtherException):
bob_recv_server._process_received_block(block_not_orphan)
@@ -398,6 +398,16 @@ async def test_bcc_receive_server_broadcast_block(request, event_loop, monkeypat
alice_msg_buffer = MsgBuffer()
alice.add_subscriber(alice_msg_buffer)
+ # test: with `from_peer=alice`, bob broadcasts to all peers except alice. Therefore, alice
+ # should fail to receive the block.
+ bob_peers = bob_recv_server._peer_pool.connected_nodes.values()
+ assert len(bob_peers) == 1
+ alice_in_bobs_peer_pool = tuple(bob_peers)[0]
+ # NOTE: couldn't use `alice` directly, `from_peer` should be a `BCCPeer` in its PeerPool
+ bob_recv_server._broadcast_block(block_orphan, from_peer=alice_in_bobs_peer_pool)
+ with pytest.raises(asyncio.TimeoutError):
+ await asyncio.wait_for(alice_msg_buffer.msg_queue.get(), 0.1)
+
# test: with `from_peer=None` it broadcasts the block to all bob's peers. Try the orphan block
# first.
bob_recv_server._broadcast_block(block_orphan, from_peer=None)
|
Update position of vias and metal in pn junction
Currently vias and metal sit in the middle of the N++ and P++ regions, which isn't how folks normally design PN junctions.
Metal and vias have been moved to the outside of the junctions now. | @@ -773,14 +773,14 @@ def pn(
sections.append(ppp)
if layer_via is not None:
- offset = width_high_doping / 2 + gap_high_doping
+ offset = width_high_doping + gap_high_doping - width_via/2
via_top = Section(width=width_via, offset=+offset, layer=layer_via)
via_bot = Section(width=width_via, offset=-offset, layer=layer_via)
sections.append(via_top)
sections.append(via_bot)
if layer_metal is not None:
- offset = width_high_doping / 2 + gap_high_doping
+ offset = width_high_doping + gap_high_doping - width_metal/2
port_types = ("electrical", "electrical")
metal_top = Section(
width=width_via,
|
Corrected an error in "Upgrading PyPSA"
It says "pip install -U pandas" instead of "... pypsa" | @@ -136,7 +136,7 @@ We recommend always keeping your PyPSA installation up-to-date, since
bugs get fixed and new features are added. To upgrade PyPSA with pip,
do at the command line::
- pip install -U pandas
+ pip install -U pypsa
Don't forget to read the :doc:`release_notes` regarding API changes
that might require you to update your code.
|
Test model config validation
The following commit introduces a new set of tests for model config
where we attempt to send a json object as a string to the model and
then check the result. | import asyncio
+import json
import os
+import random
+import string
import time
import uuid
from concurrent.futures import ThreadPoolExecutor
@@ -14,8 +17,7 @@ from juju.client.client import ApplicationFacade, ConfigValue
from juju.errors import JujuError
from juju.model import Model, ModelObserver
from juju.utils import block_until, run_with_interrupt
-import random
-import string
+
from .. import base
MB = 1
@@ -576,6 +578,9 @@ async def test_watcher_reconnect(event_loop):
@pytest.mark.asyncio
async def test_config(event_loop):
async with base.CleanModel() as model:
+ # first test get_config with nothing.
+ result = await model.get_config()
+ assert 'extra-info' not in result
await model.set_config({
'extra-info': 'booyah',
'test-mode': ConfigValue(value=True),
@@ -586,6 +591,26 @@ async def test_config(event_loop):
assert result['extra-info'].value == 'booyah'
+@base.bootstrapped
+@pytest.mark.asyncio
+async def test_config_with_json(event_loop):
+ async with base.CleanModel() as model:
+ # first test get_config with nothing.
+ result = await model.get_config()
+ assert 'extra-complex-info' not in result
+ # test model config with more complex data
+ expected = ['foo', {'bar': 1}]
+ await model.set_config({
+ 'extra-complex-info': json.dumps(expected),
+ 'test-mode': ConfigValue(value=True),
+ })
+ result = await model.get_config()
+ assert 'extra-complex-info' in result
+ assert result['extra-complex-info'].source == 'model'
+ received = json.loads(result['extra-complex-info'].value)
+ assert received == expected
+
+
@base.bootstrapped
@pytest.mark.asyncio
async def test_set_constraints(event_loop):
|
elchecking: fix standalone program
parse_binary_bootlog(..) now returns also a Failure object. | @@ -21,7 +21,7 @@ refstate_str = args.refstate_file.read()
refstate = json.loads(refstate_str)
log_bin = args.eventlog_file.read()
tpm = tpm_main.tpm()
-log_data = tpm.parse_binary_bootlog(log_bin)
+_, log_data = tpm.parse_binary_bootlog(log_bin)
with open("/tmp/parsed.json", "wt", encoding="utf-8") as log_data_file:
log_data_file.write(json.dumps(log_data, indent=True))
why_not = policy.evaluate(refstate, log_data)
|
BaseAction: enable overriding specific email fields
This may come in useful for cases where the email template is treated as a
string, e.g. it cannot define multiple recipient ("To:") addresses. | @@ -82,6 +82,42 @@ class BaseAction:
pass
return ctx
+ def subject(self):
+ """Overwrite in order to set own subject from descending Action."""
+ return ""
+
+ def sender(self):
+ """Overwrite in order to set own sender from descending Action."""
+ return ""
+
+ def recipients(self):
+ """Overwrite in order to set own recipients from descending Action."""
+ return None
+
+ def cc_recipients(self):
+ """Overwrite in order to set own CC recipients from descending
+ Action."""
+ return None
+
+ def bcc_recipients(self):
+ """Overwrite in order to set own BCC recipients from descending
+ Action."""
+ return None
+
+ def reply_to(self):
+ """Overwrite in order to set own reply-to from descending Action."""
+ return ""
+
+ def email_text(self):
+ """Overwrite in order to set own email text body from descending
+ Action."""
+ return ""
+
+ def email_html(self):
+ """Overwrite in order to set own email HTML body from descending
+ Action."""
+ return ""
+
def _context(self, additional_context: Optional[Dict] = None) -> Dict:
"""Prepare general context for lazy-evaluated email message used later
on."""
@@ -103,7 +139,17 @@ class BaseAction:
# build email
self.logger.debug('Building email with provided context...')
- email = self.template.build_email(context=self.context)
+ email = self.template.build_email(
+ subject=self.subject(),
+ sender=self.sender(),
+ recipients=self.recipients(),
+ cc_recipients=self.cc_recipients(),
+ bcc_recipients=self.bcc_recipients(),
+ reply_to=self.reply_to(),
+ text=self.email_text(),
+ html=self.email_html(),
+ context=self.context,
+ )
return email
def __call__(self, *args, **kwargs):
|
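A hypothetical subclass (names invented here) showing how the new hooks are intended to be used; the stub `BaseAction` only mimics the hook API from the diff above so the sketch runs on its own:

```python
# Hypothetical usage sketch; this stub BaseAction only mirrors the hook API
# added in the diff so the example is self-contained.
class BaseAction:
    def subject(self):
        return ""

    def recipients(self):
        return None


class PasswordResetAction(BaseAction):
    def subject(self):
        return "Password reset requested"

    def recipients(self):
        # Multiple "To:" addresses, which a plain string template cannot express.
        return ["user@example.com", "audit@example.com"]


action = PasswordResetAction()
print(action.subject(), action.recipients())
```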
[Release] Update index.json for extension [ webpubsub ]
Triggered by Azure CLI Extensions Release Pipeline - ADO_BUILD_URL:
Last commit: | "version": "1.1.0"
},
"sha256Digest": "802e829313a4993702d114a94eb8b119a376085d92b0a7860fd2c308e73112f6"
+ },
+ {
+ "downloadUrl": "https://azcliprod.blob.core.windows.net/cli-extensions/webpubsub-1.2.0-py3-none-any.whl",
+ "filename": "webpubsub-1.2.0-py3-none-any.whl",
+ "metadata": {
+ "azext.isPreview": false,
+ "azext.minCliCoreVersion": "2.39.0",
+ "classifiers": [
+ "Development Status :: 4 - Beta",
+ "Intended Audience :: Developers",
+ "Intended Audience :: System Administrators",
+ "Programming Language :: Python",
+ "Programming Language :: Python :: 3",
+ "Programming Language :: Python :: 3.7",
+ "Programming Language :: Python :: 3.8",
+ "License :: OSI Approved :: MIT License"
+ ],
+ "extensions": {
+ "python.details": {
+ "contacts": [
+ {
+ "email": "[email protected]",
+ "name": "Microsoft Corporation",
+ "role": "author"
+ }
+ ],
+ "document_names": {
+ "description": "DESCRIPTION.rst"
+ },
+ "project_urls": {
+ "Home": "https://github.com/Azure/azure-cli-extensions/tree/main/src/webpubsub"
+ }
+ }
+ },
+ "extras": [],
+ "generator": "bdist_wheel (0.30.0)",
+ "license": "MIT",
+ "metadata_version": "2.0",
+ "name": "webpubsub",
+ "run_requires": [
+ {
+ "requires": [
+ "websockets (~=10.4)"
+ ]
+ }
+ ],
+ "summary": "Microsoft Azure Command-Line Tools Webpubsub Extension",
+ "version": "1.2.0"
+ },
+ "sha256Digest": "faf83397b2215c7686ae7b3e93a576c3fd0aa9c70173ab72be66d2089b84b280"
}
]
},
|
LOG.exception for mech dict extend failure
We generate a new exception here instead of re-raising
so we need to use LOG.exception to make debugging easier.
TrivialFix | @@ -937,7 +937,7 @@ class ExtensionManager(stevedore.named.NamedExtensionManager):
try:
getattr(driver.obj, method_name)(session, base_model, result)
except Exception:
- LOG.error(_LE("Extension driver '%(name)s' failed in "
+ LOG.exception(_LE("Extension driver '%(name)s' failed in "
"%(method)s"),
{'name': driver.name, 'method': method_name})
raise ml2_exc.ExtensionDriverError(driver=driver.name)
|
Move housenumber indexer before field indexer
If it comes later, it will override the fields boost. | @@ -77,9 +77,9 @@ RESULTS_FORMATTERS = [
'addok.helpers.formatters.geojson',
]
INDEXERS = [
+ 'addok.helpers.index.housenumbers_indexer',
'addok.helpers.index.fields_indexer',
'addok.helpers.index.filters_indexer',
- 'addok.helpers.index.housenumbers_indexer',
'addok.helpers.index.document_indexer',
]
DEINDEXERS = [
|
Reduce the number of days an issue is stale by 25
This reduces the amount of time before an issue becomes stale to
a little less than 3 years and 2 months. | # Probot Stale configuration file
# Number of days of inactivity before an issue becomes stale
-# 1200 is approximately 3 years and 3 months
-daysUntilStale: 1200
+# 1175 is approximately 3 years and 2 months
+daysUntilStale: 1175
# Number of days of inactivity before a stale issue is closed
daysUntilClose: 7
|
Explain `adq_per_quantity` in a comment.
Addresses review feedback from | @@ -351,6 +351,17 @@ class Presentation(models.Model):
is_generic = models.NullBooleanField(default=None)
is_current = models.BooleanField(default=True)
replaced_by = models.ForeignKey('self', null=True, blank=True)
+
+ # An ADQ is the assumed average maintenance dose per day for a
+ # drug used for its main indication in adults.
+ #
+ # If a presentation's ADQ is "20mg", and its `quantity` field is
+ # measured in 10 mg tablets, then the `adq_per_quantity` would be
+ # 2. In other words, `adq_per_quantity` is a factor to apply to
+ # `quantity`, to obtain an ADQ.
+ #
+ # See https://github.com/ebmdatalab/openprescribing/issues/934 for
+ # more detail
adq_per_quantity = models.FloatField(null=True, blank=True)
objects = PresentationManager()
|
Upgrade NVIDIA driver on CI to 430.40
Summary:
Pull Request resolved:
Test Plan: Imported from OSS | @@ -45,7 +45,7 @@ retry () {
retry sudo pip -q install awscli==1.16.35
if [ -n "${USE_CUDA_DOCKER_RUNTIME:-}" ]; then
- DRIVER_FN="NVIDIA-Linux-x86_64-410.104.run"
+ DRIVER_FN="NVIDIA-Linux-x86_64-430.40.run"
wget "https://s3.amazonaws.com/ossci-linux/nvidia_driver/$DRIVER_FN"
sudo /bin/bash "$DRIVER_FN" -s --no-drm || (sudo cat /var/log/nvidia-installer.log && false)
nvidia-smi
|
add middleware tutorial
and blank line 0_0 | @@ -365,6 +365,7 @@ try:
except ImportError:
macro = None
+
class MacroRule(ABCMessageRule):
def __init__(self, pattern: Union[str, List[str]]):
if macro is None:
|
[internal] BSP: support shutdown method and exit notification
Add support for `build/shutdown` method (which no-ops) and the `build/exit` notification which instructs the BSP server to immediately exit.
[ci skip-rust]
[ci skip-build-wheels] | @@ -69,6 +69,8 @@ def _make_error_future(exc: Exception) -> Future:
class BSPConnection:
_INITIALIZE_METHOD_NAME = "build/initialize"
+ _SHUTDOWN_METHOD_NAME = "build/shutdown"
+ _EXIT_NOTIFICATION_NAME = "build/exit"
def __init__(
self,
@@ -124,6 +126,20 @@ class BSPConnection:
)
)
+ # Handle the `build/shutdown` method and `build/exit` notification.
+ if method_name == self._SHUTDOWN_METHOD_NAME:
+ # Return no-op success for the `build/shutdown` method. This doesn't actually cause the server to
+ # exit. That will occur once the client sends the `build/exit` notification.
+ return None
+ elif method_name == self._EXIT_NOTIFICATION_NAME:
+ # The `build/exit` notification directs the BSP server to immediately exit.
+ # The read-dispatch loop will exit once it notices that the inbound handle is closed. So close the
+ # inbound handle (and outbound handle for completeness) and then return to the dispatch loop
+ # to trigger the exit.
+ self._inbound.close()
+ self._outbound.close()
+ return None
+
method_mapping = self._handler_mappings.get(method_name)
if not method_mapping:
return _make_error_future(JsonRpcMethodNotFound.of(method_name))
|
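For orientation, here is a minimal, self-contained sketch of the shutdown/exit pattern the diff above describes: acknowledge `build/shutdown` as a no-op and close the transport handles on `build/exit` so the read loop observes EOF and stops. The class and the in-memory handles are invented for illustration; this is not Pants' actual BSP code.

```python
# Toy illustration of the build/shutdown vs build/exit handling.
import io

class ToyBSPServer:
    SHUTDOWN = "build/shutdown"
    EXIT = "build/exit"

    def __init__(self, inbound, outbound):
        self._inbound = inbound
        self._outbound = outbound

    def dispatch(self, method_name):
        if method_name == self.SHUTDOWN:
            # Acknowledge but keep serving until the exit notification arrives.
            return None
        if method_name == self.EXIT:
            # Closing the handles makes the read-dispatch loop terminate.
            self._inbound.close()
            self._outbound.close()
            return None
        raise KeyError(f"unknown method: {method_name}")

server = ToyBSPServer(io.BytesIO(), io.BytesIO())
server.dispatch("build/shutdown")
server.dispatch("build/exit")
print(server._inbound.closed)  # True
```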
Fix the java installation in the DeepVariant OSS prereqs, which is
currently failing on Debian. | @@ -65,7 +65,8 @@ if ! java -version 2>&1 | fgrep "1.8"; then
[[ $(lsb_release -d | grep 'Debian') ]] && \
sudo -H apt-get install -y gnupg dirmngr && \
sudo -H apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys EEA14886
- sudo add-apt-repository -y ppa:webupd8team/java
+ echo "deb http://ppa.launchpad.net/webupd8team/java/ubuntu xenial main" | tee /etc/apt/sources.list.d/webupd8team-java.list
+ echo "deb-src http://ppa.launchpad.net/webupd8team/java/ubuntu xenial main" | tee -a /etc/apt/sources.list.d/webupd8team-java.list
sudo -H apt-get -qq -y update
echo "oracle-java8-installer shared/accepted-oracle-license-v1-1 select true" | sudo debconf-set-selections
sudo -H apt-get install -y oracle-java8-installer
|
[utils.notify] don't assume available notification backends
Don't assume that notify-send is available on Linux or user
notification centers are available on macOS. | @@ -325,10 +325,12 @@ class DesktopNotifier:
macos_version, *_ = platform.mac_ver()
if platform.system() == 'Darwin':
- if IS_MACOS_BUNDLE and Version(macos_version) >= Version('10.14.0'):
+ if (IS_MACOS_BUNDLE and Version(macos_version) >= Version('10.14.0')
+ and UNUserNotificationCenter.currentNotificationCenter()):
# UNUserNotificationCenter is only supported from signed app bundles
return SupportedImplementations.notification_center
- elif Version(macos_version) < Version('10.16.0'):
+ elif (Version(macos_version) < Version('10.16.0')
+ and NSUserNotificationCenter.defaultUserNotificationCenter):
# deprecated but still works
return SupportedImplementations.legacy_notification_center
elif shutil.which('osascript'):
@@ -339,8 +341,12 @@ class DesktopNotifier:
DesktopNotifierFreedesktopDBus('test')
return SupportedImplementations.freedesktop_dbus
except Exception:
+ pass
+
+ if shutil.which('notify-send'):
return SupportedImplementations.notify_send
+
return SupportedImplementations.stdout
|
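A stripped-down, hypothetical version of the probing order the diff above moves toward: a backend is only selected if it can actually be found, with stdout as the final fallback. This is not the real `DesktopNotifier` logic; the macOS notification-center checks are omitted because they need PyObjC.

```python
# Illustrative fallback chain: probe each backend, pick the first one present.
import platform
import shutil

def pick_backend():
    if platform.system() == "Darwin" and shutil.which("osascript"):
        return "osascript"
    if platform.system() == "Linux" and shutil.which("notify-send"):
        return "notify-send"
    return "stdout"

print(pick_backend())
```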
[TE] Correctly generate buffer binds with axis separators
In SchedulePostProcToPrimfunc, when the axis separator attribute is
moved to the buffer properties, it doesn't update buffers that are in
the buffer bind scope. This occurs if `Stage.tensorize` is called for
a stage whose layout transformation includes `te.AXIS_SEPARATOR`. | @@ -289,6 +289,13 @@ class AxisSeparatorsAttrUnwrapper : StmtExprMutator {
if (op->attr_key == tir::attr::axis_separators) {
return op->body;
+ } else if (op->attr_key == tir::attr::buffer_bind_scope) {
+ Array<ObjectRef> tuple = Downcast<Array<ObjectRef>>(op->node);
+ Buffer view_buffer = Downcast<Buffer>(tuple[0]);
+ Buffer source_buffer = Downcast<Buffer>(tuple[1]);
+ return AttrStmt(
+ Array<ObjectRef>{GetRemappedBuffer(view_buffer), GetRemappedBuffer(source_buffer)},
+ op->attr_key, op->value, op->body);
} else {
return ret;
}
|
Add root rotation bounds updater test
Test that client does not rotate beyond a configured upper bound,
i.e. `current_version + MAX_NUMBER_ROOT_ROTATIONS` | @@ -61,6 +61,7 @@ import tuf.exceptions
import tuf.repository_tool as repo_tool
import tuf.unittest_toolbox as unittest_toolbox
import tuf.client.updater as updater
+import tuf.settings
import securesystemslib
import six
@@ -256,6 +257,57 @@ class TestUpdater(unittest_toolbox.Modified_TestCase):
+ def test_root_rotation_max(self):
+ """Test that client does not rotate beyond a configured upper bound, i.e.
+ `current_version + MAX_NUMBER_ROOT_ROTATIONS`. """
+ # NOTE: The nature of below root changes is irrelevant. Here we only want
+ # the client to update but not beyond a configured upper bound.
+
+ # 1.root.json --> 2.root.json (add root2 and root3 keys)
+ repository = repo_tool.load_repository(self.repository_directory)
+ repository.root.load_signing_key(self.role_keys['root']['private'])
+ repository.root.add_verification_key(self.role_keys['root2']['public'])
+ repository.root.load_signing_key(self.role_keys['root2']['private'])
+ repository.root.add_verification_key(self.role_keys['root3']['public'])
+ repository.root.load_signing_key(self.role_keys['root3']['private'])
+ repository.writeall()
+
+ # 2.root.json --> 3.root.json (change threshold)
+ repository.root.threshold = 2
+ repository.writeall()
+
+ # 3.root.json --> 4.root.json (change threshold again)
+ repository.root.threshold = 3
+ repository.writeall()
+
+ # Move staged metadata to "live" metadata
+ shutil.rmtree(os.path.join(self.repository_directory, 'metadata'))
+ shutil.copytree(os.path.join(self.repository_directory, 'metadata.staged'),
+ os.path.join(self.repository_directory, 'metadata'))
+
+ # Assert that repo indeed has "4.root.json" and that it's the latest root
+ self.assertTrue(filecmp.cmp(
+ os.path.join(self.repository_directory, 'metadata', '4.root.json'),
+ os.path.join(self.repository_directory, 'metadata', 'root.json')))
+
+ # Lower max root rotation cap so that client stops updating early
+ max_rotation_backup = tuf.settings.MAX_NUMBER_ROOT_ROTATIONS
+ tuf.settings.MAX_NUMBER_ROOT_ROTATIONS = 2
+
+ # Update on client 1.root.json --> 2.root.json --> 3.root.json,
+ # but stop before updating to 4.root.json
+ self.repository_updater.refresh()
+
+ # Assert that the client indeed only updated until 3.root.json
+ self.assertTrue(filecmp.cmp(
+ os.path.join(self.repository_directory, 'metadata', '3.root.json'),
+ os.path.join(self.client_metadata_current, 'root.json')))
+
+ # reset
+ tuf.settings.MAX_NUMBER_ROOT_ROTATIONS = max_rotation_backup
+
+
+
def test_root_rotation_missing_keys(self):
repository = repo_tool.load_repository(self.repository_directory)
|
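A toy model (not TUF's updater code) of the behaviour the new test pins down: starting at 1.root.json with `MAX_NUMBER_ROOT_ROTATIONS = 2`, the client walks at most two versions forward and therefore stops at 3.root.json even though 4.root.json exists.

```python
# Toy model of the rotation cap checked by the test above.
MAX_NUMBER_ROOT_ROTATIONS = 2
available_versions = [1, 2, 3, 4]   # root metadata versions published by the repo
current_version = 1

for version in range(current_version + 1,
                     current_version + MAX_NUMBER_ROOT_ROTATIONS + 1):
    if version not in available_versions:
        break
    current_version = version

print(current_version)  # 3, i.e. the update stopped before 4.root.json
```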
fix potential scheduler block when `on_finished` is triggered while newtask_queue is full
ref | @@ -518,7 +518,7 @@ class Scheduler(object):
project._selected_tasks = False
project._send_finished_event_wait = 0
- self.newtask_queue.put({
+ self._postpone_request.append({
'project': project.name,
'taskid': 'on_finished',
'url': 'data:,on_finished',
|
Provide statistics on OpenCL profiling analysis
Close | Common OpenCL abstract base classe for different processing
"""
-from __future__ import absolute_import, print_function, division
-
__author__ = "Jerome Kieffer"
__contact__ = "[email protected]"
__license__ = "MIT"
__copyright__ = "European Synchrotron Radiation Facility, Grenoble, France"
-__date__ = "04/12/2020"
+__date__ = "02/03/2021"
__status__ = "stable"
+import sys
import os
import logging
import gc
-from collections import namedtuple
+from collections import namedtuple, OrderedDict
import numpy
import threading
from .common import ocl, pyopencl, release_cl_buffers, query_kernel_info, allocate_texture, check_textures_availability
@@ -342,20 +341,45 @@ class OpenclProcessing(object):
ev = pyopencl.enqueue_copy(*copy_args, **copy_kwargs)
self.profile_add(ev, "Transfer to texture")
- def log_profile(self):
+ def log_profile(self, stats=False):
"""If we are in profiling mode, prints out all timing for every single OpenCL call
+
+ :param stats: if True, prints the statistics on each kernel instead of all execution timings
+ :return: list of lines to print
"""
- t = 0.0
- out = ["", "Profiling info for OpenCL %s" % self.__class__.__name__]
+ total_time = 0.0
+ out = [""]
+ if stats:
+ stats = OrderedDict()
+ out.append(f"OpenCL kernel profiling statistics in milliseconds for: {self.__class__.__name__}")
+ out.append(f"{'Kernel name':>50} (count): min median max mean std")
+ else:
+ stats = None
+ out.append(f"Profiling info for OpenCL: {self.__class__.__name__}")
+
if self.profile:
for e in self.events:
if "__len__" in dir(e) and len(e) >= 2:
- et = 1e-6 * (e[1].profile.end - e[1].profile.start)
- out.append("%50s:\t%.3fms" % (e[0], et))
- t += et
-
+ name = e[0]
+ pr = e[1].profile
+ t0 = pr.start
+ t1 = pr.end
+ et = 1e-6 * (t1 - t0)
+ total_time += et
+ if stats is None:
+ out.append(f"{name:>50} : {et:.3f}ms")
+ else:
+ if name in stats:
+ stats[name].append(et)
+ else:
+ stats[name] = [et]
+ if stats is not None:
+ for k, v in stats.items():
+ n = numpy.array(v)
+ out.append(f"{k:>50} ({len(v):5}): {n.min():8.3f} {numpy.median(n):8.3f} {n.max():8.3f} {n.mean():8.3f} {n.std():8.3f}")
out.append("_" * 80)
- out.append("%50s:\t%.3fms" % ("Total execution time", t))
+ out.append(f"{'Total OpenCL execution time':>50} : {total_time:.3f}ms")
+
logger.info(os.linesep.join(out))
return out
|
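The `stats=True` branch added above boils down to grouping event timings by kernel name and printing summary statistics. A standalone illustration with made-up timings (not silx code):

```python
# Standalone illustration of the per-kernel statistics (timings in ms are made up).
from collections import OrderedDict
import numpy

events = [("memset", 0.12), ("corner", 1.40), ("corner", 1.52), ("memset", 0.11)]

stats = OrderedDict()
for name, elapsed_ms in events:
    stats.setdefault(name, []).append(elapsed_ms)

for name, values in stats.items():
    n = numpy.array(values)
    print(f"{name:>10} ({len(values)}): "
          f"{n.min():.3f} {numpy.median(n):.3f} {n.max():.3f} {n.mean():.3f} {n.std():.3f}")
```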
Update README.rst
force add MIT license badge | |pic1| |pic2| |pic3|
-.. |pic1| image:: https://img.shields.io/github/license/architecture-building-systems/CityEnergyAnalyst
+.. |pic1| image:: https://img.shields.io/badge/License-MIT-blue.svg
:alt: GitHub license
.. |pic2| image:: https://img.shields.io/github/repo-size/architecture-building-systems/CityEnergyAnalyst
:alt: Repo Size
|
Updated help.py - generate-sas example
Added MacOS example for generating SAS token with expiration time. The call to the date function `date -d` works on Linux, but not on MacOS. I provided MacOS sample. | @@ -863,10 +863,14 @@ helps['storage account generate-sas'] = """
short-summary: 'Storage account name. Must be used in conjunction with either storage account key or a SAS
token. Environment Variable: AZURE_STORAGE_ACCOUNT'
examples:
- - name: Generate a sas token for the account that is valid for queue and table services.
+ - name: Generate a sas token for the account that is valid for queue and table services on Linux.
text: |
end=`date -d "30 minutes" '+%Y-%m-%dT%H:%MZ'`
az storage account generate-sas --permissions cdlruwap --account-name MyStorageAccount --services qt --resource-types sco --expiry $end -otsv
+ - name: Generate a sas token for the account that is valid for queue and table services on MacOS.
+ text: |
+ end=`date -v+30M '+%Y-%m-%dT%H:%MZ'`
+ az storage account generate-sas --permissions cdlruwap --account-name MyStorageAccount --services qt --resource-types sco --expiry $end -otsv
"""
helps['storage container generate-sas'] = """
|
Update test_roots.py
Making black changes manually. | @@ -251,16 +251,18 @@ class RootsTest(TestCase, AdaptedConfigurationTestCaseMixin, LoaderModuleMockMix
with tempfile.TemporaryDirectory() as tmpdirname:
mtime_map_path = os.path.join(tmpdirname, "roots", "mtime_map")
os.makedirs(os.path.dirname(mtime_map_path))
- with salt.utils.files.fopen(mtime_map_path, 'wb') as fp:
+ with salt.utils.files.fopen(mtime_map_path, "wb") as fp:
fp.write(b"\x9c")
with patch(
- "salt.fileserver.reap_fileserver_cache_dir", MagicMock(return_value=True)
+ "salt.fileserver.reap_fileserver_cache_dir",
+ MagicMock(return_value=True)
), patch(
- "salt.fileserver.generate_mtime_map", MagicMock(return_value=new_mtime_map)
+ "salt.fileserver.generate_mtime_map",
+ MagicMock(return_value=new_mtime_map)
), patch.dict(
- roots.__opts__, {"fileserver_events": False, "cachedir":
- tmpdirname},
+ roots.__opts__,
+ {"fileserver_events": False, "cachedir": tmpdirname},
):
ret = roots.update()
|
Update bio-alignment-chart.md
add `no_display=true` to iframed cell | @@ -48,7 +48,7 @@ fig.show()
## Alignment Chart in dash_bio
-```python
+```python no_display=true
from IPython.display import IFrame
snippet_url = 'https://dash-gallery.plotly.host/python-docs-dash-snippets/'
IFrame(snippet_url + 'bio-alignmentchart', width='100%', height=630)
|
Update bearing_seal_element.py
Substitute np.allclose() for pytest.approx() in the bearing __eq__ method. | @@ -6,7 +6,6 @@ import matplotlib.patches as mpatches
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
-import pytest
import scipy.interpolate as interpolate
import xlrd
@@ -45,7 +44,7 @@ class _Coefficient:
self.interpolated = lambda x: np.array(self.coefficient[0])
def __eq__(self, other):
- if pytest.approx(self.__dict__["coefficient"]) == other.__dict__["coefficient"]:
+ if np.allclose(self.__dict__["coefficient"], other.__dict__["coefficient"]):
return True
else:
return False
|
Update feature_request.md
This bugs me every single time, the missing space in the rendered version gives me claustrophobia :smile: | @@ -7,7 +7,7 @@ assignees: ''
---
-**Is your feature request related to a problem? Please describe.**
+#### Is your feature request related to a problem? Please describe.
A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]
#### Describe the solution you'd like
|
Feature: suggest command usage for misspelt commands
Migration to error_handler.py
Suggesting misspelt commands, in progress | -import contextlib
+# import contextlib
+import difflib
import logging
from discord.ext.commands import (
@@ -75,7 +76,7 @@ class ErrorHandler(Cog):
if not ctx.channel.id == Channels.verification:
tags_get_command = self.bot.get_command("tags get")
ctx.invoked_from_error_handler = True
-
+ command_name = ctx.invoked_with
log_msg = "Cancelling attempt to fall back to a tag due to failed checks."
try:
if not await tags_get_command.can_run(ctx):
@@ -87,9 +88,31 @@ class ErrorHandler(Cog):
return
# Return to not raise the exception
- with contextlib.suppress(ResponseCodeError):
- await ctx.invoke(tags_get_command, tag_name=ctx.invoked_with)
+ log.debug("Calling...")
+ tags_cog = self.bot.get_cog("Tags")
+ sent = await tags_cog._get_command(ctx, command_name)
+ # sent = await tags_get_command.callback(tags_get_command.cog, ctx, ctx.invoked_with)
+ if sent:
+ log.debug("Found")
return
+ # No similar tag found, or tag on cooldown -
+ # searching for a similar command
+ log.debug("Not Found")
+ raw_commands = [
+ (cmd.name, *cmd.aliases)
+ for cmd in self.bot.walk_commands()
+ if not cmd.hidden
+ ]
+ raw_commands = [c for data in raw_commands for c in data]
+ similar_command_data = difflib.get_close_matches(command_name, raw_commands, 1)
+ log.debug(similar_command_data)
+ similar_command = self.bot.get_command(similar_command_data[0])
+ if await similar_command.can_run(ctx):
+ misspelled_content = ctx.message.content
+ await ctx.send(
+ f"Did you mean:\n**{misspelled_content.replace(command_name, similar_command.name)}**"
+ )
+
elif isinstance(e, BadArgument):
await ctx.send(f"Bad argument: {e}\n")
await ctx.invoke(*help_command)
|
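The suggestion logic above hinges on `difflib.get_close_matches`, which returns the closest spellings from a candidate list. A standalone example with made-up command names:

```python
# Standalone example of the fuzzy lookup used above; command names are made up.
import difflib

known_commands = ["help", "tags", "source", "site", "watch"]
typo = "taggs"

suggestion = difflib.get_close_matches(typo, known_commands, 1)
print(suggestion)  # ['tags']
```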
DOC: Improve intersect1d docstring
The docstring now says what to expect if you call
intersect1d(assume_unique=True) but pass in non-unique data. | @@ -369,7 +369,9 @@ def intersect1d(ar1, ar2, assume_unique=False, return_indices=False):
Input arrays. Will be flattened if not already 1D.
assume_unique : bool
If True, the input arrays are both assumed to be unique, which
- can speed up the calculation. Default is False.
+ can speed up the calculation. If True but ``ar1`` or ``ar2`` are not
+ unique, incorrect results and out-of-bounds indices could result.
+ Default is False.
return_indices : bool
If True, the indices which correspond to the intersection of the two
arrays are returned. The first instance of a value is used if there are
|
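A short example of the contract the new wording documents; `assume_unique=True` is only safe when both inputs really are unique:

```python
# Example of the documented behaviour; safe because both inputs are unique here.
import numpy as np

ar1 = np.array([1, 3, 4, 7])
ar2 = np.array([3, 1, 2, 9])

print(np.intersect1d(ar1, ar2))                      # [1 3]
print(np.intersect1d(ar1, ar2, assume_unique=True))  # [1 3], inputs really are unique
# With duplicated inputs, assume_unique=True may silently return wrong values,
# which is exactly what the docstring change warns about.
```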
Update README.md
Correct spelling of optional | @@ -68,7 +68,7 @@ Here's a sample:
- Having some very basic shell (command prompt) skills. Feel free to ask for help on the Discord!
- Having a dedicated server - This app is meant to run 24/7 - (If you don't have that, you can just run a cheap Virtual Private Server)
- The below need to be installed on the server where the rcon will run:
- - (Otionnal but recommanded) GIT: https://git-scm.com/downloads (if you don't use git you need to download the releases)
+ - (Optional but recommanded) GIT: https://git-scm.com/downloads (if you don't use git you need to download the releases)
- Docker Engine (Community) installed: https://docs.docker.com/install/
- Docker Compose installed: https://docs.docker.com/compose/install/
|
Test fixed after increasing the precision of FDOS. External verification
of fermi level not available at the moment, but the following paper shows
that fermi level should decrease in GaAs with increasing temperature,
which the previous test did not agree with, while this one does. | @@ -102,7 +102,7 @@ class DefectsThermodynamicsTest(PymatgenTest):
def test_solve_for_fermi_energy(self):
fermi_energy = self.pd.solve_for_fermi_energy(100.0, self.mu_elts, self.dos)
- self.assertAlmostEqual(fermi_energy, 0.5738732534885003, 3)
+ self.assertAlmostEqual(fermi_energy, 0.8334775317578078, 3)
fermi_energy = self.pd.solve_for_fermi_energy(1000.0, self.mu_elts, self.dos)
self.assertAlmostEqual(fermi_energy, 0.74139553, 3)
|
Remove Sentium, it's broken
Sentium returns http code 503 when accessed | @@ -1617,7 +1617,6 @@ API | Description | Auth | HTTPS | CORS |
| [LibreTranslate](https://libretranslate.com/docs) | Translation tool with 17 available languages | No | Yes | Unknown |
| [Semantria](https://semantria.readme.io/docs) | Text Analytics with sentiment analysis, categorization & named entity extraction | `OAuth` | Yes | Unknown |
| [Sentiment Analysis](https://www.meaningcloud.com/developer/sentiment-analysis) | Multilingual sentiment analysis of texts from different sources | `apiKey` | Yes | Yes |
-| [Sentium](https://sentim-api.herokuapp.com/) | Free API for Text Sentimental analysis | No | Yes | Unknown |
| [Tisane](https://tisane.ai/) | Text Analytics with focus on detection of abusive content and law enforcement applications | `OAuth` | Yes | Yes |
| [Watson Natural Language Understanding](https://cloud.ibm.com/apidocs/natural-language-understanding/natural-language-understanding) | Natural language processing for advanced text analysis | `OAuth` | Yes | Unknown |
|
Disable getNumGPUs rewrite
Summary:
cc iotamudelta
Pull Request resolved: | "RoiPooling2d_backward_kernel<<<": "RoiPooling2d_backward_kernel<float><<<"
}
},
- {
- "path": "aten/src/ATen/Context.h",
- "s_constants": {
- "detail::getCUDAHooks().getNumGPUs()": "1"
- }
- },
{
"path": "aten/src/ATen/native/cuda/Unique.cu",
"s_constants": {
|
Corrected treatment protocol version
Corrected treatment protocol version | "dissociation_protocol": "6.2.0",
"enrichment_protocol": "3.1.0",
"ipsc_induction_protocol": "3.2.0",
- "treatment_protocol": "0.0.0"
+ "treatment_protocol": "0.0.1"
},
"imaging": {
"imaging_preparation_protocol": "2.2.0",
|
CompoundEditor : Improve editor link swatch colors
We inadvertently had a light blue color in there that was virtually the
same as the highlight color. This removes it and tweaks some of the
others a little. | @@ -1632,8 +1632,7 @@ class _DrivenEditorSwatch( _Frame ) :
__drivenEditorColors = [
imath.Color3f( 0.71, 0.43, 0.47 ),
imath.Color3f( 0.85, 0.80, 0.48 ),
- imath.Color3f( 0.62, 0.79, 0.93 ),
- imath.Color3f( 0.27, 0.45, 0.21 ),
+ imath.Color3f( 0.35, 0.55, 0.28 ),
imath.Color3f( 0.57, 0.43, 0.71 )
]
__drivenEditorColorsLastUsed = 0
|
Use a new variable for static_broadcasted_argnums as a tuple.
* Use a new variable for static_broadcasted_argnums as a tuple.
This works around a bug in pytype (b/156151503). | @@ -1022,7 +1022,9 @@ def pmap(fun: Callable, axis_name: Optional[AxisName] = None, *, in_axes=0,
_check_callable(fun)
axis_name = _TempAxisName(fun) if axis_name is None else axis_name
if isinstance(static_broadcasted_argnums, int):
- static_broadcasted_argnums = (static_broadcasted_argnums,)
+ static_broadcasted_tuple: Tuple[int, ...] = (static_broadcasted_argnums,)
+ else:
+ static_broadcasted_tuple = tuple(static_broadcasted_argnums)
# axis_size is an optional integer representing the global axis size.
# The aggregate size (across all hosts) size of the mapped axis must match
@@ -1034,15 +1036,15 @@ def pmap(fun: Callable, axis_name: Optional[AxisName] = None, *, in_axes=0,
@wraps(fun)
def f_pmapped(*args, **kwargs):
f = lu.wrap_init(fun)
- if static_broadcasted_argnums:
- if max(static_broadcasted_argnums) >= len(args):
+ if static_broadcasted_tuple:
+ if max(static_broadcasted_tuple) >= len(args):
msg = ("pmapped function has static_broadcasted_argnums={} but was "
"called with only {} positional argument{}. All static "
"broadcasted arguments must be passed positionally.")
- raise ValueError(msg.format(static_broadcasted_argnums, len(args),
+ raise ValueError(msg.format(static_broadcasted_tuple, len(args),
"s" if len(args) > 1 else ""))
dyn_argnums = [i for i in range(len(args))
- if i not in static_broadcasted_argnums]
+ if i not in static_broadcasted_tuple]
f, dyn_args = argnums_partial(f, dyn_argnums, args)
if isinstance(in_axes, tuple):
dyn_in_axes = tuple(in_axes[i] for i in dyn_argnums)
|
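The workaround boils down to binding the normalized value to a new name with a single static type instead of re-assigning the argument. A minimal standalone version of the pattern (no JAX required):

```python
# Minimal standalone version of the int-or-sequence normalization pattern.
from typing import Sequence, Tuple, Union

def normalize_argnums(argnums: Union[int, Sequence[int]]) -> Tuple[int, ...]:
    if isinstance(argnums, int):
        normalized: Tuple[int, ...] = (argnums,)
    else:
        normalized = tuple(argnums)
    return normalized

print(normalize_argnums(2))       # (2,)
print(normalize_argnums([0, 3]))  # (0, 3)
```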
Trying to rollback python
I am testing what happens to the Java dependency with this version. | -dist: xenial
+dist: trusty
git:
depth: false
addons:
@@ -11,7 +11,7 @@ addons:
secure: Uw/F4E7ZsnAni7KxX4kFw+Xq0iZ7pveGAEAzb+BRfBk80Hv94Ptq1fztTjAEG9mcNU9+5MJS7YMEc2YDS3dftWHfMHhu7/eg12dMHDwmlmWxMNC/nOgQASSQdJTxU6VdCmjds1pW97nkwlJl8hKy4TPR+Ll7ll4Ha4Yx+02JrzCzQA5syG8agY4zT0ybuBjF+a3ZnTdOrJIhi4PNf6+KbmLmUZqj+PbFpOUQnxHIP2r9XcmlVZR/okVz3eTTat0LIIzVArxcepA+veewxyg4Io/qJBqtAfwQTN/18GlyHfjE5CgfZua7BV/CH7picN3tAOp2Mwt/xnzO+ZJe1Plt/vApZfUhv7J7YkSxwWYtHOqEgcS5mlfvaQ1mnFgyzXZPdWc6gQbw1EQRmNBCpSpbJJo59Fy3kCmyRMre7cyAmHYMloFXlWIKrBk7/eE7hGDaivu6knMBwZ4eCFCZrmYayZeF8l+KXkDRXDrCEMp92AhU+TQTkSyAXnl4xpJwxOimU0Yy0X414cHx/3w7X7i6bHdIMrIvy9qSPIwNtsdcmNPg8/VlPzpbVC6G5qhs9HFRjVexuoXYogfU7sVqxhOK3kpJ1TVMylgUp2Xdqy+uR8g7n+teUq1Rgb8WnoQ0YSbbhrC61FDeY1/bykiz9DcN0A9wHjvaDRnxujTbC1h28Gk=
language: python
python:
-- '3.7'
+- '3.6'
before_install:
- pip install --upgrade pytest
- pip install --upgrade mypy
|
updated sso-office.md
removed image names
fixed
continuation of PR | @@ -9,23 +9,23 @@ Follow these steps to configure Mattermost to use your Office 365 logon credenti
2. In the left-hand navigation pane, select the **Azure Active Directory service**, and then select **App registrations > New registration**.
-
+
3. Give your new registration a **Name**, and then define which **Supported account types** can access the application. For example, if this is to be only accessed from your enterprise's Azure AD accounts, then select _Accounts in this organizational directory only_. The **Redirect URI** should be defined as Web client. Also input the URL with the host name that will be specific to your Mattermost service followed by `/signup/office365/complete`. An example below is: https://your.mattermost.com/signup/office365/complete
- 
+ 
Now the App Registration has been created and you can configure it further. The standard Azure AD documentation is [here](https://docs.microsoft.com/en-gb/azure/active-directory/develop/quickstart-register-app) for reference.
4. Select **Certificates and Secrets** from the menu, and click the button to generate a **New Client secret**. Provide a description and define the expiry for the token.
- 
+ 
Click _Add_ and you will be provided with the _client secret value_, copy this and save it for use in the Mattermost configuration as the **Application Secret Password**.
5. Select **Overview** from the menu and copy the _Application (client) ID_ and the _Directory (tenant) ID_, for use in the Mattermost configuration as the **Application ID** and as part of the **Auth Endpoint** and **Token Endpoint** URL.
- 
+ 
6. Log in to Mattermost and then go to the **System Console > OAuth 2.0 > Select OAuth 2.0 service provider**, choose **Office 365** as the service provider. Enter the _client secret value_ you copied in Step 4 as the **Application Secret Password**. Paste the _Application (client) ID_ you saved in Step 5 into the **Application ID** field.
|
Remove fixed TODO
Summary:
Pull Request resolved: | @@ -189,8 +189,6 @@ public:
return std::move(*this);
}
- // TODO allow input schema to be just the operator name + overload name, in that case use schema generated from kernel function
-
private:
template<class... ConfigParameters>
void op_(FunctionSchema&& schema, ConfigParameters&&... configParameters) {
|
MySQL 8 support note
Add note on authentication change in MySQL 8 and how to support it | @@ -65,7 +65,7 @@ While community support exists for Fedora, FreeBSD and Arch Linux, Mattermost do
Database Software
^^^^^^^^^^^^^^^^^
-- MySQL 5.6+
+- MySQL 5.6, 5.7, 8 (Please see note below on MySQL 8 support)
- PostgreSQL 9.4+
- Amazon Aurora MySQL 5.6+
@@ -82,6 +82,17 @@ Search limitations on MySQL:
- Hashtags or recent mentions of usernames containing a dot do not return search results.
+**MySQL 8 Support**:
+
+In MySQL 8.0.4 the MySQL team changed the default authentication plugin from ``mysql_native_password`` to ``caching_sha2_password`` (https://mysqlserverteam.com/mysql-8-0-4-new-default-authentication-plugin-caching_sha2_password/). If you are using MySQL 8.0.4+ you will need to enable ``mysql_native_password`` by adding the following entry in your MySQL configuration file.
+
+.. code-block:: text
+
+[mysqld]
+default-authentication-plugin=mysql_native_password
+
+
+
Hardware Requirements
---------------------
|
Fixing Chat Preview For Users who do not have picture
Added Section Segregation on Config | {% endfor %}
</select>
</div>
-
+ <hr>
+ <h4>Recording Options</h4>
{% if sysSettings.allowRecording == True %}
<div class="form-group row">
<div class="col-6">
<input type="checkbox" data-toggle="toggle" id="allowComments" name="allowComments" {% if channel.allowComments == True %} checked {% endif %}>
</div>
</div>
-
+ <hr>
+ <h4>Chat Options</h4>
<div class="form-group row">
<div class="col-6">
<label for="chatSelect" class="col-form-label"><b>Enable Chat </b></label>
<input type="color" id="chatTextColor-{{channel.id}}" name="chatTextColor" value="{{channel.chatTextColor}}" onchange="updateChatPreview({{channel.id}})">
</div>
</div>
-
+ <h4>Chat Preview</h4>
<div id="chatPreviewBox-{{channel.id}}" class="chatBar-Item chat-{{channel.chatBG}} {{channel.chatAnimation}}">
<div class="charBar-Item-Image">
- <img src="/images/{{current_user.pictureLocation}}" width="52px" height="52px">
+ <img src="/images/{{current_user.pictureLocation}}" width="52px" height="52px" onerror="this.src='/static/img/user2.png';">
</div>
<div id="chatPreviewText-{{channel.id}}" class="chatBar-Item-Text" style="color:{{channel.chatTextColor}};">
<b>{{current_user.username}}</b> 12:00
</div>
</div>
+ <hr>
<div class="row">
<div class="col-12">
<div class="form-group">
|
Remove unnecessary credentials
Dataframe datasets ignore the constructor credentials,
and, in any case, for dataframes we're setting the Geocoder
credentials to upload them. | @@ -128,7 +128,7 @@ class Isolines(Service):
input_dataframe = None
if isinstance(source, pd.DataFrame):
input_dataframe = source
- source = Dataset(input_dataframe, credentials=self._credentials)
+ source = Dataset(input_dataframe)
if dry_run:
num_rows = source.get_num_rows()
|
update FigureManager: addFigureInfo
main label in lower left corner describing some display conditions (from ImagingPath.createRayTracePlot) | @@ -25,13 +25,13 @@ class FigureManager:
self.drawings = []
- # ok. A Drawing should contain its own Aperture and labels set at a specific position.
+ # A Drawing should contain its own Aperture and labels set at a specific position.
# FigureManager can display them, request their position to check they do not overlap.
# If they overlap he can ask to update their position
# * Labels do not need size rescaling but position update (delta Y is -5% of displayRange ish)
# But there's also some Labels that are not necessarily tied to a drawing. like A/F stops
- def createFigure(self, style='presentation', comments=None):
+ def createFigure(self, style='presentation', comments=None, title=None):
if style == 'teaching':
self.figure, (self.axes, self.axesComments) = plt.subplots(2, 1, figsize=(10, 7))
self.axesComments.axis('off')
@@ -40,6 +40,7 @@ class FigureManager:
else:
self.figure, self.axes = plt.subplots(figsize=(10, 7))
+ self.axes.set(xlabel='Distance', ylabel='Height', title=title)
def add(self, *dataObjects):
"""Add a supported object to the display.
@@ -64,6 +65,12 @@ class FigureManager:
for line in [*lines]:
self.axes.add_line(line)
+ def addFigureInfo(self, text):
+ """Text note in the bottom left of the figure. This note is fixed and cannot be moved."""
+ # fixme: might be better to put it out of the axes since it only shows object height and display conditions
+ self.axes.text(0.05, 0.15, text, transform=self.axes.transAxes,
+ fontsize=12, verticalalignment='top', clip_box=self.axes.bbox, clip_on=True)
+
def draw(self):
for drawing in self.drawings:
drawing.applyTo(self.axes)
|
Fix PR
Turns out that comments are associated with a specific AST node in
python, and are not AST nodes on their own. Therefore, "continue" after
detecting the shebang comment in fact causes the future import to be
placed after the first AST node, which might be anything. | @@ -243,7 +243,6 @@ def future_import(feature, node):
# Is it a shebang or encoding line?
if is_shebang_comment(node) or is_encoding_comment(node):
shebang_encoding_idx = idx
- continue
if is_docstring(node):
# skip over docstring
continue
|
issue split up Connection._connect()
The logic was getting too busy. | @@ -551,20 +551,11 @@ class Connection(ansible.plugins.connection.ConnectionBase):
return stack, seen_names
- def _connect(self):
+ def _connect_broker(self):
"""
- Establish a connection to the master process's UNIX listener socket,
- constructing a mitogen.master.Router to communicate with the master,
- and a mitogen.parent.Context to represent it.
-
- Depending on the original transport we should emulate, trigger one of
- the _connect_*() service calls defined above to cause the master
- process to establish the real connection on our behalf, or return a
- reference to the existing one.
+ Establish a reference to the Broker, Router and parent context used for
+ connections.
"""
- if self.connected:
- return
-
if not self.broker:
self.broker = mitogen.master.Broker()
self.router, self.parent = mitogen.unix.connect(
@@ -572,6 +563,11 @@ class Connection(ansible.plugins.connection.ConnectionBase):
broker=self.broker,
)
+ def _build_stack(self):
+ """
+ Construct a list of dictionaries representing the connection
+ configuration between the controller and the target.
+ """
if hasattr(self._play_context, 'delegate_to'):
target_config = config_from_hostvars(
transport=self._play_context.connection,
@@ -589,7 +585,14 @@ class Connection(ansible.plugins.connection.ConnectionBase):
connection=self
)
stack, _ = self._stack_from_config(target_config)
+ return stack
+ def _connect_stack(self, stack):
+ """
+ Pass `stack` to ContextService, requesting a copy of the context object
+ representing the target. If no connection exists yet, ContextService
+ will establish it before returning it or throwing an error.
+ """
dct = self.parent.call_service(
service_name='ansible_mitogen.services.ContextService',
method_name='get',
@@ -610,6 +613,24 @@ class Connection(ansible.plugins.connection.ConnectionBase):
self.fork_context = dct['init_child_result']['fork_context']
self.home_dir = dct['init_child_result']['home_dir']
+ def _connect(self):
+ """
+ Establish a connection to the master process's UNIX listener socket,
+ constructing a mitogen.master.Router to communicate with the master,
+ and a mitogen.parent.Context to represent it.
+
+ Depending on the original transport we should emulate, trigger one of
+ the _connect_*() service calls defined above to cause the master
+ process to establish the real connection on our behalf, or return a
+ reference to the existing one.
+ """
+ if self.connected:
+ return
+
+ self._connect_broker()
+ stack = self._build_stack()
+ self._connect_stack(stack)
+
def close(self, new_task=False):
"""
Arrange for the mitogen.master.Router running in the worker to
|
Update vault.py
added log notice for missing entries
use get() | @@ -87,7 +87,9 @@ def ext_pillar(minion_id, # pylint: disable=W0613
url = 'v1/{0}'.format(path)
response = __utils__['vault.make_request']('GET', url)
if response.status_code == 200:
- vault_pillar = response.json()['data']
+ vault_pillar = response.json().get('data', {})
+ else:
+ log.info('Vault secret not found for: %s', path)
except KeyError:
log.error('No such path in Vault: %s', path)
|
Updating .semgrepignore
Adding directories to be ignored by semgrep.
These files do not need to be checked and are causing semgrep to fail | #spl files may contain eval and other statements that should NOT trigger semgrep warnings
*.spl
+#Ignore Markdown and Wiki Pages
+*.md
+*.wiki
+
#Temporarily ignoring this directory as we discuss a path moving forward
#for Splunk Packaging Toolkit Update Strategy
/dist/
+#Don't check yaml files in these directories
+/response_tasks
+/detections
+
|
MAINT: Fix issue with C compiler args containing spaces
Instead of doing a dumb string split, use shlex to make sure args
containing spaces are handled properly. | import os
import sys
import subprocess
+import shlex
from distutils.errors import CompileError, DistutilsExecError, LibError
from distutils.unixccompiler import UnixCCompiler
@@ -30,15 +31,15 @@ def UnixCCompiler__compile(self, obj, src, ext, cc_args, extra_postargs, pp_opts
if 'OPT' in os.environ:
# XXX who uses this?
from sysconfig import get_config_vars
- opt = " ".join(os.environ['OPT'].split())
- gcv_opt = " ".join(get_config_vars('OPT')[0].split())
- ccomp_s = " ".join(self.compiler_so)
+ opt = shlex.join(shlex.split(os.environ['OPT']))
+ gcv_opt = shlex.join(shlex.split(get_config_vars('OPT')[0]))
+ ccomp_s = shlex.join(self.compiler_so)
if opt not in ccomp_s:
ccomp_s = ccomp_s.replace(gcv_opt, opt)
- self.compiler_so = ccomp_s.split()
- llink_s = " ".join(self.linker_so)
+ self.compiler_so = shlex.split(ccomp_s)
+ llink_s = shlex.join(self.linker_so)
if opt not in llink_s:
- self.linker_so = llink_s.split() + opt.split()
+ self.linker_so = self.linker_so + shlex.split(opt)
display = '%s: %s' % (os.path.basename(self.compiler_so[0]), src)
|
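The reason for the switch: `str.split` tears apart any argument that contains a space, while `shlex` round-trips it. Note that `shlex.join` requires Python 3.8+.

```python
# Why shlex round-trips compiler arguments safely (shlex.join needs Python 3.8+).
import shlex

opt = '-O2 -I"/opt/my libs/include"'

print(opt.split())        # ['-O2', '-I"/opt/my', 'libs/include"']  -- path torn apart
print(shlex.split(opt))   # ['-O2', '-I/opt/my libs/include']       -- path kept intact
print(shlex.join(shlex.split(opt)))  # "-O2 '-I/opt/my libs/include'"
```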
DOC: added guidance for new routine
Added guidance for when to use the new load routine to the new instrument example. Also updated xarray/pandas guidance. | @@ -324,25 +324,26 @@ The load module method signature should appear as:
data by day. This can present some issues for data sets that are stored
by month or by year. See ``instruments.methods.nasa_cdaweb.py`` for an example
of returning daily data when stored by month.
-- tag and inst_id specify the data set to be loaded
+- tag and inst_id are always available as inputs, as they commonly specify
+ the data set to be loaded
- The load routine should return a tuple with (data, pysat metadata object).
-- `data` is a pandas DataFrame, column names are the data labels, rows are
- indexed by datetime objects.
-- For multi-dimensional data, an xarray can be
- used instead. When returning xarray data, a variable at the top-level of the
- instrument module must be set:
+- For simple time-series data sets, `data` is a pandas DataFrame, column names
+ are the data labels, rows are indexed by datetime objects.
+- For multi-dimensional data, `data` can be set to an xarray Dataset instead.
+ When returning xarray data, a variable at the top-level of the instrument
+ module must be set:
.. code:: python
pandas_format = False
-- The pandas DataFrame or xarray needs to be indexed with datetime objects. For
- xarray objects this index needs to be named 'Epoch' or 'time'. In a future
- version the supported names for the time index may be reduced. 'Epoch'
- should be used for pandas though wider compatibility is expected.
+- The pandas DataFrame or xarray Dataset needs to be indexed with datetime
+ objects. This index needs to be named 'Epoch' or 'time'.
- ``pysat.utils.create_datetime_index`` provides quick generation of an
appropriate datetime index for irregularly sampled data sets with gaps
-
+- If your data is a CSV formatted file, you can incorporate the
+ ``pysat.instruments.methods.general.load_csv_data`` routine (see
+ :ref:`api--methods-general`) into your load method.
- A pysat meta object may be obtained from ``pysat.Meta()``. The :ref:`api-meta`
object uses a pandas DataFrame indexed by variable name with columns for
metadata parameters associated with that variable, including items like
|
Fix build error with MSVC
Summary:
Close
Possibly broken by
Pull Request resolved: | @@ -13,8 +13,8 @@ Tensor& quantized_copy_(Tensor& self, const Tensor& src) {
"Quantized copy only works with contiguous Tensors");
TORCH_CHECK(self.sizes().equals(src.sizes()),
"Quantized copy only works with Tensors with the same shape");
- float* src_data = src.data<float>();
AT_DISPATCH_QINT_TYPES(self.scalar_type(), "Copy", [&]() {
+ float* src_data = src.data<float>();
scalar_t* self_data = self.data<scalar_t>();
for (int i = 0; i < self.numel(); ++i) {
self_data[i] = quantize_val<scalar_t>(
|
Update `versionadded` for `Config.from_file`
According to the change log at the release `Config.from_file` will be published with is now 2.0.0 rather than 1.2.0. | @@ -194,7 +194,7 @@ class Config(dict):
implements a ``read`` method.
:param silent: Ignore the file if it doesn't exist.
- .. versionadded:: 1.2
+ .. versionadded:: 2.0
"""
filename = os.path.join(self.root_path, filename)
|
Fixing incorrect macro in a detection
whose test file was updated. | @@ -6,7 +6,7 @@ author: Rod Soto
type: Hunting
datamodel: []
description: This hunting search provides information on possible exploitation attempts against Splunk Secure Gateway App Mobile Alerts feature in Splunk versions 9.0, 8.2.x, 8.1.x. An authenticated user can run arbitrary operating system commands remotely through the use of specially crafted requests to the mobile alerts feature in the Splunk Secure Gateway app.
-search: '`splunkd_webx` uri_path="/servicesNS/nobody/splunk_secure_gateway/storage/collections/data/mobile_alerts*" sort="notification.created_at:-1" | table clientip file host method uri_query sort | `splunk_rce_via_splunk_secure_gateway__splunk_mobile_alerts_feature_filter`'
+search: '`splunkda` uri_path="/servicesNS/nobody/splunk_secure_gateway/storage/collections/data/mobile_alerts*" sort="notification.created_at:-1" | table clientip file host method uri_query sort | `splunk_rce_via_splunk_secure_gateway__splunk_mobile_alerts_feature_filter`'
how_to_implement: This search only applies if Splunk Mobile Gateway is deployed in the vulnerable Splunk versions.
known_false_positives: This detection does not require you to ingest any new data. The detection does require the ability to search the _internal index. Focus of this search is "uri_path=/servicesNS/nobody/splunk_secure_gateway/storage/collections/data/mobile_alerts*" which is the injection point.
references:
|
Fix scheduler_plugin_configuration fixture when scheduler not present
Handle the case when scheduler fixture is not present | @@ -1381,7 +1381,11 @@ def run_benchmarks(request, mpi_variants, test_datadir, instance, os, region, be
@pytest.fixture()
-def scheduler_plugin_configuration(request, region, upload_scheduler_plugin_definitions, scheduler=None):
+def scheduler_plugin_configuration(request, region, upload_scheduler_plugin_definitions):
+ try:
+ scheduler = request.getfixturevalue("scheduler")
+ except pytest.FixtureLookupError:
+ scheduler = None
scheduler_plugin = request.config.getoption("tests_config", default={}).get("scheduler-plugins", {}).get(scheduler)
scheduler_definition_url = upload_scheduler_plugin_definitions.get(scheduler, {}).get(region, {})
if scheduler_definition_url:
|
minor formatting error log
Summary:
Pull Request resolved:
as title | @@ -583,8 +583,9 @@ class CAFFE2_API TensorImpl : public c10::intrusive_ptr_target {
IsType<T>(),
"Tensor type mismatch, caller expects elements to be ",
TypeMeta::TypeName<T>(),
- " while tensor contains ",
- storage_.dtype().name());
+ ", while tensor contains ",
+ storage_.dtype().name(),
+ ". ");
return static_cast<T*>(storage_.data());
}
|
Modify the notes of upload_image_data() method
The notes of upload_image_data() method in
zun.image.glance.driver should be "Upload an image".
Closes-bug: | @@ -145,7 +145,7 @@ class GlanceDriver(driver.ContainerImageDriver):
raise exception.ZunException(six.text_type(e))
def upload_image_data(self, context, img_id, data):
- """Update an image."""
+ """Upload an image."""
LOG.debug('Uploading an image to glance %s', img_id)
try:
return utils.upload_image_data(context, img_id, data)
|
Make rename exception-handling code py3-compatible
sys.maxint doesn't exist in py3, but sys.maxnum will have desired functionality in both py2/py3
Similar to solution implemented here:
Issue encountered here: | @@ -94,7 +94,7 @@ if os.name == 'nt': # pragma: no cover
except OSError as e:
if e.errno != errno.EEXIST:
raise
- old = "%s-%08x" % (dst, random.randint(0, sys.maxint))
+ old = "%s-%08x" % (dst, random.randint(0, sys.maxsize))
os.rename(dst, old)
os.rename(src, dst)
try:
|
fix: increase the timeout for checking the connection
This commit increases the retry number for
checking that the submariner connection is up
and running. | subctl show all | grep connected
args:
executable: /bin/bash
- retries: 10
+ retries: 40
delay: 10
register: subma_verify
until: subma_verify.rc == 0
|
Fixed bug where tables with remote pagination would modify the wrong
rows if not yet sorted. | @@ -986,11 +986,8 @@ class Tabulator(BaseTable):
nrows = self.page_size
start = (self.page-1)*nrows
end = start+nrows
- if self.sorters:
index = self._processed.iloc[start:end].index.values
self.value[column].loc[index] = array
- else:
- self.value[column].iloc[start:end] = array
def _update_selection(self, indices):
if self.pagination != 'remote':
|
Add missing torchvision 0.10.1
torchvision compatible with torch 1.9.1 was missing in table of supported versions. | @@ -25,6 +25,8 @@ supported Python versions.
+--------------------------+--------------------------+---------------------------------+
| ``1.10.0`` | ``0.11.1`` | ``>=3.6``, ``<=3.9`` |
+--------------------------+--------------------------+---------------------------------+
+| ``1.9.1`` | ``0.10.1`` | ``>=3.6``, ``<=3.9`` |
++--------------------------+--------------------------+---------------------------------+
| ``1.9.0`` | ``0.10.0`` | ``>=3.6``, ``<=3.9`` |
+--------------------------+--------------------------+---------------------------------+
| ``1.8.2`` | ``0.9.2`` | ``>=3.6``, ``<=3.9`` |
|
change made
Change made from fcurella comments on the pull request | localized = True
#default_locale is 'en_US' in the previous State of this application
default_locale = 'la'
-#external provider
-external_provider = ''
from .. import BaseProvider
@@ -42,9 +40,7 @@ class Provider(BaseProvider):
'ext_word_list' --- a list of word you would like to have
instead of 'Lorem ipsum'
"""
- if ext_word_list:
return [cls.word(ext_word_list) for _ in range(0, nb)]
- return [cls.word() for _ in range(0, nb)]
@classmethod
def sentence(cls, nb_words=6, variable_nb_words=True, ext_word_list=None):
@@ -65,10 +61,7 @@ class Provider(BaseProvider):
if variable_nb_words:
nb_words = cls.randomize_nb_elements(nb_words)
- if ext_word_list:
- words = cls.words(nb_words, ext_word_list)
- else:
- words = cls.words(nb_words)
+ words = cls.words(nb=nb_words, ext_world_list=ext_world_list)
words[0] = words[0].title()
return cls.word_connector.join(words) + cls.sentence_punctuation
|
Incidents: implement & schedule `crawl_incidents` task
See docstring for further information. This will run on start-up
to retroactively add missing emoji.
Ratelimit-wise this should be fine, as there should never be too
many missing emoji. | +import asyncio
import logging
import typing as t
from enum import Enum
@@ -27,7 +28,38 @@ class Incidents(Cog):
"""Automation for the #incidents channel."""
def __init__(self, bot: Bot) -> None:
+ """Schedule `crawl_task` on start-up."""
self.bot = bot
+ self.crawl_task = self.bot.loop.create_task(self.crawl_incidents())
+
+ async def crawl_incidents(self) -> None:
+ """
+ Crawl #incidents and add missing emoji where necessary.
+
+ This is to catch-up should an incident be reported while the bot wasn't listening.
+ Internally, we simply walk the channel history and pass each message to `on_message`.
+
+ In order to avoid drowning in ratelimits, we take breaks after each message.
+
+ Once this task is scheduled, listeners should await it. The crawl assumes that
+ the channel history doesn't change as we go over it.
+ """
+ await self.bot.wait_until_guild_available()
+ incidents: discord.TextChannel = self.bot.get_channel(Channels.incidents)
+
+ # Limit the query at 50 as in practice, there should never be this many messages,
+ # and if there are, something has likely gone very wrong
+ limit = 50
+
+ # Seconds to sleep after each message
+ sleep = 2
+
+ log.debug(f"Crawling messages in #incidents: {limit=}, {sleep=}")
+ async for message in incidents.history(limit=limit):
+ await self.on_message(message)
+ await asyncio.sleep(sleep)
+
+ log.debug("Crawl task finished!")
@staticmethod
async def add_signals(incident: discord.Message) -> None:
|
[Jira Service management] Update service_desk.py
* Update service_desk.py
With the default 'application/json' content type, the upload does not work.
The documentation hints at which headers should be set;
if the X-Atlassian-Token header is missing, you receive a 404.
* Update service_desk.py
* Update service_desk.py | @@ -466,10 +466,14 @@ class ServiceDesk(AtlassianRestAPI):
"""
url = "rest/servicedeskapi/servicedesk/{}/attachTemporaryFile".format(service_desk_id)
+ # no application/json content type and an additional X-Atlassian-Token header
+ # https://docs.atlassian.com/jira-servicedesk/REST/4.14.1/#servicedeskapi/servicedesk/{serviceDeskId}/attachTemporaryFile-attachTemporaryFile
+ experimental_headers = self.experimental_headers.copy()
+ del experimental_headers["Content-Type"]
+ experimental_headers["X-Atlassian-Token"] = "no-check"
+
with open(filename, "rb") as file:
- result = self.post(path=url, headers=self.experimental_headers, files={"file": file}).get(
- "temporaryAttachments"
- )
+ result = self.post(path=url, headers=experimental_headers, files={"file": file}).get("temporaryAttachments")
temp_attachment_id = result[0].get("temporaryAttachmentId")
return temp_attachment_id
|
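A stand-alone sketch of the same header handling using plain requests rather than the library's own client; the base URL, credentials, file name and the X-ExperimentalApi header value are assumptions for illustration only:

import requests

base = "https://jira.example.com"  # placeholder instance
service_desk_id = 1
url = "{}/rest/servicedeskapi/servicedesk/{}/attachTemporaryFile".format(base, service_desk_id)

headers = {
    # No explicit Content-Type: requests sets the multipart boundary itself.
    "X-Atlassian-Token": "no-check",   # per the commit, missing it yields a 404
    "X-ExperimentalApi": "opt-in",     # assumed experimental-API opt-in header
}
with open("report.pdf", "rb") as fh:  # placeholder file
    resp = requests.post(url, headers=headers, files={"file": fh},
                         auth=("user", "password"))
print(resp.json().get("temporaryAttachments"))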
Better document rule processing order
Close | @@ -297,6 +297,8 @@ RuleDescriptor object
- Each condition is a dict with ``name``, ``minimum`` and ``maximum`` keys.
- ``subs``: list of substitutions
- Each substitution is stored as tuples of glyphnames, e.g. ("a", "a.alt").
+- Note: By default, rules are applied *before* text shaping/OpenType layout. See
+ `5.0 rules element`_.
Evaluating rules
----------------
@@ -849,6 +851,7 @@ glyphname pairs: the glyphs that need to be substituted. For a rule to be trigge
**only one** of the conditionsets needs to be true, ``OR``. Within a conditionset
**all** conditions need to be true, ``AND``.
+.. attributes-11:
Attributes
----------
|
Remove now-unused Path type
The engine lost Paths from its Snapshots at some point, and we didn't clean up. | @@ -26,13 +26,6 @@ class FileContent(datatype([('path', text_type), ('content', binary_type)])):
return repr(self)
-class Path(datatype([('path', text_type), 'stat'])):
- """A filesystem path, holding both its symbolic path name, and underlying canonical Stat.
-
- Both values are relative to the ProjectTree's buildroot.
- """
-
-
class PathGlobs(datatype([
'include',
'exclude',
|
Unmark xpass dials.tests.util.test_nexus.test_run
The original issue has now been fixed upstream. | -import pytest
-
-
-@pytest.mark.xfail(reason="https://github.com/cctbx/cctbx_project/pull/686")
def test_run(dials_regression, run_in_tmpdir):
from os.path import join
|
Improve spectral_norm (fixes
* Improve spectral_norm (fixes
Thank you Morgan Funtowicz for the report and minimal example!
* compute sigma only once | @@ -14,10 +14,11 @@ class SpectralNorm(object):
self.eps = eps
def compute_weight(self, module):
- weight = module._parameters[self.name + '_org']
- u = module._buffers[self.name + '_u']
+ weight = getattr(module, self.name + '_org')
+ u = getattr(module, self.name + '_u')
height = weight.size(0)
weight_mat = weight.view(height, -1)
+ with torch.no_grad():
for _ in range(self.n_power_iterations):
# Spectral norm of weight equals to `u^T W v`, where `u` and `v`
# are the first left and right singular vectors.
@@ -26,20 +27,21 @@ class SpectralNorm(object):
u = normalize(torch.matmul(weight_mat, v), dim=0, eps=self.eps)
sigma = torch.dot(u, torch.matmul(weight_mat, v))
- weight.data /= sigma
+ weight = weight / sigma
return weight, u
def remove(self, module):
weight = module._parameters[self.name + '_org']
- del module._parameters[self.name]
- del module._buffers[self.name + '_u']
- del module._parameters[self.name + '_org']
+ delattr(module, self.name)
+ delattr(module, self.name + '_u')
+ delattr(module, self.name + '_org')
module.register_parameter(self.name, weight)
def __call__(self, module, inputs):
weight, u = self.compute_weight(module)
setattr(module, self.name, weight)
- setattr(module, self.name + '_u', u)
+ with torch.no_grad():
+ getattr(module, self.name).copy_(weight)
@staticmethod
def apply(module, name, n_power_iterations, eps):
@@ -48,7 +50,9 @@ class SpectralNorm(object):
height = weight.size(0)
u = normalize(weight.new_empty(height).normal_(0, 1), dim=0, eps=fn.eps)
+ delattr(module, fn.name)
module.register_parameter(fn.name + "_org", weight)
+ module.register_buffer(fn.name, weight)
module.register_buffer(fn.name + "_u", u)
module.register_forward_pre_hook(fn)
|
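Typical usage of the public wrapper around the SpectralNorm class patched above, as a small sketch that is not part of the patch itself:

import torch
import torch.nn as nn

# Wrap a layer; the forward pre-hook recomputes `weight` from the
# `weight_org` parameter and the `weight_u` buffer on every call.
layer = nn.utils.spectral_norm(nn.Linear(20, 40), n_power_iterations=1)

x = torch.randn(8, 20)
y = layer(x)
print(y.shape)  # torch.Size([8, 40])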
Add typing for `LightningOptimizer`
Summary:
### New commit log messages
Add typing for `LightningOptimizer` | @@ -128,6 +128,8 @@ class ReAgentLightningModule(pl.LightningModule):
return ret
def optimizers(self, use_pl_optimizer: bool = True):
+ # pyre-fixme[6]: Expected `typing_extensions.Literal[True]` for 1st param
+ # but got `bool`.
o = super().optimizers(use_pl_optimizer)
if isinstance(o, list):
return o
|
fix: recursion error in translations
If a bad translation is found, then during `msgprint` it attempts to load
translations again because of its arg `title = _("Message")` | @@ -323,7 +323,6 @@ def get_translation_dict_from_file(path, lang, app):
app=app, lang=lang, values=cstr(item)
)
frappe.log_error(message=msg, title="Error in translation file")
- frappe.msgprint(msg)
return translation_map
|
Removed ivadomed import
Currently failing in RTD build: | @@ -20,7 +20,8 @@ import shlex
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('..'))
-import ivadomed
+# TODO: find a way to import ivadomed within RTD build
+# import ivadomed
from recommonmark.transform import AutoStructify
@@ -62,7 +63,8 @@ github_doc_root = 'https://github.com/neuropoly/ivado-medical-imaging/tree/maste
# built documents.
#
# The short X.Y version.
-version = ivadomed.__version__
+# TODO: uncomment once ivadomed can be imported
+# version = ivadomed.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
|
Fix MSVC: Add an explicit warning to modules that use the "array.array" internals and try to compile in PyPy.
Closes | cdef extern from *:
"""
#if CYTHON_COMPILING_IN_PYPY
+ #ifdef _MSC_VER
+ #pragma message ("This module uses CPython specific internals of 'array.array', which are not available in PyPy.")
+ #else
#warning This module uses CPython specific internals of 'array.array', which are not available in PyPy.
#endif
+ #endif
"""
from libc.string cimport memset, memcpy
|
Backfill cl/363031725
Update reference docs to note that notifications can overlap in a bucket | @@ -149,10 +149,8 @@ _CREATE_DESCRIPTION = """
service account this permission. If not, the create command attempts to
grant it.
- You can create multiple notification configurations for a bucket, but their
- triggers cannot overlap such that a single event could send multiple
- notifications. Attempting to create a notification configuration that
- overlaps with an existing notification configuration results in an error.
+ A bucket can have up to 100 total notification configurations and up to 10
+ notification configurations set to trigger for a specific event.
<B>CREATE EXAMPLES</B>
Begin sending notifications of all changes to the bucket example-bucket
|
notifications.py: Improve the regex for matching relative URLs.
Fixes: | @@ -70,37 +70,41 @@ def topic_narrow_url(realm, stream, topic):
def relative_to_full_url(base_url, content):
# type: (Text, Text) -> Text
- # URLs for uploaded content are of the form
- # "/user_uploads/abc.png". Make them full paths.
- #
- # There's a small chance of colliding with non-Zulip URLs containing
- # "/user_uploads/", but we don't have much information about the
- # structure of the URL to leverage.
+ # URLs for uploaded content are of the form:
+ # "/user_uploads/abc.png".
+ # Make them full paths. Explanation for all the regexes below:
+ # (\=['\"]) matches anything that starts with `=` followed by `"` or `'`.
+ # ([^\r\n\t\f <]) matches any character which is not a whitespace or `<`.
+ # ([^<]+>) matches any sequence of characters which does not contain `<`
+ # and ends in `>`.
+ # The last positive lookahead ensures that we replace URLs only within a tag.
content = re.sub(
- r"/user_uploads/(\S*)",
+ r"(?<=\=['\"])/user_uploads/([^\r\n\t\f <]*)(?=[^<]+>)",
base_url + r"/user_uploads/\1", content)
- # Our proxying user-uploaded images seems to break inline images in HTML
- # emails, so scrub the image but leave the link.
+ # Inline images can't be displayed in the emails as the request
+ # from the mail server can't be authenticated because it has no
+ # user_profile object linked to it. So we scrub the image but
+ # leave the link.
content = re.sub(
r"<img src=(\S+)/user_uploads/(\S+)>", "", content)
# URLs for emoji are of the form
# "static/generated/emoji/images/emoji/snowflake.png".
content = re.sub(
- r"/static/generated/emoji/images/emoji/",
+ r"(?<=\=['\"])/static/generated/emoji/images/emoji/(?=[^<]+>)",
base_url + r"/static/generated/emoji/images/emoji/",
content)
# Realm emoji should use absolute URLs when referenced in missed-message emails.
content = re.sub(
- r"/user_avatars/(\d+)/emoji/",
+ r"(?<=\=['\"])/user_avatars/(\d+)/emoji/(?=[^<]+>)",
base_url + r"/user_avatars/\1/emoji/", content)
# Stream links need to be converted from relative to absolute. They
# have href values in the form of "/#narrow/stream/...".
content = re.sub(
- r"/#narrow/stream/",
+ r"(?<=\=['\"])/#narrow/stream/(?=[^<]+>)",
base_url + r"/#narrow/stream/",
content)
@@ -137,7 +141,14 @@ def build_message_list(user_profile, messages):
# type: (Message) -> Dict[str, Text]
plain = message.content
plain = fix_plaintext_image_urls(plain)
- plain = relative_to_full_url(user_profile.realm.uri, plain)
+ # There's a small chance of colliding with non-Zulip URLs containing
+ # "/user_uploads/", but we don't have much information about the
+ # structure of the URL to leverage. We can't use `relative_to_full_url()`
+ # function here because it uses a stricter regex which will not work for
+ # plain text.
+ plain = re.sub(
+ r"/user_uploads/(\S*)",
+ user_profile.realm.uri + r"/user_uploads/\1", plain)
assert message.rendered_content is not None
html = message.rendered_content
|
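A small demonstration of the stricter pattern introduced above; the realm URL and HTML snippet are made up for illustration:

import re

base_url = "https://chat.example.com"
html = '<a href="/user_uploads/abc.png">abc.png</a> and a bare /user_uploads/abc.png'

fixed = re.sub(
    r"(?<=\=['\"])/user_uploads/([^\r\n\t\f <]*)(?=[^<]+>)",
    base_url + r"/user_uploads/\1", html)

# Only the path inside the href attribute gets the prefix; the bare path in the
# plain text is untouched, which is why build_message_list keeps the looser regex.
print(fixed)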
changelog update
Forgot to include the changelog when I pushed v0.8.4. | @@ -20,13 +20,19 @@ After downloading the directory, the package can be installed by running::
Changelog
=========
-0.8.4 (2018-01-30)
+0.8.4 (2018-02-24)
------------------
+* Added new slice sampling option (`'rslice'`).
+
* Changed internals to allow user to access quantities during dynamic batch
allocation. Breaks some aspects of backwards compatibility with generators.
-* Added new slice sampling options (`'rslice'` and `'hslice'`).
+* Simplified parallelism options.
+
+* Fixed a singular decomposition bug that occasionally appeared during runtime.
+
+* Small plotting/utility improvements.
0.8.3 (2017-12-13)
------------------
|
First Steps
Minor wording change. | @@ -17,7 +17,7 @@ There are two options to run WISDEM with these files. The first option is to use
The first step for either option is to make copies of example files
-------------------------------------------------------------------
-Before you start editing your WISDEM input files, please make copies of the original files in a separate folder. If you edit copies of the original files, you can always revert back to a version of the files that is known to execute successfully.
+Before you start editing your WISDEM input files, please make copies of the original files in a separate folder. This ensures that, if you edit copies of the original files, you can always revert back to a version of the files that is known to execute successfully.
Option 1: Text editor and command line
--------------------------------------
|
Catch case where every reflection is an outlier
and quit nicely. Fixes | @@ -495,7 +495,9 @@ def finalise(self, analysis=None):
self._reflections.flags.used_in_refinement,
)
- logger.debug("%d reflections remain in the manager", len(self._reflections))
+ logger.info("%d reflections remain in the manager", len(self._reflections))
+ if len(self._reflections) == 0:
+ raise DialsRefineConfigError("No reflections available for refinement")
# print summary after outlier rejection
if rejection_occurred:
|
Update README.md
Update Citation APA style version number | @@ -151,7 +151,7 @@ See [https://github.com/ccbogel/QualCoder-Debians](https://github.com/ccbogel/Qu
## Citation APA style
-Curtain, C. (2020) QualCoder 1.9 [Computer software]. Retrieved from
+Curtain, C. (2020) QualCoder 2.0 [Computer software]. Retrieved from
https://github.com/ccbogel/QualCoder/releases/tag/1.9
## Publications using QualCoder
|
Fix illegal output test
* We changed the behavior when get_attribute fails in outputs
* Instead of raising an error and failing all the outputs, we put the error string where it failed
so the valid outputs will be visible | @@ -437,14 +437,9 @@ class DeploymentsTestCase(base_test.BaseServerTestCase):
blueprint_file_name='blueprint_with_illegal_output.yaml',
blueprint_id=id_,
deployment_id=id_)
- try:
- self.client.deployments.outputs.get(id_)
- self.fail()
- except CloudifyClientError, e:
- self.assertEqual(400, e.status_code)
- self.assertEqual(
- manager_exceptions.DeploymentOutputsEvaluationError.ERROR_CODE,
- e.error_code)
+ outputs = self.client.deployments.outputs.get(id_)
+ self.assertIn("More than one node instance found for node",
+ outputs['outputs']['ip_address'])
@attr(client_min_version=3.1,
client_max_version=base_test.LATEST_API_VERSION)
|
Simplifying conditional logic in template
Benefits readability at small cost of repetition
Addresses review feedback | {% endblock extra_css %}
{% block content %}
+ {% if entity %}
+
<h1>
{{ measure.name }}
- {% if entity %}<br><small>{{ entity.name }}</small>{% endif %}
+ <br><small>{{ entity.name }}</small>
</h1>
{% if entity %}
{% if entity_type == 'practice' %}
</a>
</p>
- <p>This measure shows how this organisation compares with its peers across NHS England. This is indicative only, and should be approached with caution. <a href='{% url 'faq' %}#measureinterpret'>Read more about measures</a>.</p>
+ <p>This measure shows how this organisation compares with its
+ peers across NHS England. This is indicative only, and should be
+ approached with caution.
+
+ <a href='{% url 'faq' %}#measureinterpret'>Read more about measures</a>.</p>
+
+ {% else %}
+
+ <h1>{{ measure.name }}</h1>
+
{% endif %}
{% include '_measures_heading.html' with entity_type=entity_type %}
|
Fix location issue under windows.
This patch fixes | @@ -1210,6 +1210,11 @@ def update_readme_data(readme_file, **readme_updates):
readme_data = json.load(f)
readme_data[extensions_key] = generation_data
+ for denomination, data in readme_data.copy().items():
+ if "location" in data and data["location"] and "\\" in data["location"]:
+ # Windows compatibility: #1166
+ readme_data[denomination]["location"] = data["location"].replace("\\", "/")
+
with open(readme_file, "w") as f:
json.dump(readme_data, f)
|
Update uninitialized matrix explanation
Added a point of clarification because the meaning of an uninitialized matrix can be confusing to beginner students who expect that all newly created values are set to a known value, such as zero.
Issue: | @@ -29,6 +29,8 @@ import torch
x = torch.empty(5, 3)
print(x)
+# Note: An uninitialized matrix is declared, but does not contain definite known values before it is used.
+# When an uninitialized matrix is created, whatever values were in the allocated memory at the time will appear as the initial values.
###############################################################
# Construct a randomly initialized matrix:
|
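A tiny contrast that illustrates the note added above (shapes chosen arbitrarily):

import torch

print(torch.empty(2, 3))  # arbitrary values left over in the allocated memory
print(torch.zeros(2, 3))  # explicitly initialized to zeros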
Rename argument object_id to object_ids for _no_older_operations
In a *for* statement there are two variables with the same name object_id.
Since the caller uses object_ids, rename the argument object_id to object_ids. | @@ -60,7 +60,7 @@ def _is_valid_delete_operation(session, row):
return True
-def _no_older_operations(session, object_id, row):
+def _no_older_operations(session, object_ids, row):
"""Check that no older operation exist.
Determine that there aren't any operations still in the queue for the
@@ -68,10 +68,10 @@ def _no_older_operations(session, object_id, row):
If such an operation is found, False is returned.
If no older operations exist, True is returned.
"""
- if not isinstance(object_id, (list, tuple)):
- object_id = (object_id,)
+ if not isinstance(object_ids, (list, tuple)):
+ object_ids = (object_ids,)
- for object_id in object_id:
+ for object_id in object_ids:
if db.check_for_pending_or_processing_ops(
session, object_id, seqnum=row.seqnum):
return False
|
Fix CulebraTestCase when useuiautomatorhelper is set
Fix Pycharm test runner case
Print TestProgram USAGE exception only in debug mode | @@ -2630,8 +2630,7 @@ class ViewClient:
self.forceViewServerUse = forceviewserveruse
''' Force the use of ViewServer even if the conditions to use UiAutomator are satisfied '''
- self.useUiAutomator = (self.build[
- VERSION_SDK_PROPERTY] >= 16) and not forceviewserveruse # jelly bean 4.1 & 4.2
+ self.useUiAutomator = self.uiAutomatorHelper or (self.build[VERSION_SDK_PROPERTY] >= 16) and not forceviewserveruse # jelly bean 4.1 & 4.2
if DEBUG:
print(" ViewClient.__init__: useUiAutomator=", self.useUiAutomator, "sdk=",
self.build[VERSION_SDK_PROPERTY], "forceviewserveruse=", forceviewserveruse, file=sys.stderr)
@@ -4797,7 +4796,7 @@ class CulebraTestCase(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.kwargs1 = {'ignoreversioncheck': False, 'verbose': False, 'ignoresecuredevice': False}
- cls.kwargs2 = {'startviewserver': True, 'forceviewserveruse': False, 'autodump': False,
+ cls.kwargs2 = {'startviewserver': False, 'forceviewserveruse': False, 'autodump': False,
'ignoreuiautomatorkilled': True}
@classmethod
@@ -4815,8 +4814,8 @@ class CulebraTestCase(unittest.TestCase):
# Handle the special case for utrunner.py (Pycharm test runner)
progname = os.path.basename(sys.argv[0])
- if progname == 'utrunner.py':
- testname = sys.argv[1]
+ if progname == 'utrunner.py' or progname == '_jb_unittest_runner.py':
+ testname = sys.argv[1] if len(sys.argv) >= 2 else ""
# a string containing the args
testargs = sys.argv[2] if len(sys.argv) >= 3 else ""
# used by utrunner.py (usually `true`) but depends on the number of args
@@ -4843,7 +4842,7 @@ class CulebraTestCase(unittest.TestCase):
except:
pass
- if self.kwargs2.get('useuiautomatorhelper'):
+ if 'useuiautomatorhelper' in self.kwargs2 and self.kwargs2.get('useuiautomatorhelper'):
# FIXME: we could find better alternatives for device and serialno when UiAutomatorHelper is used
# Searialno could be obtained form UiAutomatorHelper too.
self.vc = ViewClient("UI_AUTOMATOR_HELPER_DEVICE", "UI_AUTOMATOR_HELPER_SERIALNO", **self.kwargs2)
@@ -4971,6 +4970,7 @@ class CulebraTestCase(unittest.TestCase):
try:
unittest.TestProgram.USAGE = unittest.TestProgram.USAGE.replace(old, new)
except AttributeError as ex:
+ if DEBUG:
print(ex, file=sys.stderr)
argsToRemove = []
i = 0
|
[IMPR] Make GoogleSearchPageGenerator an abc.Generator
Derive GoogleSearchPageGenerator from tools.collections.GeneratorWrapper
Rename the __iter__ method to the generator property to be reused by
the Wrapper class | @@ -31,6 +31,7 @@ from pywikibot.backports import (
from pywikibot.comms import http
from pywikibot.exceptions import APIError, ServerError
from pywikibot.tools import deprecated
+from pywikibot.tools.collections import GeneratorWrapper
from pywikibot.tools.itertools import filter_unique, itergroup
@@ -780,9 +781,8 @@ def LiveRCPageGenerator(site: OPT_SITE_TYPE = None,
# following classes just ported from version 1 without revision; not tested
-class GoogleSearchPageGenerator(Iterable['pywikibot.page.Page']):
- """
- Page generator using Google search results.
+class GoogleSearchPageGenerator(GeneratorWrapper):
+ """Page generator using Google search results.
To use this generator, you need to install the package 'google':
@@ -793,6 +793,9 @@ class GoogleSearchPageGenerator(Iterable['pywikibot.page.Page']):
As there are concerns about Google's Terms of Service, this
generator prints a warning for each query.
+
+ .. versionchanged:: 7.6
+ subclassed from :class:`pywikibot.tools.collections.GeneratorWrapper`
"""
def __init__(self, query: Optional[str] = None,
@@ -834,11 +837,15 @@ class GoogleSearchPageGenerator(Iterable['pywikibot.page.Page']):
pywikibot.warning('Please read http://www.google.com/accounts/TOS')
yield from google.search(query)
- def __iter__(self):
- """Iterate results.
+ @property
+ def generator(self) -> Iterator['pywikibot.page.Page']:
+ """Yield results from :meth:`queryGoogle` query.
Google contains links in the format:
https://de.wikipedia.org/wiki/en:Foobar
+
+ .. versionchanged:: 7.6
+ changed from iterator method to generator property
"""
# restrict query to local site
local_query = '{} site:{}'.format(self.query, self.site.hostname())
@@ -894,7 +901,7 @@ def MySQLPageGenerator(query: str, site: OPT_SITE_TYPE = None,
class XMLDumpPageGenerator(abc.Iterator): # type: ignore[type-arg]
- """Xml generator that yields Page objects.
+ """Xml iterator that yields Page objects.
.. versionadded:: 7.2
the `content` parameter
@@ -955,7 +962,7 @@ class XMLDumpPageGenerator(abc.Iterator): # type: ignore[type-arg]
@deprecated('XMLDumpPageGenerator with content=True parameter', since='7.2.0')
class XMLDumpOldPageGenerator(XMLDumpPageGenerator):
- """Xml generator that yields Page objects with old text loaded.
+ """Xml iterator that yields Page objects with old text loaded.
.. deprecated:: 7.2
:class:`XMLDumpPageGenerator` with `content` parameter should be
|
fix: [cli] allow loading YAML and JSON data containing bare arrays
[cli] allow loading YAML and JSON data that contains data other than mapping
objects, such as bare arrays, primitive data and so on. | @@ -349,7 +349,11 @@ def main(argv=None):
args = _parse_args((argv if argv else sys.argv)[1:])
cnf = os.environ.copy() if args.env else {}
diff = _load_diff(args)
+
+ if cnf:
API.merge(cnf, diff)
+ else:
+ cnf = diff
if args.args:
diff = anyconfig.parser.parse(args.args)
|
Export TOIL_TORQUE_ARGS
Environment variable that allows passing TORQUE scheduler-specific parameters | @@ -121,6 +121,15 @@ class TorqueBatchSystem(AbstractGridEngineBatchSystem):
if cpu is not None and math.ceil(cpu) > 1:
qsubline.extend(['-l ncpus=' + str(int(math.ceil(cpu)))])
+ # "Native extensions" for TORQUE (see DRMAA or SAGA)
+ nativeConfig = os.getenv('TOIL_TORQUE_ARGS')
+ if nativeConfig is not None:
+ logger.debug("Native TORQUE options appended to qsub from TOIL_TORQUE_RESOURCES env. variable: {}".format(nativeConfig))
+ #if ("--mem" in nativeConfig) or ("--cpus-per-task" in nativeConfig):
+ if ("mem=" in nativeConfig) or ("nodes=" in nativeConfig) or ("ppn=" in nativeConfig):
+ raise ValueError("Some resource arguments are incompatible: {}".format(nativeConfig))
+ qsubline.extend(nativeConfig.split())
+
return qsubline
def generateTorqueWrapper(self, command):
|
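A hypothetical usage sketch for the new environment variable; the flags shown are just examples of TORQUE qsub options that would be appended verbatim:

import os

# Must not contain mem=, nodes= or ppn= -- the check above raises ValueError for those.
os.environ["TOIL_TORQUE_ARGS"] = "-q batch -l walltime=24:00:00"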
Maybe a space is keeping it from being able to convert?
From reading something online, maybe there are spaces? | @@ -64,6 +64,7 @@ def read_data(filename):
# Reformat wavelengths
header_dict["wavelength"] = header_dict["wavelength"].replace("{", "")
header_dict["wavelength"] = header_dict["wavelength"].replace("}", "")
+ header_dict["wavelength"] = header_dict["wavelength"].replace(" ", "")
header_dict["wavelength"] = header_dict["wavelength"].split(",")
# Create dictionary of wavelengths
|
URL encode the range in the value_* functions
This fixes using this endpoint when the worksheet name has forward
slashes or other strange characters. | @@ -8,6 +8,11 @@ This module contains common spreadsheets' models.
"""
+try:
+ from urllib.parse import quote
+except:
+ from urllib import quote
+
from ..base import BaseCell, BaseSpreadsheet
from ..exceptions import WorksheetNotFound, CellNotFound
@@ -93,22 +98,22 @@ class Spreadsheet(BaseSpreadsheet):
return r.json()
def values_append(self, range, params, body):
- url = SPREADSHEET_VALUES_APPEND_URL % (self.id, range)
+ url = SPREADSHEET_VALUES_APPEND_URL % (self.id, quote(range, safe=''))
r = self.client.request('post', url, params=params, json=body)
return r.json()
def values_clear(self, range):
- url = SPREADSHEET_VALUES_CLEAR_URL % (self.id, range)
+ url = SPREADSHEET_VALUES_CLEAR_URL % (self.id, quote(range, safe=''))
r = self.client.request('post', url)
return r.json()
def values_get(self, range, params=None):
- url = SPREADSHEET_VALUES_URL % (self.id, range)
+ url = SPREADSHEET_VALUES_URL % (self.id, quote(range, safe=''))
r = self.client.request('get', url, params=params)
return r.json()
def values_update(self, range, params=None, body=None):
- url = SPREADSHEET_VALUES_URL % (self.id, range)
+ url = SPREADSHEET_VALUES_URL % (self.id, quote(range, safe=''))
r = self.client.request('put', url, params=params, json=body)
return r.json()
|
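A quick illustration of what quote(range, safe='') does to a range whose worksheet title contains spaces and a slash (the title is made up):

try:
    from urllib.parse import quote  # Python 3
except ImportError:
    from urllib import quote        # Python 2

print(quote("My Sheet / 2024!A1:B2", safe=''))
# -> My%20Sheet%20%2F%202024%21A1%3AB2  (the '/' is encoded too because safe='')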
Public APIs guide
Test Plan: manual inspection
Reviewers: max, catherinewu | "path": "/community/releases",
"name": "Releases & Deprecations"
},
+ {
+ "path": "/community/public-apis",
+ "name": "Changing Public APIs"
+ },
{
"path": "https://join.slack.com/t/dagster/shared_invite/enQtNjEyNjkzNTA2OTkzLTI0MzdlNjU0ODVhZjQyOTMyMGM1ZDUwZDQ1YjJmYjI3YzExZGViMDI1ZDlkNTY5OThmYWVlOWM1MWVjN2I3NjU",
"isAbsolutePath": true,
|
shrink-mds: use mds_to_kill_hostname instead
When using FQDNs in the inventory host file, this task will fail because the
mds is registered with its shortname.
It means we must use `mds_to_kill_hostname` in this task.
Closes: | tasks:
# get rid of this as soon as "systemctl stop ceph-msd@$HOSTNAME" also
# removes the MDS from the FS map.
- - name: exit mds if it the deployment is containerized
+ - name: exit mds when containerized deployment
+ command: "{{ container_exec_cmd | default('') }} ceph tell mds.{{ mds_to_kill_hostname }} exit"
when: containerized_deployment | bool
- command: "{{ container_exec_cmd | default('') }} ceph tell mds.{{ mds_to_kill }} exit"
- name: get ceph status
command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} -s -f json"
|
Ensure reflections fill the scan-range sufficiently.
Fixes | import libtbx
from scitbx import matrix
+from dials.util import Sorry
from dials.array_family import flex
from dials.algorithms.refinement import weighting_strategies
from dials.algorithms.refinement.analysis.centroid_analysis import CentroidAnalyser
@@ -169,6 +170,15 @@ def _create_block_columns(self):
self._reflections["block_centre"] = flex.double(len(self._reflections))
return
+ @staticmethod
+ def _check_scan_range(exp_phi, start, stop):
+ """Check that the observed reflections fill the scan-range"""
+
+ # Allow up to 2 degrees between the observed phi extrema and the
+ # scan edges
+ if min(exp_phi) - start > 0.03491 or stop - max(exp_phi) > 0.03491:
+ raise Sorry("The reflections do not fill the scan range.")
+
def per_width(self, width, deg=True):
"""Set blocks for all experiments according to a constant width"""
@@ -198,6 +208,8 @@ def per_width(self, width, deg=True):
for e in block_starts
]
+ self._check_scan_range(exp_phi, start, stop)
+
for b_num, (b_start, b_cent) in enumerate(zip(block_starts, block_centres)):
sub_isel = isel.select(
(b_start <= exp_phi) & (exp_phi <= (b_start + _width))
@@ -228,6 +240,8 @@ def per_image(self):
start, stop = flex.min(frames), flex.max(frames)
frame_range = range(start, stop + 1)
+ self._check_scan_range(exp_phi, start, stop)
+
for f_num, f in enumerate(frame_range):
sub_isel = isel.select(frames == f)
f_cent = f + 0.5
@@ -602,8 +616,6 @@ def _id_refs_to_keep(self, obs_data):
# sanity check to catch a mutilated scan that does not make sense
if passed2.count(True) == 0:
- from dials.util import Sorry
-
raise Sorry(
"Experiment id {0} contains no reflections with valid "
"scan angles".format(iexp)
|
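For reference, the 0.03491 threshold used in _check_scan_range above is simply 2 degrees expressed in radians:

import math

print(math.radians(2))  # 0.03490658503988659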
Allow configurable timeouts in admin client check version
Currently there's no way to pass timeout to check_version if called from admin. | @@ -206,7 +206,7 @@ class KafkaAdminClient(object):
self._client = KafkaClient(metrics=self._metrics,
metric_group_prefix='admin',
**self.config)
- self._client.check_version()
+ self._client.check_version(timeout=(self.config['api_version_auto_timeout_ms'] / 1000))
# Get auto-discovered version from client if necessary
if self.config['api_version'] is None:
@@ -273,7 +273,7 @@ class KafkaAdminClient(object):
response = future.value
controller_id = response.controller_id
# verify the controller is new enough to support our requests
- controller_version = self._client.check_version(controller_id)
+ controller_version = self._client.check_version(controller_id, timeout=(self.config['api_version_auto_timeout_ms'] / 1000))
if controller_version < (0, 10, 0):
raise IncompatibleBrokerVersion(
"The controller appears to be running Kafka {}. KafkaAdminClient requires brokers >= 0.10.0.0."
|
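An illustrative construction showing the config key that now drives the version probe; the broker address and timeout are placeholder values, assuming kafka-python's public KafkaAdminClient constructor:

from kafka.admin import KafkaAdminClient

admin = KafkaAdminClient(
    bootstrap_servers="localhost:9092",  # placeholder broker
    api_version_auto_timeout_ms=10000,   # 10 s, used by check_version() after this change
)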
Add `__repr__` method to `OldPluginWrapper` so proper name is displayed.
This is only really an issue in the plugin settings views. | @@ -371,6 +371,9 @@ class OldPluginAdapter(BasePlugin):
def __init__(self, plugin):
self.plugin = plugin
+ def __repr__(self):
+ return self.plugin.__class__.__name__
+
@property
def enabled(self):
plugin_type = self.get_plugin_type()
|
ci: cleanup
* ci: quote python version numbers
Future-proofs for coming 3.10.X versions by changing 3.10.0 to "3.10"
* ci: remove max parallel
Allow github to maximise number of parallel jobs | @@ -9,10 +9,9 @@ jobs:
test:
runs-on: ${{ matrix.platform }}
strategy:
- max-parallel: 4
matrix:
platform: [ ubuntu-latest, macos-latest, windows-latest ]
- python-version: [ 3.6.7, 3.7, 3.8, 3.9, 3.10.0 ]
+ python-version: [ "3.6.7", "3.7", "3.8", "3.9", "3.10" ]
steps:
- uses: actions/checkout@v2
|
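The likely motivation for quoting (an inference, not stated in the commit) is that an unquoted 3.10 is resolved as the float 3.1 under YAML 1.1 rules; PyYAML is used below only to illustrate that rule:

import yaml

print(yaml.safe_load("version: 3.10"))    # {'version': 3.1}
print(yaml.safe_load("version: '3.10'"))  # {'version': '3.10'}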
Split cpu/gpu in caffe2/distributed + some clean up
Summary:
Pull Request resolved:
A few targets in caffe2/caffe2/distribute need to be split too, otherwise they won't compile. Also some cleanups, and rename select_gpu_type to gpu_library_selector | # not currently relevant so they are combined into one list.
from __future__ import absolute_import, division, print_function, unicode_literals
load("@bazel_skylib//lib:new_sets.bzl", "sets")
-load("//caffe2/caffe2/fb:defs_gpu.bzl", "gpu_library_targets")
+load("//caffe2/caffe2/fb:defs_gpu.bzl", "gpu_library_selector")
GENERATED_CPP = [
"Functions.cpp",
@@ -347,11 +347,11 @@ def add_torch_libs():
)
# TODO: split it into cpp and cuda parts similarly to libtorch
- gpu_library_targets(
+ gpu_library_selector(
name="_C_impl",
- deps=[":_C_impl_cuda"],
deps_cpu=[":_C_impl_cpu"],
- merge_only=True,
+ deps_cuda=[":_C_impl_cuda"],
+ merge_cpu_deps=False,
)
cpp_library(
|
Update dataloader.py
Update dataloader.py: unified seg_pos as a list | @@ -179,7 +179,7 @@ class LmDataloader(Dataloader):
src_single.append(self.vocab.get(PAD_TOKEN))
src.append(src_single[:-1])
tgt.append(src_single[1:])
- seg.append([1] * ins[1] + [0] * (len(src_single) - 1 - ins[1]))
+ seg.append([1] * ins[1][0] + [0] * (len(src_single) - 1 - ins[1][0]))
yield torch.LongTensor(src), \
torch.LongTensor(tgt), \
@@ -366,7 +366,7 @@ class BartDataloader(Dataloader):
tgt_single.append(self.vocab.get(PAD_TOKEN))
src_single, _ = mask_seq(src_single, self.tokenizer, self.whole_word_masking, self.span_masking, self.span_geo_prob, self.span_max_length)
- seg_pos = ins[2]
+ seg_pos = ins[2][0]
tgt_in.append(tgt_single[:-1])
tgt_out.append(tgt_single[1:])
|
Complete tags of superglue dataset card
complete tags of superglue dataset card | ---
+annotations_creators:
+- expert-generated
language:
- en
+language_creators:
+- other
+license:
+- unknown
+multilinguality:
+- monolingual
paperswithcode_id: superglue
pretty_name: SuperGLUE
+size_categories:
+- 10K<n<100K
+source_datasets:
+- extended|other
+tags:
+- superglue
+- NLU
+- natural language understanding
+task_categories:
+- text-classification
+- token-classification
+- question-answering
+task_ids:
+- natural-language-inference
+- word-sense-disambiguation
+- coreference-resolution
+- extractive-qa
---
# Dataset Card for "super_glue"
|
repository.virtual: InjectedPkg: add data attr to store misc data
For example, storing the reason or exception object related to why an
injected pkg was created. | @@ -64,23 +64,24 @@ class InjectedPkg(pkg_base.wrapper):
__slots__ = (
"bdepend", "depend", "rdepend", "pdepend",
- "repo", "repo_id", "built", "versioned_atom", "unversioned_atom",
+ "repo", "repo_id", "built", "versioned_atom", "unversioned_atom", "data",
)
default_bdepend = default_depend = default_rdepend = default_pdepend = DepSet()
package_is_real = False
is_supported = True
- def __init__(self, raw_pkg, repo):
+ def __init__(self, raw_pkg, repo, data=None):
pkg_base.wrapper.__init__(self, raw_pkg)
object.__setattr__(self, "repo", repo)
object.__setattr__(self, "repo_id", repo.repo_id)
object.__setattr__(self, "built", repo.livefs)
- object.__setattr__(self, "versioned_atom", str(self._raw_pkg))
+ object.__setattr__(self, "versioned_atom", self._raw_pkg)
object.__setattr__(self, "unversioned_atom", self._raw_pkg.key)
object.__setattr__(self, "bdepend", self.default_bdepend)
object.__setattr__(self, "depend", self.default_depend)
object.__setattr__(self, "rdepend", self.default_rdepend)
object.__setattr__(self, "pdepend", self.default_pdepend)
+ object.__setattr__(self, "data", data)
@property
def use(self):
|
status endpoint - fix services check
If a service that is run by systemd is not up yet, it's in a failed state and there's no need to use uninitialized data | @@ -113,8 +113,9 @@ class Status(SecuredResourceReadonlyMode):
statuses = []
for service in systemd_services:
if should_be_in_services_output(service, OPTIONAL_SERVICES):
- status = ACTIVE_STATE if service['instances'][0]['state'] == \
- 'running' else INACTIVE_STATE
+ is_service_running = service['instances'] and (
+ service['instances'][0]['state'] == 'running')
+ status = ACTIVE_STATE if is_service_running else INACTIVE_STATE
services[service['display_name']] = {
'status': status,
'is_external': False,
|
Re-ordered recent additions to Dockerfile
It's preferred to build your app code after installing dependencies in
the Dockerfile. The latest additions to this file were added after
building the danesfield app code. This change corrects that ordering. | @@ -67,14 +67,6 @@ COPY ./deployment/conda/conda_env.yml \
RUN ${CONDA_EXECUTABLE} env create -f ./danesfield/deployment/conda/conda_env.yml -n core3d && \
${CONDA_EXECUTABLE} clean -tipsy
-# Install Danesfield package into CORE3D Conda environment
-COPY . ./danesfield
-RUN rm -rf ./danesfield/deployment
-RUN ["/bin/bash", "-c", "source /opt/conda/etc/profile.d/conda.sh && \
- conda activate core3d && \
- pip install --upgrade pip && \
- pip install -e ./danesfield"]
-
# Install core3d-tf_ops package from kitware-geospatial / defaults
RUN ["/bin/bash", "-c", "source /opt/conda/etc/profile.d/conda.sh && \
conda activate core3d && \
@@ -87,6 +79,14 @@ RUN ["/bin/bash", "-c", "source /opt/conda/etc/profile.d/conda.sh && \
conda install -c conda-forge -y opencv && \
conda clean -tipsy"]
+# Install Danesfield package into CORE3D Conda environment
+COPY . ./danesfield
+RUN rm -rf ./danesfield/deployment
+RUN ["/bin/bash", "-c", "source /opt/conda/etc/profile.d/conda.sh && \
+ conda activate core3d && \
+ pip install --upgrade pip && \
+ pip install -e ./danesfield"]
+
# Set entrypoint to script that sets up and activates CORE3D environment
ENTRYPOINT ["/bin/bash", "./danesfield/docker-entrypoint.sh"]
|