message (string, lengths 13 to 484)
diff (string, lengths 38 to 4.63k)
m1n1.utils: Make Register sub-subclasses work, optimize. Now figures out the fields/etc in a metaclass, not at object instantiation time.
@@ -70,11 +70,31 @@ class Reloadable: def _reloadme(self): self.__class__ = self._reloadcls() -class Register(Reloadable): +class RegisterMeta(type): + def __new__(cls, name, bases, dct): + m = super().__new__(cls, name, bases, dct) + + f = {} + + if bases and bases[0] is not Reloadable: + for cls in bases[0].mro(): + if cls is Reloadable: + break + f.update({k: None for k,v in cls.__dict__.items() + if not k.startswith("_") and isinstance(v, (int, tuple))}) + + f.update({k: None for k, v in dct.items() + if not k.startswith("_") and isinstance(v, (int, tuple))}) + + m._fields_list = list(f.keys()) + m._fields = set(f.keys()) + + return m + +class Register(Reloadable, metaclass=RegisterMeta): def __init__(self, v=0, **kwargs): self._value = v - self._fields_list = [k for k in self.__class__.__dict__ if not k.startswith("_")] - self._fields = set(self._fields_list) + for k,v in kwargs.items(): setattr(self, k, v) @@ -144,9 +164,11 @@ class Register(Reloadable): return val + def str_fields(self): + return f"{', '.join(f'{k}={self._field_val(k)}' for k in self._fields_list)}" + def __str__(self): - d = '.' - return f"0x{self._value:x} ({', '.join(f'{k}={self._field_val(k)}' for k in self._fields_list)})" + return f"0x{self._value:x} ({self.str_fields()})" def __repr__(self): return f"{type(self).__name__}({', '.join(f'{k}={self._field_val(k, True)}' for k in self._fields_list)})" @@ -479,7 +501,7 @@ class SetRangeMap(RangeMap): values = super().lookup(addr) return frozenset(values) if values else frozenset() -class RegMeta(type): +class RegMapMeta(type): def __new__(cls, name, bases, dct): m = super().__new__(cls, name, bases, dct) m._addrmap = {} @@ -562,7 +584,7 @@ class RegArrayAccessor: else: return [self.rd(self.addr + i) for i in self.range[item]] -class RegMap(Reloadable, metaclass=RegMeta): +class RegMap(Reloadable, metaclass=RegMapMeta): def __init__(self, backend, base): self._base = base self._backend = backend
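For illustration, a rough self-contained sketch of the pattern the commit adopts (hypothetical names, not the actual m1n1 code): a metaclass walks the bases once at class-creation time and caches the discovered field names, so subclasses of subclasses inherit the field list without recomputing it per instance.

class FieldMeta(type):
    # Collect non-underscore int/tuple class attributes from the bases and the
    # class body once, when the class is created, instead of on every instantiation.
    def __new__(mcs, name, bases, dct):
        cls = super().__new__(mcs, name, bases, dct)
        fields = {}
        for base in reversed(cls.__mro__[1:]):
            fields.update({k: None for k, v in base.__dict__.items()
                           if not k.startswith("_") and isinstance(v, (int, tuple))})
        fields.update({k: None for k, v in dct.items()
                       if not k.startswith("_") and isinstance(v, (int, tuple))})
        cls._fields_list = list(fields)
        return cls

class Reg(metaclass=FieldMeta):
    ENABLE = 0        # field given as a bit position
    MODE = (4, 1)     # field given as a bit range

class SubReg(Reg):    # a subclass of a subclass still sees the inherited fields
    EXTRA = 8

print(SubReg._fields_list)   # ['ENABLE', 'MODE', 'EXTRA']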
Update mediaprocessor.py null out blank dispositions
@@ -822,6 +822,8 @@ class MediaProcessor: self.log.debug("Cleaning up default disposition settings from not preferred languages. %d streams will have default flag removed." % (len(default_streams_not_in_preferred_language))) for remove in default_streams_not_in_preferred_language: remove['disposition'] = remove.get('disposition', '').replace('+default', '') + if not remove.get('disposition'): + remove['disposition'] = None try: if 'default' not in default_stream.get('disposition', ''):
[BlackrockIO] Consistency improvements. Made __get_nonneural_evtypes_variant_b and ..._variant_a consistent by creating the mask using bitwise operators in both methods instead of equality with an integer. This is also in line with the Blackrock manual, which also refers to single bits being set.
@@ -1862,12 +1862,14 @@ class BlackrockRawIO(BaseRawIO): 'digital_input_port': { 'name': 'digital_input_port', 'field': 'digital_input', - 'mask': data['packet_insertion_reason'] == 1, + 'mask': self.__is_set(data['packet_insertion_reason'], 0) & + ~self.__is_set(data['packet_insertion_reason'], 7), 'desc': "Events of the digital input port"}, 'serial_input_port': { 'name': 'serial_input_port', 'field': 'digital_input', - 'mask': data['packet_insertion_reason'] == 129, + 'mask': self.__is_set(data['packet_insertion_reason'], 0) & + self.__is_set(data['packet_insertion_reason'], 7), 'desc': "Events of the serial input port"}} return event_types
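For context, a minimal sketch of what a bit-test helper such as __is_set does (name and signature are assumptions here; the real neo helper may differ): testing individual bits distinguishes the digital port (bit 0 set, bit 7 clear) from the serial port (bits 0 and 7 both set) instead of relying on exact values like 1 or 129.

import numpy as np

def is_set(flags, bit):
    # True wherever the given bit is set in each element of `flags`.
    return (flags & (1 << bit)).astype(bool)

reason = np.array([1, 129, 64], dtype=np.uint8)    # sample packet_insertion_reason values
digital = is_set(reason, 0) & ~is_set(reason, 7)   # bit 0 set, bit 7 clear
serial = is_set(reason, 0) & is_set(reason, 7)     # bits 0 and 7 both set
print(digital)   # [ True False False]
print(serial)    # [False  True False]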
change if statement to catch any not-None keywords, allowing `title="",year=""` to work correctly
@@ -517,7 +517,7 @@ class PlexPartialObject(PlexObject): key = '/library/metadata/%s/matches' % self.ratingKey params = {'manual': 1} - if any([agent, title, year, language]): + if any(x is not None for x in [agent, title, year, language]): if title is None: params['title'] = self.title else:
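A quick standalone illustration of why the old check failed: any() treats empty strings as falsy, so passing title="" skipped the whole block even though the caller explicitly supplied a value.

agent, title, year, language = None, "", "", None

print(any([agent, title, year, language]))                          # False: old check skips the block
print(any(x is not None for x in [agent, title, year, language]))   # True: new check runs the block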
ovs-fw: catches exception from ovsdb. The OVS agent will raise an exception when deleting multiple VMs in bulk. Nova will delete the tap when VMs are removed. Then, the OVS agent checks the ovs_port by calling "self.get_ovs_port", and the exception will be raised. This patch catches the exception. Closes-Bug:
@@ -507,7 +507,13 @@ class OVSFirewallDriver(firewall.FirewallDriver): self.prepare_port_filter(port) return old_of_port = self.get_ofport(port) + try: of_port = self.get_or_create_ofport(port) + except exceptions.OVSFWPortNotFound as not_found_error: + LOG.info("port %(port_id)s does not exist in ovsdb: %(err)s.", + {'port_id': port['device'], + 'err': not_found_error}) + return # TODO(jlibosva): Handle firewall blink self.delete_all_port_flows(old_of_port) self.initialize_port_flows(of_port)
diamond: Update test cases Adds missing test cases from the canonical test data and stores this test version.
@@ -3,11 +3,19 @@ import unittest from diamond import make_diamond +# test cases adapted from `x-common//canonical-data.json` @ version: 1.0.0 + class DiamondTests(unittest.TestCase): - def test_letter_A(self): + def test_degenerate_case_with_a_single_row(self): self.assertMultiLineEqual(make_diamond('A'), 'A\n') - def test_letter_C(self): + def test_degenerate_case_with_two_rows(self): + result = [' A ', + 'B B', + ' A '] + self.assertMultiLineEqual(make_diamond('B'), '\n'.join(result) + '\n') + + def test_smallest_non_degenerate_case_with_odd_diamond_side_length(self): result = [' A ', ' B B ', 'C C', @@ -15,17 +23,69 @@ class DiamondTests(unittest.TestCase): ' A '] self.assertMultiLineEqual(make_diamond('C'), '\n'.join(result) + '\n') - def test_letter_E(self): + def test_smallest_non_degenerate_case_with_even_diamond_side_length(self): + result = [' A ', + ' B B ', + ' C C ', + 'D D', + ' C C ', + ' B B ', + ' A '] + self.assertMultiLineEqual(make_diamond('D'), '\n'.join(result) + '\n') + + def test_largest_possible_diamond(self): result = [' A ', ' B B ', ' C C ', ' D D ', ' E E ', + ' F F ', + ' G G ', + ' H H ', + ' I I ', + ' J J ', + ' K K ', + ' L L ', + ' M M ', + ' N N ', + ' O O ', + ' P P ', + ' Q Q ', + ' R R ', + ' S S ', + ' T T ', + ' U U ', + ' V V ', + ' W W ', + ' X X ', + ' Y Y ', + 'Z Z', + ' Y Y ', + ' X X ', + ' W W ', + ' V V ', + ' U U ', + ' T T ', + ' S S ', + ' R R ', + ' Q Q ', + ' P P ', + ' O O ', + ' N N ', + ' M M ', + ' L L ', + ' K K ', + ' J J ', + ' I I ', + ' H H ', + ' G G ', + ' F F ', + ' E E ', ' D D ', ' C C ', ' B B ', ' A '] - self.assertMultiLineEqual(make_diamond('E'), '\n'.join(result) + '\n') + self.assertMultiLineEqual(make_diamond('Z'), '\n'.join(result) + '\n') if __name__ == '__main__':
TST: updated meta tests Updated the meta unit tests by: fixing more csv unit tests, updating the name and comments of a case change unit test, and fixing the setup for Immutable unit tests.
@@ -805,9 +805,9 @@ class TestBasics(): @pytest.mark.parametrize("bad_key,bad_val,err_msg", [("col_names", [], "col_names must include"), - ("name", None, "Must provide an instrument"), - ("name", 5, "keyword name must be related"), - ("name", 'fake_inst', + ("filename", None, "Must provide an instrument"), + ("filename", 5, "keyword name must be related"), + ("filename", 'fake_inst', "keyword name must be related")]) def test_meta_csv_load_w_errors(self, bad_key, bad_val, err_msg): name = os.path.join(pysat.__path__[0], 'tests', 'cindi_ivm_meta.txt') @@ -905,17 +905,28 @@ class TestBasics(): assert (self.meta['new2'].Units == 'hey2') assert (self.meta['new2'].Long_Name == 'boo2') - def test_change_Units_and_Name_case_w_ho(self): + def test_case_change_of_meta_labels_w_ho(self): + """ Test changing case of meta labels after initialization with HO data + """ + # Set the initial labels self.meta_labels = {'units': ('units', str), 'name': ('long_Name', str)} self.meta = pysat.Meta(labels=self.meta_labels) meta2 = pysat.Meta(labels=self.meta_labels) + + # Set meta data values meta2['new21'] = {'units': 'hey2', 'long_name': 'boo2'} self.meta['new'] = {'units': 'hey', 'long_name': 'boo'} self.meta['new2'] = meta2 + + # Change the label name self.meta.labels.units = 'Units' self.meta.labels.name = 'Long_Name' + + # Evaluate the results in the main data assert (self.meta['new'].Units == 'hey') assert (self.meta['new'].Long_Name == 'boo') + + # Evaluate the results in the higher order data assert (self.meta['new2'].children['new21'].Units == 'hey2') assert (self.meta['new2'].children['new21'].Long_Name == 'boo2') @@ -1239,6 +1250,8 @@ class TestBasicsImmutable(TestBasics): clean_level='clean') self.meta = self.testInst.meta self.meta.mutable = False + self.meta_labels = {'units': ('Units', str), + 'name': ('Long_Name', str)} # Assign remaining values self.dval = None @@ -1252,5 +1265,5 @@ class TestBasicsImmutable(TestBasics): def teardown(self): """Runs after every method to clean up previous testing """ - del self.testInst, self.meta, self.out, self.stime + del self.testInst, self.meta, self.out, self.stime, self.meta_labels del self.default_name, self.default_nan, self.default_val, self.dval
We don't have iteritems in Cython. Fixes Helps
@@ -158,7 +158,8 @@ cdef class OutputContainer(Container): if k not in options: used_options.add(k) # ... and warn if any weren't used. - unused_options = {k: v for k, v in self.options.iteritems() if k not in used_options} + # TODO: How to items vs iteritems for Py2 vs 3 in Cython? + unused_options = {k: v for k, v in self.options.items() if k not in used_options} if unused_options: log.warning('Some options were not used: %s' % unused_options)
feat: getting common fields from telegram data Changes: added method to extract first name, last name and nick from data
@@ -17,5 +17,11 @@ class TelegramProvider(Provider): def extract_uid(self, data): return data['id'] + def extract_common_fields(self, data): + return { + 'first_name': data['first_name'], + 'last_name': data['last_name'], + 'username': data['username'], + } provider_classes = [TelegramProvider]
station length comments Fixed formatting and added comments.
@@ -107,12 +107,12 @@ def list_files(tag='', sat_id=None, data_path=None, format_str=None): if tag == "stations": orig_files = files.copy() - # print (orig_files) new_files = [] + # Assigns the validity of each station file to be 1 year for orig in orig_files.iteritems(): files.ix[orig[0] + doff - pds.DateOffset(days=1)] = orig[1] files = files.sort_index() - new_files.append(files.ix[orig[0]: orig[0] + doff - + new_files.append(files.ix[orig[0]: orig[0] + doff - \ pds.DateOffset(days=1)].asfreq('D', method='pad')) files = pds.concat(new_files)
Support --all-tenant on the server side. If all_tenants is determined to be 1, then set context.all_tenants to true. Closes-Bug:
@@ -17,6 +17,7 @@ from oslo_log import log as logging from oslo_utils import strutils import pecan from pecan import rest +import six from zun.api.controllers import link from zun.api.controllers.v1 import collection @@ -108,6 +109,17 @@ class ContainersController(rest.RestController): def _get_containers_collection(self, **kwargs): context = pecan.request.context + all_tenants = kwargs.get('all_tenants') + if all_tenants: + try: + all_tenants = strutils.bool_from_string(all_tenants, True) + except ValueError as err: + raise exception.InvalidInput(six.text_type(err)) + else: + # If no value, it's considered to disable all_tenants + all_tenants = False + if all_tenants: + context.all_tenants = True compute_api = pecan.request.compute_api limit = api_utils.validate_limit(kwargs.get('limit')) sort_dir = api_utils.validate_sort_dir(kwargs.get('sort_dir', 'asc'))
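As a standalone illustration of the parsing step above, strutils.bool_from_string with strict=True (the second positional argument in the diff) accepts the usual truthy/falsy spellings and raises ValueError for anything else, which the controller then turns into InvalidInput.

from oslo_utils import strutils

print(strutils.bool_from_string("1", True))      # True
print(strutils.bool_from_string("false", True))  # False
try:
    strutils.bool_from_string("maybe", True)     # strict=True rejects unrecognized values
except ValueError as err:
    print("rejected:", err)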
Tutorial Fixes 1. `time_step` needs to be reset after each episode. 2. `next_time_step` should be `time_step` for collecting rewards.
" action = tf.random_uniform([1], 0, 2, dtype=tf.int32)\n", " time_step = tf_env.step(action)\n", " episode_steps += 1\n", - " episode_reward += next_time_step.reward.numpy()\n", + " episode_reward += time_step.reward.numpy()\n", " rewards.append(episode_reward)\n", " steps.append(episode_steps)\n", + " time_step = tf_env.reset()\n", "\n", "num_steps = np.sum(steps)\n", "avg_length = np.mean(steps)\n",
lxml is not a real dependency. From what I see this module is used by beautifulsoup4, but not directly by KiCost. Not a big problem, but could be a problem in the future.
@@ -80,7 +80,7 @@ with open(os.path.join('kicost','HISTORY.rst')) as history_file: # KiCost Python packages requirements to run-time. requirements = [ 'beautifulsoup4 >= 4.3.2', # Deal with HTML and XML tags. - 'lxml >= 3.7.2', +# 'lxml >= 3.7.2', # Indirectly used, this is beautifulsoup4's dependency 'XlsxWriter >= 0.7.3', # Write the XLSX output file. 'future', # For print statements. 'tqdm >= 4.30.0', # Progress bar.
Add reward ignore threshold Summary: If this threshold is set, we will ignore abnormal data with rewards larger than the threshold when computing the loss function.
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. import logging from enum import Enum +from typing import Optional import reagent.types as rlt import torch @@ -20,13 +21,28 @@ class LossFunction(Enum): L1Loss = "L1_Loss" -def _get_loss_function(loss_fn: LossFunction): +def _get_loss_function(loss_fn: LossFunction, reward_ignore_threshold): + reduction_type = "mean" + if reward_ignore_threshold is not None: + reduction_type = "none" + if loss_fn == LossFunction.MSE: - return torch.nn.MSELoss(reduction="mean") + torch_fn = torch.nn.MSELoss(reduction=reduction_type) elif loss_fn == LossFunction.SmoothL1Loss: - return torch.nn.SmoothL1Loss(reduction="mean") + torch_fn = torch.nn.SmoothL1Loss(reduction=reduction_type) elif loss_fn == LossFunction.L1Loss: - return torch.nn.L1Loss(reduction="mean") + torch_fn = torch.nn.L1Loss(reduction=reduction_type) + + if reward_ignore_threshold is None: + return torch_fn + + def wrapper_loss_fn(target, pred): + loss = torch_fn(target, pred) + loss = loss[target <= reward_ignore_threshold] + assert len(loss) > 0, "reward ignore threshold set too small" + return torch.mean(loss) + + return wrapper_loss_fn class RewardNetTrainer(Trainer): @@ -39,6 +55,7 @@ class RewardNetTrainer(Trainer): default_factory=Optimizer__Union.default ), loss_type: LossFunction = LossFunction.MSE, + reward_ignore_threshold: Optional[float] = None, ) -> None: self.reward_net = reward_net self.use_gpu = use_gpu @@ -46,7 +63,8 @@ class RewardNetTrainer(Trainer): self.minibatch = 0 self.opt = optimizer.make_optimizer(self.reward_net.parameters()) self.loss_type = loss_type - self.loss_fn = _get_loss_function(loss_type) + self.loss_fn = _get_loss_function(loss_type, reward_ignore_threshold) + self.reward_ignore_threshold = reward_ignore_threshold def train(self, training_batch: rlt.PreprocessedTrainingBatch): training_input = training_batch.training_input
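The wrapper depends on reduction="none" keeping one loss entry per sample so that entries with outlier rewards can be dropped before averaging; a standalone sketch of that masking step with made-up numbers:

import torch

target = torch.tensor([0.5, 2.0, 100.0, 1.0])   # 100.0 plays the role of an abnormal reward
pred = torch.tensor([0.4, 2.5, 3.0, 1.2])
threshold = 10.0

per_sample = torch.nn.MSELoss(reduction="none")(target, pred)
kept = per_sample[target <= threshold]          # drop samples whose reward exceeds the threshold
assert len(kept) > 0, "reward ignore threshold set too small"
print(torch.mean(kept))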
Adds idleOpStr argument to create_standard_cloudnoise_sequences. (arg was already present for create_cloudnoise_sequences)
@@ -1566,8 +1566,9 @@ def create_standard_cloudnoise_sequences(nQubits, maxLengths, singleQfiducials, availability=None, geometry="line", maxIdleWeight=1, maxhops=0, extraWeight1Hops=0, extraGateWeight=0, paramroot="H+S", sparse=False, verbosity=0, cache=None, idleOnly=False, - idtPauliDicts=None, algorithm="greedy"): + idtPauliDicts=None, algorithm="greedy", idleOpStr=((),)): """ + TODO: docstring - add idleOpStr Create a set of `fiducial1+germ^power+fiducial2` sequences which amplify all of the parameters of a `CloudNoiseModel` created by passing the arguments of this function to :function:`build_standard_cloudnoise_model`. @@ -1662,7 +1663,7 @@ def create_standard_cloudnoise_sequences(nQubits, maxLengths, singleQfiducials, gatedict, availability, geometry, maxIdleWeight, maxhops, extraWeight1Hops, extraGateWeight, paramroot, sparse, verbosity, cache, idleOnly, - idtPauliDicts, algorithm) + idtPauliDicts, algorithm, idleOpStr) def create_cloudnoise_sequences(nQubits, maxLengths, singleQfiducials, @@ -1671,6 +1672,7 @@ def create_cloudnoise_sequences(nQubits, maxLengths, singleQfiducials, sparse=False, verbosity=0, cache=None, idleOnly=False, idtPauliDicts=None, algorithm="greedy", idleOpStr=((),)): """ + TODO: docstring - add idleOpStr Create a set of `fiducial1+germ^power+fiducial2` sequences which amplify all of the parameters of a `CloudNoiseModel` created by passing the arguments of this function to :function:`build_standard_cloudnoise_model`. @@ -2270,8 +2272,8 @@ def create_cloudnoise_sequences(nQubits, maxLengths, singleQfiducials, aliases=None,sequenceRules=None) for germ,gdict in germs.items(): - for L,fidpairs in gdict.items(): serial_germ = germ.serialize() #must serialize to get correct count + for L,fidpairs in gdict.items(): germ_power = _gsc.repeat_with_max_length(serial_germ,L) gss.add_plaquette(germ_power, L, germ, fidpairs) #returns 'missing_list'; useful if using dsfilter arg
PythonAPISettings: add a shortcut property to get the context TN:
@@ -15,6 +15,10 @@ class PythonAPISettings(AbstractAPISettings): self.c_api_settings = c_api_settings self.module_name = module_name + @property + def context(self): + return self.c_api_settings.context + def get_enum_alternative(self, type_name, alt_name, suffix): return alt_name.upper
Run 'test-requirements' as part of 'make test' This is consistent with our other apps [1]. Although it won't get picked up by CI just yet [2], we can still benefit from it locally. [1]: [2]:
@@ -54,7 +54,7 @@ generate-version-file: ## Generates the app version file @echo -e "__git_commit__ = \"${GIT_COMMIT}\"\n__time__ = \"${DATE}\"" > ${APP_VERSION_FILE} .PHONY: test -test: generate-version-file ## Run tests +test: test-requirements ## Run tests ./scripts/run_tests.sh .PHONY: freeze-requirements
fix: typo in argument parser desc in train.py. Remove duplicated `of`
@@ -108,7 +108,7 @@ parser.add_argument('--crop-pct', default=None, type=float, parser.add_argument('--mean', type=float, nargs='+', default=None, metavar='MEAN', help='Override mean pixel value of dataset') parser.add_argument('--std', type=float, nargs='+', default=None, metavar='STD', - help='Override std deviation of of dataset') + help='Override std deviation of dataset') parser.add_argument('--interpolation', default='', type=str, metavar='NAME', help='Image resize interpolation type (overrides model)') parser.add_argument('-b', '--batch-size', type=int, default=128, metavar='N',
Upgrade Theano to 1.0.1 Upgrades Theano to 1.0.1
@@ -16,6 +16,7 @@ dependencies: - pygame - matplotlib - pandas + - mkl-service=1.1.2 - pip: - pyprind - ipdb @@ -25,8 +26,8 @@ dependencies: - pyzmq - cached_property - cloudpickle - - git+https://github.com/Theano/Theano.git@adfe319ce6b781083d8dc3200fb4481b00853791#egg=Theano - - git+https://github.com/neocxi/Lasagne.git@484866cf8b38d878e92d521be445968531646bb8#egg=Lasagne + - git+https://github.com/Lasagne/Lasagne.git@7992faa80fa5233a786e2582a605e854cea7d1cf#egg=Lasagne + - git+https://github.com/Theano/Theano.git@2c19431b165aaf8289d1e6654988c64f4bb234e1#egg=Theano - git+https://github.com/plotly/plotly.py.git@2594076e29584ede2d09f2aa40a8a195b3f3fc66#egg=plotly - git+https://github.com/deepmind/dm_control.git#egg=dm_control - awscli
Update all_practices Remove reference to mean
<h1>Find a practice</h1> -<p>Search for a practice by name, and see how the practice compares to the national mean for key prescribing indicators.</p> +<p>Search for a practice by name, and see how this practice compares with its peers across the NHS in England.</p> <input class="form-control" id="search" placeholder="Search by practice name or postcode" />
fw/entrypoint: log devlib version Log devlib version alongside WA version.
@@ -20,6 +20,8 @@ import logging import os import warnings +import devlib + from wa.framework import pluginloader from wa.framework.command import init_argument_parser from wa.framework.configuration import settings @@ -98,6 +100,7 @@ def main(): settings.set("verbosity", args.verbose) log.init(settings.verbosity) logger.debug('Version: {}'.format(get_wa_version_with_commit())) + logger.debug('devlib version: {}'.format(devlib.__full_version__)) logger.debug('Command Line: {}'.format(' '.join(sys.argv))) # each command will add its own subparser
integrate transformer into runner. Transformer is called before the combinator; it can provide variables to the combinator and other parts.
@@ -6,7 +6,7 @@ from logging import FileHandler from threading import Lock from typing import Any, Callable, Dict, Iterator, List, Optional -from lisa import notifier, schema +from lisa import notifier, schema, transformer from lisa.action import Action from lisa.combinator import Combinator from lisa.parameter_parser.runbook import RunbookBuilder @@ -105,6 +105,8 @@ class RootRunner(Action): await super().start() try: + transformer.run(self._runbook_builder) + # update runbook for notifiers raw_data = copy.deepcopy(self._runbook_builder.raw_data) constants.RUNBOOK = replace_variables(
Remove Public APIs This API is in the header of the README.md file
@@ -545,7 +545,6 @@ API | Description | Auth | HTTPS | CORS | | [Plino](https://plino.herokuapp.com/) | An intelligent spam filtering system | No | Yes | No | | [Postman](https://docs.api.getpostman.com/) | Tool for testing APIs | `apiKey` | Yes | Unknown | | [ProxyCrawl](https://proxycrawl.com) | Scraping and crawling anticaptcha service | `apiKey` | Yes | Unknown | -| [Public APIs](https://github.com/davemachado/public-api) | A collective list of free JSON APIs for use in web development | No | Yes | Unknown | | [Pusher Beams](https://pusher.com/beams) | Push notifications for Android & iOS | `apiKey` | Yes | Unknown | | [QR code](http://qrtag.net/api/) | Create an easy to read QR code and URL shortener | No | Yes | Yes | | [QR code](http://goqr.me/api/) | Generate and decode / read QR code graphics | No | Yes | Unknown |
Show requests deprecation warning by default This adds a warning filter that ensures that the requests deprecation warning is printed unless a user manually disables it.
@@ -24,6 +24,13 @@ _WARNING_MSG = ( ) +warnings.filterwarnings( + action="always", + category=DeprecationWarning, + module=__name__, +) + + def request(method, url, **kwargs): """Constructs and sends a :class:`Request <Request>`.
Update README.md Merge announcement
`scikit-multiflow` is a machine learning package for streaming data in Python. +# Merger announcement + +## TLDR + +[creme](https://creme-ml.github.io/) and [scikit-multiflow](https://scikit-multiflow.github.io/) are merging. A new package will be released from this merge and both development teams will work together on this new package. + +## Why? + +We feel that both projects share the same vision. We believe that pooling our resources instead of duplicating work will benefit both parties. We are also confident that this will benefit both communities. There will be more people working on the new project, which will allow us to distribute work more efficiently. We will thus be able to work on more features and improve the overall quality of the project. + +## How does this affect each project? + +Both projects will stop active development once the new package is released. The code for both projects will remain publicly available, although development will only focus on minor maintenance during a transition period. During this transition period, most of the functionality of both projects will be made available in the new package. + +The architecture of the new package is more Pythonic. It will focus on single-instance incremental models. The new API reflects these changes. + +Detailed information on the new architecture and API will be available with the release of the new package. + +## How does this affect users? + +We encourage users to move towards the new package when possible. We understand that this transition will requiere an extra effort in the short term from current users. However, we believe that the result will be better for everyone in the long run. + +You will still be able to install and use `creme` as well as `scikit-multiflow`. Both projects will remain on PyPI, conda-forge and GitHub. + +## When? + +The target date for the first release: **2nd half of October 2020**. + +--- + ### Quick links * [Webpage](https://scikit-multiflow.github.io/) * [Documentation](https://scikit-multiflow.readthedocs.io/en/stable/)
Update FORWARD_TRAFFIC.yml (FortiOS 5.4) Update FORWARD_TRAFFIC.yml (FortiOS 5.4)
# messages: - error: 'FORWARD_TRAFFIC' - tag: "subtype=forward" + tag: "forward" values: - level: ([^ ]+) - vd: ([^ ]+) - srcip: ([^ ]+) - srcport: ([^ ]+) - srcintf: ([^ ]+) - dstip: ([^ ]+) - dstport: ([^ ]+) - dstintf: ([^ ]+) - poluuId: ([^ ]+) - sessiondId: ([^ ]+) - protocolId: ([^ ]+) - action: ([^ ]+) + level: (?<=level=)(.*)(?=\s+vd=) + vd: (?<=vd=)(.*)(?=\s+srcip=) + srcip: (?<=srcip=)(.*)(?=\s+srcport=) + srcport: (?<=srcport=)(.*)(?=\s+srcintf=) + srcintf: (?<=srcintf=)(.*)(?=\s+dstip=) + dstip: (?<=dstip=)(.*)(?=\s+dstport=) + dstport: (?<=dstport=)(.*)(?=\s+dstintf=) + dstintf: (?<=dstintf=)(.*)(?=\s+poluuid=) + poluuId: (?<=poluuid=)(.*)(?=\s+sessionid=) + sessiondId: (?<=sessionid=)(.*)(?=\s+proto=) + protocolId: (?<=proto=)(.*)(?=\s+action=) + action: (?<=action=)(.*)(?=\s+policyid=) miscData: (.*) - line: '{level} {vd} {srcip} {srcport} {srcintf} {dstip} {dstport} {dstintf} {poluuId} {sessiondId} {protocolId} {action} {miscData}' + line: 'level={level} vd={vd} srcip={srcip} srcport={srcport} srcintf={srcintf} dstip={dstip} dstport={dstport} dstintf={dstintf} poluuid={poluuId} sessionid={sessiondId} proto={protocolId} action={action} {miscData}' model: NO_MODEL mapping: variables:
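The rewritten value patterns anchor each field between its own key (lookbehind) and the next expected key (lookahead); a small Python check of one of them on a made-up log line:

import re

line = "level=notice vd=root srcip=10.0.0.1 srcport=443 srcintf=port1"
match = re.search(r"(?<=srcip=)(.*)(?=\s+srcport=)", line)
print(match.group(1))   # 10.0.0.1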
Update messages_en.py typo correction
@@ -479,7 +479,7 @@ en = { "server-disable-ready-argument": "disable readiness feature", "server-motd-argument": "path to file from which motd will be fetched", "server-rooms-argument": "path to database file to use and/or create to store persistent room data. Enables rooms to persist without watchers and through restarts", - "server-permanent-rooms-argument": "path to file which lists permenant rooms that will be listed even if the room is empty (in the form of a text file which lists one room per line) - requires persistent rooms to be enabled", + "server-permanent-rooms-argument": "path to file which lists permanent rooms that will be listed even if the room is empty (in the form of a text file which lists one room per line) - requires persistent rooms to be enabled", "server-chat-argument": "Should chat be disabled?", "server-chat-maxchars-argument": "Maximum number of characters in a chat message (default is {})", # Default number of characters "server-maxusernamelength-argument": "Maximum number of characters in a username (default is {})",
show outputs on e2e tests remove debug line
@@ -129,7 +129,7 @@ class DbndKubernetesJobWatcher(KubernetesJobWatcher): if event["type"] == "ERROR": return self.process_error(event) - # self._extended_process_state(event) + self._extended_process_state(event) self.resource_version = task.metadata.resource_version except Exception as e:
Update extensions.py another attempt to fix this warning ``` WARNING: Explicit markup ends without a blank line; unexpected unindent. This usually occurs when the text following a directive is wrapped to the next line without properly indenting a multi-line text block ```
:copyright: (c) 2018 by Netflix Inc., see AUTHORS for more :license: Apache, see LICENSE for more details. """ -from flask_sqlalchemy import SQLAlchemy as BaseSQLAlchemy +from flask_sqlalchemy import SQLAlchemy as _BaseSQLAlchemy -class SQLAlchemy(BaseSQLAlchemy): +class SQLAlchemy(_BaseSQLAlchemy): + def apply_pool_defaults(self, app, options): options = super().apply_pool_defaults(app, options) options["pool_pre_ping"] = True
informational-overlays: Add "The basics" section to keyboard shortcuts. This adds a section for basic shortcuts.
<div class="overlay-modal" id="keyboard-shortcuts" tabindex="-1" role="dialog" aria-label="{{ _('Keyboard shortcuts') }}"> <div class="modal-body" tabindex="0"> + <div> + <table class="hotkeys_full_table hotkeys_table wide table table-striped table-bordered table-condensed"> + <thead> + <tr> + <th colspan="2">{{ _("The basics") }}</th> + </tr> + </thead> + <tr> + <td class="hotkey">Enter, r</td> + <td class="definition">{% trans %}Reply to message{% endtrans %}</td> + </tr> + <tr> + <td class="hotkey">c</td> + <td class="definition">{% trans %}New stream message{% endtrans %}</td> + </tr> + <tr> + <td class="hotkey">C</td> + <td class="definition">{% trans %}New private message{% endtrans %}</td> + </tr> + <tr> + <td class="hotkey">Esc, Ctrl + [</td> + <td class="definition">{% trans %}Cancel compose{% endtrans %}</td> + </tr> + <tr> + <td class="hotkey">d</td> + <td class="definition">{% trans %}View drafts{% endtrans %}</td> + </tr> + <tr> + <td class="hotkey">Down, j</td> + <td class="definition">{% trans %}Next message{% endtrans %}</td> + </tr> + <tr> + <td class="hotkey">End, G</td> + <td class="definition">{% trans %}Last message{% endtrans %}</td> + </tr> + <tr> + <td class="hotkey">n</td> + <td class="definition">{% trans %}Next unread topic{% endtrans %}</td> + </tr> + <tr> + <td class="hotkey">P</td> + <td class="definition">{% trans %}All private messages{% endtrans %}</td> + </tr> + <tr> + <td class="hotkey">/</td> + <td class="definition">{% trans %}Initiate a search{% endtrans %}</td> + </tr> + <tr> + <td class="hotkey">?</td> + <td class="definition">{% trans %}Show keyboard shortcuts{% endtrans %}</td> + </tr> + </table> + </div> + + <hr /> + <div> <table class="hotkeys_table table table-striped table-bordered table-condensed"> <thead>
Report reward training baseline mse Summary: A baseline mse would simply be the reward's variance. The variance is computed only on the evaluation data to be directly comparable with the eval MSE.
@@ -20,6 +20,7 @@ class RewardNetEvaluator: def __init__(self, trainer: RewardNetTrainer) -> None: self.trainer = trainer self.mse_loss = [] + self.rewards = [] self.best_model = None self.best_model_loss = 1e9 @@ -35,11 +36,13 @@ class RewardNetEvaluator: reward = eval_tdp.training_input.slate_reward else: reward = eval_tdp.training_input.reward + assert reward is not None mse_loss = F.mse_loss( reward_net(eval_tdp.training_input).predicted_reward, reward ) - self.mse_loss.append(mse_loss.detach().cpu()) + self.mse_loss.append(mse_loss.flatten().detach().cpu()) + self.rewards.append(reward.flatten().detach().cpu()) reward_net.train(reward_net_prev_mode) @@ -47,8 +50,9 @@ class RewardNetEvaluator: def evaluate_post_training(self): mean_mse_loss = np.mean(self.mse_loss) logger.info(f"Evaluation MSE={mean_mse_loss}") - eval_res = {"mse": mean_mse_loss} + eval_res = {"mse": mean_mse_loss, "rewards": torch.cat(self.rewards)} self.mse_loss = [] + self.rewards = [] if mean_mse_loss < self.best_model_loss: self.best_model_loss = mean_mse_loss
configure: fix config folder creation. The folder creation was failing when passing a filename to the -c parameter. Tested with: c /test/config1, c test/config2, c config3
@@ -228,7 +228,8 @@ def configure(args): # noqa: C901 FIXME!!! # ensure that the directory for the config file exists (because # ~/.parallelcluster is likely not to exist on first usage) try: - os.makedirs(os.path.dirname(config_file)) + config_folder = os.path.dirname(config_file) or "." + os.makedirs(config_folder) except OSError as e: if e.errno != errno.EEXIST: raise # can safely ignore EEXISTS for this purpose...
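The root cause: os.path.dirname() of a bare filename is an empty string, and os.makedirs('') fails rather than hitting the EEXIST branch, hence the fallback to the current directory.

import os

print(repr(os.path.dirname("/test/config1")))   # '/test'
print(repr(os.path.dirname("config3")))         # '' (os.makedirs('') would raise)

config_folder = os.path.dirname("config3") or "."
print(config_folder)                            # .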
Convert PlatformVersion to SolutionStack when launching docker applications locally SIM: cr
@@ -102,7 +102,7 @@ def _get_solution_stack(): # Test out sstack and tier before we ask any questions (Fast Fail) if solution_string: - if PlatformVersion.is_valid_arn(solution_string): + if PlatformVersion.is_custom_platform_arn(solution_string): try: platformops.describe_custom_platform_version(solution_string) except NotFoundError: @@ -115,7 +115,7 @@ def _get_solution_stack(): try: soln_stk = solution_stack_ops.find_solution_stack_from_string(solution_string) - if PlatformVersion.is_valid_arn(soln_stk): + if PlatformVersion.is_eb_managed_platform_arn(soln_stk): soln_stk = PlatformVersion.get_platform_name(soln_stk) except NotFoundError:
Make production database parameter fine configurable Allow database parameters configuration with separate environment variables: * GALAXY_DB_NAME * GALAXY_DB_USER * GALAXY_DB_PASSWORD * GALAXY_DB_HOST * GALAXY_DB_PORT
@@ -24,6 +24,11 @@ The following environment variables are supported: * GALAXY_ALLOWED_HOSTS * GALAXY_EMAIL_HOST * GALAXY_DB_URL +* GALAXY_DB_NAME +* GALAXY_DB_USER +* GALAXY_DB_PASSWORD +* GALAXY_DB_HOST +* GALAXY_DB_PORT * GALAXY_EMAIL_PORT * GALAXY_EMAIL_USER * GALAXY_EMAIL_PASSWORD @@ -78,9 +83,22 @@ ALLOWED_HOSTS = os.environ.get('GALAXY_ALLOWED_HOSTS', '*').split(',') # --------------------------------------------------------- # Define GALAXY_DB_URL=postgres://USER:PASSWORD@HOST:PORT/NAME -DATABASES = { - 'default': dj_database_url.config(env='GALAXY_DB_URL', conn_max_age=None) +DATABASES = {} + +if os.environ.get('GALAXY_DB_URL'): + DATABASES['default'] = dj_database_url.config( + env='GALAXY_DB_URL', conn_max_age=None) +else: + DATABASES['default'] = { + 'ENGINE': 'django.db.backends.postgresql_psycopg2', + 'NAME': os.environ.get('GALAXY_DB_NAME', 'galaxy'), + 'USER': os.environ.get('GALAXY_DB_USER', 'galaxy'), + 'PASSWORD': os.environ.get('GALAXY_DB_PASSWORD', ''), + 'HOST': os.environ.get('GALAXY_DB_HOST', ''), + 'PORT': int(os.environ.get('GALAXY_DB_PORT', 5432)), + 'CONN_MAX_AGE': None, } + # Create default alias for worker logging DATABASES['logging'] = DATABASES['default'].copy()
Remove peak load as max size for CHP in reopt.jl This was hardcoding the max size at less than the user-input max_kw when the peak load is less than max_kw.
@@ -228,12 +228,6 @@ function add_bigM_adjustments(m, p) m[:NewMaxSize][t] = p.MaxSize[t] end end - for t in p.CHPTechs - m[:NewMaxSize][t] = maximum([p.ElecLoad[ts] for ts in p.TimeStep]) - if (m[:NewMaxSize][t] > p.MaxSize[t]) - m[:NewMaxSize][t] = p.MaxSize[t] - end - end # NewMaxSizeByHour is designed to scale the right-hand side of the constraint limiting rated production in each hour to the production factor; in most cases this is unaffected unless the production factor is zero, in which case the right-hand side is set to zero. #for t in p.ElectricTechs
doc/contributing: Add a guide to documentation styles Add a documentation style guide to aid in keeping a consistent style when adding new documentation.
-Contributing Code -================= +Contributing +============ + +Code +---- We welcome code contributions via GitHub pull requests. To help with maintainability of the code line we ask that the code uses a coding style @@ -53,3 +56,130 @@ submitting a pull request: Once you have your contribution is ready, please follow instructions in `GitHub documentation <https://help.github.com/articles/creating-a-pull-request/>`_ to create a pull request. + +-------------------------------------------------------------------------------- + +Documentation +------------- + +Headings +~~~~~~~~ + +To allow for consistent headings to be used through out the document the +following character sequences should be used when creating headings + +:: + + ========= + Heading 1 + ========= + + Only used for top level headings which should also have an entry in the + navigational side bar. + + ********* + Heading 2 + ********* + + Main page heading used for page title, should not have a top level entry in the + side bar. + + Heading 3 + ========== + + Regular section heading. + + Heading 4 + --------- + + Sub-heading. + + Heading 5 + ~~~~~~~~~ + + Heading 6 + ^^^^^^^^^ + + Heading 7 + """"""""" + + +-------------------------------------------------------------------------------- + +Configuration Listings +~~~~~~~~~~~~~~~~~~~~~~ + +To keep a consistent style for presenting configuration options, the preferred +style is to use a `Field List`. + +(See: http://docutils.sourceforge.net/docs/user/rst/quickref.html#field-lists) + +Example:: + + :parameter: My Description + +Will render as: + + :parameter: My Description + + +-------------------------------------------------------------------------------- + +API Style +~~~~~~~~~ + +When documenting an API the currently preferred style is to provide a short +description of the class, followed by the attributes of the class in a +`Definition List` followed by the methods using the `method` directive. + +(See: http://docutils.sourceforge.net/docs/user/rst/quickref.html#definition-lists) + + +Example:: + + API + === + + :class:`MyClass` + ---------------- + + :class:`MyClass` is an example class to demonstrate API documentation. + + ``attribute1`` + The first attribute of the example class. + + ``attribute2`` + Another attribute example. + + methods + """"""" + + .. method:: MyClass.retrieve_output(name) + + Retrieve the output for ``name``. + + :param name: The output that should be returned. + :return: An :class:`Output` object for ``name``. + :raises NotFoundError: If no output can be found. + + +Will render as: + +:class:`MyClass` is an example class to demonstrate API documentation. + +``attribute1`` + The first attribute of the example class. + +``attribute2`` + Another attribute example. + +methods +^^^^^^^ + +.. method:: MyClass.retrieve_output(name) + + Retrieve the output for ``name``. + + :param name: The output that should be returned. + :return: An :class:`Output` object for ``name``. + :raises NotFoundError: If no output can be found.
ImageWriterTest : Add missing offsets I assume this is what the loop on the `offset` variable was intended for, as otherwise it is completely unused, and the `o` node remains at default values throughout.
@@ -378,6 +378,8 @@ class ImageWriterTest( GafferImageTest.ImageTestCase ) : imath.V2i( 106, 28 ) ]: + o["offset"].setValue( offset ) + with Gaffer.Context() : w["task"].execute()
(doc) update advance MM overview and coti Update order optimization parameters and description, and add external links for COTI
@@ -28,7 +28,8 @@ There are two ways to configure these parameters: | [Price Band](./price-band) | `price_floor` | `Enter the price below which only buy orders will be placed` | Place only buy orders when mid price falls below this price. | | [Ping Pong](./ping-pong) | `ping_pong_enabled` | `Would you like to use the ping pong feature and alternate between buy and sell orders after fills?` | Whether to alternate between buys and sells. | | [Order Optimization](./order-optimization) | `order_optimization_enabled` | `Do you want to enable best bid ask jumping? (Yes/No)` | Allows your bid and ask order prices to be adjusted based on the current top bid and ask prices in the market. | -| [Order Optimization](./order-optimization) | `order_optimization_depth` | `How deep do you want to go into the order book for calculating the top bid and ask, ignoring dust orders on the top (expressed in base asset amount)?` | The depth in base asset amount to be used for finding top bid and ask. | +| [Order Optimization](./ask_order_optimization_depth) | `ask_order_optimization_depth` | `How deep do you want to go into the order book for calculating the top ask, ignoring dust orders on the top (expressed in base asset amount)?` | The depth in base asset amount to be used for finding top bid ask. | +| [Order Optimization](./bid_order_optimization_depth) | `bid_order_optimization_depth` | `How deep do you want to go into the order book for calculating the top bid, ignoring dust orders on the top (expressed in base asset amount)?` | The depth in base asset amount to be used for finding top bid. | | [Add Transaction Costs](./add-transaction-costs) | `add_transaction_costs` | `Do you want to add transaction costs automatically to order prices? (Yes/No)` | Whether to enable adding transaction costs to order price calculation. | | [External Price Source](./price-source) | `price_source_enabled` | `Would you like to use an external pricing source for mid-market price? (Yes/No)` | When enabled, allows users to use an external pricing source for the mid price. | | [External Price Source](./price-source) | `price_source_type` | `Which type of external price source to use? (exchange/custom_api)` | The type of external pricing source. |
drop 'sudo: false' move part of apt packages to lint stage; add names to stages
-sudo: false dist: xenial - language: python addons: apt: packages: - socat - - enchant - - aspell - - aspell-en env: global: @@ -29,13 +24,20 @@ stages: jobs: include: - stage: lint + name: documentation spell check python: "3.6" - env: DOCS_SPELL_CHECK="y" + addons: + apt: + packages: + - enchant + - aspell + - aspell-en install: - pip install -r docs/requirements.txt - pip install -e. -c tests/requirements.txt script: make spelling - &FLAKE + name: flake python: "3.6" install: - pip install -r tests/requirements.txt
family: nrf52: correct out of date code to use discoverer method Ensure core discovery after mass erase on locked targets.
@@ -103,7 +103,7 @@ class NRF52(CoreSightTarget): raise exceptions.TargetError("unable to unlock device") # Cached badness from create_ap run during AP lockout prevents create_cores from # succeeding. - self.dp.create_1_ap(AHB_AP_NUM) + self._discoverer._create_1_ap(AHB_AP_NUM) else: LOG.warning("%s APPROTECT enabled: not automatically unlocking", self.part_number) else:
mgr: do not copy all keyrings on all mgr There is no need to loop over all mgr nodes to set this fact; it's even breaking deployments because it tries to copy all mgr keyrings on all mgr nodes. Closes:
- name: set_fact _mgr_keys set_fact: - _mgr_keys: "{{ _mgr_keys | default([{ 'name': 'client.admin', 'path': '/etc/ceph/' + cluster + '.client.admin.keyring', 'copy_key': copy_admin_key }]) + [{ 'name': 'mgr.' + hostvars[item]['ansible_hostname'], 'path': '/var/lib/ceph/mgr/' + cluster + '-' + hostvars[item]['ansible_hostname'] + '/keyring', 'copy_key': true }] }}" - with_items: "{{ groups.get(mgr_group_name, []) }}" + _mgr_keys: + - { 'name': 'client.admin', 'path': "/etc/ceph/{{ cluster }}.client.admin.keyring", 'copy_key': copy_admin_key } + - { 'name': "mgr.{{ ansible_hostname }}", 'path': "/var/lib/ceph/mgr/{{ cluster }}-{{ ansible_hostname }}/keyring", 'copy_key': true } - name: get keys from monitors command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} auth get {{ item.name }}" register: _mgr_keys with_items: "{{ _mgr_keys }}" delegate_to: "{{ groups.get(mon_group_name)[0] }}" - run_once: true when: - cephx | bool - item.copy_key | bool
fixed issue 208 Removed quotes around number answers to fix this issue:
@@ -28,15 +28,15 @@ class Task(object): """) # make a script - if answer == "1": + if answer == 1: Task.one(my_bot) # unfollow your nonfriends - if answer == "2": + if answer == 2: Task.two(my_bot) # exit sript - if answer == "3": + if answer == 3: exit() # invalid input
Adding ANSIBLE0019 to skiplist for linters. Linters fail because ANSIBLE0019 has been enabled. link: This is a temporary merge until we reenable ANSIBLE0019
@@ -16,7 +16,7 @@ whitelist_externals = bash commands = bash -c "cd ansible; find . -type f -regex '.*.y[a]?ml' -print0 | xargs -t -n1 -0 \ ansible-lint \ - -x ANSIBLE0012,ANSIBLE0006,ANSIBLE0007,ANSIBLE0016" \ + -x ANSIBLE0012,ANSIBLE0006,ANSIBLE0007,ANSIBLE0016,ANSIBLE0019" \ --exclude=rally pykwalify -d browbeat-config.yaml -s browbeat/schema/browbeat.yml pykwalify -d browbeat-complete.yaml -s browbeat/schema/browbeat.yml
Fix install galaxy role warning message Fix the msg value to use '%s' and remove extra ',' to get a properly formatted message and the '%s' sequence replaced.
@@ -181,8 +181,8 @@ def _install_galaxy_role() -> None: Role: https://galaxy.ansible.com/docs/contributing/creating_role.html#role-names As an alternative, you can add 'role-name' to either skip_list or warn_list. - """, - fqrn, + """ + % fqrn ) if 'role-name' in options.warn_list: _logger.warning(msg)
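A minimal illustration (hypothetical template and value) of the difference the fix addresses: '%s' is only substituted when the string itself is %-formatted, not when the value is merely passed along as an extra argument.

fqrn = "my_namespace.my_role"     # hypothetical value
template = "Computed fully qualified role name of %s; add it to the requirements file."

print(template % fqrn)    # '%s' is replaced by the value
print(template, fqrn)     # '%s' stays literal; the value is just tacked on afterwards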
Edit requisite documentation Fixes
@@ -632,7 +632,7 @@ mod_python.sls - require_in: - service: httpd -Now the httpd server will only start if php or mod_python are first verified to +Now the httpd server will only start if both php and mod_python are first verified to be installed. Thus allowing for a requisite to be defined "after the fact".
Fix issue where available channels were pooled only from nsX files, not the nev.
@@ -1495,7 +1495,6 @@ class BlackrockIO(BaseIO): for nsx_nb in nsx_to_load: all_channels.extend( self.__nsx_ext_header[nsx_nb]['electrode_id'].astype(int)) - else: elec_id = self.__nev_ext_header[b'NEUEVWAV']['electrode_id'] all_channels.extend(elec_id.astype(int)) all_channels = np.unique(all_channels).tolist()
fix: Travis lint with matrix exclusions Go back to four Build Jobs and run linting with Python 3.6.
@@ -5,6 +5,31 @@ python: - 3.5 - 3.6 +env: + - TOXENV=py34 + - TOXENV=py35 + - TOXENV=py36 + - TOXENV=lint + +matrix: + exclude: + - python: 3.4 + env: TOXENV=py35 + - python: 3.4 + env: TOXENV=py36 + - python: 3.4 + env: TOXENV=lint + - python: 3.5 + env: TOXENV=py34 + - python: 3.5 + env: TOXENV=py36 + - python: 3.5 + env: TOXENV=lint + - python: 3.6 + env: TOXENV=py34 + - python: 3.6 + env: TOXENV=py35 + cache: false install: @@ -12,7 +37,6 @@ install: script: - tox - - tox -e lint notifications: webhooks:
build arm executable for linux update python 3 duh ugh testing testing testing fixes fixed artifacts upload fixed build for arm debugging ci debugging ci debugging ci debugging ci changed file name for arm
@@ -100,13 +100,8 @@ jobs: arch: aarch64 distro: ubuntu_latest - # Create an artifacts directory - setup: | - mkdir -p "${PWD}/artifacts" - # Mount the artifacts directory as /artifacts in the container dockerRunArgs: | - --volume "${PWD}/artifacts:/artifacts" --volume "${PWD}/:/spotdl" # The shell to run commands with in the container @@ -119,13 +114,12 @@ jobs: pip install poetry poetry install -E web poetry run python ./scripts/build.py - cd dist - for file in dist/spotdl*; do echo cp "$file" "/artifacts/${file}-aarch64"; done + for file in dist/spotdl*; do cp "$file" "${file}-aarch64"; done - name: Release uses: softprops/action-gh-release@v1 with: files: | - ${PWD}/artifacts/* + dist/spotdl*
Update permalink to match that of `navigation.yml` Broken link: Works with: Guided by consistency being king, I presume it was better to change the permalink than change the destination URL of the link.
--- title: Conversational AI Examples -permalink: /docs/convAI-examples/ +permalink: /docs/convAI-minimal-start/ excerpt: "Conversational AI Examples" last_modified_at: 2020/10/15 23:16:38 toc: true
Added additional instructions for virtualenv Since I am not automatically appending the "source virtualenvwrapper.sh" to the end of the line, print out some instructions asking the user to do so.
@@ -84,7 +84,6 @@ if [ $OPTION == "1" ]; then fi pip install --user virtualenvwrapper echo 'sourcing virtualenvwrapper.sh' - #export VIRTUALENVWRAPPER_PYTHON=`which python3` export VIRTUALENVWRAPPER_VIRTUALENV=~/.local/bin/virtualenv source ~/.local/bin/virtualenvwrapper.sh echo 'checking if Naomi virtualenv exists' @@ -104,12 +103,17 @@ if [ $OPTION == "1" ]; then fi # start the naomi setup process echo "#!/bin/bash" > Naomi - #echo "export VIRTUALENVWRAPPER_PYTHON=\`which python3\`" >> Naomi echo "export VIRTUALENVWRAPPER_VIRTUALENV=~/.local/bin/virtualenv" >> Naomi echo "source ~/.local/bin/virtualenvwrapper.sh" >> Naomi echo "workon Naomi" >> Naomi echo "python $NAOMI_DIR/Naomi.py \$@" >> Naomi echo "deactivate" >> Naomi + echo "You will need to activate the Naomi virtual environment when installing" + echo "or testing python modules for Naomi using the following command:" + echo " $ workon Naomi" + echo "You should add the following lines to your ~/.bashrc script:" + echo " export VIRTUALENVWRAPPER_VIRTUALENV=~/.local/bin/virtualenv" + echo " source ~/.local/bin/virtualenvwrapper.sh" fi if [ $OPTION == "2" ] ; then if [ $APT -eq "1" ] ; then
fix normalize local name 'local' should be returned for local site only
@@ -5,6 +5,7 @@ import threading import time from openpype.lib import Logger +from openpype.lib.local_settings import get_local_site_id from openpype.pipeline import Anatomy from .abstract_provider import AbstractProvider @@ -220,6 +221,6 @@ class LocalDriveHandler(AbstractProvider): def _normalize_site_name(self, site_name): """Transform user id to 'local' for Local settings""" - if site_name != 'studio': + if site_name == get_local_site_id(): return 'local' return site_name
Update v_generate_user_grant_revoke_ddl.sql added support for privileges granted on pg_catalog tables and other system owned objects
@@ -3,7 +3,7 @@ Purpose: View to generate grant or revoke ddl for users and groups. This recreating users or group privileges or for revoking privileges before dropping a user or group. -Version: 1.03 +Current Version: 1.04 Columns - objowner: Object owner @@ -20,15 +20,23 @@ ddl: DDL text Notes: History: + +Version 1.01 2017-03-01 adedotua created 2018-03-04 adedotua completely refactored the view to minimize nested loop joins. View is now significantly faster on clusters with a large number of users and privileges 2018-03-04 adedotua added column grantseq to help return the DDLs in the order they need to be granted or revoked 2018-03-04 adedotua renamed column sequence to objseq and username to grantee + +Version 1.02 2018-03-09 adedotua added logic to handle function name generation when there are non-alphabets in the function schemaname + +Version 1.03 2018-04-26 adedotua added missing filter for handling empty default acls 2018-04-26 adedotua fixed one more edge case where default privilege is granted on schema to user other than schema owner +Version 1.04 + 2018-05-02 adedotua added support for privileges granted on pg_catalog tables and other system owned objects @@ -68,7 +76,7 @@ WITH objprivs AS ( (SELECT oid,generate_series(1,array_upper(relacl,1)) AS n FROM pg_class) NS inner join pg_class B ON b.oid = ns.oid AND NS.n <= array_upper(b.relacl,1) join pg_namespace c on b.relnamespace = c.oid - where relowner>1 AND relkind in ('r','v') + where relkind in ('r','v') UNION ALL -- SCHEMA privileges SELECT pg_get_userbyid(b.nspowner)::text AS objowner, @@ -80,7 +88,6 @@ WITH objprivs AS ( FROM (SELECT oid,generate_series(1,array_upper(nspacl,1)) AS n FROM pg_namespace) NS inner join pg_namespace B ON b.oid = ns.oid AND NS.n <= array_upper(b.nspacl,1) - where nspowner>1 UNION ALL -- DATABASE privileges SELECT pg_get_userbyid(b.datdba)::text AS objowner, @@ -92,7 +99,6 @@ WITH objprivs AS ( FROM (SELECT oid,generate_series(1,array_upper(datacl,1)) AS n FROM pg_database) NS inner join pg_database B ON b.oid = ns.oid AND NS.n <= array_upper(b.datacl,1) - where datdba>1 OR datname = 'dev' UNION ALL -- FUNCTION privileges SELECT pg_get_userbyid(b.proowner)::text AS objowner,
Update espidf_debugging_unit_testing_analysis.rst BUG FIX in test_calc.c renamed main() to app_main()
@@ -338,7 +338,7 @@ implement several basic functions ``addition``, ``subtraction``, ``multiplicatio TEST_ASSERT_EQUAL(32, division(100, 3)); } - void main() { + void app_main() { UNITY_BEGIN(); RUN_TEST(test_function_calculator_addition);
Abort deployment if no CNI present This commit makes the deployment abort when no CNI plugin is provided while selecting a specific cni_bin directory.
@@ -4,6 +4,8 @@ import os import sys import yaml +CNI_DIR = 'cni_bin' + def create(config, plandir, cluster, overrides, dnsconfig=None): k = config.k @@ -31,9 +33,12 @@ def create(config, plandir, cluster, overrides, dnsconfig=None): installparam['plan'] = plan installparam['kubetype'] = 'kind' yaml.safe_dump(installparam, p, default_flow_style=False, encoding='utf-8', allow_unicode=True) - if os.path.exists('cni_bin'): - warning("Disabling default cni to use yours instead") - data['cni_bin_path'] = f"{os.getcwd()}/cni_bin" + if os.path.exists(CNI_DIR) and os.path.isdir(CNI_DIR): + warning("Disabling default CNI to use yours instead") + if not os.listdir(CNI_DIR): + error("No CNI plugin provided, aborting...") + sys.exit(1) + data['cni_bin_path'] = f"{os.getcwd()}/{CNI_DIR}" data['disable_default_cni'] = True result = config.plan(plan, inputfile='%s/kcli_plan.yml' % plandir, overrides=data) if result['result'] != 'success':
Improve serialization behavior of line-less pages If no lines are given to the serializer but a number of regions exist the serializer now outputs all regions (in a random order). Fixes
@@ -209,6 +209,20 @@ def serialize(records: Sequence[ocr_record], seg_idx += 1 line_offset += len(segment) cur_ent.append(line) + + # No records but there are regions -> serialize all regions + if not records and regions: + logger.debug(f'No lines given but {len(region_map)}. Serialize all regions.') + for reg in region_map.items(): + region = {'index': reg[0], + 'bbox': [int(x) for x in reg[1][1].bounds], + 'boundary': [list(x) for x in reg[1][2]], + 'region_type': reg[1][0], + 'lines': [], + 'type': 'region' + } + page['entities'].append(region) + logger.debug('Initializing jinja environment.') env = Environment(loader=PackageLoader('kraken', 'templates'), trim_blocks=True,
validate: fix bug when using vault Since a variable encrypted with vault is no longer a string but an encrypted object, we can't use the filter | length; we have to convert it to a string first. Fixes:
when: - ceph_docker_registry_auth | bool - (ceph_docker_registry_username is not defined or ceph_docker_registry_password is not defined) or - (ceph_docker_registry_username | length == 0 or ceph_docker_registry_password | length == 0) + (ceph_docker_registry_username | string | length == 0 or ceph_docker_registry_password | string | length == 0) - name: validate container service and container package fail:
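A rough reproduction of the failure mode in plain Jinja2, using a hypothetical stand-in object for the vault-encrypted value: the length filter needs len(), which the encrypted object does not provide, while | string | length works on anything.

from jinja2 import Environment

class Opaque:                                   # hypothetical stand-in with no __len__
    def __str__(self):
        return "$ANSIBLE_VAULT;1.1;AES256;..."

env = Environment()
print(env.from_string("{{ v | string | length }}").render(v=Opaque()))   # works
try:
    env.from_string("{{ v | length }}").render(v=Opaque())
except Exception as exc:
    print("length filter failed:", type(exc).__name__)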
config_service: change pop-up message for force refresh button. Review-Url:
on-tap="_forceRefresh"> </paper-icon-button> <paper-tooltip for="force-refresh" offset="0"> - Force the config refresh. + Re-import the config-set from the repository. </paper-tooltip> </template> </div>
TypeSet: fix handling of abstract classes with no concrete subclasses TN:
@@ -156,13 +156,22 @@ class TypeSet(object): for parent in reversed(parents): if not parent.abstract or parent in self.matched_types: break - subclasses = set(parent.subclasses) + + subclasses = set(parent.concrete_subclasses) if not subclasses.issubset(self.matched_types): break - # If we reach this point, all parent's subclasses are matched, - # so we can state that parent itself is always matched. + + # If we reach this point, all parent's concrete subclasses are + # matched, so we can state that parent itself is always matched. self.matched_types.add(parent) + # Also include subclasses: we may add abstract subclasses which + # have no concrete subclasses themselves. Typically: the generic + # list type (which is abstract) while there is no list in the + # grammar. + for s in parent.subclasses: + self.include(s) + return False def exclude(self, t):
Create Wyckoff symmetry checking: (get_wyckoff_symmetry, check_wyckoff_position) Needed: Better database access and storage. Needed: Checking function for a single point.
@@ -250,7 +250,6 @@ def merge_coordinate(coor, lattice, wyckoff, tol): else: return coor, True - def estimate_volume(numIons, species, factor=2.0): volume = 0 for numIon, specie in zip(numIons, species): @@ -415,9 +414,58 @@ def site_symm(point, gen_pos, tol=1e-3, lattice=Euclidean_lattice): symmetry.append(el) return symmetry +def get_wyckoff_symmetry(sg, i): + ''' + Return the site symmetry of all points in the ith wyckoff position of sg. + + Args: + sg: the international space group + i: the index of the Wyckoff position (largest = 0) + ''' + #TODO: Convert from npy (very slow) to pandas + #TODO: Find ideal variable type for storing SymmOp operations + #TODO: Support letter instead of index + data = np.load("database/wyckoff_symmetry.npy") + return data[sg][i] + + +def check_wyckoff_position(points, sg, wyckoffs=None): + ''' + Given a list of points, return index of Wyckoff position in space group. + If no match found, returns False. + + Args: + points: a list of 3d coordinates or SymmOps to check + sg: the international space group number to check + wyckoffs: a list of wyckoff positions obtained from get_wyckoff_positions. + ''' + #TODO: Implement changes from get_wyckoff_symmetry + #TODO: Create function for assigning WP to a single point + a = np.array(points) + if wyckoffs == None: + wyckoffs = get_wyckoff_positions(sg) + gen_pos = wyckoffs[0][0] + i = -1 + p_symm = [] + for x in points: + p_symm.append(site_symm(x, gen_pos)) + for x in wyckoffs: + for wp in x: + i += 1 + w_symm = get_wyckoff_symmetry(sg, i) + if len(p_symm) == len(w_symm): + temp = w_symm + for p in p_symm: + for w in temp: + if p == w: + temp.remove(w) + if temp == []: + return i + return False + + class random_crystal(): def __init__(self, sg, species, numIons, factor): -<<<<<<< HEAD numIons *= cellsize(sg) volume = estimate_volume(numIons, species, factor) wyckoffs = get_wyckoff_positions(sg) #2D Array of Wyckoff positions organized by multiplicity @@ -431,7 +479,6 @@ class random_crystal(): if check_compatible(numIons, wyckoffs) is False: print(Msg1) -======= #Necessary input self.factor = factor self.numIons0 = numIons @@ -471,7 +518,6 @@ class random_crystal(): self.valid = False return ->>>>>>> 80bb9bb040a249727c1b0b6befc5482fd5a6eec0 else: for cycle1 in range(max1): #1, Generate a lattice
Update mkvtomp4.py fix missing input_dir, filename, input_extension and output_dir variables
@@ -321,7 +321,9 @@ class MkvtoMp4: dump["input"] = self.generateSourceDict(inputfile) dump["output"], dump["preopts"], dump["postopts"] = self.generateOptions(inputfile, original) parsed = self.converter.parse_options(dump["output"]) - cmds = self.converter.ffmpeg.generateCommands(inputfile, self.getOutputFile(inputfile), parsed, dump["preopts"], dump["postopts"]) + input_dir, filename, input_extension = self.parseFile(inputfile) + outputfile, output_extension = self.getOutputFile(input_dir, filename, input_extension) + cmds = self.converter.ffmpeg.generateCommands(inputfile, outputfile, parsed, dump["preopts"], dump["postopts"]) dump["ffmpeg_command"] = " ".join(str(item) for item in cmds) return json.dumps(dump, sort_keys=False, indent=4) @@ -786,8 +788,7 @@ class MkvtoMp4: return options, preopts, postopts - def getOutputFile(self, inputfile): - input_dir, filename, input_extension = self.parseFile(inputfile) + def getOutputFile(self, input_dir, filename, input_extension): output_dir = input_dir if self.output_dir is None else self.output_dir output_extension = self.temp_extension if self.temp_extension else self.output_extension @@ -795,21 +796,24 @@ class MkvtoMp4: self.log.debug("File name: %s." % filename) self.log.debug("Input extension: %s." % input_extension) self.log.debug("Output directory: %s." % output_dir) + self.log.debug("Output extension: %s." % output_dir) try: outputfile = os.path.join(output_dir.decode(sys.getfilesystemencoding()), filename.decode(sys.getfilesystemencoding()) + "." + output_extension).encode(sys.getfilesystemencoding()) except: outputfile = os.path.join(output_dir, filename + "." + output_extension) - return outputfile + + self.log.debug("Output file: %s." % outputfile) + return outputfile, output_dir # Encode a new file based on selected options, built in naming conflict resolution def convert(self, inputfile, options, preopts, postopts, reportProgress=False): self.log.info("Starting conversion.") + input_dir, filename, input_extension = self.parseFile(inputfile) originalinputfile = inputfile - outputfile = self.getOutputFile(inputfile) + outputfile, output_dir = self.getOutputFile(input_dir, filename, input_extension) finaloutputfile = outputfile[:] - self.log.debug("Output file: %s." % outputfile) self.log.debug("Final output file: %s." % finaloutputfile) if len(options['audio']) == 0:
Fix how we get the pint registry With new pint version (0.18) we need to use ureg.get() to get the application registry.
@@ -9,7 +9,7 @@ import pint new_units_path = Path(__file__).parent / "new_units.txt" ureg = pint.get_application_registry() -if isinstance(ureg, pint.registry.LazyRegistry): +if isinstance(ureg.get(), pint.registry.LazyRegistry): ureg = pint.UnitRegistry() ureg.load_definitions(str(new_units_path)) # set ureg to make pickle possible
Remove duplicate policy modals Because `PolicyModals` was in both `UsingStudio` and `SettingsIndex` components, it was open two times which also caused a focus bug. This removes `PolicyModals` component from `UsingStudio` and leaves it only in `SettingsIndex`.
target="_blank" rel="noopener noreferrer" /> - - <PolicyModals /> </div> </template> <script> import { mapActions } from 'vuex'; - import PolicyModals from 'shared/views/policies/PolicyModals'; import { policies } from 'shared/constants'; export default { name: 'UsingStudio', - components: { - PolicyModals, - }, methods: { ...mapActions('policies', ['openPolicy']), showTermsOfService() {
Modify method BaseModel.get() Swap default and config arguments
@@ -48,7 +48,7 @@ class BaseModel: return Config().pop(variables, config, **kwargs) @classmethod - def get(cls, variables, config, default=None): + def get(cls, variables, default=None, config=None): """ Return variables from config """ return Config().get(variables, default=default, config=config)
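A small, self-contained sketch (invented stand-in classes, not the project's real ones) of why this keyword swap is safe for keyword callers but silently changes meaning for positional ones:

```python
class Config:
    def get(self, variables, default=None, config=None):
        config = config if config is not None else {}
        return config.get(variables, default)

class BaseModel:
    @classmethod
    def get(cls, variables, default=None, config=None):  # new argument order
        """Return variables from config."""
        return Config().get(variables, default=default, config=config)

# Keyword callers keep working unchanged:
print(BaseModel.get("epochs", config={"epochs": 3}))   # -> 3
# A positional caller written for the old order (variables, config, default)
# now passes its config dict as `default`, so such call sites must be updated:
print(BaseModel.get("epochs", {"epochs": 3}))          # -> {'epochs': 3}, not 3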
Update __init__.py Added Lion Optimization Algorithm to __init__.py file
@@ -26,6 +26,7 @@ from niapy.algorithms.basic.gwo import GreyWolfOptimizer from niapy.algorithms.basic.hho import HarrisHawksOptimization from niapy.algorithms.basic.hs import HarmonySearch, HarmonySearchV1 from niapy.algorithms.basic.kh import KrillHerd +from niapy.algorithms.basic.loa import LionOptimizationAlgorithm from niapy.algorithms.basic.mbo import MonarchButterflyOptimization from niapy.algorithms.basic.mfo import MothFlameOptimizer from niapy.algorithms.basic.mke import MonkeyKingEvolutionV1, MonkeyKingEvolutionV2, MonkeyKingEvolutionV3 @@ -68,6 +69,7 @@ __all__ = [ 'HarmonySearch', 'HarmonySearchV1', 'KrillHerd', + 'LionOptimizationAlgorithm', 'FireworksAlgorithm', 'EnhancedFireworksAlgorithm', 'DynamicFireworksAlgorithm',
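After this export, both import paths below resolve to the same class; the snippet assumes a niapy installation that already contains this change:

```python
# Direct import from the package, enabled by the new __init__.py entry:
from niapy.algorithms.basic import LionOptimizationAlgorithm
# The module-level path referenced in the same diff still works as well:
from niapy.algorithms.basic.loa import LionOptimizationAlgorithm as LOA

print(LionOptimizationAlgorithm is LOA)  # True
```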
Fix selectize initialization in the Details page Fixes:
@@ -11,7 +11,9 @@ $(function () { return {value: tag} } - var allTags = $("#update-tags-input").data("all-tags"); + // Use attr() instead of data() here, as data() converts attribute's string value + // to a JS object, but we need an unconverted string: + var allTags = $("#update-tags-input").attr("data-all-tags"); var options = allTags ? allTags.split(" ").map(toOption) : []; $("#update-tags-input").selectize({ create: true,
CoilSetPriority causing desyncs The CoilSetPriority command is causing a desync condition on Node 9 and above. If I recall right, this was added to troubleshoot hardware rules and isn't strictly required by spike, so I removed it.
@@ -1251,8 +1251,8 @@ class SpikePlatform(SwitchPlatform, LightsPlatform, DriverPlatform, DmdPlatform, self.log.warning("Did not get status for node %s", node) if self.node_firmware_version[node] >= 0x3100: - self.log.debug("SetLEDMask, CoilSetMask, CoilSetOCTime, CoilSetOCBehavior, SetNumLEDsInputs and " - "CoilSetPriority on node %s", node) + self.log.debug("SetLEDMask, CoilSetMask, CoilSetOCTime, CoilSetOCBehavior, and SetNumLEDsInputs " + "on node %s", node) node_status = await self.send_cmd_and_wait_for_response(node, SpikeNodebus.GetStatus, bytearray(), 10) if node_status: @@ -1304,24 +1304,6 @@ class SpikePlatform(SwitchPlatform, LightsPlatform, DriverPlatform, DmdPlatform, await self.send_cmd_and_wait_for_response(node, SpikeNodebus.GetCoilCurrent, bytearray([0]), 12) - if self.node_firmware_version[node] >= 0x3100: - for node in self._nodes: - if node == 0: - continue - - # configure coil priorities - priority_response = await self.send_cmd_and_wait_for_response( - node, SpikeNodebus.CoilSetPriority, - bytearray([0x08, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08]), 3) - - if not priority_response: - priority_response = await self.send_cmd_and_wait_for_response( - node, SpikeNodebus.CoilSetPriority, - bytearray([0x04, 0x00, 0x01, 0x02, 0x03, 0x04]), 3) - - if not priority_response: - self.log.warning("Failed to set coil priority on node %s", node) - self.log.debug("Configuring traffic.") await self.send_cmd_sync(0, SpikeNodebus.SetTraffic, bytearray([0x11])) # set traffic
Fix docstr db in tfr.plot_joint
* Fix docstr db in tfr.plot_joint When ```db=True```, tfr.plot_joint multiplies by 10. The info for the function states, however, that it multiplies by 20.
* fix docstr dB in tfr.plot; plot_joint; plot_topo
@@ -1091,7 +1091,7 @@ class AverageTFR(_BaseTFR): amount of images. dB : bool - If True, 20*log10 is applied to the data to get dB. + If True, 10*log10 is applied to the data to get dB. colorbar : bool If true, colorbar will be added to the plot. For user defined axes, the colorbar cannot be drawn. Defaults to True. @@ -1305,7 +1305,7 @@ class AverageTFR(_BaseTFR): cmap : matplotlib colormap The colormap to use. dB : bool - If True, 20*log10 is applied to the data to get dB. + If True, 10*log10 is applied to the data to get dB. colorbar : bool If true, colorbar will be added to the plot (relating to the topomaps). For user defined axes, the colorbar cannot be drawn. @@ -1685,7 +1685,7 @@ class AverageTFR(_BaseTFR): title : str Title of the figure. dB : bool - If True, 20*log10 is applied to the data to get dB. + If True, 10*log10 is applied to the data to get dB. colorbar : bool If true, colorbar will be added to the plot layout_scale : float
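The 10 vs. 20 factor follows from TFR values being power-like quantities; a quick numeric check with plain numpy:

```python
import numpy as np

power = np.array([1.0, 10.0, 100.0])   # power-like data, as in a TFR
print(10 * np.log10(power))            # [ 0. 10. 20.] -> dB for power
# 20*log10 is reserved for amplitude quantities, since power ~ amplitude**2:
amplitude = np.sqrt(power)
print(20 * np.log10(amplitude))        # same [ 0. 10. 20.], starting from amplitude
```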
Add delay to paper trade order created event firings Fixes: XMM Paper Trade cancel orders loop
# distutils: sources=['hummingbot/core/cpp/Utils.cpp', 'hummingbot/core/cpp/LimitOrder.cpp', 'hummingbot/core/cpp/OrderExpirationEntry.cpp'] +import asyncio from collections import ( deque, defaultdict ) @@ -23,6 +24,9 @@ from hummingbot.core.Utils cimport( getIteratorFromReverseIterator, reverse_iterator ) +from hummingbot.core.utils.async_utils import ( + safe_ensure_future, +) from hummingbot.core.clock cimport Clock from hummingbot.core.clock import ( Clock @@ -364,15 +368,13 @@ cdef class PaperTradeExchange(ExchangeBase): <PyObject *> quantized_price, <PyObject *> quantized_amount )) - self.c_trigger_event(self.MARKET_BUY_ORDER_CREATED_EVENT_TAG, - BuyOrderCreatedEvent( - self._current_timestamp, + safe_ensure_future(self.place_order(self.MARKET_BUY_ORDER_CREATED_EVENT_TAG, + BuyOrderCreatedEvent(self._current_timestamp, order_type, trading_pair_str, quantized_amount, quantized_price, - order_id - )) + order_id))) return order_id cdef str c_sell(self, @@ -419,17 +421,21 @@ cdef class PaperTradeExchange(ExchangeBase): <PyObject *> quantized_price, <PyObject *> quantized_amount )) - self.c_trigger_event(self.MARKET_SELL_ORDER_CREATED_EVENT_TAG, - SellOrderCreatedEvent( - self._current_timestamp, + safe_ensure_future(self.place_order(self.MARKET_SELL_ORDER_CREATED_EVENT_TAG, + SellOrderCreatedEvent(self._current_timestamp, order_type, trading_pair_str, quantized_amount, quantized_price, - order_id - )) + order_id))) return order_id + async def place_order(self, + event_tag, + order_created_event): + await asyncio.sleep(0.01) + self.c_trigger_event(event_tag, order_created_event) + cdef c_execute_buy(self, str order_id, str trading_pair, object amount): cdef: str quote_asset = self._trading_pairs[trading_pair].quote_asset
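A stripped-down, library-free sketch of the pattern (illustrative names, not Hummingbot's API): schedule the event trigger as a coroutine with a tiny sleep so it fires only after the order id has been returned to the caller:

```python
import asyncio

def trigger_event(tag, event):
    print(f"fired {tag}: {event}")

async def place_order(tag, event, delay=0.01):
    # Defer the "order created" event so listeners handle it after the buy/sell call returns.
    await asyncio.sleep(delay)
    trigger_event(tag, event)

async def main():
    task = asyncio.ensure_future(place_order("BUY_CREATED", {"order_id": "abc123"}))
    print("order id returned to the caller first")
    await task

asyncio.run(main())
```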
remove card in landing page first section It was causing layout difficulties and not really adding much
@@ -208,7 +208,6 @@ export default function LandingPage() { </section> <div className={classes.spacer} /> <Container component="section" maxWidth="md"> - <Paper elevation={isBelowMd ? 0 : 1} className={classes.card}> <Typography component="h2" className={classes.header} @@ -231,11 +230,10 @@ export default function LandingPage() { <strong>largest active volunteer base </strong> with over 40 skilled contributors. Our product teams are designing and developing the platform at blazing speed, releasing{" "} - <strong>new features every two weeks</strong>; our community teams - are planning out and executing user base growth and engagement - strategies to reach a{" "} - <strong>critical mass of quality couch surfers</strong> with active - local communities and a vibrant global discussion. + <strong>new features every two weeks</strong>; our community teams are + planning out and executing user base growth and engagement strategies + to reach a <strong>critical mass of quality couch surfers</strong>{" "} + with active local communities and a vibrant global discussion. </Typography> <Typography className={classes.para}> <Link href={contributeRoute} passHref> @@ -251,7 +249,6 @@ export default function LandingPage() { <Button className={classes.subNavButtons}>Volunteer</Button> </Link> </Typography> - </Paper> </Container> <div className={classes.spacer} /> <Container component="section" maxWidth="md">
add python built-in complex type to array types fixes
@@ -153,8 +153,8 @@ def make_shaped_array(x): return ShapedArray(onp.shape(x), dtype) array_types = [onp.ndarray, onp.float64, onp.float32, onp.complex64, - onp.int64, onp.int32, onp.bool_, onp.uint64, onp.uint32, float, - int, bool] + onp.int64, onp.int32, onp.bool_, onp.uint64, onp.uint32, + complex, float, int, bool] for t in array_types: core.pytype_aval_mappings[t] = ConcreteArray
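With `complex` registered alongside `float`, `int` and `bool`, a plain Python complex scalar is accepted like the other built-in scalars; a small check against a current JAX install (the diff itself targets an older internal `core.pytype_aval_mappings` API):

```python
import jax
import jax.numpy as jnp

@jax.jit
def f(x):
    return x * (1 - 1j)

print(f(2 + 3j))        # built-in complex accepted as a traced argument
print(jnp.abs(2 + 3j))  # and by jax.numpy functions
```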
Only log connection failures to debug This is noisy and obscures the real message in the page. This warns almost every time because some hosts are firewalled off and we can't get the file, so set it to debug-level logging only so we can read the output more clearly.
@@ -130,7 +130,10 @@ async def transfer_one_file( ) resp = await asyncio.wait_for(reader.read(), timeout=1.0) except (asyncio.TimeoutError, ConnectionRefusedError) as ex: - logger.warning(f"error getting file from {host}: {ex!r}") + # this is not ununusual because we sometimes advertise hosts from + # firewalled subnets where we can't make this connection to get + # the file. check y/ipam to see what the subnet means + logger.debug(f"error getting file from {host}: {ex!r}") return (host, None) return (host, resp.decode())
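The effect is ordinary level-based filtering: at the usual INFO/WARNING thresholds the expected failures vanish from the page output while real problems stay visible. A generic illustration:

```python
import logging

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("file_transfer")

# Expected failures (firewalled hosts) no longer clutter INFO-level output:
logger.debug("error getting file from host1: ConnectionRefusedError()")  # suppressed
logger.info("3/40 hosts unreachable (likely firewalled), continuing")    # still shown
```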
[logger.py] add tensorboard logging support Usage: call `logger.add_tensorboard_output(logdir)` before training. PyTorch >= 1.1 or tensorboardX is required.
@@ -1254,6 +1254,9 @@ _header_printed = False _running_processes = [] _async_plot_flag = False +_summary_writer = None +_global_step = 0 + def _add_output(file_name, arr, fds, mode='a'): if file_name not in arr: @@ -1287,6 +1290,48 @@ def add_tabular_output(file_name): _add_output(file_name, _tabular_outputs, _tabular_fds, mode='w') +def add_tensorboard_output(logdir): + try: + try: + from torch.utils.tensorboard import SummaryWriter + except ImportError: + from tensorboardX import SummaryWriter + except ImportError: + print("tensorboard is not available") + return + global _summary_writer + if _summary_writer is not None: + print("Currently only support one SummaryWriter") + return + _summary_writer = SummaryWriter(logdir) + + +def write_to_tensorboard(tabular_dict): + if _summary_writer is None: + return + global _global_step + stat_keys = set() + normal_keys = [] + for k in tabular_dict.keys(): + for j in ["Average", "Std", "Median", "Min", "Max"]: + idx = k.find(j) + if idx != -1: + break + if idx != -1: + stat_keys.add(k[:idx]) + else: + normal_keys.append(k) + for k in normal_keys: + _summary_writer.add_scalar( + "data/" + k, float(tabular_dict[k]), _global_step) + for k in stat_keys: + _summary_writer.add_scalars("stat/" + k, + {k + "Max": float(tabular_dict[k + "Max"]), + k + "Median": float(tabular_dict[k + "Median"]), + k + "Min": float(tabular_dict[k + "Min"])}, _global_step) + _global_step += 1 + + def remove_tabular_output(file_name): if _tabular_fds[file_name] in _tabular_header_written: _tabular_header_written.remove(_tabular_fds[file_name]) @@ -1424,6 +1469,7 @@ def dump_tabular(*args, **kwargs): _tabular_header_written.add(tabular_fd) writer.writerow(tabular_dict) tabular_fd.flush() + write_to_tensorboard(tabular_dict) del _tabular[:]
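What the new hook boils down to at the TensorBoard end: the same try-torch-then-tensorboardX import used above, plus `add_scalar` per global step. A standalone sketch (requires torch >= 1.1 or tensorboardX, as the message notes):

```python
try:
    from torch.utils.tensorboard import SummaryWriter
except ImportError:
    from tensorboardX import SummaryWriter

writer = SummaryWriter("runs/demo")
for step, loss in enumerate([0.9, 0.5, 0.3]):
    writer.add_scalar("data/loss", loss, step)  # mirrors write_to_tensorboard's calls
writer.close()
```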
Remove FieldRowPanel top padding inside MultiFieldPanel fixes
@@ -57,14 +57,18 @@ $object-title-height: 40px; border-color: $color-input-focus-border; } - fieldset { + fieldset, + .field-row { padding-top: $object-title-height + 12px; + } + + fieldset { padding-left: 0; padding-right: 0; - } .field-row { - padding-top: $object-title-height + 12px; + padding-top: 0; + } } .object-help {
Migrate Alcatel.get_metrics to new interface HG-- branch : feature/microservices
@@ -21,14 +21,14 @@ class SlotRule(OIDRule): name = "slot" def iter_oids(self, script, metric): - healthModuleSlot = [0] + health_module_slot = [0] i = 1 r = {} if script.has_capability("Stack | Members"): - healthModuleSlot = range(1, script.capabilities["Stack | Members"] + 1) + health_module_slot = range(1, script.capabilities["Stack | Members"] + 1) - for ms in healthModuleSlot: + for ms in health_module_slot: r[str(i)] = "%d" % ms # r[str(i)] = {"healthModuleSlot": ms} i += 1 @@ -36,9 +36,11 @@ class SlotRule(OIDRule): for i in r: if self.is_complex: gen = [mib[self.expand(o, {"hwSlotIndex": r[i]})] for o in self.oid] + path = ["0", "0", i, ""] if "CPU" in metric.metric else ["0", i, "0"] if gen: - yield tuple(gen), self.type, self.scale, {"slot": i} + yield tuple(gen), self.type, self.scale, path else: oid = mib[self.expand(self.oid, {"hwSlotIndex": r[i]})] + path = ["0", "0", i, ""] if "CPU" in metric.metric else ["0", i, "0"] if oid: - yield oid, self.type, self.scale, {"slot": i} + yield oid, self.type, self.scale, path
Updated to_netcdf4 documentation Included attribute information
@@ -2020,7 +2020,12 @@ class Instrument(object): structure - All attributes attached to instrument meta are written to netCDF attrs. + All attributes attached to instrument meta are written to netCDF attrs + with the exception of 'Date_End', 'Date_Start', 'File', 'File_Date', + 'Generation_Date', and 'Logical_File_ID'. These are defined within to_netCDF + at the time the file is written, as per the adopted standard, + SPDF ISTP/IACG Modified for NetCDF. Atrributes 'Conventions' and + 'Text_Supplement' are given default values if not present. """
Add space after comma in default Quantity.__repr__ This since numpy is now much better in removing superfluous spaces.
@@ -1226,7 +1226,8 @@ class Quantity(np.ndarray, metaclass=InheritDocstrings): def __repr__(self): prefixstr = '<' + self.__class__.__name__ + ' ' - arrstr = np.array2string(self.view(np.ndarray), separator=',', + sep = ',' if NUMPY_LT_1_14 else ', ' + arrstr = np.array2string(self.view(np.ndarray), separator=sep, prefix=prefixstr) return '{0}{1}{2:s}>'.format(prefixstr, arrstr, self._unitstr)
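The visible difference, reproduced with plain numpy (independently of Quantity):

```python
import numpy as np

a = np.array([1.0, 2.0, 3.0])
print(np.array2string(a, separator=','))   # '[1.,2.,3.]'  -- cramped on numpy >= 1.14
print(np.array2string(a, separator=', '))  # '[1., 2., 3.]' -- the new repr spacing
```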
docs: updated pip installation instructions Simplified instructions in light of binaries being available for fpzip and compressed_segmentation.
@@ -21,24 +21,30 @@ CloudVolume can be used in single or multi-process capacity and can be optimized ## Setup -Cloud-volume is compatible with Python 2.6+ and 3.4+ (we've noticed it's faster on Python 3). On linux it requires g++ and python3-dev. After installation, you'll also need to set up your cloud credentials. +Cloud-volume is regularly tested on Ubuntu with Python 2.7, 3.4, 3.5, and 3.6 (we've noticed it's faster on Python 3). Some people have used it with Python 3.7. We support Linux and OS X. Windows is currently unsupoorted. After installation, you'll also need to set up your cloud credentials. -CloudVolume uses several C/C++ extensions that require numpy header files to be pre-installed. As of this writing, they include accelerated `compressed_segmentation` (smaller and faster segmentation files) and `fpzip` (smaller and faster floating point files). To enable these extensions, please install numpy as a seperate command line invocation prior to installing CloudVolume. +#### `pip` Binary Installation -#### `pip` Installation +```bash +pip install cloud-volume +``` + +CloudVolume depends on the PyPI packages [`fpzip`](https://github.com/seung-lab/fpzip) and [`compressed_segmentation`](https://github.com/seung-lab/compressedseg), which are Cython bindings for C++. We have provided compiled binaries for many platforms and python versions, however if you are on an unsupported system, pip will attempt to install from source. In that case, follow the instructions below. + +#### `pip` Source Installation -*C++ compiler recommended.* +*C++ compiler required.* ```bash -sudo apt-get install g++ python3-dev -pip install numpy # extra step required for Cython extensions + Anaconda installations +sudo apt-get install g++ python3-dev # python-dev if you're on python2 +pip install numpy pip install cloud-volume ``` -Due to packaging problems endemic to Python, you'll want to explicitly install numpy prior to installing cloud-volume (i.e. as a totally seperate command). This is required because some CloudVolume capabilites depend on C++ Cython extensions which in turn depend on having numpy C headers present at compilation time. They are not recognized unless numpy is installed in a seperate process that runs first. +Due to packaging problems endemic to Python, Cython packages that depend on numpy require numpy header files be installed before attempting to install the package you want. The numpy headers are not recognized unless numpy is installed in a seperate process that runs first. There are hacks for this issue, but I haven't gotten them to work. If you think binaries should be available for your platform, please let us know by opening an issue. The libraries depending on numpy are: -- Accelerated compressed_segmentation: A slow pure python fallback is present. When the accelerated version is present, IO is faster than with gzip alone. +- compressed_segmentation: Smaller and faster segmentation files. A pure python fallback is present. When the accelerated version is present, IO is faster than with gzip alone. - fpzip: A lossless compression library for 3D & 4D floating point data. #### Manual Installation @@ -56,6 +62,7 @@ workon cv virtualenv venv source venv/bin/activate +sudo apt-get install g++ python3-dev # python-dev if you're on python2 pip install numpy # additional step needed for accelerated compressed_segmentation and fpzip pip install -e . 
``` @@ -367,7 +374,8 @@ hyperview(img, seg) # img and seg shape must match CloudVolume in Julia - https://github.com/seung-lab/CloudVolume.jl fpzip Python Package - https://github.com/seung-lab/fpzip -compressed_segmentation - https://github.com/seung-lab/compressedseg +compressed_segmentation Python Package - https://github.com/seung-lab/compressedseg +Igneous - https://github.com/seung-lab/igneous ## Acknowledgments
Update Osc.py created new Mrl OscMessage - and converted from JavaOSC
# we want them sent to python so we subscribe to # the publishOSCMessage method - python.subscribe("osc", "publishOSCMessage") + python.subscribe("osc", "publishOscMessage") # the messages will come back to us in onOscMessage - def onOSCMessage(message): + def onOscMessage(message): print(message) data = message.getArguments() for d in data:
Update sre_parse module for Python 3.8 It seems in Python 3.8, the 'Pattern' object in the (undocumented?) sre_parse module was renamed to 'State', along with a few associated parameters.
@@ -4,6 +4,7 @@ from typing import ( Any, Dict, FrozenSet, Iterable, List, Match, Optional, Pattern as _Pattern, Tuple, Union ) +import sys from sre_constants import _NamedIntConstant as NIC, error as _Error SPECIAL_CHARS: str @@ -20,7 +21,7 @@ GLOBAL_FLAGS: int class Verbose(Exception): ... -class Pattern: +class _State: flags: int groupdict: Dict[str, int] groupwidths: List[Optional[int]] @@ -33,6 +34,11 @@ class Pattern: def checkgroup(self, gid: int) -> bool: ... def checklookbehindgroup(self, gid: int, source: Tokenizer) -> None: ... +if sys.version_info >= (3, 8): + State = _State +else: + Pattern = _State + _OpSubpatternType = Tuple[Optional[int], int, int, SubPattern] _OpGroupRefExistsType = Tuple[int, SubPattern, SubPattern] @@ -43,10 +49,16 @@ _CodeType = Tuple[NIC, _AvType] class SubPattern: - pattern: Pattern data: List[_CodeType] width: Optional[int] + + if sys.version_info >= (3, 8): + state: State + def __init__(self, state: State, data: List[_CodeType] = ...) -> None: ... + else: + pattern: Pattern def __init__(self, pattern: Pattern, data: List[_CodeType] = ...) -> None: ... + def dump(self, level: int = ...) -> None: ... def __len__(self) -> int: ... def __delitem__(self, index: Union[int, slice]) -> None: ... @@ -75,7 +87,11 @@ class Tokenizer: def error(self, msg: str, offset: int = ...) -> _Error: ... def fix_flags(src: Union[str, bytes], flag: int) -> int: ... -def parse(str: str, flags: int = ..., pattern: Pattern = ...) -> SubPattern: ... _TemplateType = Tuple[List[Tuple[int, int]], List[str]] +if sys.version_info >= (3, 8): + def parse(str: str, flags: int = ..., state: State = ...) -> SubPattern: ... + def parse_template(source: str, state: _Pattern[Any]) -> _TemplateType: ... +else: + def parse(str: str, flags: int = ..., pattern: Pattern = ...) -> SubPattern: ... def parse_template(source: str, pattern: _Pattern[Any]) -> _TemplateType: ... def expand_template(template: _TemplateType, match: Match[Any]) -> str: ...
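The rename is easy to confirm at runtime (sre_parse is an undocumented internal module and deprecated on recent Pythons, but still importable):

```python
import sys
import sre_parse  # emits a DeprecationWarning on 3.11+

if sys.version_info >= (3, 8):
    print(sre_parse.State)                    # the renamed class
    print(type(sre_parse.parse("a+").state))  # SubPattern now carries .state
else:
    print(sre_parse.Pattern)                  # the old name
    print(type(sre_parse.parse("a+").pattern))
```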
Update correlation_tools.py if I understood this function correctly, the variable `clipped` should report whether any value was smaller than the threshold value, not the hardcoded 0
@@ -20,7 +20,7 @@ from statsmodels.tools.sm_exceptions import ( def clip_evals(x, value=0): # threshold=0, value=0): evals, evecs = np.linalg.eigh(x) - clipped = np.any(evals < 0) + clipped = np.any(evals < value) x_new = np.dot(evecs * np.maximum(evals, value), evecs.T) return x_new, clipped
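A small standalone reproduction of why the comparison matters: with a nonzero `value`, eigenvalues between 0 and the threshold are clipped, and only the fixed check reports it.

```python
import numpy as np

def clip_evals(x, value=0):
    evals, evecs = np.linalg.eigh(x)
    clipped = np.any(evals < value)  # fixed: compare to the threshold, not 0
    x_new = np.dot(evecs * np.maximum(evals, value), evecs.T)
    return x_new, clipped

x = np.diag([1e-8, 1.0])            # eigenvalues: 1e-8 and 1.0
_, clipped = clip_evals(x, value=1e-6)
print(clipped)                      # True; the old `evals < 0` check printed False
```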
Additional caps to Iskratel.ESCOM.get_lldp_neighbors script HG-- branch : feature/microservices
@@ -50,11 +50,18 @@ class Script(BaseScript): caps = 0 for c in i[4].split(","): c = c.strip() + """ + System capability legend: + B - Bridge; R - Router; W - Wlan Access Point; T - telephone; + D - DOCSIS Cable Device; H - Host; r - Repeater; + TP - Two Ports MAC Relay; S - S-VLAN; C - C-VLAN; O - Other + """ if c: caps |= { - "O": 1, "P": 2, "B": 4, - "W": 8, "R": 16, "T": 32, - "C": 64, "S": 128 + "O": 1, "r": 1, "P": 2, + "B": 4, "W": 8, "R": 16, + "T": 32, "C": 64, "D": 64, + "S": 128, "H": 128 }[c] """ if "O" in i[4]:
ip_conntrack module loading moved Moved ip_conntrack module loading above sysctl loading to prevent errors when loading sysctl changes that involved nf_conntrack
@@ -53,6 +53,31 @@ execute 'load ipmi_devintf kernel module at boot' do not_if "grep ipmi_devintf /etc/modules" end +# ip_conntrack module loading and configuration +# +execute 'load ip_conntrack kernel module' do + command 'modprobe ip_conntrack' + not_if 'lsmod | grep nf_conntrack' +end + +begin + sys_params = node['bcpc']['system']['parameters'] + nf_conntrack_max = sys_params['net.nf_conntrack_max'] + hashsize = nf_conntrack_max / 8 + + template '/etc/modprobe.d/nf_conntrack.conf' do + source 'modprobe.d/nf_conntrack.conf.erb' + variables( + :hashsize => hashsize + ) + end + + execute 'set nf conntrack hashsize' do + hashsize_fp = '/sys/module/nf_conntrack/parameters/hashsize' + command "echo #{hashsize} > #{hashsize_fp}" + not_if "grep -w #{hashsize} #{hashsize_fp}" + end +end # configure grub # @@ -104,33 +129,6 @@ template "/etc/udev/rules.d/99-readahead.rules" do end -# ip_conntrack module loading and configuration -# -execute 'load ip_conntrack kernel module' do - command 'modprobe ip_conntrack' - not_if 'lsmod | grep nf_conntrack' -end - -begin - sys_params = node['bcpc']['system']['parameters'] - nf_conntrack_max = sys_params['net.nf_conntrack_max'] - hashsize = nf_conntrack_max / 8 - - template '/etc/modprobe.d/nf_conntrack.conf' do - source 'modprobe.d/nf_conntrack.conf.erb' - variables( - :hashsize => hashsize - ) - end - - execute 'set nf conntrack hashsize' do - hashsize_fp = '/sys/module/nf_conntrack/parameters/hashsize' - command "echo #{hashsize} > #{hashsize_fp}" - not_if "grep -w #{hashsize} #{hashsize_fp}" - end -end - - # configure I/O scheduler # block_devices = ::Dir.glob('/dev/sd?').map { |d| d.split('/').last }
commands/list: add "augmentations" and "all" Allow specifying "augmentations" and "all" as the plugin kind to list. In the case of the former, instruments and output processors get listed. In the case of the latter, every plugin kind gets listed.
@@ -27,6 +27,7 @@ class ListCommand(Command): def initialize(self, context): kinds = get_kinds() + kinds.extend(['augmentations', 'all']) self.parser.add_argument('kind', metavar='KIND', help=('Specify the kind of plugin to list. Must be ' 'one of: {}'.format(', '.join(sorted(kinds)))), @@ -52,6 +53,21 @@ class ListCommand(Command): if args.kind == 'targets': list_targets() + elif args.kind == 'augmentations': + print('instruments:') + args.kind = 'instruments' + list_plugins(args, filters) + print('\noutput processors:') + args.kind = 'output_processors' + list_plugins(args, filters) + elif args.kind == 'all': + for kind in sorted(get_kinds()): + print('\n{}:'.format(kind)) + if kind == 'targets': + list_targets() + else: + args.kind = kind + list_plugins(args, filters) else: list_plugins(args, filters)
Disable flaky test in threading_utils_test.py There's probably a way to fix it but it's not worth the work for now.
@@ -268,8 +268,9 @@ class ThreadPoolTest(unittest.TestCase): actual = pool.join() self.assertEqual(['a', 'c', 'b'], actual) + # Disabled due to https://crbug.com/778055 @timeout(30) - def test_abort(self): + def disabled_test_abort(self): # Trigger a ridiculous amount of tasks, and abort the remaining. completed = False results = []
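The disabling trick relies on unittest's default `test*` method-name prefix: once renamed, the method is simply never collected. A minimal illustration:

```python
import unittest

class ThreadPoolTest(unittest.TestCase):
    def disabled_test_abort(self):      # no longer matches the 'test*' prefix
        self.fail("never collected")

    def test_join(self):
        self.assertTrue(True)

# Runs 1 test; disabled_test_abort is silently skipped.
unittest.main(argv=["prog"], exit=False)
```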
Remove .resolve_unique, which is unused and untested TN:
@@ -16,10 +16,8 @@ from langkit.expressions.utils import array_aggr, assign_var @auto_attr_custom("get") @auto_attr_custom("get_sequential", sequential=True) -@auto_attr_custom("resolve_unique", resolve_unique=True) -def env_get(self, env_expr, symbol_expr, resolve_unique=False, - sequential=False, sequential_from=Self, recursive=True, - filter_prop=None): +def env_get(self, env_expr, symbol_expr, sequential=False, + sequential_from=Self, recursive=True, filter_prop=None): """ Perform a lexical environment lookup. @@ -27,10 +25,6 @@ def env_get(self, env_expr, symbol_expr, resolve_unique=False, to get the element from. :param AbstractExpression|str symbol_expr: Expression that will yield the symbol to use as a key on the env, or a string to turn into a symbol. - :param bool resolve_unique: Wether we want an unique result or not. - NOTE: For the moment, nothing will be done to ensure that only one - result is available. The implementation will just take the first - result. :param bool sequential: Whether resolution needs to be sequential or not. :param AbstractExpression sequential_from: If resolution needs to be sequential, must be an expression to use as the reference node. @@ -92,27 +86,22 @@ def env_get(self, env_expr, symbol_expr, resolve_unique=False, filter_prop.require_untyped_wrapper() - return EnvGet(env_constr_expr, sym_expr, resolve_unique, recursive_expr, - from_expr, filter_prop, self) + return EnvGet(env_constr_expr, sym_expr, recursive_expr, from_expr, + filter_prop, self) class EnvGet(ComputingExpr): - def __init__(self, env_expr, key_expr, resolve_unique, recursive_expr, + def __init__(self, env_expr, key_expr, recursive_expr, sequential_from=None, filter_prop=None, abstract_expr=None): self.env_expr = env_expr self.key_expr = key_expr - self.resolve_unique = resolve_unique self.recursive_expr = recursive_expr self.sequential_from = sequential_from self.filter_prop = filter_prop + self.static_type = T.root_node.entity.array super(EnvGet, self).__init__('Env_Get_Result', abstract_expr=abstract_expr) - @property - def type(self): - t = T.root_node.entity - return t if self.resolve_unique else t.array - def _render_pre(self): result = [ self.env_expr.render_pre(), @@ -135,7 +124,7 @@ class EnvGet(ComputingExpr): array_expr = 'AST_Envs.Get ({})'.format( ', '.join('{} => {}'.format(n, v) for n, v in args) ) - result_expr = 'Get ({}, 0)' if self.resolve_unique else 'Create ({})' + result_expr = 'Create ({})' # In both cases above, the expression is going to be a function call # that returns a new ownership share, so there is no need for an @@ -151,7 +140,6 @@ class EnvGet(ComputingExpr): return { 'env': self.env_expr, 'key': self.key_expr, - 'resolve_unique': self.resolve_unique, 'recursive': self.recursive_expr, 'sequential_from': self.sequential_from, 'filter_prop': self.filter_prop
Update api-ref for partial download requests. Change [1] fixed partial downloads in glance and changed the status codes to be more appropriate. Updating api-ref to reflect what the v2 code will now do. [1]
@@ -102,16 +102,24 @@ verify the integrity of the image data. - You can download the binary image data in your machine if the image has image data. -- If image data exists, the call returns the HTTP ``200`` response code. +- If image data exists, the call returns the HTTP ``200`` response code for a + full image download request. + +- If image data exists, the call returns the HTTP ``206`` response code for a + partial download request. - If no image data exists, the call returns the HTTP ``204`` (No Content) response code. -- If no image record exists, the call returns the HTTP ``404`` response code. +- If no image record exists, the call returns the HTTP ``404`` response code + for an attempted full image download request. + +- For an unsatisfiable partial download request, the call returns the HTTP + ``416`` response code. -Normal response codes: 200, 204 +Normal response codes: 200, 204, 206 -Error response codes: 400, 403, 404 +Error response codes: 400, 403, 404, 416 Request
Update v_generate_tbl_ddl.sql Added ENCODE RAW keyword for non compressed columns (Issue
@@ -43,6 +43,7 @@ History: 2017-05-03 pvbouwel Change table & schemaname of Foreign key constraints to allow for filters 2018-01-15 pvbouwel Add QUOTE_IDENT for identifiers (schema,table and column names) 2018-05-30 adedotua Add table_id column +2018-05-30 adedotua Added ENCODE RAW keyword for non compressed columns (Issue #308) **********************************************************************************************/ CREATE OR REPLACE VIEW admin.v_generate_tbl_ddl AS @@ -110,7 +111,7 @@ FROM ELSE UPPER(format_type(a.atttypid, a.atttypmod)) END AS col_datatype ,CASE WHEN format_encoding((a.attencodingtype)::integer) = 'none' - THEN '' + THEN 'ENCODE RAW' ELSE 'ENCODE ' + format_encoding((a.attencodingtype)::integer) END AS col_encoding ,CASE WHEN a.atthasdef IS TRUE THEN 'DEFAULT ' + adef.adsrc ELSE '' END AS col_default
In this commit:
* I cleaned up some residue code
* The quiz now selects a random category if None provided
* Updated doc strings
* Displaying the category when the quiz is starting
@@ -45,13 +45,10 @@ class TriviaQuiz(commands.Cog): return questions @commands.group(name="quiz", aliases=["trivia"], invoke_without_command=True) - async def quiz_game(self, ctx: commands.Context, category: str = "general") -> None: + async def quiz_game(self, ctx: commands.Context, category: str = None) -> None: """ Start a quiz! - If the quiz game is running, then the owner or a mod can stop it by using this command - without providing any arguments or vice versa. - Questions for the quiz can be selected from the following categories: - general : Test your general knowledge. (default) (More to come!) @@ -68,22 +65,22 @@ class TriviaQuiz(commands.Cog): f"Game is already running..." f"do `{self.bot.command_prefix}quiz stop`" ) - await self.stop_quiz(ctx.author, ctx.channel) - return - category = category.lower() # Send embed showing available categori es if inputted category is invalid. - if category not in self.categories: + if category is None: + category = random.choice(list(self.categories)) + if category.lower() not in self.categories: embed = self.category_embed() await ctx.send(embed=embed) return + # Start game if not running. if self.game_status[ctx.channel.id] is False: self.game_owners[ctx.channel.id] = ctx.author self.game_status[ctx.channel.id] = True - start_embed = self.make_start_embed() + start_embed = self.make_start_embed(category) await ctx.send(embed=start_embed) # send an embed with the rules await asyncio.sleep(1) @@ -179,12 +176,13 @@ class TriviaQuiz(commands.Cog): await asyncio.sleep(2) @staticmethod - def make_start_embed() -> discord.Embed: + def make_start_embed(category: str) -> discord.Embed: """Generate a starting/introduction embed for the quiz.""" start_embed = discord.Embed(colour=discord.Colour.red()) start_embed.title = "Quiz game Starting!!" start_embed.description = "Each game consists of 5 questions.\n" start_embed.description += "**Rules :**\nNo cheating and have fun!" + start_embed.description += f"\n **Category** : {category}" start_embed.set_footer( text="Points for each question reduces by 25 after 10s or after a hint. Total time is 30s per question" ) @@ -192,7 +190,10 @@ class TriviaQuiz(commands.Cog): @quiz_game.command(name="stop") async def stop_quiz(self, ctx: commands.Context) -> None: - """Stop a quiz game if its running in the channel.""" + """ + Stop a quiz game if its running in the channel. + Note: Only mods or the owner of the quiz can stop it. + """ if self.game_status[ctx.channel.id] is True: # Check if the author is the game starter or a moderator. if (
Add safeguard against malicious exploitation of ISO_C_BINDING import For security, write `use, intrinsic :: ISO_C_BINDING` instead of `use ISO_C_BINDING` (fixes
@@ -284,7 +284,7 @@ class FCodePrinter(CodePrinter): name=name) imports = ''.join(self._print(i) for i in expr.imports) - imports += 'use ISO_C_BINDING\n' + imports += 'use, intrinsic :: ISO_C_BINDING\n' decs = ''.join(self._print(i) for i in expr.declarations) body = '' @@ -335,7 +335,7 @@ class FCodePrinter(CodePrinter): self._handle_fortran_specific_a_prioris(self.parser.get_variables(self._namespace)) name = 'prog_{0}'.format(self._print(expr.name)).replace('.', '_') imports = ''.join(self._print(i) for i in expr.imports) - imports += 'use ISO_C_BINDING\n' + imports += 'use, intrinsic :: ISO_C_BINDING\n' body = self._print(expr.body) # Print the declarations of all variables in the namespace, which include: @@ -1381,7 +1381,7 @@ class FCodePrinter(CodePrinter): self._handle_fortran_specific_a_prioris(list(f.arguments) + list(f.results)) parts = self.function_signature(f, f.name) parts = ["{}({}) {}\n".format(parts['sig'], parts['arg_code'], parts['func_end']), - 'use iso_c_binding\n', + 'use, intrinsic :: ISO_C_BINDING\n', parts['arg_decs'], 'end {} {}\n'.format(parts['func_type'], f.name)] funcs_sigs.append(''.join(a for a in parts)) @@ -1479,7 +1479,7 @@ class FCodePrinter(CodePrinter): interfaces = '\n'.join(self._print(i) for i in expr.interfaces) arg_code = ', '.join(self._print(i) for i in chain( arguments, results )) imports = ''.join(self._print(i) for i in expr.imports) - imports += 'use ISO_C_BINDING' + imports += 'use, intrinsic :: ISO_C_BINDING' prelude = ''.join(self._print(i) for i in args_decs.values()) body_code = self._print(expr.body) doc_string = self._print(expr.doc_string) if expr.doc_string else ''
remove format arg removing the format arg since we also removed it from the posts.py logic
{% if can_download %} <span class='section'>Downloads</span> <a href="{{url_for('posts.download')}}?type=kp&post={{post_path|urlencode}}" class="btn btn-primary btn-download" style='display: block;'>Portable Knowledge Post</a> - <a href="{{url_for('posts.download')}}?type=post&format=pdf&post={{post_path|urlencode}}" class="btn btn-primary btn-download" style='display: block;'>PDF</a> + <a href="{{url_for('posts.download')}}?type=pdf&post={{post_path|urlencode}}" class="btn btn-primary btn-download" style='display: block;'>PDF</a> <a href="{{url_for('posts.download')}}?type=zip&post={{post_path|urlencode}}" class="btn btn-primary btn-download" style='display: block;'>ZIP Archive</a> {% if downloads %} <span class='subsection'>Source Files</span>
Capitalisation mistake Changed "BigchaindB" to "BigchainDB"
@@ -13,7 +13,7 @@ Code is Apache-2.0 and docs are CC-BY-4.0 BigchainDB Server requires Python 3.5+ and Python 3.5+ [will run on any modern OS](https://docs.python.org/3.5/using/index.html), but we recommend using an LTS version of [Ubuntu Server](https://www.ubuntu.com/server) or a similarly server-grade Linux distribution. -_Don't use macOS_ (formerly OS X, formerly Mac OS X), because it's not a server-grade operating system. Also, BigchaindB Server uses the Python multiprocessing package and [some functionality in the multiprocessing package doesn't work on Mac OS X](https://docs.python.org/3.6/library/multiprocessing.html#multiprocessing.Queue.qsize). +_Don't use macOS_ (formerly OS X, formerly Mac OS X), because it's not a server-grade operating system. Also, BigchainDB Server uses the Python multiprocessing package and [some functionality in the multiprocessing package doesn't work on Mac OS X](https://docs.python.org/3.6/library/multiprocessing.html#multiprocessing.Queue.qsize). ## General Considerations
[Sigma] Update OriginalFileName mapping
* Update OriginalFileName mapping: the previous mapping did create a query that was not parseable, e.g. for rule #
* remove one Image mapping that was not unique
@@ -326,7 +326,6 @@ fieldmappings: TargetFilename: product=linux: filename default: xml_string - Image: xml_string # that is a value name that might be used in other queries as well. Ideally it would be something _all ImageLoaded: xml_string QueryName: xml_string TargetProcessAddress: xml_string @@ -339,7 +338,7 @@ fieldmappings: Signed: xml_string ScriptBlockText: xml_string ContextInfo: xml_string - OriginalFileName: "xml_string:*OriginalFileName*" + OriginalFileName: xml_string # 80167ada-7a12-41ed-b8e9-aa47195c66a1 Payload: xml_string HostName: xml_string #96b9f619-aa91-478f-bacb-c3e50f8df575 HostApplication: xml_string #96b9f619-aa91-478f-bacb-c3e50f8df575
fix doctest again... python 2 and 3 don't print lines/columns of datasets/dataframes in the same order: change calls to specify order or narrow selection
@@ -166,7 +166,7 @@ class DataRecord(Dataset): the dimension 'item_id'. >>> dr2=DataRecord(grid, ... items=my_items2) - >>> dr2.to_dataframe() + >>> dr2.to_dataframe()[['grid_element', 'element_id']] grid_element element_id item_id 0 node 1 @@ -181,7 +181,7 @@ class DataRecord(Dataset): >>> dr3=DataRecord(grid, ... time=[0.], ... items=my_items3) - >>> dr3.to_dataframe() + >>> dr3.to_dataframe()[['grid_element', 'element_id']] grid_element element_id item_id time 0 0.0 node 1 @@ -625,10 +625,8 @@ class DataRecord(Dataset): ... ['item_id', 'time'], [[10],[5]])}) Two items have been added at a new timestep 1.0: - >>> dr3.coords - Coordinates: - * item_id (item_id) int64 0 1 2 3 - * time (time) float64 0.0 1.0 + >>> dr3['item_id'].values, dr3['time'].values + (array([0, 1, 2, 3]), array([ 0., 1.])) If a data variable is also added with the new items ('size' in this example), the values for this variable are filled with 'nan' for the @@ -639,7 +637,6 @@ class DataRecord(Dataset): The previous line calls the values of the variable 'size', for all items, at time=1; the first two items don't have a value for the variable 'size'. - """ if model__time is None and 'time' in self['grid_element'].coords: @@ -830,27 +827,17 @@ class DataRecord(Dataset): ... time=[50.], ... items=my_items4, ... data_vars=my_data4) - >>> dr4 - <xarray.DataRecord> - Dimensions: (item_id: 4, time: 1) - Coordinates: - * time (time) float64 50.0 - * item_id (item_id) int64 0 1 2 3 - Data variables: - grid_element (item_id, time) object 'node' 'node' 'node' 'node' - element_id (item_id, time) int64 1 3 3 7 - item_size (item_id, time) float64 0.3 0.4 0.8 0.4 + >>> dr4['item_size'].values + array([[ 0.3], + [ 0.4], + [ 0.8], + [ 0.4]]) >>> dr4.set_data(50.,2,'item_size', 0.5) - >>> dr4 - <xarray.DataRecord> - Dimensions: (item_id: 4, time: 1) - Coordinates: - * time (time) float64 50.0 - * item_id (item_id) int64 0 1 2 3 - Data variables: - grid_element (item_id, time) object 'node' 'node' 'node' 'node' - element_id (item_id, time) int64 1 3 3 7 - item_size (item_id, time) float64 0.3 0.4 0.5 0.4 + >>> dr4['item_size'].values + array([[ 0.3], + [ 0.4], + [ 0.5], + [ 0.4]]) """
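The same doctest-stabilising trick in plain pandas: pin the column order explicitly, or narrow to `.values`, so the printed output no longer depends on Python 2 vs. 3 ordering:

```python
import pandas as pd

df = pd.DataFrame({"element_id": [1, 2], "grid_element": ["node", "node"]})
print(df)                                  # order depended on dict ordering under Python 2
print(df[["grid_element", "element_id"]])  # explicit column order -> stable output
print(df["element_id"].values)             # or assert on the values only
```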
fix first submission not associated with user ID see
@@ -180,12 +180,11 @@ class PenguinStatsReporter: ) - if self.logged_in: client = self.client - else: + if not self.logged_in: uid = config.get('reporting/penguin_stats_uid', None) if uid is not None: - self.try_login(uid) + if not self.try_login(uid): # use exclusive client instance to get response cookie client = penguin_client.ApiClient() api = penguin_client.ReportApi(client)
Update pytests-dev.yml Removed windows testing in set env var
@@ -27,12 +27,7 @@ jobs: - name: Set environment variables run: | - if [[ ${{ matrix.os }} == windows* ]] ; - then - echo "CONDA_ENV_FILE=ci/requirements/environment-windows.yml" >> $GITHUB_ENV - else echo "CONDA_ENV_FILE=ci/requirements/py${{matrix.python-version}}-dev.yml" >> $GITHUB_ENV - fi echo "PYTHON_VERSION=${{ matrix.python-version }}" >> $GITHUB_ENV - name: Cache conda
Handle user error in update labels
* Get only required fields when processing messages
* Handle customer id in group add/delete/sync/clear (I always wondered what this was: if user_email != u'*')
* Allow * to mean all users in group operations
* Make pylint happy, handle user error in update labels
@@ -4714,7 +4714,7 @@ def doProcessMessagesOrThreads(users, function, unit=u'messages'): for my_key in body: kwargs[u'body'][my_key] = labelsToLabelIds(gmail, body[my_key]) if not kwargs[u'body']: - del(kwargs[u'body']) + del kwargs[u'body'] i = 0 if unit == u'messages' and function in [u'delete', u'modify']: batchFunction = u'batch%s' % function.title() @@ -4963,7 +4963,11 @@ def renameLabels(users): continue match_result = re.search(pattern, label[u'name']) if match_result is not None: + try: new_label_name = replace % match_result.groups() + except TypeError: + print u'ERROR: The number of subfields ({0}) in search "{1}" does not match the number of subfields ({2}) in replace "{3}"'.format(len(match_result.groups()), search, replace.count(u'%s'), replace) + sys.exit(2) print u' Renaming "%s" to "%s"' % (label[u'name'], new_label_name) try: callGAPI(gmail.users().labels(), u'patch', soft_errors=True, throw_reasons=[u'aborted'], id=label[u'id'], userId=user, body={u'name': new_label_name})
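The user error that the new try/except catches is a plain `%`-formatting mismatch between the number of regex capture groups and the number of `%s` placeholders; a standalone reproduction:

```python
import re

search, replace = r"(\w+)/(\w+)", "%s"   # 2 capture groups, only 1 %s placeholder
m = re.search(search, "Work/Invoices")
try:
    print(replace % m.groups())
except TypeError:
    print("number of subfields in search (2) does not match replace (1)")
```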
Scons: Fixup for Python3 compiler version output decoding * This should only affect MinGW64 with Python3, but was not generally observed.
@@ -303,7 +303,7 @@ def detectVersion(env, cc): # version = line line = pipe.stdout.readline() - if str is not bytes: + if str is not bytes and type(line) is bytes: line = line.decode("utf8") match = re.search(r'[0-9]+(\.[0-9]+)+', line)
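The guard decodes only when the pipe really handed back `bytes`, which keeps it a no-op on Python 2 (where `str is bytes`) and harmless when Python 3 already returns `str`; a minimal illustration:

```python
def normalize(line):
    if str is not bytes and type(line) is bytes:  # Python 3, and genuinely bytes
        line = line.decode("utf8")
    return line

print(normalize(b"gcc (GCC) 11.2.0"))  # decoded
print(normalize("gcc (GCC) 11.2.0"))   # left alone -- the case that used to fail
```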
Fixing detection to conform to new sysmon behavior.
@@ -13,7 +13,7 @@ description: this detection was designed to identifies suspicious office documen or other malware component. It is really good practice to disable macro by default to avoid automatically execute macro code while opening or closing a office document files. -search: '`sysmon` EventCode=7 process_name IN ("WINWORD.EXE", "EXCEL.EXE", "POWERPNT.EXE") +search: '`sysmon` EventCode=7 parent_process_name IN ("WINWORD.EXE", "EXCEL.EXE", "POWERPNT.EXE") ImageLoaded IN ("*\\VBE7INTL.DLL","*\\VBE7.DLL", "*\\VBEUI.DLL") | stats min(_time) as firstTime max(_time) as lastTime values(ImageLoaded) as AllImageLoaded count by Computer EventCode Image process_name ProcessId ProcessGuid | `security_content_ctime(firstTime)`
DOC: clarify typical optional input Update concat docstring with info about sorting.
@@ -1583,7 +1583,8 @@ class Instrument(object): ---- For pandas, sort=False is passed along to the underlying pandas.concat method. If sort is supplied as a keyword, the - user provided value is used instead. + user provided value is used instead. Recall that sort orders the + data columns, not the data values or the index. For xarray, dim=Instrument.index.name is passed along to xarray.concat except if the user includes a value for dim as a keyword argument.
optional 'forceconvert' parameter overrides deluge getting list of files from torrent in favor of just reading from directory
@@ -49,6 +49,20 @@ try: category = torrent_data['label'].lower() files = [] + + # Check forcepath which overrides talking to deluge for files and instead reads the path + try: + force = (str(sys.argv[4]).lower().strip() == 'forcepath') + except: + force = False + + if force: + log.debug("List of files in path override:") + for r, d, f in os.walk(path): + for file in f: + files.append(file) + log.debug(file) + else: log.debug("List of files in torrent:") for contents in torrent_files: try: @@ -93,7 +107,7 @@ try: log.error("No files provided by torrent") for filename in files: - inputfile = os.path.join(path, filename) + inputfile = filename if force else os.path.join(path, filename) info = mp.isValidSource(inputfile) if info: log.info("Converting file %s at location %s." % (inputfile, settings.output_dir)) @@ -112,7 +126,7 @@ try: except: log.exception("Unable to make copy directory %s." % newpath) for filename in files: - inputfile = os.path.join(path, filename) + inputfile = filename if force else os.path.join(path, filename) log.info("Copying file %s to %s." % (inputfile, newpath)) shutil.copy(inputfile, newpath) path = newpath