Columns: message (string, length 13-484), diff (string, length 38-4.63k)
Add Dropout to blacklist Summary: Pull Request resolved: Add Dropout to blacklist to avoid the error in eager mode quantization. ghstack-source-id: Test Plan: Test locally in python notebook.
@@ -11,7 +11,7 @@ from .QConfig import default_dynamic_qconfig import torch.nn.qat as nnqat -DEFAULT_SKIP_LIST = [nn.Identity, nn.MaxPool2d, nn.AvgPool2d, nn.AdaptiveAvgPool2d] +DEFAULT_SKIP_LIST = [nn.Dropout, nn.Identity, nn.MaxPool2d, nn.AvgPool2d, nn.AdaptiveAvgPool2d] def propagate_qconfig_helper(module, qconfig_dict, skip_list=DEFAULT_SKIP_LIST, qconfig_parent=None, prefix=''): r"""This is a helper function for `propagate_qconfig`
Better validation of filelists Closes
@@ -980,8 +980,12 @@ def _normalize_fileset(fileset, treename): if isinstance(fileset, str): with open(fileset) as fin: fileset = json.load(fin) + elif not isinstance(fileset, Mapping): + raise ValueError("Expected fileset to be a path string or mapping") for dataset, filelist in fileset.items(): if isinstance(filelist, dict): + if set(filelist.keys()) - {"treename", "files"}: + raise ValueError(f"Extraneous arguments in fileset dictionary, expected treename, files but got {set(filelist.keys())}") local_treename = filelist['treename'] if 'treename' in filelist else treename filelist = filelist['files'] elif isinstance(filelist, list):
util.commandline: StoreRepoObject: skip check for unknown aliases If no aliases were allowed.
@@ -286,6 +286,7 @@ class StoreRepoObject(StoreConfigObject): self.repo_key = self.valid_repo_types[self.repo_type] self.allow_aliases = set(kwargs.pop("allow_aliases", ())) + if self.allow_aliases: unknown_aliases = self.allow_aliases.difference(self.valid_repo_types) if unknown_aliases: raise argparse.ArgumentTypeError(
snake: Refactor awkward [0] to a short-circuit In get_raw_secret_value()
@@ -657,13 +657,9 @@ def get_raw_secret_value( plugin_secrets = plugin.analyze(file_handle, filename) - matching_secret = [ - plugin_secret.secret_value - for plugin_secret in plugin_secrets - if plugin_secret.secret_hash == secret['hashed_secret'] - ] + # Return value of matching secret + for plugin_secret in plugin_secrets: + if plugin_secret.secret_hash == secret['hashed_secret']: + return plugin_secret.secret_value - if not matching_secret: raise SecretNotFoundOnSpecifiedLineError(secret['line_number']) - - return matching_secret[0]
pmaint regen: handle repos without cache in a better fashion Previously this would just crash when no repo args were passed since the attr would be missing for the stacked global repo. We really need a way to limit certain tools to certain types of repos.
@@ -282,7 +282,7 @@ def regen_main(options, out, err): if not repo.operations.supports("regen_cache"): out.write("repository %s doesn't support cache regeneration" % (repo,)) continue - elif not repo.cache and not options.force: + elif not getattr(repo, 'cache', False) and not options.force: out.write("skipping repo %s: cache disabled" % (repo,)) continue
Inkscape_r2p: Fix imports for Python 2/3 Add absolute_import, change import syntax.
@@ -12,11 +12,12 @@ the vectorpdf code to process the PDF. and doesn't check return from inkscape for errors. ''' +from __future__ import absolute_import import sys, os, tempfile, subprocess from weakref import WeakKeyDictionary from rst2pdf.log import log -from vectorpdf_r2p import VectorPdf +from .vectorpdf_r2p import VectorPdf import rst2pdf.image
Fixed CHECK failure in logger module due to Docker workaround. NOTE: Marathon/Metronome apps do not allow colons in the task ID.
"single_source" : { "kind": "git", "git": "https://github.com/dcos/dcos-mesos-modules.git", - "ref": "7ce5ba397bbaa8fdcc985ef2e6dd8bc81709931a", - "ref_origin": "1.9.x" + "ref": "2a5a02f1de47dcbe44844f7358adf85ea630cf4a", + "ref_origin": "master" } }
Fix bug in discv5 logging statement Fixes
@@ -781,7 +781,7 @@ class DiscoveryProtocol(asyncio.DatagramProtocol): if received_echo != echo: self.logger.warning( "Unexpected topic_nodes from %s, expected echo %s, got %s", - encode_hex(echo), encode_hex(received_echo)) + remote, encode_hex(echo), encode_hex(received_echo)) return nodes.extend(response)
No need for if_main_process() in on_stage_end run_on_main() is already used in `core.py` for `on_stage_end` function
@@ -116,7 +116,7 @@ class W2VBrain(sb.core.Brain): stage_stats["acc"] = sum(self.acc_metric) / len(self.acc_metric) # Perform end-of-iteration things, like annealing, logging, etc. - if stage == sb.Stage.VALID and sb.utils.distributed.if_main_process(): + if stage == sb.Stage.VALID: lr = self.hparams.noam_annealing.current_lr steps = self.hparams.noam_annealing.n_steps optimizer = self.optimizer.__class__.__name__
slack: Parse emoji skin tone variants. Fixes part of
@@ -1123,7 +1123,11 @@ def build_reactions( # For the Unicode emoji codes, we use equivalent of # function 'emoji_name_to_emoji_code' in 'zerver/lib/emoji' here for slack_reaction in reactions: - emoji_name = slack_reaction["name"] + # Slack's data exports use encode skin tone variants on emoji + # reactions like this: `clap::skin-tone-2`. For now, we only + # use the name of the base emoji, since Zulip's emoji + # reactions system doesn't yet support skin tone modifiers. + emoji_name = slack_reaction["name"].split("::", maxsplit=1)[0] if emoji_name in slack_emoji_name_to_codepoint: emoji_code = slack_emoji_name_to_codepoint[emoji_name] try:
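For reference, a quick sketch of the split performed on the reaction name (plain Python, independent of the Zulip import code):

```python
# "clap::skin-tone-2" -> base name "clap"; names without "::" pass through unchanged.
for name in ("clap::skin-tone-2", "clap"):
    print(name.split("::", maxsplit=1)[0])
# clap
# clap
```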
Fixes Circuit.expand_subcircuits and adds .expand_subcircuits_inplace. The prior .expand_subcircuits() method worked in-place, so it was renamed to .expand_subcircuits_inplace() and a new .expand_subcircuits() method was created which simply copies a circuit and expands the copy's subcircuits in-place before returning it.
@@ -1682,7 +1682,7 @@ class Circuit(object): parallel_lbls = [_Label(lbl_list) if len(lbl_list) != 1 else lbl_list[0] for lbl_list in parallel_lbls] return Circuit._fastinit(tuple(parallel_lbls), self.line_labels, editable=False, occurrence=self.occurrence) - def expand_subcircuits(self): # INPLACE + def expand_subcircuits_inplace(self): """ Expands all :class:`CircuitLabel` labels within this circuit. @@ -1713,6 +1713,19 @@ class Circuit(object): self.set_labels(subc.components * subc.reps, slice(i, i + subc.depth), subc.sslbls) # dump in the contents + def expand_subcircuits(self): + """ + Returns a new circuit with :class:`CircuitLabel` labels expanded. + + Returns + ------- + Circuit + """ + cpy = self.copy(editable=True) + cpy.expand_subcircuits_inplace() + cpy.done_editing() + return cpy + def factorize_repetitions_inplace(self): """ Attempt to replace repeated sub-circuits with :class:`CircuitLabel` objects.
Updates packages: arrow-0.12.1, boto3-1.5.22, botocore-1.8.36, certifi-2018.1.18
# # pip-compile --output-file requirements.txt requirements.in # -arrow==0.12.0 +arrow==0.12.1 blessed==1.14.2 -boto3==1.5.14 -botocore==1.8.28 # via boto3, s3transfer -certifi==2017.11.5 # via requests +boto3==1.5.22 +botocore==1.8.36 # via boto3, s3transfer +certifi==2018.1.18 # via requests chardet==3.0.4 # via requests django-picklefield==1.0.0 django-redis==4.8.0
Fix 15135 * Fix 15135 Remove the `update_addon_total_downloads` per * Update the run time for update_addon_weekly_downloads
@@ -36,8 +36,7 @@ HOME=/tmp 00 12 * * * %(django)s download_counts_from_file # Once per day after metrics import is done -30 12 * * * %(z_cron)s update_addon_total_downloads -35 12 * * * %(z_cron)s update_addon_weekly_downloads +30 12 * * * %(z_cron)s update_addon_weekly_downloads 30 13 * * * %(z_cron)s update_addon_average_daily_users 00 14 * * * %(z_cron)s index_latest_stats
Fix Rosenbrock tutorial The algorithms list had a missing value, which would cause the script to error out
@@ -83,7 +83,7 @@ sampler.sample(rep) results.append(sampler.getdata()) -sampler=spotpy.algorithms.demcz(spot_setup, parallel=parallel, dbname='RosenDREAM', dbformat=dbformat, sim_timeout=timeout) +sampler=spotpy.algorithms.demcz(spot_setup, parallel=parallel, dbname='RosenDEMCZ', dbformat=dbformat, sim_timeout=timeout) print(describe(sampler)) sampler.sample(rep) results.append(sampler.getdata()) @@ -99,5 +99,5 @@ print(results[0].dtype) # Check for Travis: Get the last sampled parameter for x evaluation = spot_setup.evaluation() # Example how to plot the data -#algorithms = ['mc','lhs','mle','mcmc','sceua','sa','demcz','rope','abc','fscabc','dream'] -#spotpy.analyser.plot_parametertrace_algorithms(results,algorithmnames=algorithms,parameternames=['x','y']) +algorithms = ['mc','lhs','mle','mcmc','sceua','sa','demcz','rope','abc','fscabc', 'demcz', 'dream'] +spotpy.analyser.plot_parametertrace_algorithms(results,algorithmnames=algorithms,parameternames=['x','y'])
Fix .get_buttons failing for some messages sent by the bot Closes
@@ -1080,7 +1080,8 @@ class Message(ChatGetter, SenderGetter, TLObject, abc.ABC): for row in self.reply_markup.rows: for button in row.buttons: if isinstance(button, types.KeyboardButtonSwitchInline): - if button.same_peer: + # no via_bot_id means the bot sent the message itself (#1619) + if button.same_peer or not self.via_bot_id: bot = self.input_sender if not bot: raise ValueError('No input sender')
Adjust paired PureCN to the new way of locating PureCN.R Unlike run_purecn_normaldb, run_purecn was never adjusted.
@@ -165,7 +165,10 @@ def _run_purecn(paired, work_dir): # Use UCSC style naming for human builds to support BSgenome genome = ("hg19" if dd.get_genome_build(paired.tumor_data) in ["GRCh37", "hg19"] else dd.get_genome_build(paired.tumor_data)) - cmd = ["PureCN.R", "--seed", "42", "--out", tx_out_base, "--rds", "%s.rds" % tx_out_base, + rscript = utils.Rscript_cmd() + purecn_r = utils.R_package_script("PureCN", "extdata/PureCN.R") + cmd = [rscript, purecn_r, "--seed", "42", "--out", tx_out_base, + "--rds", "%s.rds" % tx_out_base, "--sampleid", dd.get_sample_name(paired.tumor_data), "--genome", genome, "--vcf", vcf_file, "--tumor", cnr_file,
Fix incorrect smartos.vm_present example When looking up my own docs because I forgot how it worked, I noticed an example was broken. I also added an extra warning, as I wasted half an hour on a silly mistake.
@@ -58,7 +58,7 @@ Management of SmartOS Standalone Compute Nodes label: 'test kvm' owner: 'sjorge' disks: - disk0 + disk0: size: 2048 model: virtio compression: lz4 @@ -111,6 +111,12 @@ Management of SmartOS Standalone Compute Nodes they get removed via add_*, set_*, update_*, and remove_*. Properties must be manually reset to their default value. The same behavior as when using 'vmadm update'. + +.. warning:: + + For HVM (bhyve and KVM) brands the `image_uuid` field should go on the boot disks, + this disk should NOT have a size specified. (See man vmadm) + ''' from __future__ import absolute_import, unicode_literals, print_function
Add choices for GG45 and TERA connectors and Cat7a/Cat8 cables Fixes
@@ -873,6 +873,10 @@ class PortTypeChoices(ChoiceSet): TYPE_8P6C = '8p6c' TYPE_8P4C = '8p4c' TYPE_8P2C = '8p2c' + TYPE_GG45 = 'gg45' + TYPE_TERA4P = 'tera-4p' + TYPE_TERA2P = 'tera-2p' + TYPE_TERA1P = 'tera-1p' TYPE_110_PUNCH = '110-punch' TYPE_BNC = 'bnc' TYPE_MRJ21 = 'mrj21' @@ -898,6 +902,10 @@ class PortTypeChoices(ChoiceSet): (TYPE_8P6C, '8P6C'), (TYPE_8P4C, '8P4C'), (TYPE_8P2C, '8P2C'), + (TYPE_GG45, 'GG45'), + (TYPE_TERA4P, 'TERA 4P'), + (TYPE_TERA2P, 'TERA 2P'), + (TYPE_TERA1P, 'TERA 1P'), (TYPE_110_PUNCH, '110 Punch'), (TYPE_BNC, 'BNC'), (TYPE_MRJ21, 'MRJ21'), @@ -936,6 +944,8 @@ class CableTypeChoices(ChoiceSet): TYPE_CAT6 = 'cat6' TYPE_CAT6A = 'cat6a' TYPE_CAT7 = 'cat7' + TYPE_CAT7A = 'cat7a' + TYPE_CAT8 = 'cat8' TYPE_DAC_ACTIVE = 'dac-active' TYPE_DAC_PASSIVE = 'dac-passive' TYPE_MRJ21_TRUNK = 'mrj21-trunk' @@ -960,6 +970,8 @@ class CableTypeChoices(ChoiceSet): (TYPE_CAT6, 'CAT6'), (TYPE_CAT6A, 'CAT6a'), (TYPE_CAT7, 'CAT7'), + (TYPE_CAT7A, 'CAT7a'), + (TYPE_CAT8, 'CAT8'), (TYPE_DAC_ACTIVE, 'Direct Attach Copper (Active)'), (TYPE_DAC_PASSIVE, 'Direct Attach Copper (Passive)'), (TYPE_MRJ21_TRUNK, 'MRJ21 Trunk'),
Add new Absorp Chl input to modelinputs.json The added AbsorptionChillerElecCOP parameter is always passed to utils.jl/reopt.jl, so we need to add it to modelinputs.json for the precompile
"BoilerEfficiency": 1.0 , "ElectricChillerCOP": 1.0 , "AbsorptionChillerCOP": 1.0 , + "AbsorptionChillerElecCOP": 1.0, "CHPThermalProdSlope": [] , "CHPThermalProdIntercept": [] , "FuelBurnYIntRate": [] ,
compose: Change borders from gray to translucent. This works better on color backgrounds while staying the same on white. The compose input borders are also darkened slightly.
.compose-content { - border-top: 1px solid hsl(0, 0%, 93%); + border-top: 1px solid hsla(0, 0%, 0%, 0.07); -webkit-transition: background-color 200ms linear; -moz-transition: background-color 200ms linear; -o-transition: background-color 200ms linear; @@ -299,7 +299,7 @@ textarea.new_message_textarea { textarea.new_message_textarea, .compose_table .recipient_box { - border: 1px solid hsl(0, 0%, 86%); + border: 1px solid hsla(0, 0%, 0%, 0.2); box-shadow: none; -webkit-box-shadow: none; transition: border 0.2s ease;
Fix docstring of test_request_context Add missing `app.`.
@@ -2448,7 +2448,7 @@ class Flask(Scaffold): :data:`request` point at the request for the created environment. :: - with test_request_context(...): + with app.test_request_context(...): generate_report() When using the shell, it may be easier to push and pop the
refactor: support generators in db.bulk_insert When you want to insert a huge number of docs into the DB, bulk insert currently requires creating a huge number of objects up front. This is inefficient from a memory perspective and also delays any failures until the last step of execution.
import datetime import json +import itertools import random import re import string import traceback from contextlib import contextmanager, suppress from time import time +from typing import Any, Dict, Iterable, List, Optional, Sequence, Tuple, Union from pypika.dialects import MySQLQueryBuilder, PostgreSQLQueryBuilder from pypika.terms import Criterion, NullValue @@ -1204,28 +1206,36 @@ class Database: frappe.flags.touched_tables = set() frappe.flags.touched_tables.update(tables) - def bulk_insert(self, doctype, fields, values, ignore_duplicates=False, *, chunk_size=10_000): + def bulk_insert( + self, + doctype: str, + fields: List[str], + values: Iterable[Sequence[Any]], + ignore_duplicates=False, + *, + chunk_size=10_000, + ): """ Insert multiple records at a time :param doctype: Doctype name :param fields: list of fields - :params values: list of list of values + :params values: iterable of values """ - values = list(values) table = frappe.qb.DocType(doctype) - for start_index in range(0, len(values), chunk_size): - query = frappe.qb.into(table) + query = frappe.qb.into(table).columns(fields) + if ignore_duplicates: # Pypika does not have same api for ignoring duplicates - if self.db_type == "mariadb": + if frappe.conf.db_type == "mariadb": query = query.ignore() - elif self.db_type == "postgres": + elif frappe.conf.db_type == "postgres": query = query.on_conflict().do_nothing() - values_to_insert = values[start_index : start_index + chunk_size] - query.columns(fields).insert(*values_to_insert).run() + value_iterator = iter(values) + while value_chunk := tuple(itertools.islice(value_iterator, chunk_size)): + query.insert(*value_chunk).run() def create_sequence(self, *args, **kwargs): from frappe.database.sequence import create_sequence
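A minimal, standalone sketch of the lazy chunking pattern the rewritten bulk_insert relies on (illustrative names only, not Frappe APIs; assumes Python 3.8+ for the walrus operator):

```python
import itertools
from typing import Any, Iterable, Iterator, Sequence, Tuple

def chunked(values: Iterable[Sequence[Any]], chunk_size: int = 10_000) -> Iterator[Tuple[Sequence[Any], ...]]:
    """Yield tuples of at most chunk_size rows without materializing the whole iterable."""
    iterator = iter(values)
    while chunk := tuple(itertools.islice(iterator, chunk_size)):
        yield chunk

# A generator of rows is consumed lazily, one chunk at a time, so memory stays
# bounded and failures surface as soon as the offending chunk is processed.
rows = ((i, f"doc-{i}") for i in range(25_000))
print([len(c) for c in chunked(rows)])  # [10000, 10000, 5000]
```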
enhancement: add a test case of empty patterns for pickle backend add a test case to load some empty patterns for pickle backend.
@@ -20,6 +20,7 @@ class HasParserTrait(TBC.HasParserTrait): class Test_10(TBC.Test_10_dumps_and_loads, HasParserTrait): load_options = dump_options = dict(protocol=TT.pickle.HIGHEST_PROTOCOL) + empty_patterns = [('', {})] class Test_20(TBC.Test_20_dump_and_load, HasParserTrait):
Update MarySpeech.py Enriched the example script with voice listing, voice selection and audio effect usage.
#start Service -mouth = Runtime.createAndStart("MarySpeech", "MarySpeech") +mouth = Runtime.start("MarySpeech", "MarySpeech") + +#possible voices +print ("these are the voices I can have", mouth.getVoices()) +print ("this is the voice I am using", mouth.getVoice()) + +#set a different voice +#mouth.setVoice("cmu-slt-hsmm") +#mouth.speak("Hello world I have a different voice") #speak! +# this blocks until speaking is done mouth.speakBlocking("Hello world") mouth.speakBlocking("I speak English. More voices are available, but they need to be installed") mouth.speakBlocking("Echo echo echo") @@ -20,4 +29,5 @@ mouth.speakBlocking("Happy birthday Kyle") #add voice effects: #more effects and information @ http://myrobotlab.org/service/MarySpeech -mouth.setAudioEffects("FIRFilter+Robot(amount=50)"); +mouth.setAudioEffects("FIRFilter+Robot(amount=50)") +mouth.speakBlocking("this is after a sound effect ")
Add missing instruction to dev installation guide The missing data folder will throw an exception: `sqlalchemy.exc.OperationalError: (sqlite3.OperationalError) unable to open database file`
@@ -40,6 +40,13 @@ Take a look at [Polar][polar] for an excellent way of spinning up a Lightning Ne ## Running the server LNbits uses [Quart][quart] as an application server. +Before running the server for the first time, make sure to create the data folder: + +```sh +$ mkdir data +``` + +To then run the server, use: ```sh $ pipenv run python -m lnbits
Add GUI logging for phobos Log messages are displayed in the GUI depending on the loglevel configured in the add-on settings.
@@ -79,8 +79,8 @@ def decorate(level): def log(message, level="INFO", origin=None, prefix=""): """Logs a given message to the blender console and logging file if present and if log level is low enough. The origin can be defined as string. - However, if not a string the information will also be displayed in the - Blender status bar. + The message is logged by the operator depending on the loglevel + settings. :param message: The message to log. :type message: str. @@ -122,6 +122,20 @@ def log(message, level="INFO", origin=None, prefix=""): if prefs.logtoterminal: print(terminalmsg) + # log in GUI depending on loglevel + import sys + # start from this function + frame = sys._getframe(1) + f_name = frame.f_code.co_name + # go back until operator (using execute) + while f_name != 'execute' and frame != None: + frame = frame.f_back + f_name = frame.f_code.co_name + + # use operator to show message in Blender + if 'self' in frame.f_locals: + origin = frame.f_locals['self'] + # show message in Blender status bar. if origin is not None and type(origin) is not str: # format report message to remove loging level and originname
Update release.md We probably want to delete .gitconfig; otherwise people will be submitting changes to their cloned repos as roscoe :D
@@ -9,7 +9,7 @@ Notes on how to release donkey. 3. Pull the lastest donkeycar code. 4. Make your changes. 5. Move the disk back to your computer. -6. Remove your wi-fi password and change the host name to d2. +6. Remove your wi-fi password and change the host name to d2. Delete `.gitconfig`. 7. Create the disk image from the SD card Run `sudo gparted` to see the size of the disk partitions. Resize the partitions
Improve error logging for failures to update or create StateAnswersCalcOutputModel entities.
@@ -853,8 +853,12 @@ class StateAnswersCalcOutputModel(base_models.BaseMapReduceBatchResultsModel): try: # This may fail if calculation_output is too large. instance.put() - except Exception as e: - logging.error(e) + except Exception: + logging.exception( + 'Failed to add calculation output for exploration ID %s, ' + 'version %s, state name %s, and calculation ID %s' % ( + exploration_id, exploration_version, + state_name.encode('utf-8'), calculation_id)) @classmethod def get_model(cls, exploration_id, exploration_version, state_name,
DOC: Update macro names. [ci skip]
@@ -619,14 +619,14 @@ for NumPy v1.14 and above if (!PyArg_ParseTuple(args, "OOO!", &arg1, &arg2, &PyArray_Type, &out)) return NULL; - arr1 = PyArray_FROM_OTF(arg1, NPY_DOUBLE, NPY_IN_ARRAY); + arr1 = PyArray_FROM_OTF(arg1, NPY_DOUBLE, NPY_ARRAY_IN_ARRAY); if (arr1 == NULL) return NULL; - arr2 = PyArray_FROM_OTF(arg2, NPY_DOUBLE, NPY_IN_ARRAY); + arr2 = PyArray_FROM_OTF(arg2, NPY_DOUBLE, NPY_ARRAY_IN_ARRAY); if (arr2 == NULL) goto fail; #if NPY_API_VERSION >= 0x0000000c - oarr = PyArray_FROM_OTF(out, NPY_DOUBLE, NPY_INOUT_ARRAY2); + oarr = PyArray_FROM_OTF(out, NPY_DOUBLE, NPY_ARRAY_INOUT_ARRAY2); #else - oarr = PyArray_FROM_OTF(out, NPY_DOUBLE, NPY_INOUT_ARRAY); + oarr = PyArray_FROM_OTF(out, NPY_DOUBLE, NPY_ARRAY_INOUT_ARRAY); #endif if (oarr == NULL) goto fail;
MAINT: refactor of TreeGeometryBase [CHANGED] store the cumulative length attribute in params['cum_length'] [CHANGED] moved _init_tip_ranks() method from SquareTreeGeometry to TreeGeometryBase
@@ -490,7 +490,7 @@ class TreeGeometryBase(PhyloNode): if self.is_root(): self._x = 0 elif self._x is None: - val = self.params[self._length] + self.parent.x + val = self.params['cum_length'] self._x = val return self._x @@ -525,13 +525,13 @@ class TreeGeometryBase(PhyloNode): if edge.is_root(): continue - parent_frac = edge.parent.params.get('cum_frac', 0) + parent_frac = edge.parent.params.get('cum_length', 0) if edge.is_tip(): - edge.params['frac_pos'] = 1 - parent_frac + frac = 1 - parent_frac else: frac = 1 / edge.params['max_child_depth'] edge.params['frac_pos'] = frac - edge.params['cum_frac'] = parent_frac + frac + edge.params['cum_length'] = parent_frac + edge.params['frac_pos'] @property def depth(self):
SpreadsheetUI : Format array values separated by "," Based on feedback from Murray.
@@ -1046,12 +1046,12 @@ class _PlugTableModel( QtCore.QAbstractTableModel ) : elif value is None : return "" - if not forToolTip : - return str( value ) - else : try : - return "\n".join( str( x ) for x in value ) + # Unknown type. If iteration is supported then use that. + separator = "\n" if forToolTip else ", " + return separator.join( str( x ) for x in value ) except : + # Otherwise just cast to string return str( value ) class _PlugTableDelegate( QtWidgets.QStyledItemDelegate ) :
tests/DDM: Drop extra 'monitor' mechanism from the threshold modulation test Pass the modulator value directly from input.
@@ -665,30 +665,21 @@ def test_DDM_in_composition(benchmark, comp_mode): @pytest.mark.composition @pytest.mark.ddm_mechanism def test_DDM_threshold_modulation(comp_mode): - M = pnl.DDM( - name='DDM', + M = pnl.DDM(name='DDM', function=pnl.DriftDiffusionAnalytical( threshold=20.0, ), ) - monitor = pnl.TransferMechanism(default_variable=[[0.0]], - size=1, - function=pnl.Linear(slope=1, intercept=0), - output_ports=[pnl.RESULT], - name='monitor') - control = pnl.ControlMechanism( - monitor_for_control=monitor, - control_signals=[(pnl.THRESHOLD, M)]) + control = pnl.ControlMechanism(control_signals=[(pnl.THRESHOLD, M)]) C = pnl.Composition() C.add_node(M, required_roles=[pnl.NodeRole.ORIGIN, pnl.NodeRole.TERMINAL]) - C.add_node(monitor) C.add_node(control) - inputs = {M:[1], monitor:[3]} + inputs = {M:[1], control:[3]} val = C.run(inputs, num_trials=1, execution_mode=comp_mode) - # FIXME: Python version returns dtype=object - val = np.asfarray(val) + + # Default modulation is 'multiplicative so the threshold is 20 * 3 assert np.allclose(val[0], [60.0]) assert np.allclose(val[1], [60.2])
[ENH] Proximity forest, removal of legacy conversion This PR replaces legacy conversion in proximity forest with `convert` - should be 1:1 refactor change.
@@ -15,7 +15,7 @@ from sklearn.preprocessing import normalize from sklearn.utils import check_random_state from sktime.classification.base import BaseClassifier -from sktime.datatypes._panel._convert import from_nested_to_2d_array +from sktime.datatypes import convert from sktime.distances import ( dtw_distance, erp_distance, @@ -152,8 +152,8 @@ def numba_wrapper(distance_measure): """ def distance(instance_a, instance_b, **params): - instance_a = from_nested_to_2d_array(instance_a, return_numpy=True) - instance_b = from_nested_to_2d_array(instance_b, return_numpy=True) + instance_a = convert(instance_a, "nested_univ", "numpyflat") + instance_b = convert(instance_b, "nested_univ", "numpyflat") return distance_measure(instance_a, instance_b, **params) return distance
Fix warning check for Python 2.7 Python 2.7 suppresses repeated warnings in a way that causes the unit test to fail => make sure that warnings are always generated
@@ -139,6 +139,7 @@ class TestFreqresp(unittest.TestCase): # Check for warning if frequency is out of range import warnings + warnings.simplefilter('always', UserWarning) # don't supress with warnings.catch_warnings(record=True) as w: omega_bad = np.linspace(10e-4,1.1,10) * np.pi/sys.dt ret = sys.freqresp(omega_bad)
Fix for ValueError: zero-size array This PR is part of * fix for 'ValueError: zero-size array to reduction operation maximum which has no identity' when .loc[[key]] isn't found. This didn't raise in pandas 1.0, does in 1.2 * fix cached_property to work with 3.7 or 3.9.
@@ -54,6 +54,10 @@ from libs.datasets.taglib import UrlStr from libs.datasets.demographics import DistributionBucket from libs.pipeline import Region import pandas.core.groupby.generic + +try: # To work with python 3.7 and 3.9 without changes. + from functools import cached_property +except ImportError: from backports.cached_property import cached_property from libs.pipeline import RegionMaskOrRegion @@ -203,6 +207,7 @@ class OneRegionTimeseriesDataset: if CommonFields.DATE not in self.data.columns: raise ValueError("A timeseries must have a date column") + assert isinstance(self.tag, pd.Series) assert self.tag.index.names == [ TagField.VARIABLE, TagField.DEMOGRAPHIC_BUCKET, @@ -1097,7 +1102,10 @@ class MultiRegionDataset: if ts_df.empty and not latest_dict: raise RegionLatestNotFound(region) - tag = self.tag.loc[[region.location_id]].reset_index(TagField.LOCATION_ID, drop=True) + try: + tag = self.tag.xs(region.location_id, level=CommonFields.LOCATION_ID, drop_level=True) + except KeyError: + tag = _EMPTY_ONE_REGION_TAG_SERIES return OneRegionTimeseriesDataset( region=region, data=ts_df, latest=latest_dict, tag=tag, bucketed_latest=bucketed_latest @@ -1417,7 +1425,10 @@ class MultiRegionDataset: ): latest_dict = self._location_id_latest_dict(location_id) region = Region.from_location_id(location_id) - tag = self.tag.loc[[region.location_id]].reset_index(TagField.LOCATION_ID, drop=True) + try: + tag = self.tag.xs(region.location_id, level=TagField.LOCATION_ID, drop_level=True) + except KeyError: + tag = _EMPTY_ONE_REGION_TAG_SERIES bucketed_latest = self._bucketed_latest_for_location_id(location_id) yield region, OneRegionTimeseriesDataset(
Filter custom events from stats_for_run query Same as
@@ -289,7 +289,12 @@ def get_stats_for_run(self, run_id): db.func.max(SqlEventLogStorageTable.c.timestamp).label("last_event_timestamp"), ] ) - .where(SqlEventLogStorageTable.c.run_id == run_id) + .where( + db.and_( + SqlEventLogStorageTable.c.run_id == run_id, + SqlEventLogStorageTable.c.dagster_event_type != None, + ) + ) .group_by("dagster_event_type") ) @@ -301,7 +306,7 @@ def get_stats_for_run(self, run_id): times = {} for result in results: (dagster_event_type, n_events_of_type, last_event_timestamp) = result - if dagster_event_type: + check.invariant(dagster_event_type is not None) counts[dagster_event_type] = n_events_of_type times[dagster_event_type] = last_event_timestamp
BUG: stats: Random parameters in `pytest.mark.parametrize()` break pytest-xdist. Don't use random parameters in `pytest.mark.parametrize()`, as it can confuse the pytest-xdist extension; see Closes
@@ -1385,7 +1385,8 @@ class TestLogistic: # with 64 bit floating point. assert_equal(logp, [-800, -800]) - @pytest.mark.parametrize("loc_rvs,scale_rvs", [np.random.rand(2)]) + @pytest.mark.parametrize("loc_rvs,scale_rvs", [(0.4484955, 0.10216821), + (0.62918191, 0.74367064)]) def test_fit(self, loc_rvs, scale_rvs): data = stats.logistic.rvs(size=100, loc=loc_rvs, scale=scale_rvs) @@ -2183,7 +2184,7 @@ class TestInvgauss: np.random.seed(1234) @pytest.mark.parametrize("rvs_mu,rvs_loc,rvs_scale", - [(2, 0, 1), (np.random.rand(3)*10)]) + [(2, 0, 1), (4.635, 4.362, 6.303)]) def test_fit(self, rvs_mu, rvs_loc, rvs_scale): data = stats.invgauss.rvs(size=100, mu=rvs_mu, loc=rvs_loc, scale=rvs_scale) @@ -2214,7 +2215,7 @@ class TestInvgauss: assert shape_mle1 == shape_mle2 == shape_mle3 == 1.04 @pytest.mark.parametrize("rvs_mu,rvs_loc,rvs_scale", - [(2, 0, 1), (np.random.rand(3)*10)]) + [(2, 0, 1), (6.311, 3.225, 4.520)]) def test_fit_MLE_comp_optimizer(self, rvs_mu, rvs_loc, rvs_scale): data = stats.invgauss.rvs(size=100, mu=rvs_mu, loc=rvs_loc, scale=rvs_scale) @@ -5120,7 +5121,8 @@ class TestRayleigh: y = stats.rayleigh.logsf(50) assert_allclose(y, -1250) - @pytest.mark.parametrize("rvs_loc,rvs_scale", [np.random.rand(2)]) + @pytest.mark.parametrize("rvs_loc,rvs_scale", [(0.85373171, 0.86932204), + (0.20558821, 0.61621008)]) def test_fit(self, rvs_loc, rvs_scale): data = stats.rayleigh.rvs(size=250, loc=rvs_loc, scale=rvs_scale) @@ -5145,7 +5147,7 @@ class TestRayleigh: assert_equal(scale, scale_mle(data, loc)) @pytest.mark.parametrize("rvs_loc,rvs_scale", [[0.74, 0.01], - np.random.rand(2)]) + [0.08464463, 0.12069025]]) def test_fit_comparison_super_method(self, rvs_loc, rvs_scale): # test that the objective function result of the analytical MLEs is # less than or equal to that of the numerically optimized estimate
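A minimal sketch of the failure mode and the fix (hypothetical test file; the literal values are arbitrary):

```python
import numpy as np
import pytest

# Problematic: np.random.rand runs at collection time, so each pytest-xdist
# worker collects different parameter values/IDs and collection no longer matches.
@pytest.mark.parametrize("loc, scale", [tuple(np.random.rand(2))])
def test_fit_random(loc, scale):
    assert 0 <= loc <= 1

# Fix: deterministic literals keep collection identical across workers.
@pytest.mark.parametrize("loc, scale", [(0.4484955, 0.10216821)])
def test_fit_fixed(loc, scale):
    assert 0 <= loc <= 1
```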
Added missing dot Added a missing dot in the `Launch a Pipeline` section of Tools
@@ -216,7 +216,7 @@ Archived pipelines are not returned by default. To include them, use the `--show ## Launch a pipeline Some nextflow pipelines have a considerable number of command line flags that can be used. -To help with this, you can use the `nf-core launch` command +To help with this, you can use the `nf-core launch` command. You can choose between a web-based graphical interface or an interactive command-line wizard tool to enter the pipeline parameters for your run. Both interfaces show documentation alongside each parameter and validate your inputs.
Correct typo "baised" ==> "based"
@@ -19,7 +19,7 @@ MAP_acq_func = { class AMBS(Search): - """Asynchronous Model-Based Search baised on the `Scikit-Optimized Optimizer <https://scikit-optimize.github.io/stable/modules/generated/skopt.Optimizer.html#skopt.Optimizer>`_. + """Asynchronous Model-Based Search based on the `Scikit-Optimized Optimizer <https://scikit-optimize.github.io/stable/modules/generated/skopt.Optimizer.html#skopt.Optimizer>`_. Args: problem (HpProblem): Hyperparameter problem describing the search space to explore.
[IMPR] Add -confirm option to harvest_template.py * add -confirm option which sets 'always' option to False * remove -always option because it is the default behavior * update argument parsing
@@ -22,8 +22,7 @@ These command line parameters can be used to specify which pages to work on: You can also use additional parameters: --always If used, the bot won't ask if it should add the specified - text +-confirm If used, the bot will ask if it should make changes -create Create missing items before importing. @@ -375,26 +374,26 @@ def main(*args: str) -> None: fields = {} options = {} for arg in local_args: - if arg.startswith('-template'): - if len(arg) == 9: - template_title = pywikibot.input( + opt, _, value = arg.partition(':') + if opt == '-template': + template_title = value or pywikibot.input( 'Please enter the template to work on:') - else: - template_title = arg[10:] + elif opt == '-confirm': + options['always'] = False elif arg.startswith('-create'): options['create'] = True elif gen.handle_arg(arg): if arg.startswith('-transcludes:'): - template_title = arg[13:] + template_title = value else: - optional = arg.startswith('-') + optional = opt.startswith('-') complete = len(current_args) == 3 if optional: needs_second = len(current_args) == 1 if needs_second: break # will stop below - arg, _, value = arg[1:].partition(':') + arg = opt[1:] if len(current_args) == 0: assert not fields options[arg] = value or True
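A quick illustration of the str.partition parsing the rewritten argument loop relies on (plain Python):

```python
# partition(':') splits on the first colon only and never raises;
# when the separator is absent, the last two elements are empty strings.
print('-template:Infobox person'.partition(':'))  # ('-template', ':', 'Infobox person')
print('-confirm'.partition(':'))                  # ('-confirm', '', '')
```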
Update New York.md Adding June 5th incident involving a reporter being shot at/arrested.
@@ -27,6 +27,13 @@ id: ny-buffalo-2 * https://www.wivb.com/news/five-people-arrested-one-person-injured-during-protest-activity-in-niagara-square/ * https://www.reddit.com/r/PublicFreakout/comments/gwv7k4/just_about_an_hour_ago_police_officers_shove_man/fsxfeb3/ +### Reporter shot at after identifying himself | June 5th + +Reporter shot at over a dozen times at close range and arrested, despite identifying himself. Was told his 1st Amendment rights did not matter. + +**Links** +* https://twitter.com/PhotoJazzy/status/1269056626843099137 + ## Rochester ### Police shoot at people filming | May 31st
Fix crawler for comrademao When getting the chapter body of a Korean novel on comrademao, the old crawler returned []; this update fixes it
@@ -80,7 +80,11 @@ class ComrademaoCrawler(Crawler): soup = BeautifulSoup(response.content, 'lxml') logger.debug(soup.title.string) + if soup.select('div.entry-content div.container div.container a p'): contents = soup.select('div.entry-content div.container div.container a p') + else: + contents = soup.select('div.entry-content div.container a p') + body_parts = [] for x in contents: body_parts.append(x.text)
Change all comparisons to lowercase Removed some superfluous parentheses.
@@ -66,9 +66,10 @@ def error_response(message, error_code, http_status_code, headers=None): def _matches_content_type(content_type, valid_content_types): # If '*/*' is in the Accept header or the valid types, # then all content_types match. Otherwise see of there are any common types - return ('*/*' in content_type or '*/*' in valid_content_types) or\ - set(valid_content_types).intersection(re.split('[,;]', - content_type.lower())) + content_type = content_type.lower() + valid_content_types = [x.lower() for x in valid_content_types] + return '*/*' in content_type or '*/*' in valid_content_types or\ + set(valid_content_types).intersection(re.split('[,;]', content_type)) class ChaliceError(Exception):
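A runnable sketch of the case-insensitive matching (a close copy of the logic above with an illustrative call, not the Chalice module itself):

```python
import re

def matches_content_type(content_type, valid_content_types):
    # Lowercase both sides once, then look for '*/*' or any common media type.
    content_type = content_type.lower()
    valid_content_types = [x.lower() for x in valid_content_types]
    return '*/*' in content_type or '*/*' in valid_content_types or \
        bool(set(valid_content_types).intersection(re.split('[,;]', content_type)))

print(matches_content_type('Application/JSON; charset=utf-8', ['application/json']))  # True
print(matches_content_type('text/html', ['application/json']))                        # False
```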
Python3.8: Fix, positional-only arguments were not enforced to be so. * Only one of the checking loops was considering the proper start value.
@@ -1038,7 +1038,7 @@ static Py_ssize_t handleKeywordArgs(struct Nuitka_FunctionObject const *function if (found == false) { PyObject **varnames = function->m_varnames; - for (Py_ssize_t i = 0; i < keywords_count; i++) { + for (Py_ssize_t i = kw_arg_start; i < keywords_count; i++) { if (RICH_COMPARE_BOOL_EQ_OBJECT_OBJECT_NORECURSE(varnames[i], key)) { assert(python_pars[i] == NULL); python_pars[i] = value; @@ -1056,9 +1056,29 @@ static Py_ssize_t handleKeywordArgs(struct Nuitka_FunctionObject const *function } if (unlikely(found == false)) { + bool pos_only_error = false; + + for (Py_ssize_t i = 0; i < kw_arg_start; i++) { + PyObject **varnames = function->m_varnames; + + if (RICH_COMPARE_BOOL_EQ_OBJECT_OBJECT_NORECURSE(varnames[i], key)) { + pos_only_error = true; + break; + } + } + + if (pos_only_error == true) { + PyErr_Format(PyExc_TypeError, + "%s() got some positional-only arguments passed as keyword arguments: '%s'", + Nuitka_String_AsString(function->m_name), + Nuitka_String_Check(key) ? Nuitka_String_AsString(key) : "<non-string>"); + + } else { + PyErr_Format(PyExc_TypeError, "%s() got an unexpected keyword argument '%s'", Nuitka_String_AsString(function->m_name), Nuitka_String_Check(key) ? Nuitka_String_AsString(key) : "<non-string>"); + } Py_DECREF(key); Py_DECREF(value);
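For reference, the CPython 3.8+ behavior that the C changes above replicate (a plain-Python sketch, not Nuitka internals; the comment shows approximately what CPython prints):

```python
def f(pos_only, /, normal):
    return pos_only + normal

print(f(1, normal=2))  # 3 -- positional use is fine

try:
    f(pos_only=1, normal=2)
except TypeError as e:
    # f() got some positional-only arguments passed as keyword arguments: 'pos_only'
    print(e)
```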
decrease min-delta in early stopping to 0.002 The previous value triggered stopping too early, even with a decent amount of training data
@@ -82,7 +82,7 @@ def _expand_gt(ctx, param, value): help='Stop condition for training. Set to `early` for early stooping or `dumb` for fixed number of epochs') @click.option('-N', '--epochs', show_default=True, default=-1, help='Number of epochs to train for') @click.option('--lag', show_default=True, default=5, help='Number of epochs to wait before stopping training without improvement') [email protected]('--min-delta', show_default=True, default=0.005, help='Minimum improvement between epochs to reset early stopping') [email protected]('--min-delta', show_default=True, default=0.002, help='Minimum improvement between epochs to reset early stopping') @click.option('-d', '--device', show_default=True, default='cpu', help='Select device to use (cpu, cuda:0, cuda:1, ...)') @click.option('--optimizer', show_default=True, default='Adam', type=click.Choice(['Adam', 'SGD', 'RMSprop']), help='Select optimizer') @click.option('-r', '--lrate', show_default=True, default=2e-3, help='Learning rate') @@ -344,6 +344,8 @@ def train(ctx, pad, output, spec, append, load, savefreq, report, quit, epochs, if not torch.isinf(loss): loss.backward() optim.step() + else: + logger.debug('infinite loss in trial {}'.format(trial)) bar.update(1) if not epoch % savefreq: logger.info('Saving to {}_{}'.format(output, epoch))
Fix error in plot_color2D method: handle lists of different-length lists in the y-axis
@@ -954,8 +954,8 @@ class BaseDataAnalysis(object): else: plot_xvals_step = (abs(np.max(plot_xvals) - np.min(plot_xvals)) / len(plot_xvals)) - plot_yvals_step = (abs(np.max(plot_yvals) - np.min(plot_yvals)) / - len(plot_yvals)) + plot_yvals_step = [(abs(np.max(x) - np.min(x)) / + len(x)) for x in plot_yvals] # plot_yvals_step = plot_yvals[1]-plot_yvals[0] if plot_zrange is not None:
initialize meshrenderer before pybullet in 18.04 Initializing the pybullet GUI first causes a segfault; the reason is unknown
@@ -45,6 +45,12 @@ class Simulator: self.load() def load(self): + self.renderer = MeshRenderer(width=self.resolution, + height=self.resolution, + device_idx=self.device_idx, + use_fisheye=self.use_fisheye) + self.renderer.set_fov(90) + if self.mode == 'gui': self.cid = p.connect(p.GUI) else: @@ -52,12 +58,6 @@ class Simulator: p.setTimeStep(self.timestep) p.setGravity(0, 0, -self.gravity) - self.renderer = MeshRenderer(width=self.resolution, - height=self.resolution, - device_idx=self.device_idx, - use_fisheye=self.use_fisheye) - self.renderer.set_fov(90) - if self.mode == 'gui': self.viewer.renderer = self.renderer
bip39_recovery: also scan change addresses fixes
# file LICENCE or http://www.opensource.org/licenses/mit-license.php from typing import TYPE_CHECKING +import itertools from . import bitcoin from .constants import BIP39_WALLET_FORMATS @@ -44,11 +45,15 @@ async def scan_for_active_accounts(network: 'Network', get_account_xpub, wallet_ async def account_has_history(network: 'Network', account_node: BIP32Node, script_type: str) -> bool: - gap_limit = 20 + # note: scan both receiving and change addresses. some wallets send change across accounts. + path_suffixes = itertools.chain( + itertools.product((0,), range(20)), # ad-hoc gap limits + itertools.product((1,), range(10)), + ) async with OldTaskGroup() as group: get_history_tasks = [] - for address_index in range(gap_limit): - address_node = account_node.subkey_at_public_derivation("0/" + str(address_index)) + for path_suffix in path_suffixes: + address_node = account_node.subkey_at_public_derivation(path_suffix) pubkey = address_node.eckey.get_public_key_hex() address = bitcoin.pubkey_to_address(script_type, pubkey) script = bitcoin.address_to_script(address)
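A standalone sketch of the derivation-path suffixes the new loop walks (plain tuples; no Electrum types involved):

```python
import itertools

# Receiving chain (0) with an ad-hoc gap limit of 20, then change chain (1) with a limit of 10.
path_suffixes = list(itertools.chain(
    itertools.product((0,), range(20)),
    itertools.product((1,), range(10)),
))
print(path_suffixes[:3], path_suffixes[-1], len(path_suffixes))  # [(0, 0), (0, 1), (0, 2)] (1, 9) 30
```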
[tasks] Create different Loop objects for different instances Fixes
@@ -405,6 +405,21 @@ class Loop: self.hours = hours self.minutes = minutes +class _LoopFactory: + def __init__(self, func, **kwargs): + self.func = func + self.name = func.__name__ + self.kwargs = kwargs + + def __get__(self, obj, objtype): + if obj is None: + return self + + loop = Loop(self.func, **self.kwargs) + loop._injected = obj + setattr(obj, self.name, loop) + return loop + def loop(*, seconds=0, minutes=0, hours=0, count=None, reconnect=True, loop=None): """A decorator that schedules a task in the background for you with optional reconnect logic. The decorator returns a :class:`Loop`. @@ -436,6 +451,33 @@ def loop(*, seconds=0, minutes=0, hours=0, count=None, reconnect=True, loop=None The function was not a coroutine. """ def decorator(func): - return Loop(func, seconds=seconds, minutes=minutes, hours=hours, - count=count, reconnect=reconnect, loop=loop) + defined_within_class = False + frames = inspect.stack() + # Essentially, to detect whether we're using this decorator a class + # context we're walking the stack to see whether it's top level or + # within a class level. This code is pretty finicky and hacky but + # it's better than the alternative that requires maintaining a list + # of IDs. This code does however break if someone assigns a loop + # decorator using different ways, such as a dynamically created + # class or calling the decorator directly. However such uses should + # be niche and thus don't really impede functionality for 99.99% of users + for frame in frames[1:]: + if frame[3] == '<module>': + break + if '__module__' in frame[0].f_code.co_names: + defined_within_class = True + break + + kwargs = { + 'seconds': seconds, + 'minutes': minutes, + 'hours': hours, + 'count': count, + 'reconnect': reconnect, + 'loop': loop + } + + if defined_within_class: + return _LoopFactory(func, **kwargs) + return Loop(func, **kwargs) return decorator
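A toy sketch of the per-instance descriptor idea behind _LoopFactory (stand-in classes, not discord.py types; the real patch builds a Loop and uses stack inspection to decide when the factory is needed):

```python
class PerInstance:
    """Non-data descriptor that lazily builds and caches one object per owning instance."""
    def __init__(self, func):
        self.func = func
        self.name = func.__name__

    def __get__(self, obj, objtype=None):
        if obj is None:
            return self
        wrapper = {"func": self.func, "owner": obj}  # stand-in for Loop(func, ...)
        setattr(obj, self.name, wrapper)             # cache on the instance
        return wrapper

class Cog:
    @PerInstance
    def task(self):
        pass

a, b = Cog(), Cog()
assert a.task is not b.task   # each instance now gets its own loop-like object
assert a.task is a.task       # repeated access returns the cached one
```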
scim: Temporarily stop running SCIM change operations atomically. do_deactivate_user can't be run in an atomic block due to concerns around revoking session in a transaction. See for more details. Without the change in this commit, the process of deactivating a user via SCIM is broken.
@@ -4,7 +4,7 @@ import django_scim.constants as scim_constants import django_scim.exceptions as scim_exceptions from django.conf import settings from django.core.exceptions import ValidationError -from django.db import models, transaction +from django.db import models from django.http import HttpRequest from django_scim.adapters import SCIMUser from scim2_filter_parser.attr_paths import AttrPath @@ -281,7 +281,11 @@ class ZulipSCIMUser(SCIMUser): ) return - with transaction.atomic(): + # TODO: The below operations should ideally be executed in a single + # atomic block to avoid failing with partial changes getting saved. + # This can be fixed once we figure out how do_deactivate_user can be run + # inside an atomic block. + # We process full_name first here, since it's the only one that can fail. if full_name_new_value: check_change_full_name(self.obj, full_name_new_value, acting_user=None)
Update premailer function call. The signature changed a bit from the version we used to run.
@@ -110,7 +110,7 @@ def make_mail(subject, if html_template: html = transform(render_email(html_template, context_vars), - 'https://' + Site.objects.get_current().domain) + base_url='https://' + Site.objects.get_current().domain) mail.attach_alternative(html, 'text/html') return mail
Fix requirements remove duplicated requirements * gevent is required by locustio and nose by apiritif
@@ -7,7 +7,6 @@ fuzzyset==0.0.19 hdrpy>=0.3.3 ipaddress; python_version < '3.0' lxml>=3.8.0,!=4.2.0 -nose progressbar33 psutil>=5,!=5.3.0 pytest>=3 @@ -18,4 +17,3 @@ selenium Appium-Python-Client urwid==2.0.1 terminaltables>=3.1.0 -gevent!=1.3.5
fix `GUILD_HELP_JSON=1` partial reversion of
@@ -165,6 +165,15 @@ class JSONHelpFormatter: if self._help_text is not None: self._help_text.append("\n") + @contextlib.contextmanager + def indentation(self): + # Used by click when GUILD_HELP_JSON=1 + self.indent() + try: + yield + finally: + self.dedent() + def indent(self): if self._help_text is None: self._help_text = []
fix Raisecom.RCIOS get_arp HG-- branch : feature/dcs
@@ -27,6 +27,6 @@ class Script(BaseScript): r += [{ "ip": match.group("ip"), "mac": match.group("mac"), - "interface": "ip%s" % match.group("interface") + "interface": match.group("interface") }] return r
Readme.md: Fix image display The `center` tag would cause the image not to be rendered, but the raw text to be inserted. Simply removing the center tag fixes this.
@@ -37,9 +37,7 @@ Full text available at: http://arxiv.org/abs/1609.06647 The *Show and Tell* model is a deep neural network that learns how to describe the content of images. For example: -<center> ![Example captions](g3doc/example_captions.jpg) -</center> ### Architecture @@ -66,9 +64,7 @@ learned during training. The following diagram illustrates the model architecture. -<center> ![Show and Tell Architecture](g3doc/show_and_tell_architecture.png) -</center> In this diagram, \{*s*<sub>0</sub>, *s*<sub>1</sub>, ..., *s*<sub>*N*-1</sub>\} are the words of the caption and \{*w*<sub>*e*</sub>*s*<sub>0</sub>,
Fixed a typo in the return type of _array_ir_types There could be other typing issues in that module, but I will address them separately.
@@ -115,7 +115,7 @@ def dtype_to_ir_type(dtype: Union[np.dtype, np.generic]) -> ir.Type: f"No dtype_to_ir_type handler for dtype: {dtype}") from err return ir_type_factory() -def _array_ir_types(aval: core.ShapedArray) -> ir.Type: +def _array_ir_types(aval: core.ShapedArray) -> Sequence[ir.Type]: return (ir.RankedTensorType.get(aval.shape, dtype_to_ir_type(aval.dtype)),) ir_type_handlers: Dict[Type[core.AbstractValue],
Keymap: Add Goto Next/Prev Modification New builtin commands added with ST3189.
@@ -137,6 +137,9 @@ next_field: next_misspelling: command_type: text doc_string: Navigate to the next misspelling in the document. +next_modification: + command_type: text + doc_string: Navigate to the next modification in the document. next_result: command_type: window doc_string: Jump to the next build result. @@ -178,6 +181,9 @@ prev_field: prev_misspelling: command_type: text doc_string: Navigate to the previous misspelling in the document. +prev_modification: + command_type: text + doc_string: Navigate to the previous modification in the document. prev_result: command_type: window doc_string: Jump to the previous build result.
Update binify.py Clearly I didn't think through the previous change.
@@ -10,6 +10,7 @@ def handle_binify(command: Command): if not command.has_arg(): response = "Please include string to convert." elif set(command.arg).issubset(["0", "1", " "]) and len(command.arg) > 2: + command.arg = command.arg.replace(" ", "") if len(command.arg) % 8 != 0: response = "Binary string contains partial byte." else:
`streamListener.filter(follow=)` expects `[str]` Example shows the list of parameters as `int`s; they should be `str`s.
@@ -83,7 +83,7 @@ the word *python*. The **track** parameter is an array of search terms to stream This example shows how to use **filter** to stream tweets by a specific user. The **follow** parameter is an array of IDs. :: - myStream.filter(follow=[2211149702]) + myStream.filter(follow=["2211149702"]) An easy way to find a single ID is to use one of the many conversion websites: search for 'what is my twitter ID'.
fix cuda multiple algorithm test Summary: Fixing a bug in the multiple algorithm test where threads were spawned repeatedly, causing collisions during rendezvous.
@@ -156,8 +156,8 @@ TEST_F(CudaAllreduceTest, MultipleAlgorithms) { allreduceHalvingDoubling, allreduceHalvingDoublingPipelined}; - for (const auto& fn : fns) { spawn(size, [&](std::shared_ptr<Context> context) { + for (const auto& fn : fns) { // Run algorithm auto fixture = CudaFixture(context, 1, count); auto ptrs = fixture.getFloatPointers(); @@ -175,8 +175,8 @@ TEST_F(CudaAllreduceTest, MultipleAlgorithms) { // Verify result assertResult(fixture); - }); } + }); } std::vector<int> genMemorySizes() {
Update domain.txt typo
@@ -5281,7 +5281,7 @@ pktriot.net # Reference: https://twitter.com/kyleehmke/status/1383742871329591296 # Reference: https://github.com/stamparm/maltrail/pull/17070 -# Reference: # Reference: https://www.virustotal.com/gui/file/08261ed40e21140eb438f16af0233217c701d9b022dce0a45b6e3e1ee2467739/detection +# Reference: https://www.virustotal.com/gui/file/08261ed40e21140eb438f16af0233217c701d9b022dce0a45b6e3e1ee2467739/detection defenderlive.com dnsstatus.org
Add proper __repr__ to LogSoftMax Summary: Fixes Pull Request resolved:
@@ -1128,6 +1128,9 @@ class Softmax(Module): def forward(self, input): return F.softmax(input, self.dim, _stacklevel=5) + def extra_repr(self): + return 'dim={dim}'.format(dim=self.dim) + @weak_module class Softmax2d(Module):
[IMPR] Use a set as default container A set is the appropriate implementation of a container holding all seen items, instead of using a dict with its key and an unused value. Don't care about 2 nanoseconds, which is lower than the function overhead!
@@ -843,14 +843,14 @@ def filter_unique(iterable, container=None, key=None, add=None): """ Yield unique items from an iterable, omitting duplicates. - By default, to provide uniqueness, it puts the generated items into - the keys of a dict created as a local variable, each with a value of True. - It only yields items which are not already present in the local dict. + By default, to provide uniqueness, it puts the generated items into a + set created as a local variable. It only yields items which are not + already present in the local set. For large collections, this is not memory efficient, as a strong reference - to every item is kept in a local dict which can not be cleared. + to every item is kept in a local set which can not be cleared. - Also, the local dict cant be re-used when chaining unique operations on + Also, the local set can't be re-used when chaining unique operations on multiple generators. To avoid these issues, it is advisable for the caller to provide their own @@ -876,7 +876,7 @@ def filter_unique(iterable, container=None, key=None, add=None): @type add: callable """ if container is None: - container = {} + container = set() if not add: if hasattr(container, 'add'):
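A minimal sketch of the uniqueness pattern with a set container (not the pywikibot helper itself, which also supports custom key/add callables):

```python
def filter_unique(iterable, container=None):
    """Yield items not seen before, remembering seen items in a set."""
    if container is None:
        container = set()
    for item in iterable:
        if item not in container:
            container.add(item)
            yield item

print(list(filter_unique([1, 2, 1, 3, 2])))  # [1, 2, 3]
```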
puppeteer_tests: Change `test_get_api_key` order to fix the flake. I tried different methods/strategies to find the reason for this flake. But sadly I couldn't find the actual reason for its failure. (Read https://chat.zulip.org/#narrow/stream/43-automated-testing/topic/upgrading.20puppeteer.20to.209.2E1.2E1) Somehow changing the test order, i.e. moving `test_get_api_key` after `test_change_password`, seems to fix the flake.
@@ -62,7 +62,8 @@ async function test_change_password(page: Page): Promise<void> { // when the above issue is resolved. await page.waitForFunction(() => document.activeElement!.id === "change_password_modal"); await page.type("#old_password", test_credentials.default_user.password); - await page.type("#new_password", "new_password"); + test_credentials.default_user.password = "new_password"; + await page.type("#new_password", test_credentials.default_user.password); await page.click(change_password_button_selector); // On success the change password modal gets closed. @@ -384,8 +385,8 @@ async function settings_tests(page: Page): Promise<void> { await common.log_in(page); await open_settings(page); await test_change_full_name(page); - await test_get_api_key(page); await test_change_password(page); + await test_get_api_key(page); await test_alert_words_section(page); await test_your_bots_section(page); await test_default_language_setting(page);
fix(device): fix z2m mapping of W2049LightController related to
@@ -505,14 +505,13 @@ class E1812SwitchController(SwitchController): class W2049LightController(LightController): def get_z2m_actions_mapping(self) -> DefaultActionsMapping: return { - "brightness_up_click": Light.TOGGLE, - "brightness_down_click": Light.CLICK_BRIGHTNESS_DOWN, + "on": Light.ON, + "off": Light.OFF, "arrow_left_click": Light.CLICK_COLOR_DOWN, "arrow_right_click": Light.CLICK_COLOR_UP, - "brightness_up_hold": Light.HOLD_BRIGHTNESS_UP, - "brightness_up_release": Light.RELEASE, - "brightness_down_hold": Light.HOLD_BRIGHTNESS_DOWN, - "brightness_down_release": Light.RELEASE, + "brightness_move_up": Light.HOLD_BRIGHTNESS_UP, + "brightness_stop": Light.RELEASE, + "brightness_move_down": Light.HOLD_BRIGHTNESS_DOWN, "arrow_left_hold": Light.HOLD_COLOR_DOWN, "arrow_left_release": Light.RELEASE, "arrow_right_hold": Light.HOLD_COLOR_UP,
speed up determining row height in Tree avoid calling outlineView_viewForTableColumn_item_ from outlineView_heightOfRowByItem_
@@ -65,7 +65,8 @@ class TogaTree(NSOutlineView): @objc_method def outlineView_viewForTableColumn_item_(self, tree, column, item): - col_identifier = self._impl.column_identifiers[id(column.identifier)] + + col_identifier = str(column.identifier) try: value = getattr(item.attrs['node'], col_identifier) @@ -118,16 +119,24 @@ class TogaTree(NSOutlineView): @objc_method def outlineView_heightOfRowByItem_(self, tree, item) -> float: - min_row_height = self.rowHeight + default_row_height = self.rowHeight if item is self: - return min_row_height + return default_row_height + + heights = [default_row_height] + + for column in self.tableColumns: + value = getattr(item.attrs['node'], str(column.identifier)) - # get all views in column - views = [self.outlineView_viewForTableColumn_item_(tree, col, item) for col in self.tableColumns] + if isinstance(value, toga.Widget): + # if the cell value is a widget, use its height + heights.append(value._impl.native.intrinsicContentSize().height) + else: + # otherwise use default row height + heights.append(default_row_height) - max_widget_height = max(view.intrinsicContentSize().height for view in views) - return max(min_row_height, max_widget_height) + return max(heights) # @objc_method # def outlineView_sortDescriptorsDidChange_(self, tableView, oldDescriptors) -> None:
Documentation: added conveyor-stager description Removed one duplicated line
@@ -34,8 +34,7 @@ def get_parser(): """ Returns the argparse parser. """ - parser = argparse.ArgumentParser() - parser = argparse.ArgumentParser() + parser = argparse.ArgumentParser(description="Conveyor is a daemon to manage file transfers. The conveyor-stager is responsible for make transfers from Tape endopoints to stage endpoints. This stage area is used as a buffer controlled by Rucio, in the sense that Rucio have control over the retention policy of the files in the staging area.") parser.add_argument("--run-once", action="store_true", default=False, help='One iteration only') parser.add_argument("--total-threads", action="store", default=1, type=int,
exception-handling-error-in-network-attach When attaching the same network to the container again, the error log is: 2018-02-09 17:31:06.811 43376 ERROR zun.compute.manager TypeError: 'Container' object has no attribute '__getitem__' This patch fixes it. Closes-Bug:
@@ -938,7 +938,7 @@ class DockerDriver(driver.ContainerDriver): raise exception.ZunException('Container %(container)s has' ' alreay connected to the network' '%(network)s.' - % {'container': container['uuid'], + % {'container': container.uuid, 'network': network}) self._get_or_create_docker_network(context, network_api, network) requested_network = {'network': network,
Fix failing doctests on auto ticks But I am not convinced the code is working properly yet
@@ -259,8 +259,11 @@ class _HeatMapper(object): bbox = ax.get_window_extent().transformed(transform) size = [bbox.width, bbox.height][axis] axis = [ax.xaxis, ax.yaxis][axis] - fontsize = axis.get_majorticklabels()[0].get_fontsize() - max_ticks = int((size * .85) // (fontsize / 72)) + tick, = axis.set_ticks([0]) + fontsize = tick.label.get_size() + max_ticks = int(size // (fontsize / 72)) + if max_ticks == 0: + return [], [] tick_every = len(labels) // max_ticks tick_every = 1 if tick_every == 0 else tick_every ticks, labels = self._skip_ticks(labels, tick_every)
description Added the Alpine1 and Alpine2 benchmark functions to the list
@@ -35,6 +35,8 @@ The micro framework features following algorithms: The following benchmark functions are included in NiaPy: - Ackley +- Alpine1 +- Alpine2 - Griewank - Rastrigin - Rosenbrock @@ -44,6 +46,7 @@ The following benchmark functions are included in NiaPy: - Sphere - Whitley + # Setup ## Requirements
Add show all host_status policy to scn001 Depends-on:
@@ -200,3 +200,6 @@ parameter_defaults: MultipathdEnable: true NovaLibvirtVolumeUseMultipath: true BackupAndRestoreSkipNFSTest: true + NovaShowHostStatus: all + NovaApiHostStatusPolicy: > + (role:reader and system_scope:all) or (role:reader and project_id:%(project_id)s)
Update hiddentear.txt Moving to ```supremebot```:
@@ -78,21 +78,8 @@ http://84.252.95.236 http://51.15.91.55 -# Reference: https://www.virustotal.com/gui/file/22978740db1e322f671369d67d5272028280ee6dfcf6e3018743fe6fd0fc315f/detection - -f0482784.xsph.ru - -# Reference: https://twitter.com/ViriBack/status/1387115824352202765 -# Reference: https://app.any.run/tasks/4a4e5463-ae50-4bc6-89d0-3cf4db6283b1/ - -imen1.webd.pro - # Generic -/gate/connection.php -/gate/create.php -/gate/config.php -/gate/update.php /verma/login/ /verma/plugins/ /verma/connection.php
test_path: revive match tests These tests had not been running on Nose and I did not spot that previously.
@@ -390,7 +390,7 @@ def test_match_posix(self): ([r'a\*', 'a*'], ['a*'] + self.filenames_start_with_a), ([r'a\[012]'], ['a[012]']), ]: - yield (self.check_match, patterns, matches) + self.check_match(patterns, matches) @skip_if_not_win32 def test_match_windows(self): @@ -401,7 +401,7 @@ def test_match_windows(self): ([r'a\*', 'a*'], [r'a\*'] + self.filenames_start_with_a), ([r'a\[012]'], [r'a\[012]']), ]: - yield (self.check_match, patterns, matches) + self.check_match(patterns, matches) # TODO : pytest.mark.parametrise once nose is gone.
no EIGEN engine for DeformConv Summary: Pull Request resolved: There's no EIGEN engine implemented for DeformConv, but the unit test was checking it.
@@ -430,7 +430,6 @@ class TestConvolution(hu.HypothesisTestCase): output_channels=st.integers(1, 3), batch_size=st.integers(1, 3), order=st.sampled_from(["NCHW"]), - engine=st.sampled_from(["", "EIGEN"]), shared_buffer=st.booleans(), use_bias=st.booleans(), deformable_group=st.integers(1, 3), @@ -448,7 +447,6 @@ class TestConvolution(hu.HypothesisTestCase): output_channels, batch_size, order, - engine, shared_buffer, use_bias, deformable_group, @@ -467,7 +465,6 @@ class TestConvolution(hu.HypothesisTestCase): pad_r=pad_w, kernel=kernel, order=order, - engine=engine, shared_buffer=int(shared_buffer), deformable_group=deformable_group, )
[commands] Explicitly set traceback for hybrid invoke Fix
@@ -445,11 +445,11 @@ class HybridAppCommand(discord.app_commands.Command[CogT, P, T]): else: exc = HybridCommandError(e) exc.__cause__ = e - await command.dispatch_error(ctx, exc) + await command.dispatch_error(ctx, exc.with_traceback(e.__traceback__)) except app_commands.AppCommandError as e: exc = HybridCommandError(e) exc.__cause__ = e - await command.dispatch_error(ctx, exc) + await command.dispatch_error(ctx, exc.with_traceback(e.__traceback__)) except CommandError as e: await command.dispatch_error(ctx, e) finally:
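For context, a minimal plain-Python sketch of what with_traceback changes here (no discord.py involved; WrappedError is a hypothetical stand-in for HybridCommandError): the wrapped exception carries the original error's traceback, so handlers can see where the failure actually happened instead of only the wrapping site.

import traceback

class WrappedError(Exception):
    pass  # hypothetical stand-in for HybridCommandError

def failing_callback():
    raise ValueError('boom')

try:
    failing_callback()
except ValueError as e:
    exc = WrappedError(e)
    exc.__cause__ = e
    wrapped = exc.with_traceback(e.__traceback__)
    # The formatted traceback now includes the failing_callback frame.
    print(''.join(traceback.format_exception(type(wrapped), wrapped, wrapped.__traceback__)))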
dynamically quantized linear benchmarking Summary: Pull Request resolved: Test Plan: Imported from OSS
@@ -5,8 +5,10 @@ from __future__ import unicode_literals import operator_benchmark as op_bench + import torch import torch.nn.quantized as nnq +import torch.nn.quantized.dynamic as nnqd """ @@ -35,27 +37,41 @@ qlinear_configs = op_bench.config_list( ) -class QLinearBenchmark(op_bench.TorchBenchmarkBase): - def init(self, N, IN, OUT): - scale = 1.0 / 255 - zero_point = 0 - X = torch.randn(N, IN, dtype=torch.float32) - qX = torch.quantize_per_tensor(X, scale=scale, zero_point=zero_point, dtype=torch.quint8) +class _QLinearBenchmarkBase(op_bench.TorchBenchmarkBase): + def init(self, N, IN, OUT, linear_under_test): + scale = torch.tensor(1.0 / 255) + zero_point = torch.tensor(0) + self.X = torch.randn(N, IN, dtype=torch.float32) + self.qX = torch.quantize_per_tensor(self.X, scale=scale, zero_point=zero_point, dtype=torch.quint8) W = torch.randn(OUT, IN, dtype=torch.float32) qW = torch.quantize_per_tensor(W, scale=scale, zero_point=0, dtype=torch.qint8) - self.input = qX - self.qlinear = nnq.Linear(IN, OUT) + # Assume that the `self.qlinear` is set in the child + self.qlinear = linear_under_test self.qlinear.weight = qW self.qlinear.scale = scale self.qlinear.zero_point = zero_point - self.set_module_name("QLinear") def forward(self): + # Assume that the `self.input` is set in the child return self.qlinear(self.input) +class QLinearBenchmark(_QLinearBenchmarkBase): + def init(self, N, IN, OUT): + super(QLinearBenchmark, self).init(N, IN, OUT, nnq.Linear(IN, OUT)) + self.input = self.qX + self.set_module_name("QLinear") + + +class QDynamicLinearBenchmark(_QLinearBenchmarkBase): + def init(self, N, IN, OUT): + super(QDynamicLinearBenchmark, self).init(N, IN, OUT, nnqd.Linear(IN, OUT)) + self.input = self.X + self.set_module_name("QDynamicLinear") + op_bench.generate_pt_test(qlinear_configs, QLinearBenchmark) +op_bench.generate_pt_test(qlinear_configs, QDynamicLinearBenchmark) if __name__ == "__main__":
Move most of the code in assistant to the plugins repo The assistant example will now simply be the "core" that initializes the rest of the plugins, allowing for more updates to the *bot* without cluttering Telethon's git history.
@@ -71,14 +71,20 @@ not really needed for the previous two trivial examples. * Usable as a: **bot**. * Difficulty: **medium**. -This example is the actual bot account [@TelethonianBot] running in the -[official Telethon's chat] to help people out. The file is a bit big and -assumes some [`asyncio`] knowledge, but otherwise is easy to follow. - -In addition, it has optional plugins, which may be useful for your own code. -The plugins can be found at https://github.com/Lonami/TelethonianBotExt and -should be cloned into a `plugins` folder next to `assistant.py` for them to -work. +This example is the core of the actual bot account [@TelethonianBot] running +in the [official Telethon's chat] to help people out. It showcases how to +create an extremely simple "plugins" system with Telethon, but you're free +to borrow ideas from it and make it as fancy as you like (perhaps you want +to add hot reloading?). + +The plugins are a separate Python file each which get loaded dynamically and +can be found at <https://github.com/Lonami/TelethonianBotExt>. To use them, +clone the repository into a `plugins` folder next to `assistant.py` and then +run `assistant.py`. + +The content of the plugins or how they work is not really relevant. You can +disable them by moving them elsewhere or deleting the file entirely. The point +is to learn how you can build fancy things with your own code and Telethon. ### [`interactive_telegram_client.py`]
Update elf_mirai.txt [0] [1] [2]
/mirai.x86 # Reference: https://www.virustotal.com/en/ip-address/209.141.55.13/information/ +# Reference: https://twitter.com/i/moments/1046121581393543168 /Extendo.mips /Josho.arm5 +/Josho.arm7 /8UsA.sh /zaky.sh @@ -179,3 +181,20 @@ ukrainianhorseriding.com /hoho.sh4 /hoho.spc /hoho.x86 + +# Reference: https://twitter.com/bad_packets/status/1032494091290832896 + +/d +/.shinka.mips +/.shinka.mpsl + +# Reference: https://twitter.com/bad_packets/status/1029965235212320768 + +/k +/x86_64 +/x86_32 + +# Reference: https://twitter.com/bad_packets/status/1029608234569453569 + +/init0 +/sals
Add support for promo trailer videos to custom Kodi skins Custom Kodi skins can now use the ListItem.Trailer property to implement, for example, playback of a promo video when a list item is selected.
@@ -12,6 +12,7 @@ from future.utils import iteritems import resources.lib.common as common +from resources.lib.globals import g from .exceptions import InvalidReferenceError MAX_PATH_REQUEST_SIZE = 47 # Stands for 48 results, is the default value defined by netflix for a single request @@ -45,7 +46,7 @@ VIDEO_LIST_PARTIAL_PATHS = [ [['requestId', 'summary', 'title', 'synopsis', 'regularSynopsis', 'evidence', 'queue', 'episodeCount', 'info', 'maturity', 'runtime', 'seasonCount', 'releaseYear', 'userRating', 'numSeasonsLabel', 'bookmarkPosition', 'creditsOffset', - 'dpSupplementalMessage', 'watched', 'delivery', 'sequiturEvidence']], + 'dpSupplementalMessage', 'watched', 'delivery', 'sequiturEvidence', 'promoVideo']], [['genres', 'tags', 'creators', 'directors', 'cast'], {'from': 0, 'to': 10}, ['id', 'name']] ] + ART_PARTIAL_PATHS @@ -100,14 +101,17 @@ INFO_MAPPINGS = { 'userrating': ['userRating', 'userRating'], 'mpaa': ['maturity', 'rating', 'value'], 'duration': 'runtime', - # 'bookmark': 'bookmarkPosition', - # 'playcount': 'watched' + # 'trailer' add the trailer button support to 'Information' window of ListItem, can be used from custom Kodi skins + # to reproduce a background promo video when a ListItem is selected + 'trailer': ['promoVideo', 'id'] } INFO_TRANSFORMATIONS = { 'season_shortname': lambda sn: ''.join([n for n in sn if n.isdigit()]), 'rating': lambda r: r / 10, - 'playcount': lambda w: int(w) # pylint: disable=unnecessary-lambda + 'playcount': lambda w: int(w), # pylint: disable=unnecessary-lambda + 'trailer': lambda video_id: common.build_url(pathitems=[common.VideoId.SUPPLEMENTAL, str(video_id)], + mode=g.MODE_PLAY) } REFERENCE_MAPPINGS = {
Update README.rst source:
@@ -22,7 +22,7 @@ HistomicsTK can be used in two ways: - **As a pure Python package**: enables application of image analysis algorithms to data independent of the `Digital Slide Archive`_ (DSA). HistomicsTK provides a collection of fundamental algorithms for tasks such as color normalization, color deconvolution, nuclei segmentation, and feature extraction. Read more about these capabilities here: `api-docs <https://digitalslidearchive.github.io/HistomicsTK/api-docs.html>`__ and `examples <https://digitalslidearchive.github.io/HistomicsTK/examples.html>`__ for more information. - Installation instructions on Linux: + **Installation instructions on Linux:** *To install HistomicsTK using PyPI*:: @@ -39,6 +39,28 @@ HistomicsTK can be used in two ways: The system version of various libraries are used if the ``--find-links`` option is not specified. You will need to use your package manager to install appropriate libraries (on Ubuntu, for instance, you'll need ``libopenslide-dev`` and ``libtiff-dev``). + **To install from source on Windows**: + + 1- Run the following:: + + $ pip install large-image + $ pip install cmake + $ git clone https://github.com/DigitalSlideArchive/HistomicsTK/ + $ cd HistomicsTK/ + $ python -m pip install setuptools-scm Cython>=0.25.2 scikit-build>=0.8.1 cmake>=0.6.0 numpy>=1.12.1 + + 2- Comment out ``large-image[sources]`` in ``setup.py``. + + 3- Install Visual Studio 15 2017 `Community Version <https://my.visualstudio.com/Downloads?q=visual%20studio%202017&wt.mc_id=o~msft~vscom~older-downloads>`_ + + 4- Install C++ build tools. Under Tools > Get Tools and Features ... > Desktop Development with C++, ensure that the first 8 boxes are checked. + + 5- Run this:: + + $ python -m pip install -e . + $ pip install girder-client + + - **As a image-processing task library for HistomicsUI and the Digital Slide Archive**: This allows end users to apply containerized analysis modules/pipelines over the web. See the `Digital Slide Archive`_ for installation instructions. Refer to `our website`_ for more information.
100% test coverage for densearith.py Similar assumption may be broken only in dmp_ff_div(), e.g. for EX or RR.
@@ -789,8 +789,7 @@ def dmp_pdiv(f, g, u, K): if dr < dg: break - elif not (dr < _dr): - raise PolynomialDivisionFailed(f, g, K) + assert dr < _dr if u: c = dmp_pow(lc_g, N, u - 1, K) @@ -841,8 +840,7 @@ def dmp_prem(f, g, u, K): if dr < dg: break - elif not (dr < _dr): - raise PolynomialDivisionFailed(f, g, K) + assert dr < _dr if u: c = dmp_pow(lc_g, N, u - 1, K)
Using secret placeholder instead of the secret itself Summary: T123438891 - as part of the effort, replacing the secret in the comment with a placeholder
@@ -411,7 +411,7 @@ service Command extends fb303.FacebookService { * fcr_client.run_session(session, 'show version') * // push a configlet * configlet = '''conf t -snmp-server community TEST RO +snmp-server community {SECRET} exit wr mem''' * fcr_client.run_session(session, configlet)
readme: remove the tidb-tools repo link Via:
- [TiDB Roadmap](ROADMAP.md) - [Connect with us](community.md) + More Resources - - [Frequently Used Tools](https://github.com/pingcap/tidb-tools) - [PingCAP Blog](https://pingcap.com/blog/) - [Weekly Update](https://pingcap.com/weekly/)
[core/input] Move event handling to core.event Until now, bumblebee-status did event handling in two places with almost identical code: in core.event (makes sense) and core.input (still makes sense, but a bit more dubious). Changed core.input to use core.event
@@ -19,35 +19,32 @@ def button_name(button): if button == WHEEL_DOWN: return 'wheel-down' return 'n/a' -callbacks = {} - class Object(object): def __init__(self): super(Object, self).__init__() self.id = str(uuid.uuid4()) -def register(obj, button=None, cmd=None): - logging.debug('registering callback {} {}'.format(obj.id, button)) - callbacks.setdefault(obj.id, {}).setdefault(button, []).append(cmd) - -def trigger(event): - for field in ['instance', 'name']: - if field in event: - cb = callbacks.get(event[field]) - __invoke(event, cb) - -def __invoke(event, callback): - if not callback: return - if not 'button' in event: return +def __event_id(obj_id, button): + return '{}::{}'.format(obj_id, button_name(button)) - for cb in callback.get(event['button'], []): - if callable(cb): - cb(event) - else: +def __execute(cmd): try: - util.cli.execute(cb, wait=False) + util.cli.execute(cmd, wait=False) except Exception as e: logging.error('failed to invoke callback: {}'.format(e)) - return + +def register(obj, button=None, cmd=None): + event_id = __event_id(obj.id, button) + logging.debug('registering callback {}'.format(event_id)) + if callable(cmd): + core.event.register(event_id, cmd) + else: + core.event.register(event_id, lambda _: __execute(cmd)) + +def trigger(event): + if not 'button' in event: return + for field in ['instance', 'name']: + if not field in event: continue + core.event.trigger(__event_id(event[field], event['button']), event) # vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
Support searching for titles which contain quotes [change] piratebay: replacing single quotes with spaces as their search engine doesn't like them
@@ -126,8 +126,10 @@ class UrlRewritePirateBay(object): entries = set() for search_string in entry.get('search_strings', [entry['title']]): query = normalize_unicode(search_string) - # TPB search doesn't like dashes - query = query.replace('-', ' ') + + # TPB search doesn't like dashes or quotes + query = query.replace('-', ' ').replace("'", " ") + # urllib.quote will crash if the unicode string has non ascii characters, so encode in utf-8 beforehand url = 'http://thepiratebay.%s/search/%s%s' % (CUR_TLD, quote(query.encode('utf-8')), filter_url) log.debug('Using %s as piratebay search url' % url) @@ -145,13 +147,13 @@ class UrlRewritePirateBay(object): entry['torrent_leeches'] = int(tds[-1].contents[0]) entry['search_sort'] = torrent_availability(entry['torrent_seeds'], entry['torrent_leeches']) # Parse content_size - sizeText = link.find_next(attrs={'class': 'detDesc'}).get_text() - if sizeText: - size = re.search('Size (\d+(\.\d+)?\xa0(?:[PTGMK])?i?B)', sizeText) + size_text = link.find_next(attrs={'class': 'detDesc'}).get_text() + if size_text: + size = re.search('Size (\d+(\.\d+)?\xa0(?:[PTGMK])?i?B)', size_text) if size: entry['content_size'] = parse_filesize(size.group(1)) else: - log.error('Malformed search result? Title: "%s", No size? %s', entry['title'], sizeText) + log.error('Malformed search result? Title: "%s", No size? %s', entry['title'], size_text) entries.add(entry)
Update csharp_snippets.py Initial snippet_watcher support for vscode on Mac. Disabled by default.
@@ -31,10 +31,13 @@ ctx.lists["user.snippets"] = { # # there's probably a way to do this without -# # if app.platform == "windows": -# watcher = snippet_watcher( -# {os.path.expandvars(r"%AppData%\Code\User\snippets"): ["csharp.json"],}, -# update_list, +# snippet_path = None +# if app.platform == "windows": +# snippet_path = os.path.expandvars(r"%AppData%\Code\User\snippets") +# elif app.platform == "mac": +# snippet_path = os.path.expanduser( +# "~/Library/Application Support/Code/User/snippets" # ) - +# if snippet_path: +# watcher = snippet_watcher({snippet_path: ["csharp.json",],}, update_list,) # print("reloaded!")
Add a few utility functions for old couch revs These may be useful in debugging future fires
@@ -262,15 +262,6 @@ def send_keys_to_couch(db, keys): return r.json()['rows'] -def bulk_get_revs(target_db, doc_ids): - """ - return (_id, _rev) for every existing doc in doc_ids - if a doc id is not found in target_db, it is excluded from the result - """ - result = target_db.all_docs(keys=list(doc_ids)).all() - return [(row['id'], row['value']['rev']) for row in result if not row.get('error')] - - def iter_update(db, fn, ids, max_retries=3, verbose=False, chunksize=100): """ Map `fn` over every doc in `db` matching `ids` @@ -365,3 +356,42 @@ def iter_update(db, fn, ids, max_retries=3, verbose=False, chunksize=100): def stale_ok(): return settings.COUCH_STALE_QUERY + + +def bulk_get_revs(target_db, doc_ids): + """ + return (_id, _rev) for every existing doc in doc_ids + if a doc id is not found in target_db, it is excluded from the result + """ + result = target_db.all_docs(keys=list(doc_ids)).all() + return [(row['id'], row['value']['rev']) for row in result if not row.get('error')] + + +def get_revisions_info(db, doc_id): + """ + :return: a list of revisions ordered newest to oldest. Eg: + [{'rev': '3-583f2b050fc2099775b5a6ee573c0822', 'status': 'available'}, + {'rev': '2-1584c6ba63613203ae5aa03bdf34fa9e', 'status': 'available'}, + {'rev': '1-73ce55ebe921edf14e37a144706b1070', 'status': 'missing'}] + """ + return db._request_session.get( + url=f'{db.uri}/{doc_id}', + params={'revs_info': 'true'} + ).json()['_revs_info'] + + +def get_old_rev(db, doc_id, rev): + return db._request_session.get( + url=f'{db.uri}/{doc_id}', + params={'rev': rev} + ).json() + + +def iter_old_doc_versions(db, doc_id): + """ + Returns an generator of old versions of the document + Note that there may be unavailable old revisions not included + """ + for rev in get_revisions_info(db, doc_id): + if rev['status'] == 'available': + yield get_old_rev(db, doc_id, rev['rev'])
Passed kwargs to db_remove The function db_remove fails because the call to the db_exists function needs kwargs to connect to the database if you're passing all connection parameters from the CLI.
@@ -175,7 +175,7 @@ def db_remove(database_name, **kwargs): salt minion mssql.db_remove database_name='DBNAME' ''' try: - if db_exists(database_name) and database_name not in ['master', 'model', 'msdb', 'tempdb']: + if db_exists(database_name, **kwargs) and database_name not in ['master', 'model', 'msdb', 'tempdb']: conn = _get_connection(**kwargs) conn.autocommit(True) cur = conn.cursor()
WHISPER_FALLOCATE_CREATE documentation improved Documenting silent conflict between WHISPER_SPARSE_CREATE and WHISPER_FALLOCATE_CREATE carbon-cache configuration variables: when both are set to True, WHISPER_SPARSE_CREATE is effectively disabled.
@@ -198,6 +198,7 @@ WHISPER_AUTOFLUSH = False # allocation and zero-ing. Enabling this option may allow a large increase of # MAX_CREATES_PER_MINUTE. If enabled on an OS or filesystem that is unsupported # this option will gracefully fallback to standard POSIX file access methods. +# If enabled, disables WHISPER_SPARSE_CREATE regardless of the value. WHISPER_FALLOCATE_CREATE = True # Enabling this option will cause Whisper to lock each Whisper file it writes
$.Lexer: special-case offset/length for the termination token TN:
## vim: filetype=makoada -<% lexer = ctx.lexer %> +<% + lexer = ctx.lexer + termination = lexer.ada_token_name('Termination') +%> with Ada.Strings.Unbounded; use Ada.Strings.Unbounded; with Ada.Unchecked_Conversion; @@ -214,13 +217,22 @@ package body ${_self.ada_api_settings.lib_name}.Lexer is end case; + -- Special case for the termination token: Quex yields inconsistent + -- offsets/sizes. Make sure we get the end of the buffer so that the + -- rest of our machinery (in particular source slices) works well + -- with it. + Append (TDH.Tokens, (Kind => Token_Id, Text => Text, Sloc_Range => Sloc_Range, - Offset => Token.Offset, - Length => Text_Length)); + Offset => (if Token_Id = ${termination} + then Unsigned_32 (TDH.Source_Buffer.all'Last + 1) + else Token.Offset), + Length => (if Token_Id = ${termination} + then 0 + else Text_Length))); Prepare_For_Trivia; % if lexer.token_actions['WithTrivia']:
Make BlackBoxPredictor handle networks throwing exceptions Summary: Pull Request resolved: OSS: add a tiny unit test utility function to create tensors given shape and data outside of any workspace. I use it in an internal test
@@ -98,6 +98,15 @@ caffe2::Tensor* createTensorAndFill( return tensor; } +template <typename T> +caffe2::Tensor createTensorAndFill( + const std::vector<int64_t>& shape, + const std::vector<T>& data) { + Tensor tensor(caffe2::CPU); + fillTensor<T>(shape, data, &tensor); + return tensor; +} + // Fill a constant to a tensor. template <typename T> void constantFillTensor(
Update README.md Add "follow on twitter" button
# InvenTree +<p><a href="https://twitter.com/intent/follow?screen_name=inventreedb"> + <img src="https://img.shields.io/twitter/follow/inventreedb?style=social&logo=twitter" + alt="follow on Twitter"></a></p> + [![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT) [![Coverage Status](https://coveralls.io/repos/github/inventree/InvenTree/badge.svg)](https://coveralls.io/github/inventree/InvenTree) [![Crowdin](https://badges.crowdin.net/inventree/localized.svg)](https://crowdin.com/project/inventree)
PresetsPlugValueWidget : Ensure current context is set when generating presets Fixes: Fixes - PresetsPlugValueWidget : Fixed bug that meant no context variables were set when generating dynamic plug presets.
@@ -92,6 +92,9 @@ class PresetsPlugValueWidget( GafferUI.PlugValueWidget ) : if self.getPlug() is None : return result + # Required for context-sensitive dynamic presets + with self.getContext(): + currentPreset = Gaffer.NodeAlgo.currentPreset( self.getPlug() ) allowCustom = Gaffer.Metadata.value( self.getPlug(), "presetsPlugValueWidget:allowCustom" ) isCustom = Gaffer.Metadata.value( self.getPlug(), "presetsPlugValueWidget:isCustom" )
jinja2: Add InternationalizationExtension elements. Fixes
@@ -71,6 +71,16 @@ class Environment: def from_string(self, source: Text, globals: Optional[Dict[str, Any]] = ..., template_class: Optional[Type[Template]] = ...) -> Template: ... def make_globals(self, d: Optional[Dict[str, Any]]) -> Dict[str, Any]: ... + # Frequently added extensions are included here: + # from InternationalizationExtension: + def install_gettext_translations(self, translations: Any, newstyle: Optional[bool]): ... + def install_null_translations(self, newstyle: Optional[bool]): ... + def install_gettext_callables(self, gettext: Callable, ngettext: Callable, + newstyle: Optional[bool]): ... + def uninstall_gettext_translations(self, translations: Any): ... + def extract_translations(self, source: Any, gettext_functions: Any): ... + newstyle_gettext = ... # type: bool + class Template: def __new__(cls, source, block_start_string: Any = ..., block_end_string: Any = ..., variable_start_string: Any = ..., variable_end_string: Any = ..., comment_start_string: Any = ..., comment_end_string: Any = ..., line_statement_prefix: Any = ..., line_comment_prefix: Any = ..., trim_blocks: Any = ..., lstrip_blocks: Any = ..., newline_sequence: Any = ..., keep_trailing_newline: Any = ..., extensions: Any = ..., optimized: bool = ..., undefined: Any = ..., finalize: Optional[Any] = ..., autoescape: bool = ...): ... @classmethod
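These are methods provided by jinja2's InternationalizationExtension; a small runnable example of how they are typically used (assuming jinja2 is installed, and using null translations so no message catalog is required):

from jinja2 import Environment

env = Environment(extensions=['jinja2.ext.i18n'])
# Install no-op gettext/ngettext so {% trans %} blocks render their original text.
env.install_null_translations(newstyle=True)

template = env.from_string('{% trans %}Hello, {{ name }}!{% endtrans %}')
print(template.render(name='world'))  # -> Hello, world!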
Lexical envs: fix a ref-counting leak in Get_Env TN:
@@ -189,11 +189,15 @@ package body Langkit_Support.Lexical_Env is ------------- function Get_Env (Self : in out Env_Getter; - Info : Entity_Info) return Lexical_Env is + Info : Entity_Info) return Lexical_Env + is + Cache_Enabled : constant Boolean := Info = No_Entity_Info; + -- The cache (Self.Env) can be used only if No_Entity_Info is passed begin if Self.Dynamic then - -- If simple case: No Entity_Info, and already cached env - if Self.Env /= Null_Lexical_Env and then Info = No_Entity_Info then + -- Resolve the dynamic lexical env getter. For this, use the cache if + -- possible. + if Cache_Enabled and then Self.Env /= Null_Lexical_Env then -- If it is not stale, return it if not Is_Stale (Self.Env) then @@ -201,29 +205,29 @@ package body Langkit_Support.Lexical_Env is return Self.Env; end if; - -- If it is stale, release it + -- If it is stale, release and clear it Dec_Ref (Self.Env); end if; + -- For some reason we could not use the cache: do the resolution and + -- cache its result if applicable. declare - R : constant Lexical_Env_Resolver := Self.Resolver; E : constant Entity := (Node => Self.Node, Info => Info); + Result : constant Lexical_Env := Self.Resolver.all (E); begin - - if Info = No_Entity_Info then - -- We use the share returned by the resolver, so no need for - -- inc ref here. - Self.Env := R.all (E); + if Cache_Enabled then + -- The ownership share returned by the resolver goes to the + -- cache: the call to Inc_Ref below will create a new one for + -- the returned value. + Self.Env := Result; else - -- Don't cache when entity info is not null - return Ret : constant Lexical_Env := R.all (E) do - Inc_Ref (Ret); - end return; + return Result; end if; end; end if; - -- Inc ref for the returned value + -- Return a copy of the cached resolved lexical env, so create a new + -- ownership share. Inc_Ref (Self.Env); return Self.Env; end Get_Env;
ci: Change production setup in CI for Bionic and Circle CI. Used postgres 10 in place of postgres 9.5 as it is used in Bionic. Upgraded the nginx version in success-http-headers, which matches the CircleCI Bionic environment.
@@ -41,7 +41,7 @@ sudo /etc/init.d/memcached restart sudo "$(dirname "$0")/../../scripts/setup/terminate-psql-sessions" postgres zulip zulip_base # Remove and recreate the postgres database -sudo pg_ctlcluster 9.5 main stop -sudo pg_dropcluster 9.5 main -sudo rm -rf /etc/postgresql/9.5/main /var/lib/postgresql/9.5/main -sudo pg_createcluster 9.5 main +sudo pg_ctlcluster 10 main stop +sudo pg_dropcluster 10 main +sudo rm -rf /etc/postgresql/10/main /var/lib/postgresql/10/main +sudo pg_createcluster 10 main
ch_tests: fix _get_hypervisor_param Return kvm with higher priority if /dev/kvm exists
@@ -158,10 +158,13 @@ class CloudHypervisorTestSuite(TestSuite): node.tools[Usermod].add_user_to_group("mshv", sudo=True) def _get_hypervisor_param(self, node: Node) -> str: + kvm_exists = node.tools[Ls].path_exists(path="/dev/kvm", sudo=True) + if kvm_exists: + return "kvm" mshv_exists = node.tools[Ls].path_exists(path="/dev/mshv", sudo=True) if mshv_exists: return "mshv" - return "kvm" + return "" def get_test_list(variables: Dict[str, Any], var1: str, var2: str) -> Any:
Correct extra_includes for dev.major The file names have changed around a bit, so be sure to include the new forms as well.
@@ -165,8 +165,9 @@ def _extension_extra_sources(): # For typing brevity we specify sources in Unix-style string form, then # normalise them into the OS-specific form later. extra_sources = { - 'qutip.cy.spmatfuncs': ['qutip/cy/src/zspmv.cpp'], - 'qutip.cy.openmp.parfuncs': ['qutip/cy/openmp/src/zspmv_openmp.cpp'], + 'qutip.core.data.matmul': [ + 'qutip/core/data/src/matmul_csr_vector.cpp', + ], } out = collections.defaultdict(list) for module, sources in extra_sources.items():
Cleanup, the "LIBDIR" trick is now obsolete. * We used to have runners patched at install time with the correct path to add, but that was moved into the "__main__" here for no good reason.
@@ -30,38 +30,6 @@ import os import sys import warnings -# LIBDIR trick start (marker for removal on platforms that don't need it) -libdir = "@LIBDIR@" - -# Two cases: -if libdir != '@' "LIBDIR" '@': - # Changed by our "distutils" hook, then use the given path. - - if not os.path.isabs(libdir): - libdir = os.path.join( - os.path.dirname(os.path.realpath(__file__)), - libdir - ) - libdir = os.path.abspath(libdir) - - sys.path.insert( - 0, - libdir - ) -else: - # Unchanged, running from checkout, use the parent directory, the nuitka - # package ought be there. - sys.path.insert( - 0, - os.path.normpath( - os.path.join( - os.path.dirname(__file__), - ".." - ) - ) - ) -# LIBDIR trick end (marker for removal on platforms that don't need it) - # PyLint for Python3 thinks we import from ourselves if we really # import from package, pylint:disable=I0021,no-name-in-module
Fix docstring to use 'distribution' instead of 'platform' "centos" is a distribution, not a platform
@@ -90,9 +90,9 @@ class ResolverWithPlugins(DefaultImportResolver): {'package_name': 'cloudify-openstack-plugin'} >>> _make_plugin_filters('cool?version=1.0.2') {'package_name': 'cool', 'package_version': '1.0.2'} - >>> _make_plugin_filters('cool?version=1.0.2&platform=centos') + >>> _make_plugin_filters('cool?version=1.0.2&distribution=centos') {'package_name': 'cool', 'package_version': '1.0.2', - 'supported_platform': 'centos'} + 'distribution': 'centos'} """ filter_renames = {'platform': 'supported_platform', 'version': 'package_version',