Columns: message (string, 13-484 chars), diff (string, 38-4.63k chars)
Disable SignUp * Disable SignUp: the "disable signup" setting only hides the signup link, but users can still sign up anyway; this disables the sign_up function as well. * Fix translate String
@@ -13,6 +13,7 @@ import frappe.permissions import frappe.share import re from frappe.limits import get_limits +from frappe.website.utils import is_signup_enabled STANDARD_USERS = ("Guest", "Administrator") @@ -602,6 +603,9 @@ def verify_password(password): @frappe.whitelist(allow_guest=True) def sign_up(email, full_name, redirect_to): + if not is_signup_enabled(): + frappe.throw(_('Sign Up is disabled'), title='Not Allowed') + user = frappe.db.get("User", {"email": email}) if user: if user.disabled:
Fix backwards compatibility for pip VcsSupport. VcsSupport classes need to be instantiated on pip<19.2; this provides backward compatibility.
@@ -61,15 +61,17 @@ class VCSRepository(object): def obtain(self): # type: () -> None - lte_pip_19 = ( - pip_shims.parsed_pip_version.parsed_version < pip_shims.parse_version("19.0") + lt_pip_19_2 = ( + pip_shims.parsed_pip_version.parsed_version < pip_shims.parse_version("19.2") ) + if lt_pip_19_2: + self.repo_backend = self.repo_backend(self.url) if os.path.exists( self.checkout_directory ) and not self.repo_backend.is_repository_directory(self.checkout_directory): self.repo_backend.unpack(self.checkout_directory) elif not os.path.exists(self.checkout_directory): - if lte_pip_19: + if lt_pip_19_2: self.repo_backend.obtain(self.checkout_directory) else: self.repo_backend.obtain(self.checkout_directory, self.parsed_url)
Update NZBGetPostProcess.py fail more gracefully
@@ -236,14 +236,14 @@ if 'NZBOP_SCRIPTDIR' in os.environ and not os.environ['NZBOP_VERSION'][0:5] < '1 if success: sys.exit(POSTPROCESS_SUCCESS) else: - sys.exit(POSTPROCESS_ERROR) + sys.exit(POSTPROCESS_NONE) elif (radarrcat.startswith(category)): #DEBUG#print "Radarr Processing Activated" success = radarr.processMovie(path, settings, True, pathMapping=path_mapping) if success: sys.exit(POSTPROCESS_SUCCESS) else: - sys.exit(POSTPROCESS_ERROR) + sys.exit(POSTPROCESS_NONE) elif (sickragecat.startswith(category)): #DEBUG#print "Sickrage Processing Activated" autoProcessTVSR.processEpisode(path, settings, nzb, pathMapping=path_mapping)
[cleanup] Deprecate pagegenerators.UnconnectedPageGenerator Use Site.unconnected_pages() instead
@@ -950,8 +950,7 @@ class GeneratorFactory(object): total=intNone(value), site=self.site) elif arg == '-unconnectedpages': - gen = UnconnectedPageGenerator(total=intNone(value), - site=self.site) + gen = self.site.unconnected_pages(total=intNone(value)) elif arg == '-imagesused': if not value: value = pywikibot.input( @@ -1309,6 +1308,7 @@ def RecentChangesPageGenerator(start=None, end=None, reverse=False, return gen +@deprecated('site.unconnected_pages()') @deprecated_args(step=None) def UnconnectedPageGenerator(site=None, total=None): """ @@ -1323,8 +1323,7 @@ def UnconnectedPageGenerator(site=None, total=None): site = pywikibot.Site() if not site.data_repository(): raise ValueError('The given site does not have Wikibase repository.') - for page in site.unconnected_pages(total=total): - yield page + return site.unconnected_pages(total=total) @deprecated_args(referredImagePage='referredFilePage', step=None)
[varLib] Allow sparse masters in HVAR Part of Part of
@@ -420,42 +420,46 @@ def _merge_TTHinting(font, masterModel, master_ttfs, tolerance=0.5): var = TupleVariation(support, delta) cvar.variations.append(var) -def _add_HVAR(font, model, master_ttfs, axisTags): +def _add_HVAR(font, masterModel, master_ttfs, axisTags): log.info("Generating HVAR") - hAdvanceDeltas = {} + glyphOrder = font.getGlyphOrder() + + hAdvanceDeltasAndSupports = {} metricses = [m["hmtx"].metrics for m in master_ttfs] - for glyph in font.getGlyphOrder(): - hAdvances = [metrics[glyph][0] for metrics in metricses] + for glyph in glyphOrder: + hAdvances = [metrics[glyph][0] if glyph in metrics else None for metrics in metricses] # TODO move round somewhere else? - hAdvanceDeltas[glyph] = tuple(otRound(d) for d in model.getDeltas(hAdvances)[1:]) + hAdvanceDeltasAndSupports[glyph] = masterModel.getDeltasAndSupports(hAdvances) - # Direct mapping - supports = model.supports[1:] + singleModel = models.allSame(id(v[1]) for v in hAdvanceDeltasAndSupports.values()) + + directStore = None + if singleModel: + # Build direct mapping + supports = hAdvanceDeltasAndSupports[0][1][1:] varTupleList = builder.buildVarRegionList(supports, axisTags) varTupleIndexes = list(range(len(supports))) - n = len(supports) - items = [] - for glyphName in font.getGlyphOrder(): - items.append(hAdvanceDeltas[glyphName]) - - # Build indirect mapping to save on duplicates, compare both sizes - uniq = list(set(items)) - mapper = {v:i for i,v in enumerate(uniq)} - mapping = [mapper[item] for item in items] - advanceMapping = builder.buildVarIdxMap(mapping, font.getGlyphOrder()) - - # Direct - varData = builder.buildVarData(varTupleIndexes, items) + varData = builder.buildVarData(varTupleIndexes, []) + for glyphName in glyphOrder: + varData.add_item(hAdvanceDeltasAndSupports[glyphName][0]) directStore = builder.buildVarStore(varTupleList, [varData]) - # Indirect - varData = builder.buildVarData(varTupleIndexes, uniq) - indirectStore = builder.buildVarStore(varTupleList, [varData]) - mapping = indirectStore.optimize() - advanceMapping.mapping = {k:mapping[v] for k,v in advanceMapping.mapping.items()} - + # Build optimized indirect mapping + storeBuilder = varStore.OnlineVarStoreBuilder(axisTags) + mapping = {} + for glyphName in glyphOrder: + deltas,supports = hAdvanceDeltasAndSupports[glyphName] + storeBuilder.setSupports(supports) + mapping[glyphName] = storeBuilder.storeDeltas(deltas) + indirectStore = storeBuilder.finish() + mapping2 = indirectStore.optimize() + mapping = [mapping2[mapping[g]] for g in glyphOrder] + advanceMapping = builder.buildVarIdxMap(mapping, glyphOrder) + + use_direct = False + if directStore: # Compile both, see which is more compact writer = OTTableWriter()
(fix) updated poll interval lengths. Updated SHORT_POLL_INTERVAL and LONG_POLL_INTERVAL both to 1.0 so _status_poll_loop can actively query account balances.
@@ -54,10 +54,10 @@ class BitmartExchange(ExchangeBase): trading functionality. """ API_CALL_TIMEOUT = 10.0 - SHORT_POLL_INTERVAL = 5.0 + SHORT_POLL_INTERVAL = 1.0 UPDATE_ORDER_STATUS_MIN_INTERVAL = 10.0 UPDATE_TRADE_STATUS_MIN_INTERVAL = 10.0 - LONG_POLL_INTERVAL = 120.0 + LONG_POLL_INTERVAL = 1.0 @classmethod def logger(cls) -> HummingbotLogger:
TST: updated repr test for files Updated the repr test for files.
@@ -172,23 +172,23 @@ class TestBasics(): def setup(self): """Runs before every method to create a clean testing setup.""" self.out = '' + # Use a two-year as default. Some tests will use custom ranges. self.start = dt.datetime(2008, 1, 1) self.stop = dt.datetime(2009, 12, 31) - # store current pysat directory + # Store current pysat directory self.data_paths = pysat.params['data_dirs'] - # create temporary directory + # Create temporary directory self.tempdir = tempfile.TemporaryDirectory() pysat.params['data_dirs'] = [self.tempdir.name] - self.testInst = \ - pysat.Instrument(inst_module=pysat.instruments.pysat_testing, - clean_level='clean', - temporary_file_list=self.temporary_file_list, - update_files=True) - # create instrument directories in tempdir + self.testInst = pysat.Instrument( + inst_module=pysat.instruments.pysat_testing, clean_level='clean', + temporary_file_list=self.temporary_file_list, update_files=True) + + # Create instrument directories in tempdir create_dir(self.testInst) def teardown(self): @@ -201,7 +201,7 @@ class TestBasics(): """The repr output will match the str output""" self.out = self.testInst.files.__repr__() assert isinstance(self.out, str) - assert self.out.find("Local files") > 0 + assert self.out.find("pysat.Files(") >= 0 def test_basic_str(self): """Check for lines from each decision point in str"""
[Docs] Changed output of model server When I run the model server the output is [value] not {"predictions": [value]} - perhaps this is something I have done but if not the docs should probably reflect this. I haven't tested the R version so that may need updating too.
@@ -306,7 +306,7 @@ in MLflow saved the model as an artifact within the run. the server should respond with output similar to:: - {"predictions": [6.379428821398614]} + [6.379428821398614] .. container:: R
Tests: Run compile library test in a per-version directory. * This avoids clashes between different versions run at the same time while still making the results easy to find.
@@ -109,12 +109,11 @@ def action(stage_dir, root, path): ) ) - +from nuitka.PythonVersions import python_version compileLibraryTest( search_mode = search_mode, - stage_dir = os.path.join(tmp_dir, "compile_library"), + stage_dir = os.path.join(tmp_dir, "compile_library_%s" % python_version ), decide = decide, action = action ) -
Developer Manual: Added description of how context managers are named. * This is a very useful coding rule in my mind, as it makes it really easy to recognize them.
@@ -293,6 +293,21 @@ There is no code in packages themselves. For programs, we use Names of modules should be plurals if they contain classes. Example is that a ``Nodes`` module that contains a ``Node`` class. +Names for context manages start with ``with`` +============================================= + +In order to easily recognize that something is to be used as a context +manager, we follow a pattern of naming them ``withSomething``, to make +that easily recognized. + +.. code:: python + + with withEnvironmentPathAdded(os.path.join(sys.prefix, "bin")): + with withDirectoryChange(self.qt_datadir): + ... + +This makes these easy to recognize even in their definition. + Prefer list contractions over built-ins =======================================
TST: added unit tests Added unit tests to cover newly uncovered lines.
@@ -271,6 +271,28 @@ class TestConstellationBasics(object): del self.testConst, self.load_date, self.custom_args return + @pytest.mark.parametrize("apply_inst", [False, True]) + def test_bad_set_custom(self, apply_inst): + """Test ValueError raised when not setting custom functions correctly. + + Parameters + ---------- + apply_inst : bool + Apply custom function at Instrument level (True) or Constellation + level (False) + + """ + + with pytest.raises(ValueError) as verr: + pysat.Constellation( + instruments=[pysat.Instrument( + 'pysat', 'testing', num_samples=10, clean_level='clean') + for i in range(5)], + custom=[{'apply_inst': apply_inst}]) + + assert str(verr).find("Input dict to custom is missing the") >= 0 + return + @pytest.mark.parametrize("apply_inst", [False, True]) def test_repr(self, apply_inst): """Test `__repr__` with custom method. @@ -305,12 +327,17 @@ class TestConstellationBasics(object): """ self.testConst.custom_attach(mult_data, apply_inst=apply_inst, - args=self.custom_args) + args=self.custom_args, + kwargs={'dkey': 'mlt'}) self.out = self.testConst.__str__() assert isinstance(self.out, str) assert self.out.find("Constellation-level Data Processing") >= 0 assert self.out.find( "Custom Functions: {:d} applied".format(num_func)) >= 0 + + if num_func > 0: + assert self.out.find("Args=") >= 0 + assert self.out.find("Kwargs=") >= 0 return def test_single_custom_function_error(self):
UI - FM - Alarm Diagnostic: Wrong field types fixed HG-- branch : ashapovalov/ui-fm-alarm-diagnostic-wrong-field-typ-1490087280651
@@ -142,7 +142,7 @@ Ext.define("NOC.fm.alarmdiagnosticconfig.Application", { }, { name: "periodic_delay", - xtype: "main.ref.script.LookupField", + xtype: "numberfield", fieldLabel: __("Delay"), min: 0, allowBlank: true @@ -155,7 +155,7 @@ Ext.define("NOC.fm.alarmdiagnosticconfig.Application", { }, { name: "periodic_script", - xtype: "textfield", + xtype: "main.ref.script.LookupField", fieldLabel: __("Script"), allowBlank: true },
CI: Removed test_converters_compatibility TT 1.x uses inkscape as converter only. Hence, this test is not required anymore.
@@ -200,14 +200,6 @@ def is_current_version_compatible(test_id, or not os.path.isfile(mod_args["preamble-file"]): mod_args["preamble-file"] = os.path.join(EXTENSION_DIR, "default_packages.tex") - if converter == "pstoedit": - textext.CONVERTERS = {textext.PstoeditPlotSvg.get_pdf_converter_name(): textext.PstoeditPlotSvg} - elif converter == "pdf2svg": - textext.CONVERTERS = {textext.Pdf2SvgPlotSvg.get_pdf_converter_name(): textext.Pdf2SvgPlotSvg} - else: - textext.CONVERTERS = {textext.PstoeditPlotSvg.get_pdf_converter_name(): textext.PstoeditPlotSvg, - textext.Pdf2SvgPlotSvg.get_pdf_converter_name(): textext.Pdf2SvgPlotSvg} - # run TexText tt = textext.TexText() tt.affect([ @@ -250,31 +242,3 @@ def test_compatibility(root, inkscape_version, textext_version, converter, test_ ) sys.stderr.write(message + "\n") assert result, message - - -def test_converters_compatibility(root, inkscape_version, textext_version, converter, test_case): - if inkscape_version.startswith("_") or textext_version.startswith("_") or converter.startswith( - "_") or test_case.startswith("_"): - pytest.skip("skip %s (remove underscore to enable)" % os.path.join(inkscape_version, textext_version, converter, - test_case)) - - assert converter in ["pdf2svg", "pstoedit"] - # switch converters - if converter == "pdf2svg": - replaced_converter = "pstoedit" - elif converter == "pstoedit": - replaced_converter = "pdf2svg" - - test_id = "%s-%s-%s-%s-%s" % (inkscape_version, textext_version, converter, replaced_converter, test_case) - result, message = is_current_version_compatible( - test_id, - svg_original=os.path.join(root, inkscape_version, textext_version, converter, test_case, "original.svg"), - svg_modified=os.path.join(root, inkscape_version, textext_version, converter, test_case, "modified.svg"), - json_config=os.path.join(root, inkscape_version, textext_version, converter, test_case, "config.json"), - converter=replaced_converter, - fuzz="50%", - pixel_diff_abs_tol=150, - pixel_diff_rel_tol=0.005 - ) - sys.stderr.write(message+"\n") - assert result, message
Allow pipelines to be opened with the JSON widget. Pipeline files can currently be opened in either the pipeline editor or the file editor. This update also allows users to open pipeline files with the JSON viewer. Fixes
@@ -108,12 +108,15 @@ const extension: JupyterFrontEndPlugin<void> = { }); // Add the default behavior of opening the widget for .pipeline files - app.docRegistry.addFileType({ + app.docRegistry.addFileType( + { name: PIPELINE, displayName: 'Pipeline', extensions: ['.pipeline'], icon: pipelineIcon - }); + }, + ['JSON'] + ); app.docRegistry.addWidgetFactory(pipelineEditorFactory); const tracker = new WidgetTracker<DocumentWidget>({
Catch RemoteError if scanner has crashed. If you restart the scanner (e.g. because it has crashed) but don't restart the web script, it won't show any data. This way only the Worker status will be missing.
@@ -236,7 +236,10 @@ def get_pokemarkers(): if config.MAP_WORKERS: # Worker stats + try: markers.extend(get_worker_markers()) + except RemoteError: + print('Unable to connect to manager for worker data.') return markers def get_spawnpointsmarkers():
fix: Use correct path of built assets. Get path from bundled_asset
@@ -576,13 +576,15 @@ def get_server_messages(app): def get_messages_from_include_files(app_name=None): """Returns messages from js files included at time of boot like desk.min.js for desk and web""" + from frappe.utils.jinja_globals import bundled_asset messages = [] app_include_js = frappe.get_hooks("app_include_js", app_name=app_name) or [] web_include_js = frappe.get_hooks("web_include_js", app_name=app_name) or [] include_js = app_include_js + web_include_js for js_path in include_js: - relative_path = os.path.join(frappe.local.sites_path, js_path.lstrip('/')) + file_path = bundled_asset(js_path) + relative_path = os.path.join(frappe.local.sites_path, file_path.lstrip('/')) messages_from_file = get_messages_from_file(relative_path) messages.extend(messages_from_file)
Standalone: Fix Qt plugins in subfolders. * Also scan original files properly by providing the correct path. * This should e.g. fix the JPEG plugin.
@@ -105,12 +105,12 @@ if os.path.exists(guess_path): return [ ( - os.path.join(plugin_dir, os.path.basename(filename)), filename, + os.path.join(target_plugin_dir, os.path.relpath(filename, plugin_dir)), full_name ) for filename in - getFileList(target_plugin_dir) + getFileList(plugin_dir) if not filename.endswith(".qml") ]
Remove share directory. API documentation is installed by CMake in the doc directory.
@@ -84,6 +84,8 @@ class LibpqxxRecipe(ConanFile): cmake = self._configure_cmake() cmake.install() + tools.rmdir(os.path.join(self.package_folder, "share")) + def package_info(self): pqxx_with_suffix = "pqxx-%s.%s" % tuple(self.version.split(".")[0:2]) is_package_with_suffix = self.settings.os != "Windows" and self.options.shared
Fix auth of mailboxlayer and vatlayer Fix auth of apilayer mailboxlayer and vatlayer resolve
@@ -404,7 +404,7 @@ API | Description | Auth | HTTPS | CORS | API | Description | Auth | HTTPS | CORS | |---|---|---|---|---| | [Abstract Email Validation](https://www.abstractapi.com/email-verification-validation-api) | Validate email addresses for deliverability and spam | `apiKey` | Yes | Yes | -| [apilayer mailboxlayer](https://mailboxlayer.com) | Email address validation | No | Yes | Unknown | +| [apilayer mailboxlayer](https://mailboxlayer.com) | Email address validation | `apiKey` | Yes | Unknown | | [Cloudmersive Validate](https://cloudmersive.com/validate-api) | Validate email addresses, phone numbers, VAT numbers and domain names | `apiKey` | Yes | Yes | | [EVA](https://eva.pingutil.com/) | Validate email addresses | No | Yes | Yes | | [Lob.com](https://lob.com/) | US Address Verification | `apiKey` | Yes | Unknown | @@ -413,7 +413,7 @@ API | Description | Auth | HTTPS | CORS | | [US Autocomplete](https://smartystreets.com/docs/cloud/us-autocomplete-api) | Enter address data quickly with real-time address suggestions | `apiKey` | Yes | Yes | | [US Extract](https://smartystreets.com/products/apis/us-extract-api) | Extract postal addresses from any text including emails | `apiKey` | Yes | Yes | | [US Street Address](https://smartystreets.com/docs/cloud/us-street-api) | Validate and append data for any US postal address | `apiKey` | Yes | Yes | -| [vatlayer](https://vatlayer.com/documentation) | VAT number validation | No | Yes | Unknown | +| [vatlayer](https://vatlayer.com/documentation) | VAT number validation | `apiKey` | Yes | Unknown | | [Verifier](https://verifier.meetchopra.com/docs#/) | Verifies that a given email is real | `apiKey` | Yes | Yes | | [Veriphone](https://veriphone.io) | Phone number validation & carrier lookup | `apiKey` | Yes | Yes |
Cleanup. Ran flake8 to clean up some unneeded whitespace.
@@ -133,7 +133,6 @@ class Folder(CanvasObject): ) return Folder(self._requester, response.json()) - def upload(self, file, **kwargs): """ Upload a file to this folder. @@ -155,7 +154,6 @@ class Folder(CanvasObject): **kwargs ).start() - def update(self, **kwargs): """ Updates a folder.
Update problem.py Should use "oneshot" for remote challenges.
@@ -246,6 +246,13 @@ class Remote(Service): return output + def service(self): + """ + Unlike the parent class, these are executables and should be restarted each time + """ + return {"Type":"oneshot", + "ExecStart":"/bin/bash -c \"{}\"".format(self.start_cmd) + } class FlaskApp(Service): """
Codacy Still trying to clear the codacy flag.
@@ -1245,7 +1245,7 @@ def get_tts_engine(profile): try: flite_cmd = ['flite', '-lv'] voices = subprocess.check_output( - flite_cmd, + ['flite','-lv'], shell=False ).decode('utf-8').split(" ")[2:-1] print(
Update README.md Links in table now open the notebooks in colab
|-----------|----|----| |Overview of Python ML/DL software ecosystem| Various | [markdown](software.md)| |List of Python tutorials | Various | [markdown](python.md)| -|Brief intro to colab| Colab | [notebook](colab_intro.ipynb)| -|Brief intro to data analysis |Matplotlib, Pandas, Xarray | [notebook](pandas_intro.ipynb)| -|Brief intro to sklearn | Sklearn | [notebook](sklearn_intro.ipynb)| -|Brief intro to JAX | JAX | [notebook](jax_intro.ipynb)| -|Brief survey of common datasets| Sklearn, TFDS| [notebook](datasets.ipynb)| +|Brief intro to colab| Colab | [notebook](https://colab.sandbox.google.com/github/probml/pyprobml/blob/master/book1/intro/colab_intro.ipynb)| +|Brief intro to data analysis |Matplotlib, Pandas, Xarray | [notebook](https://colab.sandbox.google.com/github/probml/pyprobml/blob/master/book1/intro/pandas_intro.ipynb)| +|Brief intro to sklearn | Sklearn | [notebook](https://colab.sandbox.google.com/github/probml/pyprobml/blob/master/book1/intro/sklearn_intro.ipynb)| +|Brief intro to JAX| Colab | [notebook](https://colab.sandbox.google.com/github/probml/pyprobml/blob/master/book1/intro/jax_intro.ipynb) +|Brief survey of common datasets| Sklearn, TFDS| [notebook](https://colab.sandbox.google.com/github/probml/pyprobml/blob/master/book1/intro/datasets.ipynb)|
Fix master Summary: The `warnOnSpreadAttributes` config option is failing on `yarn build-for-python`, remove it since it's not surfacing any issues right now anyway. Test Plan: `yarn build-for-python`, verify no error. Reviewers: yuhan
@@ -53,7 +53,7 @@ module.exports = { ], }, ], - 'react/jsx-no-target-blank': ['error', {warnOnSpreadAttributes: true}], + 'react/jsx-no-target-blank': 'error', 'react/prefer-stateless-function': 'error', 'react/prop-types': 'off', 'react/display-name': 'off',
update analyze_spectral. There are specific datatypes that are allowed when writing a dictionary from the Outputs observations class to a JSON text file. Converting to string was useful for avoiding float32, but we don't want wavelengths or reflectance frequencies to be strings, so instead update the datatype transformation steps.
@@ -38,7 +38,6 @@ def analyze_spectral(array, header_dict, mask, histplot=True): wavelength_data = array[np.where(mask > 0)] wavelength_freq = wavelength_data.mean(axis=0) - # min_wavelength = int(np.ceil(float(header_dict["wavelength"][0]))) max_wavelength = int(np.ceil(float(header_dict["wavelength"][-1]))) @@ -47,13 +46,12 @@ def analyze_spectral(array, header_dict, mask, histplot=True): for i, wavelength in enumerate(header_dict["wavelength"]): new_wavelengths.append(float(wavelength)) - new_freq.append(str(wavelength_freq[i])) + new_freq.append((wavelength_freq[i]).astype(np.float)) maxreflectance = np.amax(wavelength_data) minreflectance = np.amin(wavelength_data) avgreflectance = np.average(wavelength_data) medianreflectance = np.median(wavelength_data) - print("max") # Store data into outputs class outputs.add_observation(variable='max_reflectance', trait='maximum reflectance', @@ -70,7 +68,7 @@ def analyze_spectral(array, header_dict, mask, histplot=True): value=float(medianreflectance), label='reflectance') outputs.add_observation(variable='spectral_frequencies', trait='thermal spectral_frequencies', method='plantcv.plantcv.hyperspectral.analyze_spectral', scale='frequency', datatype=list, - value=new_freq, label=header_dict["wavelength"]) + value=new_freq, label=new_wavelengths) params.debug = debug analysis_img = None
Remove protractor-test-add-answer CSS class from modify training data button
answer-group-editor .oppia-add-rule-button:active, answer-group-editor .oppia-add-rule-button:focus, - answer-group-editor .oppia-add-rule-button:hover { + answer-group-editor .oppia-add-rule-button:hover, + answer-group-editor .oppia-modify-training-data-button:active, + answer-group-editor .oppia-modify-training-data-button:focus, + answer-group-editor .oppia-modify-training-data-button:hover { background-color: rgba(165,165,165,1); color: white; opacity: 1; } answer-group-editor .oppia-modify-training-data-button { + background-color: rgba(165,165,165,0.9); + border: 0; + border-radius: 0; + color: white; + opacity: 0.9; + padding: 7px; + width: 100%; margin-top: 4%; } </style> + Add Another Possible Answer </button> - <div ng-if="isCurrentInteractionTrainable()"> - <button type="button" class="btn btn-default btn-lg oppia-add-rule-button protractor-test-add-answer oppia-modify-training-data-button" ng-click="openTrainingDataEditor()"> + <div ng-if="isEditable && isCurrentInteractionTrainable()"> + <button type="button" class="btn btn-default btn-lg oppia-modify-training-data-button" ng-click="openTrainingDataEditor()"> <i class="material-icons md-24">playlist_add</i> Modify Training Data </button> </div>
Force dtype=float for array returned by inf_like. The scalar case already explicitly returned np.inf.
@@ -3182,19 +3182,24 @@ def vectorize_if_needed(func, *x): def inf_like(x): - """Return the shape of x with value infinity. + """Return the shape of x with value infinity and dtype='float'. Preserves 'shape' for both array and scalar inputs. + But always returns a float array, even if x is of integer type. - >>> inf_like(0.) + >>> inf_like(0.) # float scalar inf - >>> inf_like([0., 1., 2., 3.]) + >>> inf_like(1) # integer scalar should give float output + inf + >>> inf_like([0., 1., 2., 3.]) # float list + array([inf, inf, inf, inf]) + >>> inf_like([0, 1, 2, 3]) # integer list should give float output array([inf, inf, inf, inf]) """ if np.isscalar(x): return np.inf else: - return np.full_like(x, np.inf) + return np.full_like(x, np.inf, dtype='float') # Pre-defined cosmologies. This loops over the parameter sets in the
Tests: install python dependencies in pyright tests Otherwise pyright shows 100+ warnings related to imports.
@@ -91,8 +91,9 @@ jobs: - uses: actions/checkout@v3 - name: Install dependencies run: | - sudo apt-get install -y npm + sudo apt-get install -y npm libkrb5-dev libxmlsec1-dev npm install --global pyright + python -m pip --no-cache-dir install --upgrade -r requirements.txt - name: Make pyright report of current commit run: | tools/run_pyright.sh generate ${{ env.PYRIGHT_CURRENT_REPORT }}
DeleteUndefined: handle MaterializeAll MaterializeAll has to be checked in llvm 4. Do so.
#include "llvm/Transforms/Utils/BasicBlockUtils.h" #include <llvm/IR/DebugInfoMetadata.h> +#if LLVM_VERSION_MAJOR >= 4 +#include <llvm/Support/Error.h> +#endif + using namespace llvm; class DeleteUndefined : public ModulePass { @@ -106,7 +110,16 @@ static bool array_match(const StringRef &name, const char **array) } bool DeleteUndefined::runOnModule(Module& M) { +#if LLVM_VERSION_MAJOR >= 4 + if (llvm::Error err = M.materializeAll()) { + std::error_code ec = errorToErrorCode(std::move(err)); + llvm::errs() << __PRETTY_FUNCTION__ << ": cannot load module: " << + ec.message(); + return false; + } +#else M.materializeAll(); +#endif // delete/replace the calls in the rest of functions bool modified = false;
Field.doc: automatically append precise list of types TN:
@@ -1489,6 +1489,25 @@ class Field(BaseField): :type: int """ + @property + def doc(self): + result = super(Field, self).doc + + # If parsers build this field, add a precise list of types it can + # contain: the field type might be too generic. + if not self.struct.synthetic: + precise_types = self.types_from_parser.minimal_matched_types + if len(precise_types) > 1: + type_descr = '\n'.join([ + 'This field can contain one of the following nodes:' + ] + list(sorted('* {}'.format(t.dsl_name) + for t in precise_types))) + result = ('{}\n\n{}'.format(type_descr, type_descr) + if result else + type_descr) + + return result + @property def overriding(self): """
fix tostring deprecation Fixes: bloscpack/test_cmdline/mktestarray.py:30: DeprecationWarning: tostring() is deprecated. Use tobytes() instead.
@@ -27,7 +27,7 @@ def exists(filename): if not exists(DATA_FILE) and not exists(META_FILE): a = numpy.linspace(0, 100, int(2e7)) with open(DATA_FILE, 'wb') as f: - f.write(a.tostring()) + f.write(a.tobytes()) with open(META_FILE, 'w') as m: meta = dict(sorted(_ndarray_meta(a).items())) m.write(json.dumps(meta))
Refine CosineAnnealingWarmRestarts doc for issue Summary: Fixes Pull Request resolved:
@@ -697,18 +697,32 @@ class CosineAnnealingWarmRestarts(_LRScheduler): for base_lr in self.base_lrs] def step(self, epoch=None): - """Step could be called after every update, i.e. if one epoch has 10 iterations - (number_of_train_examples / batch_size), we should call SGDR.step(0.1), SGDR.step(0.2), etc. + """Step could be called after every batch update + + Example: + >>> scheduler = CosineAnnealingWarmRestarts(optimizer, T_0, T_mult) + >>> iters = len(dataloader) + >>> for epoch in range(20): + >>> for i, sample in enumerate(dataloader): + >>> inputs, labels = sample['inputs'], sample['labels'] + >>> scheduler.step(epoch + i / iters) + >>> optimizer.zero_grad() + >>> outputs = net(inputs) + >>> loss = criterion(outputs, labels) + >>> loss.backward() + >>> optimizer.step() This function can be called in an interleaved way. Example: - >>> scheduler = SGDR(optimizer, T_0, T_mult) + >>> scheduler = ConsineAnnealingWarmRestarts(optimizer, T_0, T_mult) >>> for epoch in range(20): >>> scheduler.step() >>> scheduler.step(26) >>> scheduler.step() # scheduler.step(27), instead of scheduler(20) """ + if epoch < 0: + raise ValueError("Expected non-negative epoch, but got {}".format(epoch)) if epoch is None: epoch = self.last_epoch + 1 self.T_cur = self.T_cur + 1
Added print out of ignore_forNorm chromosomes. The number of chromosomes to be ignored might be more informative.
@@ -20,6 +20,7 @@ outdir_ATACqc = 'MACS2_qc/' # do workflow specific stuff now include: os.path.join(workflow.basedir, "internals.snakefile") + ### include modules of other snakefiles ######################################## ################################################################################ @@ -57,6 +58,7 @@ onstart: print("Gene annotation BED:", genes_bed) print("Gene annotation GTF:", genes_gtf) print("Blacklist regions BED:", blacklist_bed) + print("Ignore for normalization (bigwigs):", ignore_forNorm) print("-" * 80, "\n") ### main rule ##################################################################
export: Fix an unnecessary Any. This was introduced a few weeks ago in "Import script: Check and add system bots after every import."
@@ -1390,7 +1390,7 @@ def import_uploads(import_dir: Path, processing_avatars: bool=False) -> None: # Because the Python object => JSON conversion process is not fully # faithful, we have to use a set of fixers (e.g. on DateTime objects # and Foreign Keys) to do the import correctly. -def do_import_realm(import_dir: Path) -> Any: +def do_import_realm(import_dir: Path) -> Realm: logging.info("Importing realm dump %s" % (import_dir,)) if not os.path.exists(import_dir): raise Exception("Missing import directory!")
Interpret entity docs as Mako templates This will allow us to have different doc renderings depending on the context. For instance, having different casings depending on the generated API. TN:
@@ -1148,16 +1148,19 @@ def create_doc_printer(lang, formatter): template_ctx['TODO'] = todo_markers[lang] if isinstance(entity, str): - doc = ctx.documentations[entity].render( + doc_template = ctx.documentations[entity] + elif entity.doc: + doc_template = Template(entity.doc) + else: + return '' + + doc = doc_template.render( ctx=get_context(), capi=ctx.c_api_settings, lang=lang, null=null_names[lang], TODO=todo_markers[lang] ) - else: - doc = entity.doc or '' - return formatter(doc, column, **kwargs) func.__name__ = '{}_doc'.format(lang)
Process docs: added version note for "advanced argument tweaking" feature related: openEOPlatform/documentation#41
@@ -105,6 +105,8 @@ but you can call the corresponding client method in multiple equivalent ways:: Advanced argument tweaking --------------------------- +.. versionadded:: 0.10.0 + In some situations, you may want to finetune what the (convenience) methods generate. For example, you want to play with non-standard, experimental arguments, or there is a problem with a automatic argument handling/conversion feature.
Match: make input value casts unsafe TN:
@@ -1015,10 +1015,13 @@ class Match(AbstractExpression): # bound and initialized. self.matchers = [] for m in matchers: - # Initialize match_var... + # Initialize match_var. Note that assuming the code generation + # is bug-free, this cast cannot fail, so don't generate type + # check boilerplate. let_expr = Let.Expr( [m.match_var], - [Cast.Expr(self.prefix_var.ref_expr, m.match_var.type)], + [Cast.Expr(self.prefix_var.ref_expr, m.match_var.type, + unsafe=True)], # ... and cast this matcher's result to the Match result's # type, as required by OOP with access types in Ada.
Remove unneeded config entries Since channels that mods can't read are now implicitly ignored, there is no need to explicitly ignore them.
@@ -248,15 +248,13 @@ guild: - *ADMIN_SPAM - *MODS - # Modlog cog ignores events which occur in these channels + # Modlog cog explicitly ignores events which occur in these channels. + # This is on top of implicitly ignoring events in channels that the mod team cannot view. modlog_blacklist: - - *ADMINS - - *ADMINS_VOICE - *ATTACH_LOG - *MESSAGE_LOG - *MOD_LOG - *STAFF_VOICE - - *DEV_CORE_VOTING reminder_whitelist: - *BOT_CMD
updated: reduction in overhead if memory is already accurate updated: using contextlib instead of home-grown class for "with" context.
@@ -2,6 +2,7 @@ import Queue import logging import traceback import threading +import contextlib import collections import envi @@ -230,18 +231,7 @@ class VivWorkspaceCore(object,viv_impapi.ImportApi): self.reloc_by_va[rva] = rtype self.relocations.append(einfo) - if rtype == RTYPE_BASERELOC: - # FIXME: we can't rebase something and expect the pointer in memory to be just the offset... - # consider deprecating this reloc type in favor of BASEOFF - ptr = self.readMemoryPtr(rva) - ptr += imgbase - if ptr != (ptr & e_bits.u_maxes[self.psize]): - logger.warn('RTYPE_BASERELOC calculated a bad pointer: 0x%x (imgbase: 0x%x)', ptr, imgbase) - - with SupervisorMode(self): - self.writeMemoryPtr(rva, ptr) - - logger.info('_handleADDRELOC: %x -> %x (map: 0x%x)', rva, ptr, imgbase) + # RTYPE_BASERELOC assumes the memory is already accurate (eg. PE's unless rebased) if rtype == RTYPE_BASEOFF: # add imgbase and offset to pointer in memory @@ -250,10 +240,20 @@ class VivWorkspaceCore(object,viv_impapi.ImportApi): if ptr != (ptr & e_bits.u_maxes[self.psize]): logger.warn('RTYPE_BASEOFF calculated a bad pointer: 0x%x (imgbase: 0x%x)', ptr, imgbase) - with SupervisorMode(self): + # writes are costly, especially on larger binaries + if ptr == self.readMemoryPtr(rva): + return + + with self.getAdminRights(): self.writeMemoryPtr(rva, ptr) - logger.info('_handleADDRELOC: %x -> %x (map: 0x%x)', rva, ptr, imgbase) + #logger.info('_handleADDRELOC: %x -> %x (map: 0x%x)', rva, ptr, imgbase) + + @contextlib.contextmanager + def getAdminRights(self): + self._supervisor = True + yield + self._supervisor = False def _handleADDMODULE(self, einfo): print('DEPRICATED (ADDMODULE) ignored: %s' % einfo) @@ -659,14 +659,6 @@ class VivWorkspaceCore(object,viv_impapi.ImportApi): fva,spdelta,symtype,syminfo = locsym self.localsyms[fva][spdelta] = locsym -class SupervisorMode: - def __init__(self, vw): - self.vw = vw - def __enter__(self): - self.vw._supervisor = 1 - def __exit__(self, type, value, traceback): - self.vw._supervisor = 0 - def trackDynBranches(cfctx, op, vw, bflags, branches): ''' track dynamic branches
[tests] Show additional information with "urlshortener-blocked" APIError. Add site and user to the result['error'] dict in case of T244062. If not logged in, site.user() is None; show the IP in that case. Print "other" information in separate lines with APIError.
@@ -120,10 +120,10 @@ class APIError(Error): def __str__(self): """Return a string representation.""" if self.other: - return '{0}: {1} [{2}]'.format( + return '{0}: {1}\n[{2}]'.format( self.code, self.info, - '; '.join( + ';\n '.join( '{0}: {1}'.format(key, val) for key, val in self.other.items())) @@ -2069,6 +2069,15 @@ class Request(MutableMapping): self.wait() continue + if code == 'urlshortener-blocked': # T244062 + # add additional informations to result['error'] + result['error']['current site'] = self.site + if self.site.user(): + result['error']['current user'] = self.site.user() + else: # not logged in; show the IP + si = self.site.siteinfo + result['error']['current user'] = si['name'] + # raise error try: # Due to bug T66958, Page's repr may return non ASCII bytes
Fix: import ssl may fail under some Python installs. It's only required for certain proxy configurations, so we don't want it to raise ImportError while the user imports our library.
import abc import asyncio import socket -import ssl as ssl_mod import sys +try: + import ssl as ssl_mod +except ImportError: + ssl_mod = None + from ...errors import InvalidChecksumError from ... import helpers @@ -68,6 +72,12 @@ class Connection(abc.ABC): loop=self._loop ) if ssl: + if ssl_mod is None: + raise RuntimeError( + 'Cannot use proxy that requires SSL' + 'without the SSL module being available' + ) + s.settimeout(timeout) s = ssl_mod.wrap_socket( s,
[air/xgboost] Resolve xgboost benchmark failure. After batch_size in predictor is applied correctly, we can reduce flakiness of our release test by using a larger batch size for xgboost, since the default of 4096 is too small. This reduces runtime from ~310 secs to ~200 secs. For full context and debugging history, see the attached issue.
@@ -102,7 +102,12 @@ def run_xgboost_prediction(model_path: str, data_path: str): ds = data.read_parquet(data_path) ckpt = XGBoostCheckpoint.from_model(booster=model) batch_predictor = BatchPredictor.from_checkpoint(ckpt, XGBoostPredictor) - result = batch_predictor.predict(ds.drop_columns(["labels"])) + result = batch_predictor.predict( + ds.drop_columns(["labels"]), + # Improve prediction throughput for xgboost with larger + # batch size than default 4096 + batch_size=8192, + ) return result
Fix non-JSON response of container commit. The RESP BODY should be of JSON format; this patch fixes it. Closes-Bug:
@@ -617,7 +617,7 @@ class Manager(periodic_task.PeriodicTasks): repository, tag) utils.spawn_n(do_container_commit) - return snapshot_image.id + return {"uuid": snapshot_image.id} def _do_container_image_upload(self, context, snapshot_image, data, tag): try:
add SSH ingress on default sg for all addrs Also fixes the ip_ranges on the other rules to match the CidrBlock given when creating the vpc.
@@ -106,10 +106,9 @@ class EC2Provider(ExecutionProvider): # Required: False # Default: t2.small }, - "imageId" : #{"Description: String to append to the #SBATCH blocks - # in the submit script to the scheduler + "imageId" : #{"Description: The ID of the AMI # Type: String, - # Required: False }, + # Required: True }, "region" : #{"Description: AWS region to launch machines in # in the submit script to the scheduler @@ -377,7 +376,10 @@ class EC2Provider(ExecutionProvider): return vpc def security_group(self, vpc): - ''' Create and configure security group. + """Create and configure a new security group. + + + Allows all ICMP in, all TCP and UDP in within VPC This security group is very open. It allows all @@ -386,13 +388,13 @@ class EC2Provider(ExecutionProvider): by changing the allowed port ranges. :param vpc - VPC in which to set up security group - ''' + """ sg = vpc.create_security_group( GroupName="private-subnet", Description="security group for remote executors" ) - ip_ranges = [{'CidrIp': '172.32.0.0/16'}] + ip_ranges = [{'CidrIp': '10.0.0.0/16'}] # Allows all ICMP in, all TCP and UDP in within VPC @@ -414,8 +416,16 @@ class EC2Provider(ExecutionProvider): 'IpRanges': [{ 'CidrIp': '0.0.0.0/0' }], + }, { + 'IpProtocol': 'TCP', + 'FromPort': 22, + 'ToPort': 22, + 'IpRanges': [{ + 'CidrIp': '0.0.0.0/0' + }], } ] + # Allows all TCP out, all TCP and UDP out within VPC outPerms = [ {
Fix bugs that prevented the form handlers from working on Firefox. Now, the handlers directly interact with the input or textarea tag when inputting text.
@@ -45,6 +45,14 @@ class BaseFormHandler: def selenium(self): return BuiltIn().get_library_instance("SeleniumLibrary") + @property + def input_element(self): + """Returns the actual <input> or <textarea> element inside the element""" + elements = self.element.find_elements_by_xpath( + ".//*[self::input or self::textarea]" + ) + return elements[0] if elements else None + # these should be implemented by each of the handlers def set(self, value): pass @@ -82,7 +90,7 @@ class LightningComboboxHandler(BaseFormHandler): value_locator = f'//lightning-base-combobox-item[.="{value}"]' wait = 5 try: - self.element.click() + self.input_element.click() self.selenium.wait_until_element_is_visible(value_locator, wait) self.selenium.click_element(value_locator) except Exception as e: @@ -99,20 +107,22 @@ class LightningInputHandler(BaseFormHandler): def set(self, value): self.focus() - if self.element.get_attribute("type") == "checkbox": + if self.input_element.get_attribute("type") == "checkbox": # lightning-input elements are used for checkboxes # as well as free text input. checked = self.element.get_attribute("checked") if (checked and value != "checked") or (not checked and value == "checked"): - self.element.send_keys(" ") + self.input_element.send_keys(" ") else: self.clear() - self.element.send_keys(value) + self.input_element.send_keys(value) def clear(self): """Remove the value in the element""" # oddly, element.clear() doesn't always work. - self.selenium.driver.execute_script("arguments[0].value = '';", self.element) + self.selenium.driver.execute_script( + "arguments[0].value = '';", self.input_element + ) def focus(self): """Set focus to the element @@ -134,7 +144,7 @@ class LightningLookupHandler(BaseFormHandler): # instead of searching the whole document? value_locator = f'//div[@role="listbox"]//*[.="{value}"]' self.element.click() - self.element.send_keys(value) + self.input_element.send_keys(value) try: self.selenium.wait_until_element_is_visible(value_locator, wait) self.selenium.click_element(value_locator)
Fixed typo in requisites.rst Glog -> Glob
@@ -68,7 +68,7 @@ first line in the stanza) or the ``- name`` parameter. - require: - pkg: vim -Glog matching in requisites +Glob matching in requisites ~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. versionadded:: 0.9.8
swarming: fix ts_mon_metrics_test This is to fix
@@ -12,6 +12,7 @@ import swarming_test_env swarming_test_env.setup_test_env() from google.appengine.ext import ndb +import webapp2 import gae_ts_mon from test_support import test_case @@ -83,7 +84,8 @@ class TestMetrics(test_case.TestCase): def setUp(self): super(TestMetrics, self).setUp() gae_ts_mon.reset_for_unittest() - gae_ts_mon.initialize() + self.app = webapp2.WSGIApplication(None, debug=True) + gae_ts_mon.initialize(self.app) self.now = datetime.datetime(2016, 4, 7) self.mock_now(self.now)
Update rogue_dns.txt Root ```upgrinfo.com``` is going to ```domain.txt```
@@ -223,4 +223,12 @@ ns2.whatnexthost.com # Reference: https://www.virustotal.com/gui/domain/ns1.fakesemoiin23.com/relations # Reference: https://www.virustotal.com/gui/domain/ns2.fakesemoiin23.com/relations +ns1.fakesemoiin23.com +ns2.fakesemoiin23.com 139.59.80.101:53 + +# Reference: https://www.virustotal.com/gui/domain/upgrinfo.com/relations + +ns1.upgrinfo.com +ns2.upgrinfo.com +58.158.177.102:53
Config2: limit the number of retries to 15 This limits the waiting time until timeout from 43 min to 23.
@@ -649,7 +649,7 @@ special_page_limit = 500 step = -1 # Maximum number of times to retry an API request before quitting. -max_retries = 25 +max_retries = 15 # Minimum time to wait before resubmitting a failed API request. retry_wait = 5
Environment variable fallbacks should work for manually created configs. This gives LocalConfig.find() and LocalConfig() the same defaults. The code is also more easily understandable with the chain of 'or' fallbacks in one place.
@@ -52,56 +52,68 @@ class LocalConfig(object): current user. """ - def __init__(self, config, environment=None, files_loaded=None): - self._config = config + def __init__(self, config, files_loaded=None, env=None, driver=None): + self._config = config # type: compat.configparser.ConfigParser self.files_loaded = [] if files_loaded: - self.files_loaded = files_loaded + self.files_loaded = files_loaded # type: list[str] - self.environment = environment + # The user may specify these when running, otherwise they are loaded from config. + self._specified_environment = env # type: str + self._specified_driver = driver # type: str + + if not config.has_section(self.environment): + raise ValueError('No config section found for environment %r' % (env,)) @classmethod - def find(cls, paths=DEFAULT_CONF_PATHS, env=None): + def find(cls, paths=DEFAULT_CONF_PATHS, env=None, driver=None): """ Find config from possible filesystem locations. 'env' is which environment to use from the config: it corresponds to the name of a config section + `driver` is a specific driver to use rather than the defaults. + :type paths: list[str] :type env: str + :type driver: str :rtype: LocalConfig """ config = compat.read_config(_DEFAULT_CONF) files_loaded = config.read(p for p in paths if p) - env = env or os.environ.get('DATACUBE_ENVIRONMENT') or config.get('user', 'default_environment') - if not config.has_section(env): - raise ValueError('No config section found for environment %r' % (env,)) - return LocalConfig( config, - environment=env, - files_loaded=files_loaded + files_loaded=files_loaded, + env=env, + driver=driver ) - def _prop(self, key): + def _environment_prop(self, key): + # Get the property for the current instance. try: return self._config.get(self.environment, key) except compat.NoOptionError: return None + @property + def environment(self): + return self._specified_environment or \ + os.environ.get('DATACUBE_ENVIRONMENT') or \ + self._config.get('user', 'default_environment') + @property def db_hostname(self): - return self._prop('db_hostname') + return self._environment_prop('db_hostname') @property def db_database(self): - return self._prop('db_database') + return self._environment_prop('db_database') @property def db_connection_timeout(self): - return int(self._prop('db_connection_timeout')) + return int(self._environment_prop('db_connection_timeout')) @property def db_username(self): @@ -112,19 +124,24 @@ class LocalConfig(object): # No default on Windows default_username = None - return self._prop('db_username') or default_username + return self._environment_prop('db_username') or default_username @property def default_driver(self): - return self._prop('default_driver') + return ( + self._specified_driver or + os.environ.get('DATACUBE_DEFAULT_DRIVER') or + # The default driver for the current environment + self._environment_prop('default_driver') + ) @property def db_password(self): - return self._prop('db_password') + return self._environment_prop('db_password') @property def db_port(self): - return self._prop('db_port') or '5432' + return self._environment_prop('db_port') or '5432' def __str__(self): return "LocalConfig<loaded_from={}, config={}, environment={})".format(
Switched to using Message.from_bytes in RtMidi backend. Since RtMidi always returns a complete message there's no need to use the full parser.
@@ -7,6 +7,7 @@ import threading import rtmidi from .. import ports +from ..messages import Message from ._parser_queue import ParserQueue def _get_api_lookup(): @@ -203,10 +204,13 @@ class Input(PortCommon, ports.BaseInput): self._rt.set_callback(self._callback_wrapper) def _callback_wrapper(self, msg_data, data): - self._queue.put_bytes(msg_data[0]) - if self._callback: - for msg in self._queue.iterpoll(): - self._callback(msg) + try: + msg = Message.from_bytes(msg_data[0]) + except ValueError: + # Ignore invalid message. + return + + (self._callback or self._queue.put)(msg) class Output(PortCommon, ports.BaseOutput):
Add an annotation for time completed to show its non-None type This is part of DFK static typing, issue
@@ -110,7 +110,7 @@ class DataFlowKernel(object): self.hub_interchange_port = self.monitoring.start(self.run_id, self.run_dir) self.time_began = datetime.datetime.now() - self.time_completed = None + self.time_completed: Optional[datetime.datetime] = None logger.info("Run id is: " + self.run_id)
node tests: Add test_trigger_submit_compose_form(). (Steve Howell also contributed to this.)
@@ -822,6 +822,26 @@ function test_with_mock_socket(test_params) { assert(update_faded_messages_checked); }()); +(function test_trigger_submit_compose_form() { + var prevent_default_checked = false; + var compose_finish_checked = false; + var e = { + preventDefault: function () { + prevent_default_checked = true; + }, + }; + compose.finish = function () { + compose_finish_checked = true; + }; + + var submit_handler = $('#compose form').get_on_handler('submit'); + + submit_handler(e); + + assert(prevent_default_checked); + assert(compose_finish_checked); +}()); + (function test_set_focused_recipient() { var sub = { stream_id: 101,
fix "git clone" on Windows. The env argument replaces the environment, it does not update it, and Windows really needs its env.
@@ -273,13 +273,20 @@ def run_cmd(cwd, cmd, env=None): if len(cmd) == 0: raise dbt.exceptions.CommandError(cwd, cmd) + # the env argument replaces the environment entirely, which has exciting + # consequences on Windows! Do an update instead. + full_env = env + if env is not None: + full_env = os.environ.copy() + full_env.update(env) + try: proc = subprocess.Popen( cmd, cwd=cwd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, - env=env) + env=full_env) out, err = proc.communicate() except OSError as exc:
[gcs/ha] Enable HA flags by default PR to enable all three flags for GCS HA: RAY_bootstrap_with_gcs=1 RAY_gcs_grpc_based_pubsub=1 RAY_gcs_storage=memory
@@ -294,9 +294,9 @@ RAY_CONFIG(bool, grpc_based_resource_broadcast, true) // Feature flag to enable grpc based pubsub in GCS. RAY_CONFIG(bool, gcs_grpc_based_pubsub, true) // The storage backend to use for the GCS. It can be either 'redis' or 'memory'. -RAY_CONFIG(std::string, gcs_storage, "redis") +RAY_CONFIG(std::string, gcs_storage, "memory") // Feature flag to enable GCS based bootstrapping. -RAY_CONFIG(bool, bootstrap_with_gcs, false) +RAY_CONFIG(bool, bootstrap_with_gcs, true) /// Duration to sleep after failing to put an object in plasma because it is full. RAY_CONFIG(uint32_t, object_store_full_delay_ms, 10)
Tweak language in PR template This has irked me for a while. Declaring certainty that a change won't produce a regression seems impossibly hard, and I think this is a more achievable reflection of the intent.
- [ ] All migrations are backwards compatible and won't block deploy - [ ] The set of people pinged as reviewers is appropriate for the level of risk of the change - [ ] If QA is part of the safety story, the "Awaiting QA" label is used -- [ ] I am certain that this PR will not introduce a regression for the reasons below +- [ ] I have confidence that this PR will not introduce a regression for the reasons below ### Automated test coverage
adding fields attribute to audio.Audio; docstring update
@@ -11,6 +11,7 @@ class Audio(PlexPartialObject): Attributes: addedAt (datetime): Datetime this item was added to the library. + fields (list): List of :class:`~plexapi.media.Field`. index (sting): Index Number (often the track number). key (str): API URL (/library/metadata/<ratingkey>). lastViewedAt (datetime): Datetime item was last accessed. @@ -33,6 +34,7 @@ class Audio(PlexPartialObject): self._data = data self.listType = 'audio' self.addedAt = utils.toDatetime(data.attrib.get('addedAt')) + self.fields = self.findItems(data, etag='Field') self.index = data.attrib.get('index') self.key = data.attrib.get('key') self.lastViewedAt = utils.toDatetime(data.attrib.get('lastViewedAt'))
Removes container cleanup test. This is tested elsewhere, e.g. test_service_start_cleanup. Closes
from pathlib import Path -import docker import pytest from django.core.exceptions import ValidationError from django.test import TestCase @@ -26,12 +25,7 @@ def test_submission_evaluation( settings.task_always_eager = (True,) # Upload a submission and create an evaluation - dockerclient = docker.DockerClient( - base_url=settings.COMPONENTS_DOCKER_BASE_URL - ) - eval_container, sha256 = evaluation_image - method = MethodFactory( image__from_path=eval_container, image_sha256=sha256, ready=True ) @@ -40,18 +34,11 @@ def test_submission_evaluation( with pytest.raises(NotImplementedError): _ = method.image.url - num_containers_before = len(dockerclient.containers.list()) - num_volumes_before = len(dockerclient.volumes.list()) - # This will create an evaluation, and we'll wait for it to be executed submission = SubmissionFactory( predictions_file__from_path=submission_file, phase=method.phase ) - # The evaluation method should clean up after itself - assert len(dockerclient.volumes.list()) == num_volumes_before - assert len(dockerclient.containers.list()) == num_containers_before - # The evaluation method should return the correct answer assert len(submission.evaluation_set.all()) == 1 assert (
OptionalParameter in ExternalPythonProgramTask. Updated ExternalPythonProgramTask parameter types to OptionalParameter.
@@ -260,13 +260,13 @@ class ExternalPythonProgramTask(ExternalProgramTask): :py:class:`luigi.parameter.Parameter` s for setting a virtualenv and for extending the ``PYTHONPATH``. """ - virtualenv = luigi.Parameter( + virtualenv = luigi.OptionalParameter( default=None, positional=False, description='path to the virtualenv directory to use. It should point to ' 'the directory containing the ``bin/activate`` file used for ' 'enabling the virtualenv.') - extra_pythonpath = luigi.Parameter( + extra_pythonpath = luigi.OptionalParameter( default=None, positional=False, description='extend the search path for modules by prepending this '
rng warning. Changed settings for generating the RNG warning, which was happening too often.
@@ -198,7 +198,7 @@ def sample_rwalk(args): nfail += 1 # Check if we're stuck generating bad numbers. - if fail > 50 * walks: + if fail > 100 * walks: warnings.warn("Random number generation appears to be " "extremely inefficient. Adjusting the " "scale-factor accordingly.")
TST: improve tox.ini. [CHANGED] invoke mpi4py.futures correctly. Doing this required ensuring I correctly specify the tox venv's Python interpreter. Other miscellaneous cleanup.
@@ -14,9 +14,7 @@ deps = py{36,37,38}: numba>0.48.0 scitrack pandas pytest-cov - py37mpi: mpi4py - py38mpi: mpi4py - py39mpi: mpi4py + py{37mpi,38mpi,39mpi}: mpi4py [testenv:py39] changedir = tests @@ -41,21 +39,21 @@ changedir = tests basepython = python3.7 whitelist_externals = mpiexec commands = - mpiexec -n 1 pytest --junitxml=junit-{envname}.xml --cov-report xml --cov=cogent3 test_app/test_app_mpi.py + mpiexec -n 1 {envpython} -m mpi4py.futures -m pytest --junitxml=junit-{envname}.xml --cov-report xml --cov=cogent3 test_app/test_app_mpi.py [testenv:py38mpi] changedir = tests basepython = python3.8 whitelist_externals = mpiexec commands = - mpiexec -n 1 pytest --junitxml=junit-{envname}.xml --cov-report xml --cov=cogent3 test_app/test_app_mpi.py + mpiexec -n 1 {envpython} -m mpi4py.futures -m pytest --junitxml=junit-{envname}.xml --cov-report xml --cov=cogent3 test_app/test_app_mpi.py [testenv:py39mpi] changedir = tests basepython = python3.9 whitelist_externals = mpiexec commands = - mpiexec -n 1 pytest --junitxml=junit-{envname}.xml --cov-report xml --cov=cogent3 test_app/test_app_mpi.py + mpiexec -n 1 {envpython} -m mpi4py.futures -m pytest --junitxml=junit-{envname}.xml --cov-report xml --cov=cogent3 test_app/test_app_mpi.py [testenv:py36] changedir = tests
Decouple the cmd_restart from the actual restart. This is particularly useful if we want to restart without triggering the configuration validation, for example after running xrandr.
@@ -261,6 +261,20 @@ class Qtile(CommandObject): logger.debug('Stopping qtile') self._stopped_event.set() + def restart(self): + argv = [sys.executable] + sys.argv + if '--no-spawn' not in argv: + argv.append('--no-spawn') + buf = io.BytesIO() + try: + pickle.dump(QtileState(self), buf, protocol=0) + except: # noqa: E722 + logger.error("Unable to pickle qtile state") + argv = [s for s in argv if not s.startswith('--with-state')] + argv.append('--with-state=' + buf.getvalue().decode()) + self._restart = (sys.executable, argv) + self.stop() + async def finalize(self): self._eventloop.remove_signal_handler(signal.SIGINT) self._eventloop.remove_signal_handler(signal.SIGTERM) @@ -1098,19 +1112,7 @@ class Qtile(CommandObject): logger.error("Preventing restart because of a configuration error: {}".format(error)) send_notification("Configuration error", str(error.__context__)) return - - argv = [sys.executable] + sys.argv - if '--no-spawn' not in argv: - argv.append('--no-spawn') - buf = io.BytesIO() - try: - pickle.dump(QtileState(self), buf, protocol=0) - except: # noqa: E722 - logger.error("Unable to pickle qtile state") - argv = [s for s in argv if not s.startswith('--with-state')] - argv.append('--with-state=' + buf.getvalue().decode()) - self._restart = (sys.executable, argv) - self.stop() + self.restart() def cmd_spawn(self, cmd): """Run cmd in a shell.
Make LaunchBar font configurable When an icon can't be found, `LaunchBar` defaults to displaying text. The text is formatted according to `base._TextBox`'s defaults. This will pick up values set by `widget_defaults` but doesn't allow individual customisation for this widget. This PR fixes this issue by allowing users to configure the appearance of text.
@@ -51,7 +51,10 @@ from libqtile.widget import base class LaunchBar(base._Widget): - """A widget that display icons to launch the associated command + """ + A widget that display icons to launch the associated command. + + Text will displayed when no icon is found. Widget requirements: pyxdg_. @@ -72,9 +75,13 @@ class LaunchBar(base._Widget): ("padding", 2, "Padding between icons"), ( "default_icon", - "/usr/share/icons/oxygen/256x256/mimetypes/" "application-x-executable.png", + "/usr/share/icons/oxygen/256x256/mimetypes/application-x-executable.png", "Default icon not found", ), + ("font", "sans", "Text font"), + ("fontsize", None, "Font pixel size. Calculated if None."), + ("fontshadow", None, "Font shadow color, default is None (no shadow)"), + ("foreground", "#ffffff", "Text colour."), ] def __init__(self, progs=None, width=bar.CALCULATED, **config): @@ -124,10 +131,10 @@ class LaunchBar(base._Widget): textbox._configure(self.qtile, self.bar) textbox.layout = self.drawer.textlayout( textbox.text, - textbox.foreground, - textbox.font, - textbox.fontsize, - textbox.fontshadow, + self.foreground, + self.font, + self.fontsize, + self.fontshadow, markup=textbox.markup, ) # the name will be displayed
BUG: Add missing DECREF in Py2 int() cast The Long number is downcast to int if possible, so the old version has to be DECREF'ed. Thanks to Johannes Barthelmes for bisecting the offending commit. Closes
@@ -1424,7 +1424,11 @@ static PyObject * #ifndef NPY_PY3K /* Invoke long.__int__ to try to downcast */ + { + PyObject *before_downcast = long_result; long_result = Py_TYPE(long_result)->tp_as_number->nb_int(long_result); + Py_DECREF(before_downcast); + } #endif return long_result;
Fix ctrl-\ behavior This commit fixes two issues with ctrl-\ when using IPython on Linux: (1) previously, pressing ctrl-\ would make IPython exit without resetting the terminal configuration; (2) IPython users could not override the behavior of ctrl-\ using `signal.signal(signal.SIGQUIT, ...)` as they would in other terminal apps.
import signal import sys import re +import os from typing import Callable @@ -56,7 +57,7 @@ def reformat_and_execute(event): & insert_mode ))(reformat_and_execute) - kb.add('c-\\')(force_exit) + kb.add("c-\\")(quit) kb.add('c-p', filter=(vi_insert_mode & has_focus(DEFAULT_BUFFER)) )(previous_history_or_previous_completion) @@ -458,10 +459,15 @@ def reset_search_buffer(event): def suspend_to_bg(event): event.app.suspend_to_background() -def force_exit(event): +def quit(event): """ - Force exit (with a non-zero return value) + On platforms that support SIGQUIT, send SIGQUIT to the current process. + On other platforms, just exit the process with a message. """ + sigquit = getattr(signal, "SIGQUIT", None) + if sigquit is not None: + os.kill(0, signal.SIGQUIT) + else: sys.exit("Quit") def indent_buffer(event):
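To illustrate the second point, a minimal, hypothetical sketch (POSIX-only; the handler name and behavior are made up): once Ctrl-\ is forwarded as a real SIGQUIT, a handler installed the usual way takes effect instead of a hard exit.

```python
import os
import signal

def ignore_quit(signum, frame):
    # Hypothetical user handler; overrides the default "dump core and exit".
    print("SIGQUIT caught; staying alive")

signal.signal(signal.SIGQUIT, ignore_quit)
os.kill(os.getpid(), signal.SIGQUIT)  # roughly what pressing Ctrl-\ now triggers
```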
Fix missing call to logger converter. If logger was not set and left to the default string 'bar', then it would have crashed when it tried to call `logger` as a function. See VideoClip.py:376 for the correct equivalent.
@@ -6,6 +6,7 @@ out of VideoClips import subprocess as sp import os import numpy as np +from proglog import proglog from moviepy.compat import PY3, DEVNULL from moviepy.config import get_setting @@ -200,6 +201,8 @@ def ffmpeg_write_video(clip, filename, fps, codec="libx264", bitrate=None, """ Write the clip to a videofile. See VideoClip.write_videofile for details on the parameters. """ + logger = proglog.default_bar_logger(logger) + if write_logfile: logfile = open(filename + ".log", 'w+') else:
Bump cudf/cuml to 21.12 This is the newest version that supports both CUDA 11.X & Python 3.7
@@ -89,7 +89,7 @@ RUN conda config --add channels nvidia && \ # b/232247930: uninstall pyarrow to avoid double installation with the GPU specific version. RUN pip uninstall -y pyarrow && \ - conda install cudf=21.10 cuml=21.10 cudatoolkit=$CUDA_MAJOR_VERSION.$CUDA_MINOR_VERSION && \ + conda install cudf=21.12 cuml=21.12 cudatoolkit=$CUDA_MAJOR_VERSION.$CUDA_MINOR_VERSION && \ /tmp/clean-layer.sh {{ end }}
core: fix Dispatcher race introduced in It must be constructed before messages are pumped.
@@ -2214,6 +2214,7 @@ class ExternalContext(object): if self.config.get('setup_stdio', True): self._setup_stdio() + self.dispatcher = Dispatcher(self) self.router.register(self.parent, self.stream) self.log_handler.uncork() @@ -2222,7 +2223,6 @@ class ExternalContext(object): self.parent, mitogen.context_id, os.getpid()) _v and LOG.debug('Recovered sys.executable: %r', sys.executable) - self.dispatcher = Dispatcher(self) self.dispatcher.run() _v and LOG.debug('ExternalContext.main() normal exit') except KeyboardInterrupt:
Update nasa-soho-comet-challenge-on-aws.yaml Added publications to the dataset listing
@@ -18,3 +18,12 @@ DataAtWork: Tutorials: Tools & Applications: Publications: + - Title: Topcoder NASA Comet Discovery: A Recap + URL: https://www.youtube.com/watch?v=E4OxTaqTP6E + AuthorName: TopCoder + - Title: Winners Selected for the NASA SOHO Comet Search with Artificial Intelligence Open-Science Challenge + URL: https://science.nasa.gov/science-news/winners-selected-for-the-nasa-soho-comet-search-with-artificial-intelligence-open-science-challenge + AuthorName: Denise Hill + - Title: Topcoder Challenge Finds Two New Comets For NASA + URL: https://www.topcoder.com/blog/topcoder-challenge-finds-two-new-comets-for-nasa/ + AuthorName: Annika Nagy
Fix OneAccess.TDRE.get_metrics script HG-- branch : feature/microservices
@@ -70,7 +70,7 @@ class Script(GetMetricsScript): ): self.set_metric( name=self.SLA_ICMP_RTT, - value=delay, + value=int(delay * 1000000), ts=ts, tags={"probe": name} ) @@ -83,7 +83,7 @@ class Script(GetMetricsScript): ): self.set_metric( name=self.SLA_UDP_RTT, - value=delay, + value=int(delay * 1000000), ts=ts, tags={"probe": name} ) @@ -95,7 +95,7 @@ class Script(GetMetricsScript): ): self.set_metric( name=self.SLA_JITTER, - value=jitter, + value=int(jitter * 1000000), ts=ts, tags={"probe": name} )
help_docs: Update `add-or-remove-users-from-a-stream` help doc. Uses new `select-stream-view-subscribers.md` in instructions. Also, adds a tip to bulk add users to a stream.
@@ -17,6 +17,8 @@ to a stream][configure-invites]. 1. Select a stream. +{!select-stream-view-subscribers.md!} + 1. Under **Add subscribers**, enter a name or email address. The typeahead will only include users who aren't already subscribed to the stream. @@ -24,6 +26,11 @@ to a stream][configure-invites]. {end_tabs} +!!! tip "" + + To add users in bulk, you can copy members from an + existing stream or [user group](/help/user-groups). + ## Remove users from a stream {!admin-only.md!} @@ -39,6 +46,8 @@ including streams the admin is not subscribed to. 1. Select a stream. +{!select-stream-view-subscribers.md!} + 1. Under **Subscribers**, find the user you would like to remove. 1. Click the **Unsubscribe** button in that row.
Add st_birthtime to struct stat This is available on OS X, FreeBSD and NetBSD. On Linux, the definition itself will not result in any errors. However, code that does attempt to use st_birthtime under Linux will now fail at C compile time rather than Cython compile time.
@@ -18,6 +18,11 @@ cdef extern from "<sys/stat.h>" nogil: time_t st_mtime time_t st_ctime + # st_birthtime exists on *BSD and OS X. + # Under Linux, defining it here does not hurt. Compilation under Linux + # will only (and rightfully) fail when attempting to use the field. + time_t st_birthtime + # POSIX prescribes including both <sys/stat.h> and <unistd.h> for these cdef extern from "<unistd.h>" nogil: int fchmod(int, mode_t)
snow update link * snow update link * Update README.md added more cmd examples
@@ -5,7 +5,7 @@ IT service management. Cortex XSOAR interfaces with ServiceNow to help streamlin - Query ServiceNow data with the ServiceNow query syntax. - Manage Security Incident Response (SIR) tickets with Cortex XSOAR, update tickets and enrich them with data. -Please refer to ServiceNow documentation for additional information. We especially recommend the [Operators available for filters and queries](https://docs.servicenow.com/bundle/istanbul-servicenow-platform/page/use/common-ui-elements/reference/r_OpAvailableFiltersQueries.html) page. +Please refer to ServiceNow documentation for additional information. We especially recommend the [Operators available for filters and queries](https://docs.servicenow.com/bundle/sandiego-platform-user-interface/page/use/common-ui-elements/reference/r_OpAvailableFiltersQueries.html?cshalt=yes) page. This integration was integrated and tested with the Orlando version of ServiceNow. @@ -986,6 +986,7 @@ Queries the specified table in ServiceNow. #### Command Example ```!servicenow-query-table table_name=alm_asset fields=asset_tag,sys_updated_by,display_name query=display_nameCONTAINSMacBook limit=4``` +```!servicenow-query-table table_name=sys_journal_field query=element_id=<SYS_ID>^ORDERBYsys_created_on limit=10 fields=value,name,element,sys_created_by,sys_created_on``` #### Context Example ```
hw DeviceMgr: speed up client_for_keystore() for the common case This method is often called when there is already an existing paired client for the keystore, in which case we can avoid scan_devices(), which would needlessly take several seconds.
@@ -511,10 +511,16 @@ class DeviceMgr(ThreadJob): if handler is None: raise Exception(_("Handler not found for") + ' ' + plugin.name + '\n' + _("A library is probably missing.")) handler.update_status(False) + pcode = keystore.pairing_code() + client = None + # search existing clients first (fast-path) + if not devices: + client = self.client_by_pairing_code(plugin=plugin, pairing_code=pcode, handler=handler, devices=[]) + # search clients again, now allowing a (slow) scan + if client is None: if devices is None: devices = self.scan_devices() - client = self.client_by_pairing_code( - plugin=plugin, pairing_code=keystore.pairing_code(), handler=handler, devices=devices) + client = self.client_by_pairing_code(plugin=plugin, pairing_code=pcode, handler=handler, devices=devices) if client is None and force_pair: try: info = self.select_device(plugin, handler, keystore, devices,
pull all services in parallel This is done by default now
@@ -241,7 +241,7 @@ case $CMD in esac if [ "$CMD" == "test" ]; then - docker-compose pull --parallel couch postgres redis elasticsearch kafka riakcs + docker-compose pull docker-compose run --rm web run_tests "${TEST:-python}" "$@" elif [ "$CMD" == "shell" ]; then docker-compose run --rm web ./manage.py $CMD "$@"
Put get_questions on a long timeout It varies by form source, so this should be safe to do. I tested this out locally and it does invalidate properly, and interestingly, if you change the form and then change it back, it'll hit the original cache, since the source is the same.
@@ -1212,7 +1212,8 @@ class FormBase(DocumentSchema): return xform.render() @time_method() - @quickcache(['self.source', 'langs', 'include_triggers', 'include_groups', 'include_translations']) + @quickcache(['self.source', 'langs', 'include_triggers', 'include_groups', 'include_translations'], + timeout=24 * 60 * 60) def get_questions(self, langs, include_triggers=False, include_groups=False, include_translations=False): try:
BALD's isnan check is too late Apply the isnan check per entropy term; the current check happens too late.
@@ -208,14 +208,21 @@ def get_bald_scores(logits, masks): log_probs = jax.nn.log_softmax(logits) probs = jax.nn.softmax(logits) + weighted_nats = -probs * log_probs + weighted_nats = jnp.where(jnp.isnan(weighted_nats), 0, weighted_nats) + + marginal_entropy = jnp.mean(jnp.sum(weighted_nats, axis=-1), axis=1) + marginal_log_probs = jax.nn.logsumexp(log_probs, axis=1) - jnp.log(ens_size) marginal_probs = jnp.mean(probs, axis=1) - entropy_marginal = jnp.sum(-marginal_probs * marginal_log_probs, axis=-1) - marginal_entropy = jnp.mean(jnp.sum(-probs * log_probs, axis=-1), axis=1) - bald = entropy_marginal - marginal_entropy + weighted_marginal_nats = -marginal_probs * marginal_log_probs + weighted_marginal_nats = jnp.where(jnp.isnan(weighted_marginal_nats), 0, weighted_marginal_nats) + + entropy_marginal = jnp.sum(weighted_marginal_nats, axis=-1) - bald = jnp.where(jnp.isnan(bald), 0, bald) + # Mask results. + bald = entropy_marginal - marginal_entropy bald = jnp.where(masks, bald, NINF_SCORE) return bald
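A small, self-contained illustration of why the per-term masking above has to happen before the entropy sums (the probability values are made up):

```python
import numpy as np

probs = np.array([0.6, 0.4, 0.0])        # one class has exactly zero probability
log_probs = np.log(probs)                # log(0) -> -inf (numpy warns but proceeds)
weighted_nats = -probs * log_probs       # 0 * -inf -> nan in the last slot
print(weighted_nats)                     # [0.306... 0.366... nan]

# Masking each term *before* summing, as the fix does, recovers the true entropy;
# checking isnan only on the final BALD score would be too late.
weighted_nats = np.where(np.isnan(weighted_nats), 0.0, weighted_nats)
print(weighted_nats.sum())               # ~0.673 nats
```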
TST: set up TestMetaLabels Cleaned up the TestMetaLabels class.
"""Tests the pysat MetaLabels object.""" import logging +import numpy as np import pytest import pysat class TestMetaLabels(object): - """Basic unit tests for the MetaLabels class.""" - + """Unit and integration tests for the MetaLabels class.""" def setup(self): """Set up the unit test environment for each method.""" - self.testInst = pysat.Instrument('pysat', 'testing') - self.stime = pysat.instruments.pysat_testing._test_dates[''][''] - self.meta_labels = self.testInst.meta.labels - - self.label_dict = {'units': ('Units', str), - 'name': ('Long_Name', str)} - self.dval = None - self.default_name = ['long_name'] - self.default_nan = ['fill', 'value_min', 'value_max'] - self.default_val = {'notes': '', 'units': '', 'desc': ''} - self.frame_list = ['dummy_frame1', 'dummy_frame2'] + testInst = pysat.Instrument('pysat', 'testing') + self.meta_labels = testInst.meta.labels + self.meta = pysat.Meta() + return def teardown(self): """Clean up the unit test environment after each method.""" - del self.testInst, self.meta, self.stime, self.meta_labels - del self.default_name, self.default_nan, self.default_val, self.dval - del self.frame_list + del self.meta, self.meta_labels return # -----------------------
api/pupdevices/Light: drop blink, animate External single-color lights won't have these for now.
@@ -556,7 +556,3 @@ Light .. automethod:: pybricks.pupdevices.Light.on .. automethod:: pybricks.pupdevices.Light.off - - .. automethod:: pybricks.pupdevices.Light.blink - - .. automethod:: pybricks.pupdevices.Light.animate
Fix padding type mismatch in gshard builder Add num_packed_examples and batch_utilized_ratio eval metrics for gshard models
@@ -2359,7 +2359,8 @@ class UniTransformer(base_model.BaseTask): def _ComputeNonPadding(self, input_batch): if 'paddings' in input_batch.tgt: - return 1.0 - input_batch.tgt.paddings + return tf.cast(1.0 - input_batch.tgt.paddings, + py_utils.FPropDtype(self.params)) non_padding = tf.cast( tf.not_equal(input_batch.tgt.segment_ids, 0), @@ -2455,9 +2456,11 @@ class UniTransformer(base_model.BaseTask): tf.reduce_sum(non_padding)) avg_loss += p.aux_loss_coef * aux_loss - non_padding_nor_eos = tf.where( - tf.equal(input_batch.tgt.labels, 1), - tf.zeros_like(non_padding, dtype=non_padding.dtype), non_padding) + num_items_in_batch = tf.reduce_sum( + tf.reduce_max(input_batch.tgt.segment_ids, axis=1)) + num_nonpadding = tf.reduce_sum( + _ToInt32(tf.not_equal(input_batch.tgt.segment_ids, 0))) + batch_capacity = tf.size(input_batch.tgt.labels) whole_tgt_correct = tf.cast( tf.equal( @@ -2471,12 +2474,11 @@ class UniTransformer(base_model.BaseTask): } eval_metrics = { + 'num_packed_examples': (num_items_in_batch, 1.0), + 'batch_utilized_ratio': (num_nonpadding / batch_capacity, 1.0), 'acc1': (tf.reduce_sum(acc1 * non_padding) / tf.reduce_sum(non_padding), tf.reduce_sum(non_padding)), - 'acc1_excluding_eos': (tf.reduce_sum(acc1 * non_padding_nor_eos) / - tf.reduce_sum(non_padding_nor_eos), - tf.reduce_sum(non_padding_nor_eos)), 'whole_tgt_accuracy': (tf.reduce_sum(whole_tgt_correct) / tf.cast(whole_tgt_correct.shape[0], whole_tgt_correct.dtype), 1.0
Remove suggestion to switch to FastAPI branch Branch does not exist
@@ -28,7 +28,6 @@ Download this repo and install the dependencies: git clone https://github.com/lnbits/lnbits-legend.git cd lnbits-legend/ # ensure you have virtualenv installed, on debian/ubuntu 'apt install python3-venv' should work -# for now you'll need to `git checkout FastAPI` python3 -m venv venv ./venv/bin/pip install -r requirements.txt cp .env.example .env
Store windows in a list instead of a set, fixes Since a set is unordered, the output of TaskList-like widgets was unpredictable. Before: window 4 | window 1 | window 3 | window 2 | After: window 1 | window 2 | window 3 | window 4 |
@@ -44,7 +44,7 @@ class _Group(CommandObject): self.name = name self.label = name if label is None else label self.custom_layout = layout # will be set on _configure - self.windows = set() + self.windows = [] self.qtile = None self.layouts = [] self.floating_layout = None @@ -61,7 +61,7 @@ class _Group(CommandObject): self.screen = None self.current_layout = 0 self.focus_history = [] - self.windows = set() + self.windows = [] self.qtile = qtile self.layouts = [i.clone(self) for i in layouts] self.floating_layout = floating_layout @@ -235,7 +235,8 @@ class _Group(CommandObject): def add(self, win, focus=True, force=False): hook.fire("group_window_add", self, win) - self.windows.add(win) + if win not in self.windows: + self.windows.append(win) win.group = self if self.qtile.config.auto_fullscreen and win.wants_to_fullscreen: win._float_state = FloatStates.FULLSCREEN
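A tiny illustrative sketch of the ordering difference (window names are invented), mirroring the duplicate check added in `add()`:

```python
windows = set()
for name in ["window 1", "window 2", "window 3", "window 4"]:
    windows.add(name)
print(" | ".join(windows))   # arbitrary, hash-dependent order

ordered = []
for name in ["window 1", "window 2", "window 3", "window 4"]:
    if name not in ordered:  # keeps list membership unique, like the set did
        ordered.append(name)
print(" | ".join(ordered))   # insertion order: window 1 | window 2 | window 3 | window 4
```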
change: add a function in .cli.parse_args to get parser and parsed result Add a function .cli.parse_args.parse to get an argument parser and its parsed result.
@@ -97,4 +97,13 @@ def make_parser(defaults: typing.Optional[typing.Dict] = None help='Verbose mode; -v or -vv (more verbose)') return apsr + +def parse(argv: typing.List[str] + ) -> typing.Tuple[argparse.ArgumentParser, argparse.Namespace]: + """ + Parse given arguments ``argv`` and return it with the parser. + """ + psr = make_parser() + return (psr, psr.parse_args(argv)) + # vim:sw=4:ts=4:et:
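A self-contained sketch of how the new helper composes with `make_parser()`; the stand-in parser below defines only the verbose option visible in the context lines, everything else about the real parser is an assumption:

```python
import argparse
import typing


def make_parser() -> argparse.ArgumentParser:
    # Stand-in for the project's make_parser(); the real one defines more options.
    apsr = argparse.ArgumentParser()
    apsr.add_argument("-v", "--verbose", action="count", default=0,
                      help="Verbose mode; -v or -vv (more verbose)")
    return apsr


def parse(argv: typing.List[str]
          ) -> typing.Tuple[argparse.ArgumentParser, argparse.Namespace]:
    """Parse given arguments ``argv`` and return it with the parser."""
    psr = make_parser()
    return (psr, psr.parse_args(argv))


psr, args = parse(["-vv"])
print(args.verbose)  # -> 2
```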
email tooltip: Adjust background color of email tooltip in dark-mode. This commit adjusts the email tooltip of the popover in dark-mode.
@@ -399,6 +399,14 @@ body.dark-mode #out-of-view-notification { border: 1px solid 1px solid hsl(144, 45%, 62%); } +body.dark-mode .email_tooltip { + background-color: #404c59; +} + +body.dark-mode .email_tooltip:after { + border-bottom-color: #404c59 !important; +} + @-moz-document url-prefix() { body.dark-mode #settings_page select { background-color: hsla(0, 0%, 0%, 0.2);
Skip install prereqs when installing stable The current stable release (1.8.8) breaks an existing Docker 1.13 install with `--install-prereqs`. See
@@ -333,7 +333,9 @@ class VpcClusterUpgradeTest: with logger.scope("install dcos"): # Use the CLI installer to set exhibitor_storage_backend = zookeeper. - test_util.cluster.install_dcos(cluster, self.stable_installer_url, api=False, + # Don't install prereqs since stable breaks Docker 1.13. See + # https://jira.mesosphere.com/browse/DCOS_OSS-743. + test_util.cluster.install_dcos(cluster, self.stable_installer_url, api=False, install_prereqs=False, add_config_path=self.config_yaml_override_install) master_list = [h.private_ip for h in cluster.masters]
Update issue templates A little sizing and reordering.
@@ -9,28 +9,32 @@ assignees: '' <!--- Please search existing bugs before creating a new one --> -# Bug Report -**Install Source:** + +### Environment +- **System OS:** <!--- Windows/OSX/Linux/Heroku/Docker --> + +- **Python Version:** <!--- Python Version can be found by running "py -V" --> + +- **Install Source:** <!--- Did you download from pip, the master branch on github, or a different branch?) --> -**Version:** +- **Version OR Commit hash:** <!--- If from pip, what is the version? Run "pip show spotdl" --> -**OR Commit hash:** <!--- If not from pip, what is the commit hash? --> -## Expected Behavior +------------------------------------------------------------ + +### Expected Behaviour <!--- What did you expect to happen? --> -## Actual Behavior +### Actual Behaviour <!--- What actually happened? --> -## Steps to Reproduce +### Steps to Reproduce 1. 2. 3. -## Environment -**System OS:** <!--- Windows/OSX/Linux/Heroku/Docker --> -**Python Version:** <!--- Python Version can be found by running "py -V" --> +-------------------------------------------------------------------- -## Any other information: +### Any other information:
Minor tweak to Tools.xml - correct referring text [ci skip] In two places a reference to a section is made using "below", implying the section is in the same doc, but Tools.xml definitions are distributed to both docs and this is incorrect for the User Guide. Use "in manpage" instead.
@@ -282,8 +282,8 @@ Note that the source files will be scanned according to the suffix mappings in the <classname>SourceFileScanner</classname> object. -See the section "Scanner Objects," -below, for more information. +See the manpage section "Scanner Objects" +for more information. </para> </summary> </builder> @@ -387,8 +387,8 @@ Note that the source files will be scanned according to the suffix mappings in the <classname>SourceFileScanner</classname> object. -See the section "Scanner Objects," -below, for more information. +See the manpage section "Scanner Objects" +for more information. </para> </summary> </builder>
exclude some caffe2 modules from libtorch mobile build Summary: Pull Request resolved: ghimport-source-id: Test Plan: verified libtorch mobile library builds and links successfully; Imported from OSS
@@ -59,14 +59,18 @@ endif() # addressed yet. if (NOT BUILD_ATEN_ONLY) - add_subdirectory(proto) - add_subdirectory(contrib) add_subdirectory(core) + add_subdirectory(proto) + add_subdirectory(serialize) add_subdirectory(utils) + add_subdirectory(perfkernels) + + # Skip modules that are not used by libtorch mobile yet. + if (NOT INTERN_BUILD_MOBILE OR BUILD_CAFFE2_MOBILE) + add_subdirectory(contrib) add_subdirectory(predictor) add_subdirectory(predictor/emulator) add_subdirectory(core/nomnigraph) - add_subdirectory(serialize) if (USE_NVRTC) add_subdirectory(cuda_rtc) endif() @@ -92,7 +96,6 @@ if(NOT BUILD_ATEN_ONLY) endif() endif() add_subdirectory(opt) - add_subdirectory(perfkernels) add_subdirectory(python) add_subdirectory(queue) add_subdirectory(sgd) @@ -100,6 +103,7 @@ if(NOT BUILD_ATEN_ONLY) # add_subdirectory(test) # todo: use caffe2_gtest_main instead of gtest_main because we will need to call GlobalInit add_subdirectory(transforms) endif() +endif() # Advanced: if we have white list specified, we will do intersections for all # main lib srcs.
Fix a bug dependent on glibc version I changed adam.py so that bugs that depend on the glibc version no longer occur.
@@ -76,8 +76,8 @@ class AdamRule(optimizer.UpdateRule): @property def lr(self): - fix1 = 1. - self.hyperparam.beta1 ** self.t - fix2 = 1. - self.hyperparam.beta2 ** self.t + fix1 = 1. - math.pow(self.hyperparam.beta1, self.t) + fix2 = 1. - math.pow(self.hyperparam.beta2, self.t) return self.hyperparam.alpha * math.sqrt(fix2) / fix1 @@ -116,6 +116,6 @@ class Adam(optimizer.GradientMethod): @property def lr(self): - fix1 = 1. - self.hyperparam.beta1 ** self.t - fix2 = 1. - self.hyperparam.beta2 ** self.t + fix1 = 1. - math.pow(self.hyperparam.beta1, self.t) + fix2 = 1. - math.pow(self.hyperparam.beta2, self.t) return self.hyperparam.alpha * math.sqrt(fix2) / fix1
update up.sh Fix up.sh to point to binpash
@@ -7,7 +7,7 @@ set -e # will install dependencies locally. PLATFORM=$(uname | tr '[:upper:]' '[:lower:]') -URL='https://github.com/andromeda/pash/archive/refs/heads/main.zip' +URL='https://github.com/binpash/pash/archive/refs/heads/main.zip' VERSION='latest' DL=$(command -v curl >/dev/null 2>&1 && echo curl || echo 'wget -qO-') @@ -21,7 +21,7 @@ if [ "$PLATFORM" = "darwin" ]; then fi set +e -git clone [email protected]:andromeda/pash.git +git clone [email protected]:binpash/pash.git if [ $? -ne 0 ]; then echo 'SSH clone failed; attempting HTTPS' git clone https://github.com/andromeda/pash.git
Update simulation.py To increase readability, make use of pandas index method `get_loc` instead of relying on `np.where`.
@@ -26,7 +26,7 @@ def get_dynamics(adata, key="fit", extrapolate=False, sorted=False, t=None): def compute_dynamics( adata, basis, key="true", extrapolate=None, sort=True, t_=None, t=None ): - idx = np.where(adata.var_names == basis)[0][0] if isinstance(basis, str) else basis + idx = adata.var_names.get_loc(basis) if isinstance(basis, str) else basis key = "fit" if f"{key}_gamma" not in adata.var_keys() else key alpha, beta, gamma, scaling, t_ = get_vars(adata[:, basis], key=key) @@ -95,7 +95,7 @@ def show_full_dynamics( ) (line,) = ax.plot(st, ut, color=color, linewidth=linewidth, label=label) - idx = np.where(adata.var_names == basis)[0][0] + idx = adata.var_names.get_loc(basis) beta, gamma = adata.var[f"{key}_beta"][idx], adata.var[f"{key}_gamma"][idx] xnew = np.linspace(np.min(st), np.max(st)) ynew = gamma / beta * (xnew - np.min(xnew)) + np.min(ut) @@ -135,7 +135,7 @@ def simulation( 1, ncols, pl.figure(None, (figsize[0] * ncols, figsize[1]), dpi=dpi) ) ): - idx = np.where(adata.var_names == var_names[i])[0][0] + idx = adata.var_names.get_loc(var_names[i]) alpha, ut, st = compute_dynamics(adata, idx) t = ( adata.obs[xkey]
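For readers unfamiliar with the pandas call used above, a quick self-contained comparison (the gene names are invented):

```python
import numpy as np
import pandas as pd

var_names = pd.Index(["Actb", "Gapdh", "Sox2"])

idx_old = np.where(var_names == "Gapdh")[0][0]  # previous approach
idx_new = var_names.get_loc("Gapdh")            # more readable equivalent
assert idx_old == idx_new == 1
```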
Fixes Ensure that if target_edges is a list, we don't run into an `UnboundLocalError` because `edges` is not defined
# # ================================================================================================ +try: + import collections.abc as abc +except ImportError: + import collections as abc + import dwave_networkx as dnx import networkx as nx @@ -90,9 +95,9 @@ def find_clique_embedding(k, m, n=None, t=None, target_edges=None): elif len(nodes) == 2: # If k == 2 we simply return two one-qubit chains that are the endpoints of a randomly sampled coupler. - if not isinstance(target_edges, list): - edges = list(target_edges) - edge = edges[random.randrange(len(edges))] + if not isinstance(target_edges, abc.Sequence): + target_edges = list(target_edges) + edge = random.choice(target_edges) embedding = [[edge[0]], [edge[1]]] else:
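A stripped-down sketch of the pattern the fix adopts (the function and edge values are illustrative): anything that is not already a sequence is materialized, so `random.choice` always has a concrete list to draw from and the old undefined-`edges` path disappears.

```python
import random
try:
    import collections.abc as abc
except ImportError:  # Python 2 fallback, mirroring the fix
    import collections as abc

def pick_edge(target_edges):
    if not isinstance(target_edges, abc.Sequence):
        target_edges = list(target_edges)   # e.g. a generator of edges
    return random.choice(target_edges)

print(pick_edge([(0, 4), (1, 5)]))                 # plain list works
print(pick_edge((e, e + 4) for e in range(2)))     # generator input also works
```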
Update ppo_cart_pole.gin with reward scaling and mini_batch_size=128, it will stably converge to 300 in 30 seconds
@@ -4,6 +4,10 @@ include 'ppo.gin' create_environment.env_name="CartPole-v0" create_environment.num_parallel_environments=8 +# reward scaling +ActorCriticAlgorithm.reward_shaping_fn = @reward_scaling +common.reward_scaling.scale = 0.01 + # algorithm config PPOLoss.entropy_regularization=1e-4 PPOLoss.gamma=0.98 @@ -16,7 +20,7 @@ create_ac_algorithm.learning_rate=0.001 # training config off_policy_trainer.train.mini_batch_length=1 off_policy_trainer.train.num_steps_per_iter=256 -off_policy_trainer.train.mini_batch_size=256 +off_policy_trainer.train.mini_batch_size=128 off_policy_trainer.train.num_iterations=1000 off_policy_trainer.train.summary_interval=5 off_policy_trainer.train.checkpoint_interval=100000
Update test_ec2_role_crud to match recent aws auth resp parameters See current response parameters at: This behavior was changed in vault 0.9.6. See: "[...] to keep consistency with input and output, when reading a role the binds will now be returned as string arrays rather than strings."
@@ -854,21 +854,21 @@ class IntegrationTest(TestCase): assert ('qux' in roles['data']['keys']) foo_role = self.client.get_ec2_role('foo') - assert (foo_role['data']['bound_ami_id'] == 'ami-notarealami') + assert (foo_role['data']['bound_ami_id'] == ['ami-notarealami']) assert ('ec2rolepolicy' in foo_role['data']['policies']) bar_role = self.client.get_ec2_role('bar') - assert (bar_role['data']['bound_account_id'] == '123456789012') + assert (bar_role['data']['bound_account_id'] == ['123456789012']) assert ('ec2rolepolicy' in bar_role['data']['policies']) baz_role = self.client.get_ec2_role('baz') - assert (baz_role['data']['bound_iam_role_arn'] == 'arn:aws:iam::123456789012:role/mockec2role') + assert (baz_role['data']['bound_iam_role_arn'] == ['arn:aws:iam::123456789012:role/mockec2role']) assert ('ec2rolepolicy' in baz_role['data']['policies']) qux_role = self.client.get_ec2_role('qux') assert ( - qux_role['data']['bound_iam_instance_profile_arn'] == 'arn:aws:iam::123456789012:instance-profile/mockprofile') + qux_role['data']['bound_iam_instance_profile_arn'] == ['arn:aws:iam::123456789012:instance-profile/mockprofile']) assert ('ec2rolepolicy' in qux_role['data']['policies']) # teardown
Add ?next= param for GitHub login This fixes by adding a `?next=/asd` query to the GitHub login button. It will redirect to the pointed resource after a successful log-in.
<div class="col-12 col-md-5"> <h4>For The Carpentries Instructors</h4> - <p><a class="btn btn-primary w-100" href="/login/github/"><i class="fab fa-github"></i> Log in with your GitHub account</a></p> + <p><a class="btn btn-primary w-100" href="{% url 'social:begin' 'github' %}{% if next %}?next={{ next }}{% endif %}"><i class="fab fa-github"></i> Log in with your GitHub account</a></p> <p>Having trouble logging in? Contact us at <a href="mailto:[email protected]">[email protected]</a>.</p> </div> </div>
[nixio] Add close() function Closes the underlying nix file and cleans up maps and read_block tracking. A __del__ method was added that calls the close function.
@@ -16,7 +16,7 @@ This IO supports both writing and reading of NIX files. Reading is supported only if the NIX file was created using this IO. """ -from __future__ import absolute_import, print_function +from __future__ import absolute_import import time from datetime import datetime @@ -1289,3 +1289,19 @@ class NixIO(BaseIO): strupdate(type(obj).__name__) return objhash.hexdigest() + + def close(self): + """ + Closes the open nix file and resets maps. + """ + if (hasattr(self, "nix_file") and + self.nix_file and self.nix_file.is_open()): + self.nix_file.close() + self.nix_file = None + self._object_map = None + self._lazy_loaded = None + self._object_hashes = None + self._block_read_counter = None + + def __del__(self): + self.close()
Add the proposal of the issue Add a balloon tip with the price break. Some information is still missing about when it is more convenient to buy at the next price break rather than your intended quantity.
@@ -1213,6 +1213,22 @@ def add_dist_to_worksheet(wks, wrk_formats, index, start_row, start_col, purch_qty=xl_rowcol_to_cell(row, purch_qty_col), qtys=','.join(qtys), prices=','.join(prices)), wrk_formats['currency']) + # Add comment if the price break + price_break_info = 'Price break:\n' + for count_price_break in range(0,len(qtys)): + if int(qtys[count_price_break]) != 0: # 0 qnty information is not userful to show. + price_break_info += qtys[count_price_break] + ' - ' + '$' + prices[count_price_break] + '\n' + wks.write_comment( row, unit_price_col, price_break_info[:-1]) + # Conditional format, if the price of the next price break is less than the actual + # unity price by the quantity chosen, put the unit price red. +# wks.conditional_format(row, unit_price_col, row, unit_price_col, { #TODO +# 'type': 'cell', +# 'criteria': '<=', +# 'value': xl_rowcol_to_cell(row, 7), +# # This is the global data cell holding the minimum unit price for this part. +# 'format': wrk_formats['best_price'] +# }) + # Conditionally format the unit price cell that contains the best price. wks.conditional_format(row, unit_price_col, row, unit_price_col, { 'type': 'cell',
[Stress Tester XFails] Remove and apple/swift#36943 The issues have been fixed
], "issueUrl" : "https://bugs.swift.org/browse/SR-8898" }, - { - "path" : "*\/MovieSwift\/MovieSwift\/MovieSwift\/views\/components\/bottomMenu\/BottomMenu.swift", - "issueDetail" : { - "kind" : "codeComplete", - "offset" : 1723 - }, - "applicableConfigs" : [ - "main" - ], - "issueUrl" : "https://bugs.swift.org/browse/SR-14494" - }, - { - "path" : "*\/MovieSwift\/MovieSwift\/Packages\/Backend\/Sources\/Backend\/services\/ImageService.swift", - "issueDetail" : { - "kind" : "codeComplete", - "offset" : 1093 - }, - "applicableConfigs" : [ - "main" - ], - "issueUrl" : "https://github.com/apple/swift/pull/36943" - }, { "path" : "*\/ACHNBrowserUI\/ACHNBrowserUI\/ACHNBrowserUI\/packages\/UI\/Sources\/UI\/ProgressView.swift", "issueDetail" : {
DOC: updated changelog Updated changelog with a description of the changes in this branch.
@@ -26,8 +26,11 @@ This project adheres to [Semantic Versioning](https://semver.org/). docstrings from an instantiated Instrument in an interactive session * Bug Fix * Fixed default MetaLabel specification in `pysat.utils.load_netcdf4` + * Fixed `parse_delimited_filename` output consistency and ability to handle + leading and trailing non-parsed text in filenames (e.g., file extensions) * Maintenance * Added missing unit tests for `pysat.utils.time` + * Added missing unit tests for `pysat.utils.file.parse_delimited_filename` [3.0.1] - 2021-XX-XX --------------------
Add link to GitHub discussion forum to docs Update "Getting help" topic to include a link to the new public discussion forum on GitHub.
@@ -19,6 +19,10 @@ limitations under the License. Thank you for your interest in Elyra! +### General questions + +Share your questions and ideas with the community in the [GitHub discussion forum](https://github.com/elyra-ai/elyra/discussions). + ### Create an issue or feature request If you encounter a problem or have suggestions for improvements please [open an issue on GitHub](https://github.com/elyra-ai/elyra/issues).