Remove unneeded check and add comments Removes the unneeded check for whether the cache is empty. Also adds a second comment about the format of the contents of the Redis cache.
@@ -30,6 +30,7 @@ AOC_WHITELIST = AOC_WHITELIST_RESTRICTED + (Channels.advent_of_code,) class AdventOfCode(commands.Cog): """Advent of Code festivities! Ho Ho Ho!""" + # Redis Cache for linking Discord IDs to Advent of Code usernames account_links = RedisCache() def __init__(self, bot: Bot): @@ -186,23 +187,9 @@ class AdventOfCode(commands.Cog): """ Link your Discord Account to your Advent of Code name. - Stored in a Redis Cache, Discord ID: Advent of Code Name + Stored in a Redis Cache with the format of `Discord ID: Advent of Code Name` """ cache_items = await self.account_links.items() - - # A short circuit in case the cache is empty - if len(cache_items) == 0 and aoc_name: - log.info(f"{ctx.author} ({ctx.author.id}) is now linked to {aoc_name}.") - await self.account_links.set(ctx.author.id, aoc_name) - await ctx.reply(f"You have linked your Discord ID to {aoc_name}.") - return - elif len(cache_items) == 0: - await ctx.reply( - "You have not linked an Advent of Code account." - "Please re-run the command with one specified." - ) - return - cache_aoc_name = [value for _, value in cache_items] if aoc_name:
(airline-demo-testability-2) Use file handle solids for sfo_weather_data Summary: Use the new fancy things. No behavior change. Test Plan: Run in dagit. Buildkite. Reviewers: max, natekupp
from dagster_aws.s3.resources import s3_resource from dagster_aws.s3.system_storage import s3_plus_default_storage_defs -from dagster_aws.s3.solids import put_object_to_s3_bytes, download_from_s3_to_bytes +from dagster_aws.s3.solids import put_object_to_s3_bytes + +from .mirror_keyed_file_from_s3 import mirror_keyed_file_from_s3 from .resources import postgres_db_info_resource, redshift_db_info_resource, spark_session_local from .solids import ( average_sfo_outbound_avg_delays_by_destination, delays_vs_fares, delays_vs_fares_nb, eastbound_delays, - ingest_csv_to_spark, + ingest_csv_file_handle_to_spark, load_data_to_database_from_spark, join_q2_data, process_sfo_weather_data, @@ -90,12 +92,12 @@ def process_on_time_data(context): @composite_solid(outputs=[OutputDefinition(name='table_name', dagster_type=String)]) -def sfo_weather_data(context): +def sfo_weather_data(_): return load_data_to_database_from_spark.alias('load_q2_sfo_weather')( process_sfo_weather_data( - context, - ingest_csv_to_spark.alias('ingest_q2_sfo_weather')( - download_from_s3_to_bytes.alias('download_q2_sfo_weather')() + _, + ingest_csv_file_handle_to_spark.alias('ingest_q2_sfo_weather')( + mirror_keyed_file_from_s3.alias('download_q2_sfo_weather')() ), ) )
Fixed js error in iframe_login This is a bug in a previous PR. There's no javascript after it, so it doesn't seem to stop the user from being able to log in, but still good to fix.
<script src="{% static 'blazy/blazy.js' %}"></script> <script> new Blazy({container: 'body'}); - var username = (new URLSearchParams(window.location.search)).get("username"); - if (username) { + var username = (new URLSearchParams(window.location.search)).get("username"), + element = document.getElementById('id_auth-username'); + if (username && element) { if (username.endsWith("commcarehq.org")) { username = username.split("@")[0]; } - document.getElementById('id_auth-username').value = username; + element.value = username; } </script> </body>
fix wrong link The previous link cannot be opened; this is probably the correct one
@@ -19,5 +19,5 @@ Resources
 * `Blog <https://browbeatproject.org>`_
 * `Twitter <https://twitter.com/browbeatproject>`_
 * `Code Review <https://review.openstack.org/#/q/project:openstack/browbeat>`_
-* `Git Web <https://review.openstack.org/gitweb?p=openstack/browbeat.git;a=summary>`_
+* `Git Web <https://git.openstack.org/cgit/openstack/browbeat>`_
 * `IRC <http://webchat.freenode.net/?nick=browbeat_user&channels=openstack-browbeat>`_ -- **#openstack-browbeat** (irc.freenode.net)
Add more details to the internal error for "worker cannot find registered function" This adds some more debug information for this internal error that shouldn't happen.
import dis import hashlib +import os import importlib import inspect import json @@ -405,7 +406,10 @@ class FunctionActorManager: warning_message = ( "This worker was asked to execute a " "function that it does not have " - "registered. You may have to restart " + f"registered ({function_descriptor}, " + f"node={self._worker.node_ip_address}, " + f"worker_id={self._worker.worker_id.hex()}, " + f"pid={os.getpid()}). You may have to restart " "Ray." ) if not warning_sent:
Allow user-defined kwargs passed to click.group Fixes
@@ -57,6 +57,8 @@ def group( short_help: str = None, options_metavar: str = '[OPTIONS]', add_help_option: bool = True, + # User-defined + **kwargs: Any, ) -> _Decorator: ...
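The stub change only widens the accepted signature. As a hedged illustration of why that matters (ordinary click usage invented for this note, not code from the repository), extra keyword arguments such as `invoke_without_command` are forwarded to the underlying `Group`, and without `**kwargs` in the annotation a type checker rejects the call even though it works at runtime:

```python
import click

# invoke_without_command is a standard click.Group option; with the old stub
# (no **kwargs) a type checker would flag this decorator call.
@click.group(invoke_without_command=True)
def cli():
    """Top-level command group."""

@cli.command()
def status():
    click.echo("ok")

if __name__ == "__main__":
    cli()
```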
Orders trustees by id Orders trustees by id to guarantee access order in the "freeze" method
@@ -1171,7 +1171,7 @@ class Trustee(HeliosModel):
 
   @classmethod
   def get_by_election(cls, election):
-    return cls.objects.filter(election = election)
+    return cls.objects.filter(election = election).order_by('id')
 
   @classmethod
   def get_by_uuid(cls, uuid):
Fix typo Fix "contorls" to "controls" in window_text docstring
@@ -307,7 +307,7 @@ class BaseWrapper(object):
         """
         Window text of the element
 
-        Quite a few contorls have other text that is visible, for example
+        Quite a few controls have other text that is visible, for example
         Edit controls usually have an empty string for window_text but still
         have text displayed in the edit window.
         """
Minor relocation of badge No more info needed
![Instabot is better that other open-source bots!](https://github.com/instagrambot/instabot/blob/master/docs/img/tag%20instabot.png "Instabot is better that other open-source bots!") Cool Instagram scripts for promotion and API wrapper. Written in Python. ___ +[![Telegram Chat](https://patrolavia.github.io/telegram-badge/chat.png)](https://t.me/joinchat/AAAAAEHxHAtKhKo4X4r7xg) + As you may know, Instagram closed its API in summer 2016. This Python module can do the same things without any effort. Also it has lots of [example scripts](https://github.com/ohld/instabot/tree/master/examples) to start with. If you have any ideas, please, leave them in [Issues section](https://github.com/ohld/instabot/issues) or in our [Telegram chat](https://t.me/joinchat/AAAAAEHxHAtKhKo4X4r7xg). -[![Telegram Chat](https://patrolavia.github.io/telegram-badge/chat.png)](https://t.me/joinchat/AAAAAEHxHAtKhKo4X4r7xg) - -*Your __contribution__ and support through __Stars__ will be highly appreciated.* +*Your __contribution__ and support through __stars__ will be highly appreciated.* ## How to install and update
{Compute} Doc fix for 'vm user delete' Clarify that 'vm user delete' also removes the home directory on Linux systems.
@@ -1950,6 +1950,8 @@ short-summary: Manage user accounts for a VM.
 helps['vm user delete'] = """
 type: command
 short-summary: Delete a user account from a VM.
+long-summary: >
+    Also deletes the user home directory on Linux VMs.
 examples:
   - name: Delete a user account.
     text: az vm user delete -u username -n MyVm -g MyResourceGroup
Adds the job user and team This is required as some challenge admins want to see the teams for each job in order to download the supplementary file.
{% extends "site.html" %} {% load evaluation_extras %} +{% load user_profile_link from profiles %} {% load guardian_tags %} {% load url from grandchallenge_tags %} <thead> <tr> <th>ID</th> + {% if "change_challenge" in challenge_perms %} + <th>User</th> + {% endif %} <th>Created</th> <th>Updated</th> <th>Status</th> {% for job in object_list %} <tr> <td>{{ job.id }}</td> + {% if "change_challenge" in challenge_perms %} + <td> + {{ job.submission.creator|user_profile_link }} + + {% if site.evaluation_config.use_teams %} + {% with job.result|get_team_html as team_html %} + {% if team_html %} + ({{ team_html }}) + {% endif %} + {% endwith %} + {% endif %} + </td> + {% endif %} <td data-order="{{ job.created|date:"U" }}">{{ job.created }}</td> <td data-order="{{ job.modified|date:"U" }}">{{ job.modified }}</td> <td>
add stdout as an output format for report subcommand Using --format=stdout now writes output to STDOUT in human-readable form, in addition to tabular/Excel/etc.
@@ -41,6 +41,7 @@ THE SOFTWARE. """ import os +import sys import numpy as np import pandas as pd @@ -123,6 +124,13 @@ def write_styled_html(path, df, index=None): ofh.write(html) +# Write a dataframe to STDOUT +def write_to_stdout(stem, df, index=None, line_width=None): + """Write dataframe in tab-separated form to STDOUT.""" + sys.stdout.write("TABLE: %s\n" % stem) + sys.stdout.write(df.to_string(index=index, line_width=line_width) + '\n\n') + + # Write a table returned from the pyani database in the requested format def write_dbtable(data, headers, path=None, formats=('tab',), index=False): """Write database result table to output file in named format.""" @@ -131,7 +139,9 @@ def write_dbtable(data, headers, path=None, formats=('tab',), index=False): formatdict = {'tab': (df.to_csv, {'sep': '\t', 'index': False}, '.tab'), 'excel': (df.to_excel, {'index': False}, '.xlsx'), 'html': (write_styled_html, {'df': df, 'index': index}, - '.html')} + '.html'), + 'stdout': (write_to_stdout, {'df': df, 'index': False}, '') + } for format in formats: func, args, ext = formatdict[format] ofname = path + ext
chore: correct region tag in submit_job_to_cluster.py Change region tag to make it unique. The previous tag was used in another create cluster file and caused problems with automation tools.
@@ -85,7 +85,7 @@ def download_output(project, cluster_id, output_bucket, job_id): return bucket.blob(output_blob).download_as_string() -# [START dataproc_create_cluster] +# [START dataproc_submit_job_create_cluster] def create_cluster(dataproc, project, zone, region, cluster_name): """Create the cluster.""" print("Creating cluster...") @@ -110,7 +110,7 @@ def create_cluster(dataproc, project, zone, region, cluster_name): waiting_callback = True -# [END dataproc_create_cluster] +# [END dataproc_submit_job_create_cluster] def callback(operation_future):
wallet.get_request_by_addr: make deterministic This makes test_invoices/test_wallet_get_request_by_addr pass without flakyness. closes
@@ -2355,8 +2355,13 @@ class Abstract_Wallet(ABC, Logger, EventListener): if not req.is_lightning() or self.lnworker.get_invoice_status(req) == PR_UNPAID] if not reqs: return None - # note: there typically should not be more than one relevant request for an address - return reqs[0] + # note: There typically should not be more than one relevant request for an address. + # If there's multiple, return the one created last (see #8113). Consider: + # - there is an old expired req1, and a newer unpaid req2, reusing the same addr (and same amount), + # - now req2 gets paid. however, get_invoice_status will say both req1 and req2 are PAID. (see #8061) + # - as a workaround, we return the request with the larger creation time. + reqs.sort(key=lambda req: req.get_time()) + return reqs[-1] def get_request(self, request_id: str) -> Optional[Invoice]: return self._receive_requests.get(request_id)
Update Update desc
no_log_contains: id "942190" - test_title: 942190-40 - desc: "MSSQL Logical Functions - IIF (Transact-SQL)" + desc: "MSSQL Logical Functions - IIF (Transact-SQL) - regression test" stages: - stage:
missing pipe [nodeploy]
@@ -19,7 +19,7 @@ fi echo "Starting devserver in new tmux session..." tmux new-session -d -s $session tmux new-window -t "$session:1" -n gae "dev_appserver.py --admin_host=0.0.0.0 --host=0.0.0.0 --datastore_path=/datastore/tba.db src/default.yaml src/web.yaml src/api.yaml src/dispatch.yaml 2>&1 | tee /var/log/tba.log; read" -tmux new-window -t "$session:2" -n gulp "gulp 2>&1 tee /var/log/gulp.log; read" +tmux new-window -t "$session:2" -n gulp "gulp 2>&1 | tee /var/log/gulp.log; read" if [ ! -z "$instance_name" ]; then echo "Starting Cloud SQL proxy to connect to $instance_name" tmux new-window -t "$session:3" -n sql "/cloud_sql_proxy -instances=$instance_name=tcp:3306 -credential_file=$auth_path | tee /var/log/sql.log; read"
Changes default "onBadFit" option to *nothing* (not even Robust+). This update to the default behavior of do_long_sequence_gst when a model doesn't fit the data is more conservative -- only do the special Robust+ or wildcard post-processing analysis when the user specificially requests it.
@@ -1329,7 +1329,7 @@ def _post_opt_processing(callerName, ds, target_model, mdl_start, lsgstLists, objective = advancedOptions.get('objective', 'logl') badFitThreshold = advancedOptions.get('badFitThreshold',DEFAULT_BAD_FIT_THRESHOLD) if ret.estimates[estlbl].misfit_sigma(evaltree_cache=evaltree_cache, comm=comm) > badFitThreshold: - onBadFit = advancedOptions.get('onBadFit',["wildcard"]) #["Robust+"]) # empty list => 'do nothing' + onBadFit = advancedOptions.get('onBadFit',[]) #["wildcard"]) #["Robust+"]) # empty list => 'do nothing' if len(onBadFit) > 0 and parameters.get('weights',None) is None:
Python API: override __nonzero__ for node wrappers TN:
@@ -790,6 +790,16 @@ class ${root_astnode_name}(object): ctypes.byref(result)) return ${root_astnode_name}._wrap(result) + def __nonzero__(self): + """ + Return always True so that checking a node against None can be done as + simply as:: + + if node: + ... + """ + return True + def __len__(self): """Return the number of ${root_astnode_name} children this node has.""" node = self._unwrap(self)
container-common: Enable docker on boot for Ubuntu The docker daemon is automatically started during package installation, but the service isn't enabled on boot.
tags: with_pkg - - name: start docker service - service: - name: docker - state: started - enabled: yes - tags: - with_pkg - - name: red hat 8 based systems tasks when: - ansible_distribution_major_version == '8' tags: with_pkg +- name: start docker service + service: + name: docker + state: started + enabled: yes + tags: + with_pkg + when: not (ansible_os_family == 'RedHat' and + ansible_distribution_major_version == '8') + - name: ensure tmpfiles.d is present lineinfile: path: /etc/tmpfiles.d/ceph-common.conf
Improve sentence parsing I've always parsed this sentence as "attrs comes with serious, business aliases". I just realized you probably meant srs bzns aliases and figured I'd clarify.
@@ -48,7 +48,7 @@ By default, all features are added, so you immediately have a fully functional d As shown, the generated ``__init__`` method allows for both positional and keyword arguments. -If playful naming turns you off, ``attrs`` comes with serious business aliases: +If playful naming turns you off, ``attrs`` comes with serious-business aliases: .. doctest::
Setup (Windows): Query inkscape install location correctly MSI installer writes install location in key HKLM\SOFTWARE\Microsoft\Windows\CurrentVersion\App Paths\inkscape.exe
@@ -117,14 +117,14 @@ goto FINAL :DETECT_INKSCAPE_LOCATION echo Trying to find Inkscape in Windows Registry... +rem Checking NSIS-Installer registry information rem Inkscape installation path is usually found in the registry -rem "SOFTWARE\Inkscape\Inkscape" -rem under HKLM (Local Machine -> machine wide installation) or -rem HKCU (Current User -> user installation) +rem "SOFTWARE\Inkscape\Inkscape" under HKLM (Local Machine -> +rem machine wide installation) or rem HKCU (Current User -> +rem user installation) if installed via NSIS exe installer. rem We also have to keep in mind that the values might be in the 32bit or 64bit rem version of the registry (i.e., under SOFTWARE\WOW6432Node\Inkscape\Inkscape rem or SOFTWARE\Inkscape\Inkscape) -rem This holds if Inkscape has been installed via via NSIS, not via MSI for %%R in (HKLM HKCU) do ( for %%T in (32 64) do ( rem Output of REG QUERY "KeyName" /ve is (first line is a blank line): @@ -136,7 +136,7 @@ for %%R in (HKLM HKCU) do ( rem so we skip the first two lines (skip=2) and then we take the second token rem and the reamining output (tokens=2*), so %%A is REG_SZ and %%B is the path rem even if it contains spaces (tokens are delimited by spaces) - echo Trying registry root %%R [%%T]... + echo Trying SOFTWARE\Inkscape\Inkscape in registry root %%R [%%T]... for /f "usebackq skip=2 tokens=2*" %%A in (`REG QUERY "%%R\SOFTWARE\Inkscape\Inkscape" /ve /reg:%%T 2^>nul`) do ( if exist %%B ( set INKSCAPE_DIR=%%B @@ -157,6 +157,33 @@ for %%R in (HKLM HKCU) do ( ) ) + +rem Checking MSI-Installer registry information +rem Inkscape installation path is usually found in the registry +rem under key "Path" in +rem SOFTWARE\Microsoft\Windows\CurrentVersion\App Paths\inkscape.exe +rem if installed via msi installer +for %%T in (32 64) do ( + echo Trying SOFTWARE\Microsoft\Windows\CurrentVersion\App Paths\inkscape.exe in registry root HKLM [%%T]... + for /f "usebackq skip=2 tokens=2*" %%A in (`REG QUERY "HKLM\SOFTWARE\Microsoft\Windows\CurrentVersion\App Paths\inkscape.exe" /v Path /reg:%%T 2^>nul`) do ( + if exist %%B ( + set INKSCAPE_DIR=%%B + ) + ) + if defined INKSCAPE_DIR ( + echo Inkscape considered to be installed in !INKSCAPE_DIR! + echo Setting executable path to !INKSCAPE_DIR! + if exist "!INKSCAPE_DIR!\!INKSCAPE_EXENAME!" ( + echo !INKSCAPE_DIR!\!INKSCAPE_EXENAME! found + echo. + goto INKSCAPE_FOUND + ) else ( + echo !INKSCAPE_DIR!\!INKSCAPE_EXENAME! not found + ) + ) +) + + rem If we did non succeed in the registry lets have a look rem at the most common install locations echo Trying the usual Windows install locations...
Quick syntax correction for clarity `''.join(srcCode)` is not really readable. On the other hand, `str.join('', srcCode)` is much better.
@@ -31,7 +31,7 @@ for file in allFiles: srcCode = f.readlines() # The last three lines are always the main() call srcCode = srcCode[:-3] - srcCode = ''.join(srcCode) + srcCode = str.join('', srcCode) module.__SRC_CODE = srcCode bmpSrcCode = highlight(srcCode, PythonLexer(), BmpImageFormatter())
Update language ID map
html: add text.html.ngx for angular files
shaderlab: these are supposedly Unity Shaderlab files
r: the R language server can also handle R-flavoured markdown files
xsl and xml: decouple them
In general, added repo links to thirdparty syntaxes
"bibtex": "text.bibtex", "cpp": "source.c++", "csharp": "source.cs", - "html": "embedding.php | text.html.basic", + "html": "embedding.php | text.html.basic | text.html.ngx", // https://github.com/princemaple/ngx-html-syntax "javascript": "source.js", - "javascriptreact": "source.jsx", // 3rdparty + "javascriptreact": "source.jsx", "jsonc": "source.json", "latex": "text.tex.latex", "markdown": "text.html.markdown", "objective-c": "source.objc", "objective-cpp": "source.objc++", "php": "source.php | embedding.php", + "r": "source.r | text.html.markdown.rmarkdown", // https://github.com/REditorSupport/sublime-ide-r "ruby": "text.html.ruby | source.ruby", - "shaderlab": "source.glsl | source.essl", // 3rdparty + "shaderlab": "source.unity_shader | source.shader", // https://github.com/petereichinger/Unity3D-Shader, https://github.com/waqiju/unity_shader_st3 "shellscript": "source.shell.bash", "typescript": "source.ts", "typescriptreact": "source.tsx", "txt": "text.plain", - "vue": "text.html.vue", // 3rdparty - "xml": "text.xml", - "xsl": "text.xml", // 3rdparty + "vue": "text.html.vue", // https://github.com/vuejs/vue-syntax-highlight + "xml": "text.xml - text.xml.xsl", + "xsl": "text.xml.xsl", // https://github.com/packagecontrol/XSL }
boot: Remove special case for pypy load failures There was a special case for Pypy in the handling of errors when loading components. It looks like in the years since it was written, it may have become unnecessary. Removing it leads to more helpful error messages, so... let's remove it?
@@ -104,9 +104,7 @@ def _do_import (name): message = str(sys.exc_info()[1].args[0]) s = message.rsplit(" ", 1) - # Sadly, PyPy isn't consistent with CPython here. - #TODO: Check on this behavior in pypy 2.0. - if s[0] == "No module named" and (name.endswith(s[1]) or __pypy__): + if s[0] == "No module named" and (name.endswith(s[1])): # It was the one we tried to import itself. (Case 1) # If we have other names to try, try them! return do_import2(base_name, names_to_try)
fix bug with --runtest where software or system packages were not showing due to a directory error. The if conditions in eb_menu were not set up properly.
@@ -32,7 +32,7 @@ import subprocess import time import glob -from buildtest.tools.config import BUILDTEST_ROOT +from buildtest.tools.config import BUILDTEST_ROOT, config_opts from buildtest.tools.menu import buildtest_menu def systempkg_menu(systempkg): @@ -199,6 +199,8 @@ def eb_menu(ebpkg): app_tc_set = set() + + # translate directory path into app name/version and toolchain name/version for item in testroot_set: # directory format $BUILDTEST_TESTDIR/ebapps/software/version, ebapp only 2 directories up @@ -224,8 +226,7 @@ def eb_menu(ebpkg): app_tc_set.add(app_ver+","+tcname_tcver) # directory format $BUILDTEST_TESTDIR/ebapps/software/version/package, ebapp only 3 directories up - - if os.path.basename(os.path.dirname(os.path.dirname(os.path.dirname(item)))) == "ebapp": + elif os.path.basename(os.path.dirname(os.path.dirname(os.path.dirname(item)))) == "ebapp": app = os.path.basename(os.path.dirname(os.path.dirname(item))) ver = os.path.basename(os.path.dirname(item)) @@ -235,7 +236,7 @@ def eb_menu(ebpkg): # directory format $BUILDTEST_TESTDIR/ebapps/software/version/toolchainname/toolchainver/package, ebapp only 5 directories up - if os.path.basename(os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(item)))))) == "ebapp": + elif os.path.basename(os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(item)))))) == "ebapp": app = os.path.basename(os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(item))))) ver = os.path.basename(os.path.dirname(os.path.dirname(os.path.dirname(item)))) @@ -438,11 +439,8 @@ def runtest_menu(): os.system("clear") - cwd = BUILDTEST_ROOT - testing = os.path.join(cwd,"testing") - systempkg = os.path.join(testing,"system") - ebpkg = os.path.join(testing,"ebapp") - + system_testdir = os.path.join(config_opts['BUILDTEST_TESTDIR'],"system") + software_testdir = os.path.join(config_opts['BUILDTEST_TESTDIR'],"ebapp") text = """ _________________________________________________________________________ |\ /| @@ -491,8 +489,8 @@ def runtest_menu(): # force userinput to be integer in case its float or something else userinput = int(userinput) if userinput == 1: - systempkg_menu(systempkg) + systempkg_menu(system_testdir) elif userinput == 2: - eb_menu(ebpkg) + eb_menu(software_testdir) else: print "Invalid Entry, please try again"
Fixes an "*" import in the middle of the code. Importing everything without namespace is a bad practice. Doing it outside module level is currently forbidden. Python 3.9.1 refuses to compile it. Flake8 reports: F406 'from kicost.kicost_gui import *' only allowed at module level.
@@ -63,7 +63,7 @@ class kicost_kicadplugin(ActionPlugin): bom_file = '' try: try: - from kicost.kicost_gui import * + from kicost.kicost_gui import kicost_gui kicost_gui(bom_file) # If KiCad and KiCost share the same Python installation. except ImportError: subprocess.call(('kicost', '--guide', bom_file), shell=True)
Update train.py remove redundant code
@@ -98,7 +98,6 @@ def main(train_data_file, test_data_file, vocab_file, target_file, emb_file,
     for pass_id in xrange(num_passes):
         chunk_evaluator.reset(exe)
         for data in train_reader():
-            print len(data)
             cost, batch_precision, batch_recall, batch_f1_score = exe.run(
                 fluid.default_main_program(),
                 feed=feeder.feed(data),
Split the empty cluster case from the normal case Cover normal usage of the get_brokers function
@@ -339,6 +339,8 @@ class TestZK(object): } assert actual_without_fetch_state == expected_without_fetch_state + def test_get_topics_empty_cluster(self, mock_client): + with ZK(self.cluster_config) as zk: zk.get_children = mock.Mock(side_effect=NoNodeError()) actual_with_no_node_error = zk.get_topics() expected_with_no_node_error = {} @@ -346,6 +348,29 @@ class TestZK(object): assert actual_with_no_node_error == expected_with_no_node_error def test_get_brokers(self, mock_client): + with ZK(self.cluster_config) as zk: + zk.get_children = mock.Mock( + return_value=[1, 2, 3], + ) + expected = {1: None, 2: None, 3: None} + actual = zk.get_brokers(names_only=True) + zk.get_children.assert_called_with("/brokers/ids") + assert actual == expected + + zk.get_children = mock.Mock( + return_value=[1, 2, 3], + ) + zk.get_broker_metadata = mock.Mock( + return_value='broker', + ) + expected = {1: 'broker', 2: 'broker', 3: 'broker'} + actual = zk.get_brokers() + zk.get_children.assert_called_with("/brokers/ids") + calls = zk.get_broker_metadata.mock_calls + zk.get_broker_metadata.assert_has_calls(calls) + assert actual == expected + + def test_get_brokers_empty_cluster(self, mock_client): with ZK(self.cluster_config) as zk: zk.get_children = mock.Mock(side_effect=NoNodeError()) actual_with_no_node_error = zk.get_brokers()
bootstrap_javascript use settings include_jquery tnx
@@ -282,7 +282,7 @@ def bootstrap_jquery(jquery='full'): @register.simple_tag -def bootstrap_javascript(jquery='falsy'): +def bootstrap_javascript(jquery=None): """ Return HTML for Bootstrap JavaScript. @@ -315,7 +315,7 @@ def bootstrap_javascript(jquery='falsy'): javascript_tags = [] # Set jquery value from setting or leave default. - jquery = get_bootstrap_setting('include_jquery') or jquery + jquery = jquery or get_bootstrap_setting('include_jquery', 'falsy') # Include jQuery if the option is passed if jquery != 'falsy':
tests/state_docs: clear registry before running the test. Make sure docs examples get consistent naming
@@ -3,6 +3,18 @@ import pytest import psyneulink as pnl import doctest +def clear_registry(): + from psyneulink.components.component import DeferredInitRegistry + from psyneulink.components.system import SystemRegistry + from psyneulink.components.process import ProcessRegistry + from psyneulink.components.mechanisms.mechanism import MechanismRegistry + from psyneulink.components.projections.projection import ProjectionRegistry + # Clear Registry to have a stable reference for indexed suffixes of default names + pnl.clear_registry(DeferredInitRegistry) + pnl.clear_registry(SystemRegistry) + pnl.clear_registry(ProcessRegistry) + pnl.clear_registry(MechanismRegistry) + pnl.clear_registry(ProjectionRegistry) def test_state_docs(): # get examples of mechanisms that can be used with GatingSignals/Mechanisms @@ -10,7 +22,8 @@ def test_state_docs(): def test_parameter_state_docs(): - fail, total = doctest.testmod(pnl.components.states.parameterstate) + clear_registry() + fail, total = doctest.testmod(pnl.components.states.parameterstate, globs={}) if fail > 0: pytest.fail("{} out of {} examples failed".format(fail, total),
Fix when filter working on POST HG-- branch : feature/microservices
@@ -65,6 +65,8 @@ class ExtFormatMiddleware(object):
     def process_request(self, request):
         if request.GET and request.GET.get("__format") == "ext":
             request.is_extjs = True
+        elif request.POST and request.POST.get("__format") == "ext":
+            request.is_extjs = True
         else:
             request.is_extjs = False
use addClassResourceCleanup in test_roles Replaces resource_cleanup with addClassResourceCleanup in test_roles. test_utils.call_and_ignore_notfound_exc is NOT used in resource_setup, as delete_role_from_user_on_project and similar calls do not delete the role; they just unassign the role from the target.
@@ -32,6 +32,8 @@ class RolesV3TestJSON(base.BaseIdentityV3AdminTest): for _ in range(3): role_name = data_utils.rand_name(name='role') role = cls.roles_client.create_role(name=role_name)['role'] + cls.addClassResourceCleanup(cls.roles_client.delete_role, + role['id']) cls.roles.append(role) u_name = data_utils.rand_name('user') u_desc = '%s description' % u_name @@ -42,25 +44,23 @@ class RolesV3TestJSON(base.BaseIdentityV3AdminTest): data_utils.rand_name('project'), description=data_utils.rand_name('project-desc'), domain_id=cls.domain['id'])['project'] + cls.addClassResourceCleanup(cls.projects_client.delete_project, + cls.project['id']) cls.group_body = cls.groups_client.create_group( name=data_utils.rand_name('Group'), project_id=cls.project['id'], domain_id=cls.domain['id'])['group'] + cls.addClassResourceCleanup(cls.groups_client.delete_group, + cls.group_body['id']) cls.user_body = cls.users_client.create_user( name=u_name, description=u_desc, password=cls.u_password, email=u_email, project_id=cls.project['id'], domain_id=cls.domain['id'])['user'] + cls.addClassResourceCleanup(cls.users_client.delete_user, + cls.user_body['id']) cls.role = cls.roles_client.create_role( name=data_utils.rand_name('Role'))['role'] - - @classmethod - def resource_cleanup(cls): - cls.roles_client.delete_role(cls.role['id']) - cls.groups_client.delete_group(cls.group_body['id']) - cls.users_client.delete_user(cls.user_body['id']) - cls.projects_client.delete_project(cls.project['id']) - for role in cls.roles: - cls.roles_client.delete_role(role['id']) - super(RolesV3TestJSON, cls).resource_cleanup() + cls.addClassResourceCleanup(cls.roles_client.delete_role, + cls.role['id']) @decorators.attr(type='smoke') @decorators.idempotent_id('18afc6c0-46cf-4911-824e-9989cc056c3a')
Add build status to README Library can now successfully do nothing
# Manim - Mathematical Animation Engine [![Documentation Status](https://readthedocs.org/projects/manim/badge/?version=latest)](https://manim.readthedocs.io/en/latest/?badge=latest) +[![Build Status](https://travis-ci.org/3b1b/manim.svg?branch=master)](https://travis-ci.org/3b1b/manim) [![MIT License](https://img.shields.io/badge/license-MIT-blue.svg?style=flat)](http://choosealicense.com/licenses/mit/) Manim is an animation engine for explanatory math videos. It's used to create precise animations programmatically, as seen in the videos at [3Blue1Brown](https://www.3blue1brown.com/).
fixed duplication of representations nuke.api.plugin.ExporterReview adds the representation explicitly via publish_on_farm, so skip adding the repre if it is already there. (The issue in ExtractBurnin is the other way around.) ExporterReview should probably be refactored and publish_on_farm removed altogether.
@@ -601,7 +601,6 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): "files": os.path.basename(remainder), "stagingDir": os.path.dirname(remainder), } - representations.append(rep) if "render" in instance.get("families"): rep.update({ "fps": instance.get("fps"), @@ -609,6 +608,16 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): }) self._solve_families(instance, True) + already_there = False + for repre in instance.get("representations", []): + # might be added explicitly before by publish_on_farm + already_there = repre.get("files") == rep["files"] + if already_there: + break + self.log.debug("repre {} already_there".format(repre)) + if not already_there: + representations.append(rep) + return representations def _solve_families(self, instance, preview=False):
Add more description to policies in keypairs.py This updates the policy doc for the server extend controller in keypairs.py. Partially implements blueprint policy-docs
# License for the specific language governing permissions and limitations # under the License. -from oslo_policy import policy - from nova.policies import base @@ -63,9 +61,20 @@ keypairs_policies = [ 'method': 'GET' } ]), - policy.RuleDefault( - name=BASE_POLICY_NAME, - check_str=base.RULE_ADMIN_OR_OWNER), + base.create_rule_default( + BASE_POLICY_NAME, + base.RULE_ADMIN_OR_OWNER, + "Return 'key_name' in the response of server.", + [ + { + 'path': '/servers/{id}', + 'method': 'GET', + }, + { + 'path': '/servers/detail', + 'method': 'GET' + } + ]), ]
Fixed _custom_opac flag If we specify opacity for every point, then we should set _custom_opac to true.
@@ -1484,7 +1484,7 @@ class BasePlotter(PickingHelper, WidgetHelper): opacity = np.array(opacity) if scalars.shape[0] == opacity.shape[0]: # User could pass an array of opacities for every point/cell - pass + _custom_opac = True else: opacity = opacity_transfer_function(opacity, n_colors)
Add warning about mounting relative paths and minor tweaks
@@ -72,11 +72,16 @@ Running mriqc automatically be executed without need of running the command in item 3. +.. warning:: + + Paths `<bids_dir>` and `<output_dir>` must be absolute. In particular, specifying relative paths for + `<output_dir>` will generate no error and mriqc will run to completion without error but produce no output. + .. warning:: For security reasons, we recommend to run the docker command with the options ``--read-only --tmpfs /run --tmpfs /tmp``. This will run the docker image in - read-only mode, and map the temporal folders ``/run`` and ``/tmp`` to the temporal + read-only mode, and map the temporary folders ``/run`` and ``/tmp`` to the temporal folder of the host. @@ -87,7 +92,7 @@ Explaining the mriqc-docker command line Let's dissect this command line: -+ :code:`docker run`- instructs the docker engine to get and run certain ++ :code:`docker run`- instructs the docker engine to get and run a certain image (which is the last of docker-related arguments: :code:`poldracklab/mriqc:latest`) + :code:`-v <bids_dir>:/data:ro` - instructs docker to mount the local
Cast regularization parameters to float. This works around a bug in earlier proto versions that automatically infer these values to be integer instead of float.
@@ -111,9 +111,9 @@ def _build_regularizer(regularizer):
   """
   regularizer_oneof = regularizer.WhichOneof('regularizer_oneof')
   if regularizer_oneof == 'l1_regularizer':
-    return slim.l1_regularizer(scale=regularizer.l1_regularizer.weight)
+    return slim.l1_regularizer(scale=float(regularizer.l1_regularizer.weight))
   if regularizer_oneof == 'l2_regularizer':
-    return slim.l2_regularizer(scale=regularizer.l2_regularizer.weight)
+    return slim.l2_regularizer(scale=float(regularizer.l2_regularizer.weight))
   raise ValueError('Unknown regularizer function: {}'.format(regularizer_oneof))
DOC: Update TESTS.rst to use the correct names Not actually sure that setup_module() is what was wanted here, but it works? Mention a bit more about actual pytest fixtures.
@@ -178,30 +178,33 @@ Similarly for methods:: Easier setup and teardown functions / methods --------------------------------------------- -Testing looks for module-level or class-level setup and teardown functions by -name; thus:: +Testing looks for module-level or class method-level setup and teardown +functions by name; thus:: - def setup(): + def setup_module(): """Module-level setup""" print('doing setup') - def teardown(): + def teardown_module(): """Module-level teardown""" print('doing teardown') class TestMe: - def setup(): + def setup_method(self): """Class-level setup""" print('doing setup') - def teardown(): + def teardown_method(): """Class-level teardown""" print('doing teardown') Setup and teardown functions to functions and methods are known as "fixtures", -and their use is not encouraged. +and they should be used sparingly. +``pytest`` supports more general fixture at various scopes which may be used +automatically via special arguments. For example, the special argument name +``tmpdir`` is used in test to create a temporary directory. Parametric tests ----------------
Optimize mesh export using np.fromiter. Made an optimization of mesh export using np.fromiter() instead of creating Python lists of mesh data. In my tests it speeds up the export process by more than 2x, and on some scenes by more than 4x.
@@ -43,24 +43,26 @@ class MeshData: if tris_len == 0: raise SyncError("Mesh %s has no polygons" % mesh.name, mesh) - data.vertices = np.array([vert.co for vert in mesh.vertices], dtype=np.float32) - data.normals = np.array( - [norm for tri in mesh.loop_triangles - for norm in tri.split_normals], - dtype=np.float32 - ) + data.vertices = np.fromiter( + (x for vert in mesh.vertices for x in vert.co), + dtype=np.float32).reshape((len(mesh.vertices), 3)) + data.normals = np.fromiter( + (x for tri in mesh.loop_triangles for norm in tri.split_normals for x in norm), + dtype=np.float32).reshape((tris_len * 3, 3)) data.uvs = None data.uv_indices = None if len(mesh.uv_layers) > 0: uv_layer = mesh.uv_layers.active - uvs = np.array([[d.uv.x, d.uv.y] for d in uv_layer.data], dtype=np.float32) + uvs = np.fromiter( + (x for d in uv_layer.data for x in d.uv), + dtype=np.float32).reshape((len(uv_layer.data), 2)) if len(uvs) > 0: data.uvs = uvs - data.uv_indices = np.array([tri.loops for tri in mesh.loop_triangles], dtype=np.int32).reshape((tris_len * 3,)) + data.uv_indices = np.fromiter((x for tri in mesh.loop_triangles for x in tri.loops), dtype=np.int32) data.num_face_vertices = np.full((tris_len,), 3, dtype=np.int32) - data.vertex_indices = np.array([tri.vertices for tri in mesh.loop_triangles], dtype=np.int32).reshape((tris_len * 3,)) + data.vertex_indices = np.fromiter((x for tri in mesh.loop_triangles for x in tri.vertices), dtype=np.int32) data.normal_indices = np.arange(tris_len * 3, dtype=np.int32) if calc_area:
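As a rough standalone sketch of the pattern behind the speedup (made-up vertex data, not the Blender API the exporter actually reads), np.fromiter consumes a generator directly instead of first materializing a Python list:

```python
import numpy as np

# Hypothetical stand-in for mesh vertices: a list of (x, y, z) tuples.
verts = [(0.0, 0.0, 0.0), (1.0, 0.0, 0.0), (0.0, 1.0, 0.0)]

# Old pattern: build an intermediate Python list, then convert it to an array.
vertices_old = np.array([c for v in verts for c in v],
                        dtype=np.float32).reshape((len(verts), 3))

# New pattern: stream the floats straight into np.fromiter, no intermediate list.
vertices_new = np.fromiter((c for v in verts for c in v),
                           dtype=np.float32).reshape((len(verts), 3))

assert np.array_equal(vertices_old, vertices_new)
```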
Add ContainerImagePrepare service to CellController role The CellController role does not have the ContainerImagePrepare service. This results in an empty external_deploy_steps_tasks.yaml and does not update container images when trying to update the cell stack. Closes-Bug:
     - OS::TripleO::Services::CertmongerUser
     - OS::TripleO::Services::Clustercheck
     - OS::TripleO::Services::Collectd
+    - OS::TripleO::Services::ContainerImagePrepare
     - OS::TripleO::Services::Docker
     - OS::TripleO::Services::Fluentd
     - OS::TripleO::Services::HAproxy
Added SLIs, SLOs and Burn rate Alerts section SLIs, SLOs and Burn rate Alerts section documentation, need to add pictures. +fixed typos in slos.tf
# See the License for the specific language governing permissions and # limitations under the License. -# Create an SLO for availablity for the custom service. +# Create an SLO for availability for the custom service. # Example SLO is defined as following: # 90% of all non-4XX requests within the past 30 day windowed period # return with 200 OK status @@ -89,7 +89,7 @@ resource "google_monitoring_slo" "custom_service_latency_slo" { } } -# Create an SLO for availablity for the Istio service. +# Create an SLO for availability for the Istio service. # Example SLO is defined as following: # 90% of all non-4XX requests within the past 30 day windowed period # return with 200 OK status @@ -249,7 +249,7 @@ resource "google_monitoring_slo" "rating_service_latency_slo" { } } -# Rating data freshness SLO: +# Rating service's data freshness SLO: # during a day 99.9% of minutes have at least 1 successful recollect API call resource "google_monitoring_slo" "rating_service_freshness_slo" { # Uses ratingservice service that is automatically detected and created when the service is deployed to App Engine
generate_adhoc_ssl_pair: make issuer match subject With this change, the generated certificate can be trusted, and the following command starts working: openssl s_client -showcerts -connect dev:443 -verifyCAfile dev.crt </dev/null
@@ -462,8 +462,8 @@ def generate_adhoc_ssl_pair(cn=None):
     subject.O = 'Dummy Certificate'  # noqa: E741
 
     issuer = cert.get_issuer()
-    issuer.CN = 'Untrusted Authority'
-    issuer.O = 'Self-Signed'  # noqa: E741
+    issuer.CN = subject.CN
+    issuer.O = subject.O  # noqa: E741
 
     pkey = crypto.PKey()
     pkey.generate_key(crypto.TYPE_RSA, 2048)
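For context only (not part of the patch): the usual pyOpenSSL idiom for a self-signed certificate is to set the issuer to the certificate's own subject, which is what the change achieves field by field. A minimal sketch with hypothetical names:

```python
from OpenSSL import crypto

pkey = crypto.PKey()
pkey.generate_key(crypto.TYPE_RSA, 2048)

cert = crypto.X509()
subject = cert.get_subject()
subject.CN = 'dev'                # hypothetical common name
subject.O = 'Dummy Certificate'   # organization, as in the patched helper

# Self-signed: the issuer is the certificate's own subject, so a client can
# verify the chain against the certificate itself (e.g. with -verifyCAfile).
cert.set_issuer(cert.get_subject())
cert.set_pubkey(pkey)
cert.gmtime_adj_notBefore(0)
cert.gmtime_adj_notAfter(365 * 24 * 60 * 60)
cert.sign(pkey, 'sha256')
```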
Do not fail if process already ended We can expect the subprocess has already ended by the time we're checking for child processes. Handle this case gracefully so that tests do not fail with an exception.
@@ -1548,7 +1548,11 @@ def win32_kill_process_tree(pid, sig=signal.SIGTERM, include_parent=True, ''' if pid == os.getpid(): raise RuntimeError("I refuse to kill myself") + try: parent = psutil.Process(pid) + except psutil.NoSuchProcess: + log.debug("PID not found alive: %d", pid) + return ([], []) children = parent.children(recursive=True) if include_parent: children.append(parent)
Cell ID performance improvements Determine up front whether the platform can store cell IDs in an array and use pogo_async's new array storage feature; don't cast between sequence types, since pogo_async can now handle each of them; only round coordinates when caching IDs.
@@ -6,7 +6,7 @@ from pogo_async.hash_server import HashServer from asyncio import sleep, Lock, Semaphore, get_event_loop from random import choice, randint, uniform, triangular from time import time, monotonic -from array import array +from array import typecodes from queue import Empty from aiohttp import ClientSession @@ -48,6 +48,8 @@ class Worker: accounts = load_accounts() if config.CACHE_CELLS: cell_ids = load_pickle('cells') or {} + COMPACT = 'Q' in typecodes + loop = get_event_loop() login_semaphore = Semaphore(config.SIMULTANEOUS_LOGINS) sim_semaphore = Semaphore(config.SIMULTANEOUS_SIMULATION) @@ -285,7 +287,7 @@ class Worker: except (KeyError, TypeError, AttributeError): pass - await random_sleep(.7, 1.2) + await random_sleep(.9, 1.2) # request 2: download_remote_config_version await self.download_remote_config(version) @@ -704,18 +706,17 @@ class Worker: self.log.info('Visiting {0[0]:.4f},{0[1]:.4f}', point) start = time() - rounded = round_coords(point, 4) - if config.CACHE_CELLS and rounded in self.cell_ids: - cell_ids = list(self.cell_ids[rounded]) - else: - cell_ids = get_cell_ids(*rounded, radius=500) if config.CACHE_CELLS: + rounded = round_coords(point, 4) try: - self.cell_ids[rounded] = array('L', cell_ids) - except OverflowError: - self.cell_ids[rounded] = tuple(cell_ids) + cell_ids = self.cell_ids[rounded] + except KeyError: + cell_ids = get_cell_ids(*rounded, compact=self.COMPACT) + self.cell_ids[rounded] = cell_ids + else: + cell_ids = get_cell_ids(latitude, longitude) - since_timestamp_ms = [0] * len(cell_ids) + since_timestamp_ms = (0,) * len(cell_ids) request = self.api.create_request() request.get_map_objects(cell_id=cell_ids,
rpm: Properly detect other ARMv7 (32-bit) arches As is currently done for the different x86 arches (i386, i486, ...).
@@ -30,7 +30,16 @@ ARCHES_ALPHA = ( "alphaev68", "alphaev7", ) -ARCHES_ARM = ("armv5tel", "armv5tejl", "armv6l", "armv7l", "aarch64") +ARCHES_ARM_32 = ( + "armv5tel", + "armv5tejl", + "armv6l", + "armv6hl", + "armv7l", + "armv7hl", + "armv7hnl", +) +ARCHES_ARM_64 = ("aarch64",) ARCHES_SH = ("sh3", "sh4", "sh4a") ARCHES = ( @@ -39,7 +48,8 @@ ARCHES = ( + ARCHES_PPC + ARCHES_S390 + ARCHES_ALPHA - + ARCHES_ARM + + ARCHES_ARM_32 + + ARCHES_ARM_64 + ARCHES_SH ) @@ -66,11 +76,13 @@ def get_osarch(): def check_32(arch, osarch=None): """ - Returns True if both the OS arch and the passed arch are 32-bit + Returns True if both the OS arch and the passed arch are x86 or ARM 32-bit """ if osarch is None: osarch = get_osarch() - return all(x in ARCHES_32 for x in (osarch, arch)) + return all(x in ARCHES_32 for x in (osarch, arch)) or all( + x in ARCHES_ARM_32 for x in (osarch, arch) + ) def pkginfo(name, version, arch, repoid, install_date=None, install_date_time_t=None):
TST: implemented testing utilities Updated time unit tests to use the testing utilities.
@@ -11,6 +11,7 @@ import numpy as np import pytest from pysat.utils import time as pytime +from pysat.utils import testing class TestGetYearDay(): @@ -158,9 +159,8 @@ class TestCreateDateRange(): tst_stop = stop[-1] if hasattr(stop, "__iter__") else stop # Test the seasonal return values - assert season[0] == tst_start - assert season[-1] == tst_stop - assert len(season) == tst_len + testing.assert_lists_equal([season[0], season[-1]], + [tst_start, tst_stop]) return @@ -184,9 +184,9 @@ class TestCreateDatetimeIndex(): dates = pytime.create_datetime_index(year=self.year, month=self.month, day=self.day, uts=self.uts) - assert dates[0] == dt.datetime(2012, 2, 28) - assert dates[-1] == dt.datetime(2012, 2, 28, 0, 0, 3) - assert len(dates) == len(self.year) + testing.assert_lists_equal([dates[0], dates[-1]], + [dt.datetime(2012, 2, 28), + dt.datetime(2012, 2, 28, 0, 0, 3)]) return def test_create_datetime_index_wo_month_day_uts(self): @@ -194,9 +194,9 @@ class TestCreateDatetimeIndex(): dates = pytime.create_datetime_index(year=self.year) - assert dates[0] == dt.datetime(2012, 1, 1) - assert dates[-1] == dt.datetime(2012, 1, 1) - assert len(dates) == len(self.year) + testing.assert_lists_equal([dates[0], dates[-1]], + [dt.datetime(2012, 1, 1), + dt.datetime(2012, 1, 1)]) return @pytest.mark.parametrize("in_args,err_msg", [
Remove useless netaddr requirement This patch cleans up the requirements.txt list to remove the netaddr module, which is actually replaced by oslo_utils.
@@ -8,7 +8,6 @@ automaton>=0.5.0 # Apache-2.0
 eventlet!=0.18.3,>=0.18.2 # MIT
 WebOb>=1.6.0 # MIT
 greenlet>=0.3.2 # MIT
-netaddr!=0.7.16,>=0.7.13 # BSD
 paramiko>=2.0 # LGPLv2.1+
 python-neutronclient>=5.1.0 # Apache-2.0
 python-glanceclient>=2.5.0 # Apache-2.0
Create compilation passes for ASTNode kinds and final structs processing TN:
@@ -30,9 +30,7 @@ from mako.lookup import TemplateLookup from langkit import caching, names, template_utils from langkit.ada_api import AdaAPISettings from langkit.c_api import CAPISettings -from langkit.diagnostics import ( - Severity, check_source_language, errors_checkpoint -) +from langkit.diagnostics import Severity, check_source_language import langkit.documentation from langkit.expressions import PropertyDef from langkit.passes import ( @@ -948,31 +946,21 @@ class CompileCtx(object): GlobalPass('annotate fields types', CompileCtx.annotate_fields_types, disabled=not annotate_fields_types), + GlobalPass('compute ASTNode kind constants', + CompileCtx.compute_node_kind_constants), + + # Now that all Struct subclasses referenced by the grammar have + # been typed, iterate over all declared subclasses to register the + # ones that are unreachable from the grammar. TODO: this kludge + # will eventually disappear as part of OC22-016. + GlobalPass('add structs to context', + CompileCtx.add_structs_to_context), + errors_checkpoint_pass, ) with names.camel_with_underscores: pass_manager.run(self) - for i, astnode in enumerate( - (astnode - for astnode in self.astnode_types - if not astnode.abstract), - # Compute kind constants for all ASTNode concrete subclasses. - # Start with 1: the constant 0 is reserved as an - # error/uninitialized code. - start=1 - ): - self.node_kind_constants[astnode] = i - - # Now that all Struct subclasses referenced by the grammar have been - # typed, iterate over all declared subclasses to register the ones that - # are unreachable from the grammar. TODO: this kludge will eventually - # disappear as part of OC22-016. - for t in self.struct_types + self.astnode_types: - t.add_to_context() - - errors_checkpoint() - def _emit(self, file_root, generate_lexer, main_source_dirs, main_programs): """ @@ -1300,3 +1288,25 @@ class CompileCtx(object): ["-f", "annotate_fields_types", "--no-diff", "-w"] + list(astnodes_files) ) + + def compute_node_kind_constants(self): + """ + Compute kind constants for all ASTNode concrete subclasses. + """ + for i, astnode in enumerate( + (astnode + for astnode in self.astnode_types + if not astnode.abstract), + # Start with 1: the constant 0 is reserved as an + # error/uninitialized code. + start=1 + ): + self.node_kind_constants[astnode] = i + + def add_structs_to_context(self): + """ + Make sure all Struct subclasses (including ASTNode ones) are added to + the context. + """ + for t in self.struct_types + self.astnode_types: + t.add_to_context()
Association connect should not blindly assume memberEnds In the rare case memberEnd instances are missing, we should just do nothing.
@@ -79,13 +79,16 @@ class AssociationConnect(RelationshipConnect): subject = line.subject def member_ends_match(subject): - return ( + return len(subject.memberEnd) >= 2 and ( + ( head_subject is subject.memberEnd[0].type and tail_subject is subject.memberEnd[1].type - ) or ( + ) + or ( head_subject is subject.memberEnd[1].type and tail_subject is subject.memberEnd[0].type ) + ) # First check if the right subject is already connected: if line.subject and member_ends_match(line.subject):
Using snapshot alf/examples When playing a trained model with an alf snapshot, we should also redirect the python path to its examples directory in case some conf files have been changed.
@@ -1103,8 +1103,9 @@ def get_alf_snapshot_env_vars(root_dir): alf_repo = os.path.join(root_dir, "alf") alf_cnest = os.path.join(alf_repo, "alf/nest/cnest") # path to archived cnest.so + alf_examples = os.path.join(alf_repo, "alf/examples") python_path = os.environ.get("PYTHONPATH", "") - python_path = ":".join([alf_repo, alf_cnest, python_path]) + python_path = ":".join([alf_repo, alf_cnest, alf_examples, python_path]) env_vars = copy.copy(os.environ) env_vars.update({"PYTHONPATH": python_path}) return env_vars
Update libc.math tests cimport some C99 float and long double functions, and test legacy kwargs for double functions.
from libc.math cimport (M_E, M_LOG2E, M_LOG10E, M_LN2, M_LN10, M_PI, M_PI_2, M_PI_4, M_1_PI, M_2_PI, M_2_SQRTPI, M_SQRT2, M_SQRT1_2) -from libc.math cimport (acos, asin, atan, atan2, cos, sin, tan, cosh, sinh, - tanh, acosh, asinh, atanh, exp, log, log10, pow, sqrt) +from libc.math cimport (acos, asin, atan, atan2, cos, sin, sinf, sinl, tan, + cosh, sinh, tanh, acosh, asinh, atanh, exp, log, log10, pow, sqrt) cimport libc.math as libc_math @@ -34,3 +34,11 @@ def test_sin(x): [True, True, True, True, True, True, True, True, True, True] """ return sin(x) + + +def test_sin_kwarg(x): + """ + >>> test_sin_kwarg(0) + 0.0 + """ + return sin(x=x)
feat: archiving pipelines $feat: add archive jobs BE integration $feat: add tests for archive jobs button
@@ -3,6 +3,7 @@ from dbnd._vendor.marshmallow import fields, validate class JobSchemaV2(ApiObjectSchema): + id = fields.Int() name = fields.Str() user = fields.Str() ui_hidden = fields.Boolean()
For the NotificationWithTemplateSchema, exclude the scheduled_notifications so we do not query that table. scheduled_notifications is not used as of yet.
@@ -449,7 +449,7 @@ class NotificationWithTemplateSchema(BaseSchema):
     class Meta:
         model = models.Notification
         strict = True
-        exclude = ('_personalisation', )
+        exclude = ('_personalisation', 'scheduled_notification')
 
     template = fields.Nested(
         TemplateSchema,
Update facades for 2.9 release The following updates the facades to prevent spurious warnings about missing facades. Although the client logs warnings, nothing has been coded against these facades, so we can safely add them without any consequence.
@@ -29,13 +29,17 @@ client_facades = { 'Backups': {'versions': [1, 2]}, 'Block': {'versions': [2]}, 'Bundle': {'versions': [1, 2, 3]}, + 'CharmHub': {'versions': [1]}, 'CharmRevisionUpdater': {'versions': [2]}, 'Charms': {'versions': [2]}, 'Cleaner': {'versions': [2]}, 'Client': {'versions': [1, 2]}, 'Cloud': {'versions': [1, 2, 3, 4, 5]}, 'CAASAdmission': {'versions': [1]}, + 'CAASApplication': {'versions': [1]}, + 'CAASApplicationProvisioner': {'versions': [1]}, 'CAASFirewaller': {'versions': [1]}, + 'CAASFirewallerEmbedded': {'versions': [1]}, 'CAASOperator': {'versions': [1]}, 'CAASAgent': {'versions': [1]}, 'CAASOperatorProvisioner': {'versions': [1]},
Only configure flint2 once If we've run configure before and a Makefile exists, let make figure out whether a recompile of flint2 is necessary
@@ -8,7 +8,9 @@ pip install -r requirements.txt # Check for git clone of flint2 on MacOS and install if found if [ -f flint2/configure ]; then cd flint2/ + if [ ! -f Makefile ]; then ./configure + fi make -j4 make install cd ../
polys: avoid unnecessary use of numbered_symbols() in primitive_element() Also drop redundant polys option
@@ -674,14 +674,14 @@ def primitive_element(extension, **args): x = Dummy('x') domain = args.get('domain', QQ) - F, Y = zip(*[(minimal_polynomial(e, domain=domain).replace(y), y) - for e, y in zip(extension, numbered_symbols('y', cls=Dummy))]) + F = [minimal_polynomial(e, domain=domain) for e in extension] + Y = [p.gen for p in F] for u in range(1, (len(F) - 1)*prod(f.degree() for f in F) + 1): coeffs = [u**n for n in range(len(Y))] f = x - sum(c*y for c, y in zip(coeffs, Y)) - *H, g = groebner(F + (f,), Y + (x,), domain=domain, polys=True) + *H, g = groebner(F + [f], Y + [x], domain=domain) for i, (h, y) in enumerate(zip(H, Y)): H[i] = (y - h).eject(*Y).retract(field=True)
Fix LTE _init_ HG-- branch : feature/microservices
@@ -15,6 +15,8 @@ from noc.core.profile.base import BaseProfile
 class Profile(BaseProfile):
     name = "Eltex.LTE"
     pattern_username = r"(?<!Last )login: "
+    username_submit = "\r"
+    password_submit = "\r"
     pattern_more = [
         (r"\[Yes/press any key for no\]", "Y")
     ]
remove __init__ override Overriding __init__ is not necessary.
@@ -36,9 +36,6 @@ INVENTORY_PATH = os.path.join(PLUGINS_DIR, "inventory") class PhotoshopHost(HostBase, IWorkfileHost, ILoadHost): name = "photoshop" - def __init__(self): - super(PhotoshopHost, self).__init__() - def install(self): """Install Photoshop-specific functionality of avalon-core.
Fix resolving against multiple markers Fix for when requirements are also present Fixes
@@ -310,7 +310,9 @@ class Resolver(object): for dependency_string in dependency_strings: try: - individual_dependencies = [dep.strip() for dep in dependency_string.split(', ')] + split_deps = dependency_string.split(';') + dependencies, markers = split_deps[0], '; '.join(list(set([marker.strip() for marker in split_deps[1:]]))) + individual_dependencies = [dep.strip() for dep in dependencies.split(', ')] cleaned_deps = [] for dep in individual_dependencies: tokens = [token.strip() for token in dep.split(';')] @@ -325,6 +327,7 @@ class Resolver(object): cleaned_tokens.extend(markers) cleaned_deps.append('; '.join(cleaned_tokens)) _dependency_string = ', '.join(set(cleaned_deps)) + _dependency_string += '; {0}'.format(markers) yield InstallRequirement.from_line(_dependency_string, constraint=ireq.constraint) except InvalidMarker:
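A rough illustration of the split the fix performs, using a made-up dependency string (the variable names mirror the patch but the data is invented):

```python
dependency_string = "requests (>=2.0), idna; python_version < '3.8'; extra == 'security'"

# Separate the requirement part from the environment markers, then dedupe the markers.
split_deps = dependency_string.split(';')
dependencies = split_deps[0]
markers = '; '.join(sorted({marker.strip() for marker in split_deps[1:]}))

print(dependencies)  # requests (>=2.0), idna
print(markers)       # extra == 'security'; python_version < '3.8'
```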
fix: `set_column_display` contradicts arguments `show` should set `hidden` to 0, but does the opposite. This is fixed. Use `Array.isArray()` instead of the deprecated `$.isArray()`
@@ -501,9 +501,9 @@ export default class Grid { } set_column_disp(fieldname, show) { - if ($.isArray(fieldname)) { + if (Array.isArray(fieldname)) { for (let field of fieldname) { - this.update_docfield_property(field, "hidden", show); + this.update_docfield_property(field, "hidden", show ? 0 : 1); this.set_editable_grid_column_disp(field, show); } } else {
docs: Updated quickstart docs to import FeatureService docs: updated quickstart docs to import FeatureService
@@ -82,7 +82,7 @@ online_store: from datetime import timedelta -from feast import Entity, FeatureView, Field, FileSource, ValueType +from feast import Entity, FeatureService, FeatureView, Field, FileSource, ValueType from feast.types import Float32, Int64 # Read data from parquet files. Parquet is convenient for local development mode. For
Fix computed getter Content defaults are saved in `diffTracker.contentDefaults`, not in `diffTracker.content_defaults` Prioritize `diffTracker`'s values over `channel.content_defaults` as diffTracker contains the latest updates
     contentDefaults: {
       get() {
         return {
-          ...(this.diffTracker.content_defaults || {}),
           ...(this.channel.content_defaults || {}),
+          ...(this.diffTracker.contentDefaults || {}),
         };
       },
       set(contentDefaults) {
[hailtop] use the exact same error message for sync and async * [hailtop] use the exact same error message for sync and async Importantly, I want to see the stack trace in either case. * revert
@@ -629,7 +629,7 @@ async def retry_transient_errors(f: Callable[..., Awaitable[T]], *args, **kwargs errors += 1 if errors % 10 == 0: st = ''.join(traceback.format_stack()) - log.warning(f'encountered {errors} errors. My stack trace is {st}. Most recent error was {e}', exc_info=True) + log.warning(f'Encountered {errors} errors. My stack trace is {st}. Most recent error was {e}', exc_info=True) delay = await sleep_and_backoff(delay) @@ -642,7 +642,8 @@ def sync_retry_transient_errors(f, *args, **kwargs): except Exception as e: errors += 1 if errors % 10 == 0: - log.warning(f'encountered {errors} errors, most recent one was {e}', exc_info=True) + st = ''.join(traceback.format_stack()) + log.warning(f'Encountered {errors} errors. My stack trace is {st}. Most recent error was {e}', exc_info=True) if is_transient_error(e): pass else:
NanoRange: Fixed bugs from comments
Changed the way MSVC is handled, open for handling version checks
Removed the version number in the conanfile.py
Updated the copy logic from working with .zip to .tar.gz
@@ -6,7 +6,6 @@ from conans.errors import ConanInvalidConfiguration class NanorangeConan(ConanFile): name = "nanorange" - version = "20191001" license = "Boost 1.0" author = "Paul M. Bendixen [email protected]" url = "github.com/conan-io/conan-center-index" @@ -18,6 +17,10 @@ class NanorangeConan(ConanFile): # No settings/options are necessary, this is header only def configure(self): + if self.settings.compiler == "Visual Studio": + if not any([self.settings.compiler.cppstd == std for std in ["17", "20"]]): + raise ConanInvalidConfiguration("nanoRange requires at least c++17") + else: if not any([str(self.settings.compiler.cppstd) == std for std in ["17", "20", "gnu17", "gnu20"]]): raise ConanInvalidConfiguration("nanoRange requires at least c++17") @@ -25,7 +28,7 @@ class NanorangeConan(ConanFile): tools.get(**self.conan_data["sources"][self.version]) def package(self): - sourceSubfolder="NanoRange-{}".format( self.conan_data["sources"][self.version]["url"].split("/")[-1][:-4]) + sourceSubfolder="NanoRange-{}".format( self.conan_data["sources"][self.version]["url"].split("/")[-1][:-7]) self.copy("*.hpp", src="{}/include".format(sourceSubfolder), dst="include" ) self.copy("LICENSE_1_0.txt", src=sourceSubfolder, dst="licenses")
Added missing component of cmac to save file

_n was not saved in the save file
@@ -30,7 +30,7 @@ class CMAC(LinearApproximator): super().__init__(weights=weights, input_shape=(self._phi.size,), output_shape=output_shape) - self._add_save_attr(_phi='pickle') + self._add_save_attr(_phi='pickle', _n='primitive') def fit(self, x, y, alpha=1.0, **kwargs): """
Add pre-conditions to avoid on_timeout being called after stop()

Apparently the cancellation request for a TimerHandle doesn't necessarily have to be honoured despite large periods of time passing.
@@ -314,7 +314,9 @@ class View:
             self._timeout_handler = loop.call_later(self.timeout, self.dispatch_timeout)
 
     def dispatch_timeout(self):
-        if not self._stopped.done():
-            self._stopped.set_result(True)
-            asyncio.create_task(self.on_timeout(), name=f'discord-ui-view-timeout-{self.id}')
+        if self._stopped.done():
+            return
+
+        self._stopped.set_result(True)
+        asyncio.create_task(self.on_timeout(), name=f'discord-ui-view-timeout-{self.id}')
change add_outgrads and primitive_mut_add to do the vspace.zeros() initialization inside primitive_mut_add
@@ -44,7 +44,7 @@ def backward_pass(g, end_node, start_node): def add_outgrads(vspace, prev_g_flagged, g): if prev_g_flagged is None: if type(getval(g)) == SparseObject: - return primitive_mut_add(vspace, vspace.zeros(), g), True + return primitive_mut_add(vspace, None, g), True else: return g, False else: @@ -52,7 +52,7 @@ def add_outgrads(vspace, prev_g_flagged, g): if mutable: return primitive_mut_add(vspace, prev_g, g), True else: - prev_g_mutable = primitive_mut_add(vspace, vspace.zeros(), prev_g) + prev_g_mutable = primitive_mut_add(vspace, None, prev_g) return primitive_mut_add(vspace, prev_g_mutable, g), True active_progenitors = set() @@ -130,6 +130,8 @@ class nograd_primitive(primitive): @primitive def primitive_mut_add(vspace, x_prev, x_new): + if x_prev is None: + x_prev = vspace.zeros() if type(x_new) == SparseObject: return x_new.mut_add(x_prev) else:
Compare the bytes we read with a bytes object, not str. Fixes
@@ -23,7 +23,7 @@ def print_unsourced_ids_from_wikipedia(): for page_id, type in cursor: if type == b'page': print(page_id) - elif type == 'subcat': + elif type == b'subcat': subcategories.add(page_id) if not subcategories: break
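A self-contained illustration of the Python 3 behaviour behind the fix above:

# In Python 3, bytes and str never compare equal, so the old elif branch was silently never taken.
row_type = b'subcat'            # what the database cursor yields for the type column
print(row_type == 'subcat')     # False: bytes vs. str
print(row_type == b'subcat')    # True: comparing against a bytes literal works as intended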
improved export file

Export the original file, or export the file that was created within QualCoder (stored only in the sqlite database).
@@ -1122,7 +1122,7 @@ class DialogManageFiles(QtWidgets.QDialog): def export(self): """ Export files to selected directory. If an imported file was from a docx, odt, pdf, html, epub then export the original file - and also export the plain text version. + If the file was created within QualCoder (so only in the database), export as plain text. """ index_list = self.ui.tableWidget.selectionModel().selectedIndexes() @@ -1145,15 +1145,15 @@ class DialogManageFiles(QtWidgets.QDialog): if not ok: return - # redo ms as filenames may change for created files and for original file documents + # redo msg as filenames may change for created files and for original file documents msg = _("Export to ") + directory + "\n" for row in rows: filename = self.source[row]['name'] filename_txt = None - if len(filename) > 5 and (filename[-5:] == ".html" or filename[-5:] == ".docx" or filename[-5:] == ".epub"): + '''if len(filename) > 5 and (filename[-5:] == ".html" or filename[-5:] == ".docx" or filename[-5:] == ".epub"): filename_txt = filename[0:len(filename) - 5] + ".txt" if len(filename) > 4 and (filename[-4:] == ".htm" or filename[-4:] == ".odt" or filename[-4] == ".txt"): - filename_txt = filename[0:len(filename) - 4] + ".txt" + filename_txt = filename[0:len(filename) - 4] + ".txt" ''' # Below is for transcribed files and for user created text files within QualCoder if self.source[row]['mediapath'] is None and filename_txt is None: filename_txt = filename + ".txt"
Classes for extensions

Added database wrapper classes for both the extensions db and the lnurlfaucet db.
@@ -30,3 +30,62 @@ class Database: """Given a query, cursor.execute() it.""" self.cursor.execute(query, values) self.connection.commit() + + +class ExtDatabase: + def __init__(self, db_path: str = os.path.join(LNBITS_PATH, "extensions", "overview.sqlite3")): + self.path = db_path + self.connection = sqlite3.connect(db_path) + self.connection.row_factory = sqlite3.Row + self.cursor = self.connection.cursor() + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + self.cursor.close() + self.connection.close() + + def fetchall(self, query: str, values: tuple) -> list: + """Given a query, return cursor.fetchall() rows.""" + self.cursor.execute(query, values) + return self.cursor.fetchall() + + def fetchone(self, query: str, values: tuple): + self.cursor.execute(query, values) + return self.cursor.fetchone() + + def execute(self, query: str, values: tuple) -> None: + """Given a query, cursor.execute() it.""" + self.cursor.execute(query, values) + self.connection.commit() + + +class FauDatabase: + def __init__(self, db_path: str = os.path.join(LNBITS_PATH, "extensions", "lnurlfaucet", "database.sqlite3")): + self.path = db_path + self.connection = sqlite3.connect(db_path) + self.connection.row_factory = sqlite3.Row + self.cursor = self.connection.cursor() + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + self.cursor.close() + self.connection.close() + + def fetchall(self, query: str, values: tuple) -> list: + """Given a query, return cursor.fetchall() rows.""" + self.cursor.execute(query, values) + return self.cursor.fetchall() + + def fetchone(self, query: str, values: tuple): + self.cursor.execute(query, values) + return self.cursor.fetchone() + + def execute(self, query: str, values: tuple) -> None: + """Given a query, cursor.execute() it.""" + self.cursor.execute(query, values) + self.connection.commit() +
[bugfix] Fix _formatLimit_MonthOfYear

The lower limit is given as 1900 but was not enforced by the predicate.
@@ -2152,7 +2152,7 @@ formatLimits = { } # All month of year articles are in the same format -_formatLimit_MonthOfYear = (lambda v: 1 <= 1900 and v < 2051, 1900, 2051) +_formatLimit_MonthOfYear = (lambda v: 1900 <= v < 2051, 1900, 2051) for month in yrMnthFmts: formatLimits[month] = _formatLimit_MonthOfYear
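A small self-contained check of why the old predicate was wrong: its first operand was the constant comparison 1 <= 1900, so only the upper bound was ever enforced:

old_check = lambda v: 1 <= 1900 and v < 2051   # "1 <= 1900" is always True
new_check = lambda v: 1900 <= v < 2051

print(old_check(1066), new_check(1066))  # True False: pre-1900 years slipped through before the fix
print(old_check(1999), new_check(1999))  # True True
print(old_check(2051), new_check(2051))  # False False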
test: Misc update in test_tutorial

* Add missing remove_target call from "Delegate to Hashed Bins" section
* Add comments to dirty_roles output assertion
@@ -134,7 +134,8 @@ class TestTutorial(unittest.TestCase): repository.root.load_signing_key(private_root_key) repository.root.load_signing_key(private_root_key2) - # Patch logger to assert that it accurately logs dirty roles + # NOTE: The tutorial does not call dirty_roles anymore due to #964 and + # #958. We still call it here to see if roles are dirty as expected. with mock.patch("tuf.repository_tool.logger") as mock_logger: repository.dirty_roles() # Concat strings to avoid Python2/3 unicode prefix problems ('' vs. u'') @@ -186,6 +187,8 @@ class TestTutorial(unittest.TestCase): repository.timestamp.expiration = datetime.datetime(2080, 10, 28, 12, 8) + # NOTE: The tutorial does not call dirty_roles anymore due to #964 and + # #958. We still call it here to see if roles are dirty as expected. with mock.patch("tuf.repository_tool.logger") as mock_logger: repository.dirty_roles() # Concat strings to avoid Python2/3 unicode prefix problems ('' vs. u'') @@ -265,7 +268,8 @@ class TestTutorial(unittest.TestCase): 'timestamp_key', 'password') repository.timestamp.load_signing_key(private_timestamp_key) - # Patch logger to assert that it accurately logs dirty roles + # NOTE: The tutorial does not call dirty_roles anymore due to #964 and + # #958. We still call it here to see if roles are dirty as expected. with mock.patch("tuf.repository_tool.logger") as mock_logger: repository.dirty_roles() # Concat strings to avoid Python2/3 unicode prefix problems ('' vs. u'') @@ -278,6 +282,8 @@ class TestTutorial(unittest.TestCase): self.assertTrue(os.path.exists(os.path.join( 'repository','targets', 'myproject', 'file4.txt'))) + # NOTE: The tutorial does not call dirty_roles anymore due to #964 and + # #958. We still call it here to see if roles are dirty as expected. with mock.patch("tuf.repository_tool.logger") as mock_logger: repository.dirty_roles() # Concat strings to avoid Python2/3 unicode prefix problems ('' vs. u'') @@ -318,7 +324,8 @@ class TestTutorial(unittest.TestCase): 'unclaimed_key', 'password') repository.targets("unclaimed").load_signing_key(private_unclaimed_key) - + # NOTE: The tutorial does not call dirty_roles anymore due to #964 and + # #958. We still call it here to see if roles are dirty as expected. with mock.patch("tuf.repository_tool.logger") as mock_logger: repository.dirty_roles() # Concat strings to avoid Python2/3 unicode prefix problems ('' vs. u'') @@ -337,6 +344,7 @@ class TestTutorial(unittest.TestCase): # ----- Tutorial Section: Delegate to Hashed Bins + repository.targets('unclaimed').remove_target("myproject/file4.txt") targets = repository.get_filepaths_in_directory( os.path.join('repository', 'targets', 'myproject'), recursive_walk=True) @@ -362,10 +370,11 @@ class TestTutorial(unittest.TestCase): ]) - for delegation in repository.targets('unclaimed').delegations: delegation.load_signing_key(private_unclaimed_key) + # NOTE: The tutorial does not call dirty_roles anymore due to #964 and + # #958. We still call it here to see if roles are dirty as expected. with mock.patch("tuf.repository_tool.logger") as mock_logger: repository.dirty_roles() # Concat strings to avoid Python2/3 unicode prefix problems ('' vs. u'')
Fix 'navigation_depth' functionality

Read the Docs was not using the sphinx_rtd_theme settings because the configuration dictionary was being clobbered; tweaked conf.py to avoid this.
# add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. # +import importlib import os import warnings # import sys @@ -61,12 +62,9 @@ warnings.filterwarnings("ignore", category=UserWarning, # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. # -on_rtd = os.environ.get('READTHEDOCS') == 'True' -if on_rtd: - html_theme = 'default' -else: import sphinx_rtd_theme html_theme = 'sphinx_rtd_theme' +html_style = None html_theme_path = [sphinx_rtd_theme.get_html_theme_path()] # Add any paths that contain custom static files (such as style sheets) here,
Enable tuples and lists in handle_probability_param()

handle_probability_param() in parameters.py has so far only supported single numbers, True, False and StochasticParameter. Now it also supports tuples of form (a, b), which are transformed to Uniform, and lists of form [a, b, c, ...], which are transformed to Choice. These are useful for masks.
@@ -105,7 +105,7 @@ def handle_discrete_param(param, name, value_range=None, tuple_to_uniform=True, list_str = ", list of %s" % (allowed_type,) if list_to_choice else "" raise Exception("Expected %s, tuple of two %s%s or StochasticParameter for %s, got %s." % (allowed_type, allowed_type, list_str, name, type(param),)) -def handle_probability_param(param, name): +def handle_probability_param(param, name, tuple_to_uniform=False, list_to_choice=False): eps = 1e-6 if param in [True, False, 0, 1]: return Deterministic(int(param)) @@ -115,6 +115,20 @@ def handle_probability_param(param, name): return Deterministic(int(round(param))) else: return Binomial(param) + elif tuple_to_uniform and isinstance(param, tuple): + ia.do_assert(all([ + ia.is_single_number(v) for v in param + ]), "Expected parameter '%s' of type tuple to only contain number, got %s." % (name, [type(v) for v in param],)) + ia.do_assert(len(param) == 2) + ia.do_assert(0 <= param[0] <= 1.0) + ia.do_assert(0 <= param[1] <= 1.0) + return Binomial(Uniform(param[0], param[1])) + elif list_to_choice and ia.is_iterable(param): + ia.do_assert(all([ + ia.is_single_number(v) for v in param + ]), "Expected iterable parameter '%s' to only contain number, got %s." % (name, [type(v) for v in param],)) + ia.do_assert(all([0 <= p_i <= 1.0 for p_i in param])) + return Binomial(Choice(param)) elif isinstance(param, StochasticParameter): return param else:
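A hedged usage sketch of the new argument forms; the iap alias for imgaug.parameters is an assumption about how the module is commonly imported:

from imgaug import parameters as iap

p_scalar = iap.handle_probability_param(0.5, "p")                                   # Binomial(0.5), as before
p_range = iap.handle_probability_param((0.2, 0.8), "p", tuple_to_uniform=True)      # Binomial(Uniform(0.2, 0.8))
p_choice = iap.handle_probability_param([0.0, 0.5, 1.0], "p", list_to_choice=True)  # Binomial(Choice([0.0, 0.5, 1.0]))
print(p_scalar, p_range, p_choice)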
GDB helpers: emit bind directives for BindingScope TN:
@@ -13,8 +13,8 @@ import funcy from langkit import names from langkit.compiled_types import ( AbstractNodeData, Argument, ASTNode, BoolType, CompiledType, - LexicalEnvType, LongType, Symbol, T, Token, get_context, - render as ct_render, resolve_type, EnvRebindingsType + EnvRebindingsType, LexicalEnvType, LongType, Symbol, T, Token, + gdb_bind_var, get_context, render as ct_render, resolve_type ) from langkit.diagnostics import ( Context, DiagnosticError, Severity, check_multiple, check_source_language, @@ -1269,7 +1269,10 @@ class BindingScope(ResolvedExpression): self.static_type = self.expr.type def _render_pre(self): - return self.expr._render_pre() + return '\n'.join( + [gdb_bind_var(binding) for binding in self.expr_bindings] + + [self.expr._render_pre()] + ) def _render_expr(self): return self.expr._render_expr()
Deletion: remove outer try/except block in reaper run_once

run_daemon already takes care of catching unhandled exceptions and re-trying the run_once function.
@@ -463,7 +463,7 @@ def _run_once(rses, include_rses, exclude_rses, vos, chunk_size, greedy, scheme, if not rses_to_process: logger(logging.ERROR, 'Reaper: No RSEs found. Will sleep for 30 seconds') return - try: + dict_rses = {} _, total_workers, logger = heartbeat_handler.live() tot_needed_free_space = 0 @@ -601,11 +601,6 @@ def _run_once(rses, include_rses, exclude_rses, vos, chunk_size, greedy, scheme, if paused_rses: logger(logging.INFO, 'Deletion paused for a while for following RSEs: %s', ', '.join(paused_rses)) - except DatabaseException as error: - logger(logging.WARNING, 'Reaper: %s', str(error)) - except Exception: - logger(logging.CRITICAL, 'Exception', exc_info=True) - def stop(signum=None, frame=None): """
Fix Extreme.XOS.get_capabilities script HG-- branch : feature/microservices
@@ -17,7 +17,7 @@ from noc.lib.text import parse_table class Script(BaseScript): name = "Extreme.XOS.get_capabilities" - rx_lldp = re.compile(r"^\s*\d+\s+Enabled\s+Enabled", re.MULTILINE) + rx_lldp = re.compile(r"^\s*\d+(\:\d+)?\s+Enabled\s+Enabled", re.MULTILINE) rx_cdp = re.compile(r"^\s*CDP \S+ enabled ports\s+:\s+\d+", re.MULTILINE) @false_on_cli_error
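A self-contained illustration of what the added optional (\:\d+)? group buys: stacked-switch port names such as 1:12 now match:

import re

rx_lldp = re.compile(r"^\s*\d+(\:\d+)?\s+Enabled\s+Enabled", re.MULTILINE)

print(bool(rx_lldp.search("1      Enabled   Enabled")))   # True, matched before the fix as well
print(bool(rx_lldp.search("1:12   Enabled   Enabled")))   # True only with the optional ":port" group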
$.Introspection: enhance documentation TN:
@@ -6,7 +6,7 @@ package ${ada_lib_name}.Introspection is Invalid_Field : exception; - ## In a lot of testcases, there is a single concrete AST node that has no + ## In a lot of testcases, there is a single concrete node that has no ## field. For these, generates a type that has no valid value. type Field_Reference is % if ctx.sorted_parse_fields: @@ -16,25 +16,26 @@ package ${ada_lib_name}.Introspection is new Integer range 1 .. 0 % endif ; - -- Enumeration of all AST node fields + -- Enumeration of all node fields function Field_Name (Field : Field_Reference) return String; -- Return a lower-case name for Field function Index (Field : Field_Reference) return Positive; - -- Return the index in AST nodes to access the given Field + -- Return the index in nodes to access the given ``Field`` function Field_Reference_From_Index (Kind : ${root_node_kind_name}; Index : Positive) return Field_Reference; - -- Return the field reference corresponding to the given Index in AST nodes - -- of the given Kind. Raise an Invalid_Field if there is no field - -- corresponding to this index. + -- Return the field reference corresponding to the given ``Index`` in nodes + -- of the given ``Kind``. Raise an ``Invalid_Field`` exception if there is + -- no field corresponding to this index. type Field_Reference_Array is array (Positive range <>) of Field_Reference; function Fields (Kind : ${root_node_kind_name}) return Field_Reference_Array; - -- Return the list of fields that AST nodes of the given Kind have + -- Return the list of fields that nodes of the given ``Kind`` have. This + -- returns an empty array for list nodes. function Token_Node_Kind (Kind : ${root_node_kind_name}) return Token_Kind @@ -42,7 +43,7 @@ package ${ada_lib_name}.Introspection is -- Return the token kind corresponding to the given token node kind % if not ctx.generate_unparser: -- - -- As unparser are not generated, this always raises a Program_Error + -- As unparser are not generated, this always raises a ``Program_Error`` -- exception. % endif
Typo?

I removed the "-e" option from "pip install -e dist/*.whl # installs jaxlib (includes XLA)" on line 58, so it is now consistent with lines 69-70. When I tried the command with "-e" it threw an error; without "-e" it worked fine.
@@ -55,7 +55,7 @@ You can install the necessary Python dependencies using ``pip``:: To build ``jaxlib`` with CUDA support, you can run:: python build/build.py --enable_cuda - pip install -e dist/*.whl # installs jaxlib (includes XLA) + pip install dist/*.whl # installs jaxlib (includes XLA) See ``python build/build.py --help`` for configuration options, including ways to
Fix argument parsing in RandomGhosting

Fixes
@@ -50,22 +50,40 @@ class RandomGhosting(RandomTransform): if axis not in (0, 1, 2): raise ValueError(f'Axes must be in (0, 1, 2), not "{axes}"') self.axes = axes - if isinstance(num_ghosts, int): - self.num_ghosts_range = num_ghosts, num_ghosts - elif isinstance(num_ghosts, tuple) and len(num_ghosts) == 2: - self.num_ghosts_range = num_ghosts - self.intensity_range = self.parse_range(intensity, 'intensity') - for n in self.intensity_range: - if n < 0: - message = ( - f'Intensity must be a positive number, not {n}') - raise ValueError(message) + self.num_ghosts_range = self.parse_num_ghosts(num_ghosts) + self.intensity_range = self.parse_intensity(intensity) if not 0 <= restore < 1: message = ( f'Restore must be a number between 0 and 1, not {restore}') raise ValueError(message) self.restore = restore + @staticmethod + def parse_num_ghosts(num_ghosts): + try: + iter(num_ghosts) + except TypeError: + num_ghosts = num_ghosts, num_ghosts + for n in num_ghosts: + if not isinstance(n, int) or n < 0: + message = ( + f'Number of ghosts must be a natural number, not {n}') + raise ValueError(message) + return num_ghosts + + @staticmethod + def parse_intensity(intensity): + try: + iter(intensity) + except TypeError: + intensity = intensity, intensity + for n in intensity: + if n < 0: + message = ( + f'Intensity must be a positive number, not {n}') + raise ValueError(message) + return intensity + def apply_transform(self, sample: Subject) -> dict: random_parameters_images_dict = {} for image_name, image_dict in sample.get_images_dict().items():
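A hedged usage sketch of the argument forms the reworked parsing accepts, assuming the package is importable as torchio:

import torchio as tio

# Ranges for both parameters are parsed into (low, high) pairs.
ghosting_range = tio.RandomGhosting(num_ghosts=(4, 10), intensity=(0.3, 0.7))

# Plain scalars are expanded to (x, x) by the new parse_num_ghosts / parse_intensity helpers.
ghosting_fixed = tio.RandomGhosting(num_ghosts=3, intensity=0.5)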
Fix ToTensor when PIL Image has mode F

Fixes

The only case of floating point supported by PIL seems to be `F`, so this should fix it.
@@ -59,6 +59,8 @@ def to_tensor(pic): img = torch.from_numpy(np.array(pic, np.int32, copy=False)) elif pic.mode == 'I;16': img = torch.from_numpy(np.array(pic, np.int16, copy=False)) + elif pic.mode == 'F': + img = torch.from_numpy(np.array(pic, np.float32, copy=False)) else: img = torch.ByteTensor(torch.ByteStorage.from_buffer(pic.tobytes())) # PIL image mode: 1, L, P, I, F, RGB, YCbCr, RGBA, CMYK
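A hedged end-to-end check of the new branch: a 32-bit float PIL image now converts to a float tensor instead of falling through to the byte-buffer path:

import numpy as np
from PIL import Image
from torchvision import transforms

float_image = Image.fromarray(np.random.rand(8, 8).astype(np.float32), mode="F")
tensor = transforms.ToTensor()(float_image)
print(tensor.dtype, tensor.shape)  # torch.float32, (1, 8, 8); no division by 255 for float inputs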
Remove unused variable My editor keeps moaning about it.
@@ -164,7 +164,6 @@ def measure_by_ccg(request, format=None): org_ids = utils.param_to_list(request.query_params.get('org', [])) tags = [x for x in request.query_params.get('tags', '').split(',') if x] - rolled = {} measure_values = MeasureValue.objects.by_ccg(org_ids, measure_id, tags) rsp_data = {
Extend the incremental marker for parametrize

The incremental marker is adapted to properly handle test classes with parametrize defined at class level. Fix
@@ -461,21 +461,49 @@ an ``incremental`` marker which is to be used on classes: # content of conftest.py - import pytest + # store history of failures per test class name and per index in parametrize (if parametrize used) + _test_failed_incremental: Dict[str, Dict[Tuple[int, ...], str]] = {} def pytest_runtest_makereport(item, call): if "incremental" in item.keywords: + # incremental marker is used if call.excinfo is not None: - parent = item.parent - parent._previousfailed = item + # the test has failed + # retrieve the class name of the test + cls_name = str(item.cls) + # retrieve the index of the test (if parametrize is used in combination with incremental) + parametrize_index = ( + tuple(item.callspec.indices.values()) + if hasattr(item, "callspec") + else () + ) + # retrieve the name of the test function + test_name = item.originalname or item.name + # store in _test_failed_incremental the original name of the failed test + _test_failed_incremental.setdefault(cls_name, {}).setdefault( + parametrize_index, test_name + ) def pytest_runtest_setup(item): if "incremental" in item.keywords: - previousfailed = getattr(item.parent, "_previousfailed", None) - if previousfailed is not None: - pytest.xfail("previous test failed ({})".format(previousfailed.name)) + # retrieve the class name of the test + cls_name = str(item.cls) + # check if a previous test has failed for this class + if cls_name in _test_failed_incremental: + # retrieve the index of the test (if parametrize is used in combination with incremental) + parametrize_index = ( + tuple(item.callspec.indices.values()) + if hasattr(item, "callspec") + else () + ) + # retrieve the name of the first test function to fail for this class name and index + test_name = _test_failed_incremental[cls_name].get(parametrize_index, None) + # if name found, test has failed for the combination of class name & test name + if test_name is not None: + pytest.xfail("previous test failed ({})".format(test_name)) + These two hook implementations work together to abort incremental-marked tests in a class. Here is a test module example:
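A hedged sketch of a test module exercising the case the updated hooks now handle, parametrize applied at class level together with the incremental marker:

# content of test_incremental_parametrize.py (illustrative only)
import pytest


@pytest.mark.incremental
@pytest.mark.parametrize("value", [1, 2])
class TestUserHandling:
    def test_login(self, value):
        assert value != 2      # fails only for the value == 2 parametrization

    def test_modification(self, value):
        pass                   # xfailed for value == 2 only; still runs for value == 1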
add Namespace.add_field This patch adds the add_field method to namespace v2 to facilitate the creation of fields that have the same name as their argument.
@@ -709,6 +709,24 @@ class Namespace: raise ValueError('Cannot define the jacobian {!r}: dimension is negative.'.format(jacobian)) setattr(self, jacobian, function.jacobian(geom, numpy.size(geom) - i)) + def add_field(self, __names: Union[str, Sequence[str]], *__bases, shape: Tuple[int, ...] = (), dtype: function.DType = float): + '''Add field(s) of the form ns.u = function.dotarg('u', ...) + + Parameters + ---------- + names : :class:`str` or iterable thereof + Name of both the generated field and the function argument. + bases : :class:`nutils.function.Array` or something that can be :meth:`nutils.function.Array.cast` into one + The arrays to take inner products with. + shape : :class:`tuple` of :class:`int`, optional + The shape to be appended to the argument. + dtype : :class:`bool`, :class:`int`, :class:`float` or :class:`complex` + The dtype of the argument. + ''' + + for name in (__names,) if isinstance(__names, str) else __names: + setattr(self, name, function.dotarg(name, *__bases, shape=shape, dtype=dtype)) + def copy_(self, **replacements: Mapping[str, function.Array]) -> 'Namespace': '''Return a copy of this namespace.
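A hedged usage sketch; the mesh/basis setup is assumed and the exact namespace module path may differ between nutils versions:

from nutils import mesh
from nutils.expression_v2 import Namespace

topo, geom = mesh.rectilinear([4, 4])
basis = topo.basis('std', degree=1)

ns = Namespace()
ns.add_field('u', basis)              # same as ns.u = function.dotarg('u', basis)
ns.add_field(('v', 'w'), basis)       # several scalar fields sharing the same bases
ns.add_field('q', basis, shape=(2,))  # vector-valued field: argument 'q' gets the extra shape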
Missing Sample Updated to show missing sample text for a response.
@@ -144,11 +144,12 @@ General Issues 2. If an issue has a Jira ticket with a ``help-wanted`` label, there is a Help Wanted ticket in GitHub. It can be closed with the following note: .. code-block:: text + Hi @username Thanks for the report! We have created a [Help Wanted issue here](link to GitHub issue) and are looking for community's help. Would you be interested helping with a pull request? -3. If an issue has a Jira ticket without a ``help-wanted`` label and assigned to the current release fix version for a developer to fix, it can be closed with the following note +3. If an issue has a Jira ticket without a ``help-wanted`` label and assigned to the current release fix version for a developer to fix, it can be closed with the following note: .. code-block:: text @@ -171,7 +172,7 @@ General Issues Feature Requests ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -Respond to the issue with the following note +Respond to the issue with the following note: .. code-block:: text
Attempt to fix pipe_to unit test on Windows (for real this time) The previous fix was apparently broken when I checked in with Linux line endings. This approach should be independent of that.
@@ -406,13 +406,10 @@ def test_pipe_to_shell(base_app): # Windows # Get help menu and pipe it's output to the sort shell command out = run_cmd(base_app, 'help | sort') - expected = normalize(""" - - -_relative_load edit history pause pyscript run set shortcuts -======================================== -cmdenvironment help load py quit save shell show -Documented commands (type help <topic>):""") + expected = ['', '', '_relative_load edit history pause pyscript run set shortcuts', + '========================================', + 'cmdenvironment help load py quit save shell show', + 'Documented commands (type help <topic>):'] assert out == expected else: # Mac and Linux
Update Task API

* Including ended_at in datetime_fields
* Removing unnecessary DateTimeFilters
@@ -27,8 +27,10 @@ class TaskSerializer(serializers.ModelSerializer): 'created_at', 'task_name', 'database', + 'rollback', 'relevance', + 'ended_at', ) def get_relevance(self, task): @@ -87,20 +89,6 @@ class TaskSerializer(serializers.ModelSerializer): return None -class EventFilter(filters.FilterSet): - class Meta: - model = TaskHistory - fields = { - 'updated_at': ('lte', 'gte') - } - - filter_overrides = { - django_models.DateTimeField: { - 'filter_class': django_filters.DateTimeFilter - }, - } - - class TaskAPI(viewsets.ReadOnlyModelViewSet): """ @@ -131,7 +119,6 @@ class TaskAPI(viewsets.ReadOnlyModelViewSet): serializer_class = TaskSerializer permission_classes = (permissions.IsAuthenticatedOrReadOnly,) filter_backends = (filters.OrderingFilter,) - filter_class = EventFilter filter_fields = ( 'task_id', 'task_status', @@ -141,11 +128,13 @@ class TaskAPI(viewsets.ReadOnlyModelViewSet): 'updated_at', 'created_at', 'user', - 'relevance' + 'relevance', + 'ended_at', + 'database_name' ) ordering_fields = ('created_at', 'updated_at', 'id') ordering = ('-created_at',) - datetime_fields = ('created_at', 'updated_at') + datetime_fields = ('created_at', 'updated_at', 'ended_at') def get_queryset(self): params = self.request.GET.dict()
Update CONTRIBUTING.md Update the contributing instructions to use python-poetry instead of sdispater as the repository namespace.
@@ -87,7 +87,7 @@ You will need Poetry to start contributing on the Poetry codebase. Refer to the You will first need to clone the repository using `git` and place yourself in its directory: ```bash -$ git clone [email protected]:sdispater/poetry.git +$ git clone [email protected]:python-poetry/poetry.git $ cd poetry ```
Fix handling of ZFIT_DISABLE_TF_WARNINGS environment variable.

The logic in _maybe_disable_warnings() did not actually do what the warning about the suppression of TensorFlow warnings claimed. Setting the environment variable had no effect. Also slightly simplified the wording of the warning.
"""Top-level package for zfit.""" # Copyright (c) 2021 zfit -import inspect -import sys import warnings from pkg_resources import get_distribution @@ -32,15 +30,16 @@ __all__ = ["z", "constraint", "pdf", "minimize", "loss", "core", "data", "func", def _maybe_disable_warnings(): import os - true = "IS_TRUE" - if not os.environ.get("ZFIT_DISABLE_TF_WARNINGS", true): - return - elif true: - warnings.warn("All TensorFlow warnings are by default suppressed by zfit." - " In order to not suppress them," - " set the environment variable ZFIT_DISABLE_TF_WARNINGS to 0." + disable_warnings = os.environ.get("ZFIT_DISABLE_TF_WARNINGS") + if disable_warnings is None: + warnings.warn("TensorFlow warnings are by default suppressed by zfit." + " In order to show them," + " set the environment variable ZFIT_DISABLE_TF_WARNINGS=0." " In order to suppress the TensorFlow warnings AND this warning," - " set ZFIT_DISABLE_TF_WARNINGS manually to 1.") + " set ZFIT_DISABLE_TF_WARNINGS=1.") + elif disable_warnings == '0': + return + os.environ["KMP_AFFINITY"] = "noverbose" os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
Add flag to disable reservation cleanup This shouldn't be needed on our patched version of k8s that doesn't send offers to maint'd hosts. This adds a flag so I can disable it in the cron job that cleans up maint'd hosts.
@@ -42,6 +42,10 @@ def parse_args():
         '-v', '--verbose', action='store_true', dest="verbose", default=False,
     )
 
+    parser.add_argument(
+        '--disable-reservation-cleanup', action='store_true',
+        dest="disable_reservation_cleanup", default=False,
+    )
     args = parser.parse_args()
     return args
 
@@ -100,6 +104,7 @@ def main():
 
     cleanup_forgotten_draining()
     cleanup_forgotten_down()
-    unreserve_all_resources_on_non_draining_hosts()
-    reserve_all_resources_on_draining_hosts()
+    if not args.disable_reservation_cleanup:
+        unreserve_all_resources_on_non_draining_hosts()
+        reserve_all_resources_on_draining_hosts()
update googlebenchmark version

Updates the googlebenchmark version to v1.5.1 to match RMM/cuDF.
@@ -4,7 +4,7 @@ include(ExternalProject) ExternalProject_Add(GoogleBenchmark GIT_REPOSITORY https://github.com/google/benchmark.git - GIT_TAG main + GIT_TAG v1.5.1 SOURCE_DIR "${GBENCH_ROOT}/googlebenchmark" BINARY_DIR "${GBENCH_ROOT}/build" INSTALL_DIR "${GBENCH_ROOT}/install"
[modules/spotify] enable scrolling

This change should enable scrolling for the spotify module (unfortunately, I am unable to fully test this, as I am not using spotify).

fixes
@@ -110,7 +110,8 @@ class Module(core.module.Module): def hidden(self): return self.string_song == "" - def __get_song(self): + @core.decorators.scrollable + def __get_song(self, widget): bus = self.__bus if self.__bus_name == "spotifyd": spotify = bus.get_object( @@ -128,11 +129,10 @@ class Module(core.module.Module): artist=",".join(props.get("xesam:artist")), trackNumber=str(props.get("xesam:trackNumber")), ) + return self.__song def update(self): try: - self.__get_song() - if self.__bus_name == "spotifyd": bus = self.__bus.get_object( "org.mpris.MediaPlayer2.spotifyd", "/org/mpris/MediaPlayer2" @@ -156,7 +156,7 @@ class Module(core.module.Module): widget.set("state", "paused") elif widget.name == "spotify.song": widget.set("state", "song") - widget.full_text(self.__song) + widget.full_text(self.__get_song(widget)) except Exception as e: self.__song = ""
Fix "platform_adaptation" documentation test on windows For reasons I don't full understand, including "windows.h" seems to break everything. There's an alternative sleep function in stdlib.h so I've used that instead since it makes the point just as well.
cdef extern from *: """ #if defined(_WIN32) || defined(MS_WINDOWS) || defined(_MSC_VER) - #define WIN32_LEAN_AND_MEAN - #include <windows.h> - #define myapp_sleep(m) Sleep(m) + #include "stdlib.h" + #define myapp_sleep(m) _sleep(m) #else #include <unistd.h> #define myapp_sleep(m) ((void) usleep((m) * 1000))
Fix typos Fixed minor typos - Azaras to Azara's / Ruis' to Rui's
@@ -89,7 +89,7 @@ True ## 4. Combine matched records -Implement the `create_record()` function that takes a `(treasure, coordinate)` pair from Azaras list and a `(location, coordinate, quadrant)` record from Ruis' list and returns `(treasure, coordinate, location, coordinate, quadrant)` **if the coordinates match**. +Implement the `create_record()` function that takes a `(treasure, coordinate)` pair from Azara's list and a `(location, coordinate, quadrant)` record from Rui's list and returns `(treasure, coordinate, location, coordinate, quadrant)` **if the coordinates match**. If the coordinates _do not_ match, return the string **"not a match"** Re-format the coordinate as needed for accurate comparison.
Allow nic-config conversion without Heat

The current script requires the orchestration service (Heat) to be available. This change allows the script to convert existing templates even when the orchestration service is not present.
@@ -82,6 +82,13 @@ def parse_opts(argv):
     parser.add_argument('template', metavar='TEMPLATE_FILE',
                         help='Existing NIC config template to convert.')
 
+    parser.add_argument('--standalone',
+                        default=False,
+                        action='store_true',
+                        help='This switch allows the script to operate in '
+                             'environments where the orchestration service '
+                             'is not available. Such as environments with '
+                             'ephemeral-heat')
 
     opts = parser.parse_args(argv[1:])
 
@@ -225,7 +232,8 @@ class ConvertToAnsibleJ2(object):
         if isinstance(param, str):
             if param in self.param_to_var_map:
                 return self.param_to_var_map[param]
-            elif param in self.stack_env.get('parameter_defaults', {}):
+            elif (self.stack_env and
+                    param in self.stack_env.get('parameter_defaults', {})):
                 stack_value = self.stack_env['parameter_defaults'][param]
                 print('INFO - Custom Parameter {} was hard-coded in the '
                       'converted template using the value from the Heat stack '
@@ -389,7 +397,7 @@ class ConvertToAnsibleJ2(object):
         net_config_res_props = net_config_res['properties']
 
         if net_config_res['type'] == 'OS::Heat::Value':
-            h_net_conf = net_config_res_props['value']
+            h_net_conf = net_config_res_props['value']['network_config']
         elif net_config_res['type'] == 'OS::Heat::SoftwareConfig':
             h_net_conf = net_config_res_props['config']['str_replace'][
                 'params']['$network_config']['network_config']
@@ -501,7 +509,10 @@ def main():

     j2_template = os.path.splitext(template)[0] + '.j2'
     validate_files(opts, template, networks_file, j2_template)

-    stack_env = get_stack_environment(opts.stack)
+    if not opts.standalone:
+        stack_env = get_stack_environment(opts.stack)
+    else:
+        stack_env = None

     converter = ConvertToAnsibleJ2(stack_env, networks_file)
Fix Sphinx crossrefs to 'Client'. Broken by move to 'spanner_v1' (the aliases in 'spanner' are not honored). Closes
@@ -42,23 +42,23 @@ Spanner Client Instantiating a Client ---------------------- -To use the API, the :class:`~google.cloud.spanner.client.Client` +To use the API, the :class:`~google.cloud.spanner_v1.client.Client` class defines a high-level interface which handles authorization and creating other objects: .. code:: python - from google.cloud import spanner - client = spanner.Client() + from google.cloud import spanner_v1 + client = spanner_v1.Client() Long-lived Defaults ------------------- -When creating a :class:`~google.cloud.spanner.client.Client`, the +When creating a :class:`~google.cloud.spanner_v1.client.Client`, the ``user_agent`` and ``timeout_seconds`` arguments have sensible defaults -(:data:`~google.cloud.spanner.client.DEFAULT_USER_AGENT` and -:data:`~google.cloud.spanner.client.DEFAULT_TIMEOUT_SECONDS`). +(:data:`~google.cloud.spanner_v1.client.DEFAULT_USER_AGENT` and +:data:`~google.cloud.spanner_v1.client.DEFAULT_TIMEOUT_SECONDS`). However, you may over-ride them and these will be used throughout all API requests made with the ``client`` you create. @@ -74,22 +74,22 @@ Configuration Engine or Google Compute Engine the project will be detected automatically. (Setting this environment variable is not required, you may instead pass the ``project`` explicitly when constructing a - :class:`~google.cloud.spanner.client.Client`). + :class:`~google.cloud.spanner_v1.client.Client`). - After configuring your environment, create a - :class:`~google.cloud.spanner.client.Client` + :class:`~google.cloud.spanner_v1.client.Client` .. code:: - >>> from google.cloud import spanner - >>> client = spanner.Client() + >>> from google.cloud import spanner_v1 + >>> client = spanner_v1.Client() or pass in ``credentials`` and ``project`` explicitly .. code:: - >>> from google.cloud import spanner - >>> client = spanner.Client(project='my-project', credentials=creds) + >>> from google.cloud import spanner_v1 + >>> client = spanner_v1.Client(project='my-project', credentials=creds) .. tip:: @@ -106,8 +106,8 @@ Warnings about Multiprocessing Next Step --------- -After a :class:`~google.cloud.spanner.client.Client`, the next -highest-level object is an :class:`~google.cloud.spanner.instance.Instance`. +After a :class:`~google.cloud.spanner_v1.client.Client`, the next +highest-level object is an :class:`~google.cloud.spanner_v1.instance.Instance`. You'll need one before you can interact with databases. Next, learn about the :doc:`instance-usage`.
[TVMC] Keep quantized weights when importing PyTorch model BYOC requires `keep_quantized_weight` be set to true when converting PyTorch models using `from_torch`. Setting this to be True when using TVMC.
@@ -262,7 +262,9 @@ class PyTorchFrontend(Frontend): input_shapes = list(shape_dict.items()) logger.debug("parse Torch model and convert into Relay computation graph") - return relay.frontend.from_pytorch(traced_model, input_shapes, **kwargs) + return relay.frontend.from_pytorch( + traced_model, input_shapes, keep_quantized_weight=True, **kwargs + ) class PaddleFrontend(Frontend):
Release: Make sure to check with pip locally before uploading to PyPI * This will avoid breakage like recently with runners wrongly handled by pip. * Only very basic test is done with pip installed Nuitka.
from __future__ import print_function import os +import sys +import shutil from nuitka.tools.release.Documentation import createReleaseDocumentation from nuitka.tools.release.Release import checkBranchName @@ -53,31 +55,31 @@ def main(): contents = open("README.rst", "rb").read() assert b".. contents" not in contents + shutil.rmtree("check_nuitka", ignore_errors = True) + shutil.rmtree("dist", ignore_errors = True) + print("Creating documentation.") createReleaseDocumentation() print("Creating source distribution.") assert os.system("python setup.py sdist") == 0 + + print("Creating virtualenv for quick test:") + assert os.system("virtualenv check_nuitka") == 0 + + print("Installing Nuitka into virtualenv:") + print("*" * 40) + assert os.system("cd check_nuitka; . bin/activate; pip install ../dist/Nuitka*.tar.gz") == 0 + print("*" * 40) + + print("Compiling basic test:") + print("*" * 40) + assert os.system("cd check_nuitka; . bin/activate; nuitka-run ../tests/basics/Asserts.py") == 0 + print("*" * 40) + + if "check" not in sys.argv: + assert False print("Uploading source dist") assert os.system("twine upload dist/*") == 0 print("Uploaded.") - - # TODO: This won't work yet. - # import time - # import xmlrpclib - # if False: - # for _i in range(60): - # # Wait some time for PyPI to catch up with us. Without delay - # # the old version will still appear. Since this is running - # # in a Buildbot, we need not be optimal. - # time.sleep(5*60) - # - # pypi = xmlrpclib.ServerProxy("https://pypi.python.org/pypi") - # pypi_versions = pypi.package_releases("Nuitka") - # - # assert len(pypi_versions) == 1, pypi_versions - # if nuitka_version == pypi_versions[0]: - # break - # - # print("Version check failed:", nuitka_version, pypi_versions) - # - # print("Uploaded OK:", pypi_versions[0]) + else: + print("Checked OK, not uploaded.")
Making the start and stop strings that mark hidden tests configurable

TODO: Find out why nbgrader quickstart does not put them into the configuration file?
@@ -3,13 +3,39 @@ import re from .. import utils from . import NbGraderPreprocessor +from traitlets import Unicode +from textwrap import dedent class RemoveHidden(NbGraderPreprocessor): + hidestart = Unicode( + '### HIDESTART', + config=True, + help=dedent( + """ + Suppose you want to hide some test cases from your students in a cell. + Place this string before those test cases and the corresponding string + hideend after them. + """ + ) + ) + + hideend = Unicode( + '### HIDEEND', + config=True, + help=dedent( + """ + Suppose you want to hide some test cases from your students in a cell. + Place this string after those tests. + """ + ) + ) def preprocess_cell(self, cell, resources, cell_index): if utils.is_grade(cell) or utils.is_solution(cell) or utils.is_locked(cell): - cell.source = re.sub('START(?:.|\n)*?STOP', '', cell.source) + cell.source = re.sub('{}(?:.|\n)*?{}'.format(self.hidestart, + self.hideend) + , '', cell.source) # we probably don't really need this? cell.metadata.nbgrader['oldchecksum'] = cell.metadata.nbgrader['checksum']
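A hedged sketch of overriding the new traits in nbgrader_config.py; the marker strings below are illustrative, the defaults stay ### HIDESTART and ### HIDEEND:

# nbgrader_config.py
c = get_config()  # provided by the traitlets config loader

c.RemoveHidden.hidestart = "### BEGIN HIDDEN TESTS"
c.RemoveHidden.hideend = "### END HIDDEN TESTS"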