message: string, lengths 13 to 484
diff: string, lengths 38 to 4.63k
Remove dangling deprecation warning This deprecation is no longer mentioned elsewhere on the page.
@@ -294,10 +294,6 @@ basic slicing that returns a :term:`view`). the former will trigger advanced indexing. Be sure to understand why this occurs. - Also recognize that ``x[[1, 2, 3]]`` will trigger advanced indexing, - whereas due to the deprecated Numeric compatibility mentioned above, - ``x[[1, 2, slice(None)]]`` will trigger basic slicing. - Integer array indexing ~~~~~~~~~~~~~~~~~~~~~~
Use deepcopy before dumping This commit will avoid changes to the config object when dumping it to upload to S3. (We change the structure of the config in pre_dump of Marshmallow)
import json import logging import time +from copy import deepcopy from enum import Enum from typing import List @@ -394,9 +395,10 @@ class Cluster: ) # Upload config with default values and sections if self.config: + config_copy = deepcopy(self.config) result = AWSApi.instance().s3.put_object( bucket_name=self.bucket.name, - body=yaml.dump(ClusterSchema().dump(self.config)), + body=yaml.dump(ClusterSchema().dump(config_copy)), key=self._get_config_key(), ) # config version will be stored in DB by the cookbook at the first update
Refactor codebase: remove legacy comment
@@ -341,8 +341,6 @@ def _resample_ifg(ifg, cmd, x_looks, y_looks, thresh, md=None): Convenience function to resample data from a given Ifg (more coarse). """ - # Create tmp ifg and extract data array for manual resampling as gdalwarp - # lacks the averaging method fp, tmp_path = mkstemp(suffix='.tif') check_call(cmd + [ifg.data_path, tmp_path])
Wait for exit status of watched run The watch command ends with a message reporting the status of the watched run. It should wait until the exit status is written before checking the run status.
@@ -34,6 +34,7 @@ from . import runs_impl log = logging.getLogger("guild") TAIL_BUFFER = 4096 +DEFAULT_EXIT_STATUS_TIMEOUT = 5.0 def main(args, ctx): @@ -172,6 +173,7 @@ def _tail(run): elif proc.is_running(): time.sleep(0.1) else: + _wait_for_exit_status(run) break @@ -213,6 +215,23 @@ def _wait_for_output(proc, output_path): return _try_open(output_path) +def _wait_for_exit_status(run, timeout=DEFAULT_EXIT_STATUS_TIMEOUT): + stop_at = time.time() + timeout + while True: + if time.time() >= stop_at: + log.warning("exit status for %s not written", run.id) + break + try: + exit_status = run.get("exit_status") + except Exception as e: + log.debug("error reading exit status: %s", e) + time.sleep(1.0) + else: + if exit_status is not None: + break + time.sleep(0.1) + + def _print_run_status(run): cli.out("Run %s stopped with a status of '%s'" % (run.id, run.status), err=True)
Add url property to NodeLicenseRecord So that it can be accessed like other fields on NodeLicense like name, text and license_id
@@ -76,6 +76,10 @@ class NodeLicenseRecord(ObjectIDMixin, BaseModel): def license_id(self): return self.node_license.license_id if self.node_license else None + @property + def url(self): + return self.node_license.url if self.node_license else None + def to_json(self): return serialize_node_license_record(self)
Redefining the parameter nnz This is in accordance with pull request
@@ -52,7 +52,7 @@ class csr_matrix(_cs_matrix): ndim : int Number of dimensions (this is always 2) nnz - Number of nonzero elements + Number of stored values, including explicit zeros data CSR format data array of the matrix indices
BUG: fix GEE resid_working to match GLM Note that at the moment, the GEE version is not getting hit in tests.
@@ -1471,7 +1471,7 @@ class GEEResults(base.LikelihoodModelResults): @cache_readonly def resid_working(self): val = self.resid_response - val = val / self.family.link.deriv(self.fittedvalues) + val = val * self.family.link.deriv(self.fittedvalues) return val @cache_readonly
Fix logic for search page exit route Fixes
mixins: [commonCoreStrings, commonLearnStrings, responsiveWindowMixin], data() { return { - lastRoute: null, + searchPageExitRoute: null, demographicInfo: null, }; }, return { appBarTitle: this.coreString('searchLabel'), immersivePage: true, - // Default to the Learn root page if there is no lastRoute to return to. - immersivePageRoute: this.lastRoute || this.$router.getRoute(PageNames.TOPICS_ROOT), + // Default to the Learn root page if there is no searchPageExitRoute to return to. + immersivePageRoute: + this.searchPageExitRoute || this.$router.getRoute(PageNames.TOPICS_ROOT), immersivePagePrimary: true, immersivePageIcon: 'close', }; }, watch: { $route: function(newRoute, oldRoute) { - // Return if the user is leaving or entering the Search page. - // This ensures we never set this.lastRoute to be any kind of - // SEARCH route and avoids infinite loops. - if (newRoute.name === 'SEARCH' || oldRoute.name === 'SEARCH') { - return; - } - - // Destructure the oldRoute into an object with 3 specific properties. - // Setting this.lastRoute = oldRoute causes issues for some reason. - this.lastRoute = { + const topicRouteNames = [ + PageNames.TOPICS_ROOT, + PageNames.TOPICS_CHANNEL, + PageNames.TOPICS_TOPIC, + ]; + // If going from topic -> search, save the topic route parameters for the + // exit link. + // But, if we go from search -> content, we do not edit `searchPageExitRoute` + // preserve the backwards linking from content -> search -> topic + if (topicRouteNames.includes(oldRoute.name) && newRoute.name === PageNames.SEARCH) { + this.searchPageExitRoute = { name: oldRoute.name, query: oldRoute.query, params: oldRoute.params, }; + } else if (oldRoute.name === PageNames.SEARCH && topicRouteNames.includes(newRoute.name)) { + // If going from search -> topic (either by clicking "X" or clicking a topic card + // in the results), clear out the exit route. + this.searchPageExitRoute = null; + } }, }, mounted() {
fix(File): Correct acceptable types in APIs Allow str types for start, page_length in get_files_in_folder API Allow str, list[dict] file_list in move_file API
@@ -39,7 +39,7 @@ def get_attached_images(doctype: str, names: list[str]) -> frappe._dict: @frappe.whitelist() -def get_files_in_folder(folder: str, start: int = 0, page_length: int = 20) -> dict: +def get_files_in_folder(folder: str, start: int | str = 0, page_length: int | str = 20) -> dict: start = cint(start) page_length = cint(page_length) @@ -101,10 +101,11 @@ def create_new_folder(file_name: str, folder: str) -> File: @frappe.whitelist() -def move_file(file_list: list[File], new_parent: str, old_parent: str) -> None: +def move_file(file_list: list[File | dict] | str, new_parent: str, old_parent: str) -> None: if isinstance(file_list, str): file_list = json.loads(file_list) + # will check for permission on each file & update parent for file_obj in file_list: setup_folder_path(file_obj.get("name"), new_parent)
Be less shouty When I type the command name I want to be told what it does, not that I'm doing it wrong. For
dials.goniometer_calibration is a tool to aid calibration of multi-axis goniometers. -The tool takes as input exeriments.expt files for datasets recorded at the +The tool takes as input experiments.expt files for datasets recorded at the goniometer datum setting and for each goniometer axis incremented in turn. It outputs the axes and angles relating each consecutive pair of crystal setting matrices in imgCIF and MOSFLM coordinate systems, and the CIF loop describing the goniometer axes. Optionally it can also output an XOalign configuration file. +Either space_group must be specified or the parameter +use_space_group_from_experiments=True must be set + Examples:: -dials.goniometer_calibration space_group=P422 \ - experiments_o0_k0_p0.expt experiments_o0_k0_p48.expt \ +dials.goniometer_calibration space_group=P422 \\ + experiments_o0_k0_p0.expt experiments_o0_k0_p48.expt \\ experiments_o0_k48_p48.expt experiments_o48_k48_p48.expt """ @@ -53,9 +56,9 @@ def run(args): params, options = parser.parse_args(show_diff_phil=True) if not params.use_space_group_from_experiments and params.space_group is None: - sys.exit( - "Either space_group must be specified or set the parameter use_space_group_from_experiments=True" - ) + parser.print_help() + return + experiments = flatten_experiments(params.input.experiments) if len(experiments) <= 1: parser.print_help()
Increase API image build timeout In CN regions it sporadically takes more than 10 mins
@@ -201,7 +201,7 @@ def _test_docker_image_refresh(image_builder_pipeline, lambda_name): @retry( retry_on_result=lambda result: result["state"]["status"] not in {"AVAILABLE", "CANCELLED", "FAILED", "DELETED"}, wait_fixed=seconds(10), - stop_max_delay=minutes(10), + stop_max_delay=minutes(15), ) def _wait_for_image_build(image_builder_pipeline): image_builder = boto3.client("imagebuilder")
Checking if SafeUUID hack is needed before unpickling datasets. This way we don't run into an issue with pickle's cache.
@@ -1771,9 +1771,9 @@ class DataSet(object): else: f = fileOrFilename - try: + if 'SafeUUID' in dir(_uuid): state_dict = _pickle.load(f) - except AttributeError: + else: # HACK TO ALLOW UUIDs saved on python3.7 work with earlier python versions that don't have uuid.SafeUUID # HACK - maybe move this to leagacyio to deal with Python 3 versions < 3.7 not having SafeUUID? class dummy_SafeUUID(object):
Update pylint run disable C0330 (hanging indents) because of conflicts with Black style formatting
@@ -11,7 +11,7 @@ matrix: - python: 3.6 env: KERAS_BACKEND=tensorflow TENSORFLOW_V=1.15.2 KERAS_V=2.2.5 script: - - (pycodestyle --max-line-length=120 art || exit 0) && (pylint --disable=C0415,E1136 -rn art || exit 0) + - (pycodestyle --max-line-length=120 art || exit 0) && (pylint --disable=C0330,C0415,E1136 -rn art || exit 0) - py.test --pep8 -m pep8 before_install:
Bump deprecation in win_servermanager state to Neon The original deprecation notice says the "force" option in win_servermanager.installed will be removed in Fluorine. However, this change was recently made in 2018.3.0. We need to give 2 feature releases before removal.
@@ -115,10 +115,10 @@ def installed(name, ''' if 'force' in kwargs: salt.utils.versions.warn_until( - 'Fluorine', + 'Neon', 'Parameter \'force\' has been detected in the argument list. This' 'parameter is no longer used and has been replaced by \'recurse\'' - 'as of Salt 2018.3.0. This warning will be removed in Salt Fluorine.' + 'as of Salt 2018.3.0. This warning will be removed in Salt Neon.' ) kwargs.pop('force')
Fixed: on mobile, the info icon is centered vertically * Fixed: on mobile, the info icon is centered vertically * Added the requested changes during review
display: none; } .oppia-exploration-footer .oppia-navbar-footer-info-icon { - margin-top: -11px; + margin-top: -3px; } .oppia-exploration-footer .oppia-navbar-footer-info-icon:hover { - margin-top: -11px; + margin-top: -3px; } }
query: do not show duplicates in history ref
@@ -48,12 +48,17 @@ class QueryShell(shell.BQLShell, FavaModule): @staticmethod def get_history(max_entries): - """Get the most recently used shell commands.""" + """Get the most recently used shell commands (removing duplicates).""" num_entries = readline.get_current_history_length() - return [ - readline.get_history_item(index + 1) - for index in range(max(num_entries - max_entries, 0), num_entries) - ] + history = [] + for index in range(num_entries): + if len(history) >= max_entries: + return history + item = readline.get_history_item(index + 1) + if item in history: + history.remove(item) + history.append(item) + return history def _loadfun(self): self.entries = self.ledger.entries
Release 4.5.2 this is a bugfix release
@@ -3,6 +3,9 @@ The released versions correspond to PyPi releases. ## Version 4.6.0 (as yet unreleased) +## [Version 4.5.2](https://pypi.python.org/pypi/pyfakefs/4.5.2) (2021-11-07) +This is a bugfix release. + ### Changes * `os.listdir`, `os.scandir` and `pathlib.Path.listdir` now return the directory list in a random order (see [#638](../../issues/638))
remove missing_ok in Path.unlink() This parameter was introduced in python 3.8, so it will break in python 3.7. Instead, we simply ignore FileNotFoundError.
@@ -114,7 +114,10 @@ def build_singularity_image( yield str(sif) finally: if remove: - sif.unlink(missing_ok=True) + try: + sif.unlink() + except FileNotFoundError: + pass def run_docker_image(
WL: cache keymaps using variant in key Different keymaps can be generated for different variants so we need to also take those into account when storing keymaps.
@@ -76,8 +76,8 @@ class Keyboard(HasListeners): XKB_DEFAULT_LAYOUT and XKB_DEFAULT_OPTIONS and if not specified are taken from the environment. """ - if (layout, options) in self._keymaps: - keymap = self._keymaps[(layout, options)] + if (layout, options, variant) in self._keymaps: + keymap = self._keymaps[(layout, options, variant)] else: keymap = self.xkb_context.keymap_new_from_names( layout=layout, options=options, variant=variant
Make utility install playbook idempotent To ensure idempotence we do the following: 1. Ensure that the symlinks are only changed if they do not yet exist. 2. Switch the openstack client bash completion to be a handler which is executed whenever the venv changes or the symlinks change.
utility_upper_constraints_url: "{{ requirements_git_url | default('https://git.openstack.org/cgit/openstack/requirements/plain/upper-constraints.txt?h=' ~ requirements_git_install_branch | default('master')) }}" tags: - utility + handlers: + - name: Create openstack client bash_completion script + shell: >- + openstack complete > /etc/bash_completion.d/openstack_client + args: + executable: /bin/bash + listen: + - "venv changed" pre_tasks: - include_tasks: "common-tasks/os-{{ container_tech | default('lxc') }}-container-setup.yml" when: not is_metal - name: Create symlinks for openstack clients shell: | {% set _bin_name = item | regex_replace('^(?:python-)?(\w*)(?:client)$', '\\1') %} - if [[ -e "{{ utility_venv_bin }}/{{ _bin_name }}" ]]; then + set -e + return_code=0 + if [[ -e "{{ utility_venv_bin }}/{{ _bin_name }}" && ! -L "/usr/local/bin/{{ _bin_name }}" ]]; then ln -sfn {{ utility_venv_bin }}/{{ _bin_name }} /usr/local/bin/{{ _bin_name }} + return_code=2 fi + exit ${return_code} args: executable: /bin/bash with_items: "{{ _openstack_client_list }}" - - - name: Create openstack client bash_completion script - shell: | - openstack complete > /etc/bash_completion.d/openstack_client + register: _client_symlink + changed_when: _client_symlink.rc == 2 + failed_when: _client_symlink.rc not in [0,2] + notify: "Create openstack client bash_completion script"
[Metrics] Avoid null value when chain height is 0 Fixed a regression that would cause the ETH remaining height value to be set to None instead of 0.
@@ -168,7 +168,7 @@ async def get_metrics(shared_stats: dict) -> Metrics: else: sync_messages_remaining_total = None - if eth_reference_height and eth_last_committed_height: + if eth_reference_height is not None and eth_last_committed_height is not None: # Some blocks may not contain Aleph messages, and therefore the last committed height # may be higher than the height of the last block containing Aleph messages. eth_remaining_height = max(eth_reference_height - eth_last_committed_height, 0)
Update profile-nfts-flow.md (Small tweak)
@@ -5,7 +5,7 @@ SPDX-License-Identifier: Apache-2.0 # Quickstart: Profile NFTs -This is a flow showing: share user profile data privately with dapps, via Ocean Data NFTs. +This is a flow showing how to do "login with Web3" with the help of Ocean data NFTs. In this flow, a dapp is not only connected to the user's wallet, but it can access profile data that the user has privately shared to it. Here are the steps:
Update docstring in validate_provider_segment The InvalidInput exception has been moved to neutron_lib.exceptions. TrivialFix
@@ -69,7 +69,7 @@ class _TypeDriverBase(object): """Validate attributes of a provider network segment. :param segment: segment dictionary using keys defined above - :raises: neutron.common.exceptions.InvalidInput if invalid + :raises: neutron_lib.exceptions.InvalidInput if invalid Called outside transaction context to validate the provider attributes for a provider network segment. Raise InvalidInput
Fix: for debug mode, whitelist a new kind of exception expression. * This ought to be optimized, but currently the locals dict nodes do not work well when an error could happen without fallback, so allow it until this is improved. * This adds a TODO item.
@@ -196,9 +196,13 @@ def generateRaiseExpressionCode(to_name, expression, emit, context): # Missed optimization opportunity, please report it, this should not # normally happen. We are supposed to propagate this upwards. if isDebug(): + # TODO: Need to optimize ExpressionLocalsVariableRefORFallback once we know + # it handles cases where the value is not in locals dict properly. + parent = expression.parent assert parent.isExpressionSideEffects() or \ - parent.isExpressionConditional(), \ + parent.isExpressionConditional() or \ + parent.isExpressionLocalsVariableRefORFallback(), \ (expression, expression.parent, expression.asXmlText()) # That's how we indicate exception to the surrounding world.
Update setup.py to include py.typed marker file. To make the aio_pika package PEP 561 compatible, it needs to indicate the presence of the marker file in the setup.py file. This ensures the package is installed as a typed package.
@@ -36,6 +36,7 @@ setup( 'Programming Language :: Python :: Implementation :: CPython', ], packages=find_packages(exclude=['tests']), + package_data={'aio_pika': ['py.typed']}, install_requires=[ 'aiormq~=2.1', 'yarl',
Exclude Users - Ignore Case fixes
@@ -495,7 +495,7 @@ class ConfigLoader(object): try: # add "match begin" and "match end" markers to ensure complete match # and compile the patterns because we will use them over and over - exclude_users.append(re.compile(r'\A' + regexp + r'\Z', re.UNICODE)) + exclude_users.append(re.compile(r'\A' + regexp + r'\Z', re.UNICODE | re.IGNORECASE)) except re.error as e: validation_message = ('Illegal regular expression (%s) in %s: %s' % (regexp, 'exclude_identity_types', e))
Additional documentation ... for `Organization.affiliated_organizations`.
@@ -143,7 +143,8 @@ The primary tables used in AMY (that will likely appear in every query) are thos * `fullname` Human friendly name of the organization * `country` Stored as the [two digit country code](https://en.wikipedia.org/wiki/ISO_3166-1_alpha-2) * `latitude` and `longitude` Stored as floating point (decimal) numbers -* `affiliated_organizations` Many-to-many relationship between organizations +* `affiliated_organizations` Many-to-many relationship between organizations; the purpose of this field is to "link together" organisations that in some way are related. + For example, "University of California" organisation can be linked to "University of California, Berkeley", "University of California, Davis", and "University of California, Los Angeles". # Additional Tables in AMY
Adalog: remove an obsolete pragma TN:
@@ -19,9 +19,7 @@ package body Langkit_Support.Adalog.Abstract_Relation is procedure Wait is begin - pragma Warnings (Off, "always"); if Debug_State = Step then - pragma Warnings (On, "always"); Put_Line ("Press enter to continue .."); declare Dummy : String := Ada.Text_IO.Get_Line;
[dagit] Flip UTC offset sign in timezone picker Summary: Resolves UTC offsets are intentionally inverted, so we need to flip the sign back to be visually correct. Test Plan: View timezone picker, verify proper sign on timezones. Reviewers: max, prha, alangenfeld
@@ -46,7 +46,8 @@ export const TimezoneProvider: React.FunctionComponent = (props) => { const formatOffset = (mm: number) => { const amm = Math.abs(mm); - return `${mm < 0 ? '-' : '+'}${Math.floor(amm / 60)}:${amm % 60 < 10 ? '0' : ''}${amm % 60}`; + // moment.tz.zone() offsets are inverted: https://momentjs.com/timezone/docs/#/zone-object/offset/ + return `${mm < 0 ? '+' : '-'}${Math.floor(amm / 60)}:${amm % 60 < 10 ? '0' : ''}${amm % 60}`; }; const AllTimezoneItems = moment.tz
ebuild.ebuild_built: remove 'ebuild' from attr map initialization It should already be handled via class property.
@@ -102,7 +102,7 @@ class package(ebuild_src.base): _get_attr.update((x, partial(_chost_fallback, x.upper())) for x in ("cbuild", "chost", "ctarget")) _get_attr.update((x, post_curry(passthrough, x)) - for x in ("contents", "environment", "ebuild")) + for x in ("contents", "environment")) _get_attr.update((x, lambda s,x=x: s.data.get(x.upper(), "")) for x in ("cflags", "cxxflags", "ldflags")) _get_attr.update((x, lambda s,x=x: tuple(s.data.get(x.upper(), "").split()))
Don't generate runtime check for private abstract properties This used to be necessary when private properties were declared in the "private" part of packages in generated code, but now that we have the separate $.Analysis.Implementation package, there is no longer a "private" part and thus this workaround is no longer required. TN:
@@ -2631,10 +2631,6 @@ class PropertyDef(AbstractNodeData): scheme with current langkit capabilities in which the parser generate the right types for the functionality you want. - Note that for abstract properties that are private, this is - automatically enabled, as abstract private primitives are not - allowed in Ada. - :param None|list[DynamicVariable] dynamic_vars: List of dynamically bound variables for this property. If left to None, inherit from the overriden property, or the empty list if these is no property @@ -3300,10 +3296,6 @@ class PropertyDef(AbstractNodeData): ' properties' ) - # See abstract_runtime_check documentation in __init__ - if self.is_private and self.abstract: - self.abstract_runtime_check = True - # Add dynamically bound variables as arguments check_source_language( self.is_private or not self._dynamic_vars,
test_classes.py: Replace markdown_logger mock with assertLogs. Set level to 'ERROR' since exceptions create logs with that level.
@@ -1104,7 +1104,9 @@ Output: """ with self.settings(ERROR_BOT=None), mock.patch( "zerver.lib.markdown.timeout", side_effect=subprocess.CalledProcessError(1, []) - ), mock.patch("zerver.lib.markdown.markdown_logger"): + ), self.assertLogs( + level="ERROR" + ): # For markdown_logger.exception yield def create_default_device(
Fix swagger Don't show limit/offset for endpoints not supporting it
@@ -150,6 +150,7 @@ class AboutEthereumTracingRPCView(AboutEthereumRPCView): class ERC20IndexingView(GenericAPIView): serializer_class = serializers.ERC20IndexingSerializer + pagination_class = None # Don't show limit/offset in swagger def get(self, request): """ @@ -557,6 +558,7 @@ def swagger_safe_balance_schema(serializer_class): class SafeBalanceView(GenericAPIView): serializer_class = serializers.SafeBalanceResponseSerializer + pagination_class = None # Don't show limit/offset in swagger def get_parameters(self) -> Tuple[bool, bool]: """ @@ -965,6 +967,7 @@ class SafeIncomingTransferListView(SafeTransferListView): class SafeCreationView(GenericAPIView): serializer_class = serializers.SafeCreationInfoResponseSerializer + pagination_class = None # Don't show limit/offset in swagger @swagger_auto_schema( responses={ @@ -1000,6 +1003,7 @@ class SafeCreationView(GenericAPIView): class SafeInfoView(GenericAPIView): serializer_class = serializers.SafeInfoResponseSerializer + pagination_class = None # Don't show limit/offset in swagger @swagger_auto_schema( responses={ @@ -1043,6 +1047,7 @@ class SafeInfoView(GenericAPIView): class ModulesView(GenericAPIView): serializer_class = serializers.ModulesResponseSerializer + pagination_class = None # Don't show limit/offset in swagger @swagger_auto_schema( responses={ @@ -1073,6 +1078,7 @@ class ModulesView(GenericAPIView): class OwnersView(GenericAPIView): serializer_class = serializers.OwnerResponseSerializer + pagination_class = None # Don't show limit/offset in swagger @swagger_auto_schema( responses={
fix page history view on custom user models User models without a username field caused an exception on page history views. This commit fixes
@@ -112,9 +112,10 @@ class LockedPagesReportFilterSet(WagtailFilterSet): def get_requested_by_queryset(request): - return get_user_model().objects.filter( + User = get_user_model() + return User.objects.filter( pk__in=set(WorkflowState.objects.values_list('requested_by__pk', flat=True)) - ).order_by('username') + ).order_by(User.USERNAME_FIELD) class WorkflowReportFilterSet(WagtailFilterSet): @@ -176,9 +177,10 @@ class WorkflowTasksReportFilterSet(WagtailFilterSet): def get_audit_log_users_queryset(request): - return get_user_model().objects.filter( + User = get_user_model() + return User.objects.filter( pk__in=set(PageLogEntry.objects.values_list('user__pk', flat=True)) - ).order_by('username') + ).order_by(User.USERNAME_FIELD) class SiteHistoryReportFilterSet(WagtailFilterSet):
Remove chmod from test condition chmod() is not fully supported in Windows. Instead, use the '/' directory to trigger a non-EEXIST exception for the test condition
@@ -156,18 +156,19 @@ class TestProject(unittest.TestCase): self.assertTrue(len(project.keys) == 1) self.assertTrue(project.keys[0] == project_key['keyid']) - # Set as readonly and try to write a repo. + # Try to write to an invalid location. The OSError should be re-raised by + # create_new_project(). shutil.rmtree(targets_directory) - os.chmod(local_tmp, 0o0555) - tuf.roledb.clear_roledb() tuf.keydb.clear_keydb() + + metadata_directory = '/' + valid_metadata_directory_name = developer_tool.METADATA_DIRECTORY_NAME + developer_tool.METADATA_DIRECTORY_NAME = '/' self.assertRaises(OSError, developer_tool.create_new_project, project_name, metadata_directory, location_in_repository, targets_directory, project_key) - - os.chmod(local_tmp, 0o0777) - + developer_tool.METADATA_DIRECTORY_NAME = valid_metadata_directory_name
Duplicate persons: update form display condition The forms for marking people as reviewed will always be shown, whereas the forms for merging persons will only be shown if the number of records in the table is at least 2.
<td>{% if not forloop.first %}<input type="radio" name="person_b" value="{{ person.id }}" form="form_switched_names_merge">{% endif %}</td> </tr> {% endfor %} - {% if switched_persons|length >= 2 %} <tr> <td></td> <td> </form> </td> <td colspan="2"> + {% if switched_persons|length >= 2 %} <form method="GET" action="{% url 'persons_merge' %}" id="form_switched_names_merge"> <input type="hidden" name="next" value="{% url 'duplicate_persons' %}"> <input type="submit" value="Merge selected" class="btn btn-primary"> </form> + {% endif %} </td> </tr> - {% endif %} </tbody> </table> {% else %} <td>{% if not forloop.first %}<input type="radio" name="person_b" value="{{ person.id }}" form="form_same_names_merge">{% endif %}</td> </tr> {% endfor %} - {% if duplicate_persons|length >= 2 %} <tr> <td></td> <td> </form> </td> <td colspan="2"> + {% if duplicate_persons|length >= 2 %} <form method="GET" action="{% url 'persons_merge' %}" id="form_same_names_merge"> <input type="hidden" name="next" value="{% url 'duplicate_persons' %}"> <input type="submit" value="Merge selected" class="btn btn-primary"> </form> + {% endif %} </td> </tr> - {% endif %} </tbody> </table> {% else %}
[benchmark] Minor tweaks to `hail-bench compare` Fix up formatting, add harmonic mean.
@@ -2,7 +2,7 @@ import json import os import sys -from scipy.stats.mstats import gmean +from scipy.stats.mstats import gmean, hmean import numpy as np @@ -83,20 +83,21 @@ def compare(args): sys.stderr.write(f"Failed benchmarks in run 2:" + ''.join(f'\n {t}' for t in failed_2) + '\n') comparison = sorted(comparison, key=lambda x: x[2] / x[1], reverse=True) - longest_name = max(len(name) for name, _, _ in comparison) + longest_name = max(max(len(name) for name, _, _ in comparison), len('Benchmark Name')) comps = [] def format(name, ratio, t1, t2): - return f'{name:>{longest_name}} {ratio:>8} {t1:>7} {t2:>7}' + return f'{name:>{longest_name}} {ratio:>8} {t1:>8} {t2:>8}' - print(format('Name', 'Ratio', 'Time 1', 'Time 2')) - print(format('----', '-----', '------', '------')) + print(format('Benchmark Name', 'Ratio', 'Time 1', 'Time 2')) + print(format('--------------', '-----', '------', '------')) for name, r1, r2 in comparison: comps.append(r2 / r1) - print(format(name, fmt_diff(r2 / r1), fmt_time(r1, 7), fmt_time(r2, 7))) + print(format(name, fmt_diff(r2 / r1), fmt_time(r1, 8), fmt_time(r2, 8))) print('----------------------') + print(f'Harmonic mean: {fmt_diff(hmean(comps))}') print(f'Geometric mean: {fmt_diff(gmean(comps))}') - print(f'Simple mean: {fmt_diff(np.mean(comps))}') + print(f'Arithmetic mean: {fmt_diff(np.mean(comps))}') print(f'Median: {fmt_diff(np.median(comps))}')
minor code style tweaks (closer to pycharm automatic code formatting)
Unit tests specifically for 1.0.0-style DataCube """ -import openeo.metadata import pytest import shapely.geometry + +import openeo.metadata from openeo.internal.graph_building import PGNode from openeo.rest.connection import Connection - from .conftest import API_URL from ... import load_json_resource @@ -97,10 +97,12 @@ def test_ndvi_args(con100: Connection): "result": True, } + def test_rename_dimension(con100): s2 = con100.load_collection("S2") x = s2.rename_dimension(source="bands", target="ThisIsNotTheBandsDimension") - assert x.graph=={'loadcollection1': { + assert x.graph == { + 'loadcollection1': { 'arguments': { 'id': 'S2', 'spatial_extent': None, @@ -116,7 +118,9 @@ def test_rename_dimension(con100): }, 'process_id': 'rename_dimension', 'result': True - }} + } + } + def test_reduce_dimension(con100): s2 = con100.load_collection("S2") @@ -233,7 +237,10 @@ def test_apply_absolute_pgnode(con100): def test_load_collection_properties(con100): # TODO: put this somewhere and expose it to the user? def eq(value, case_sensitive=True) -> PGNode: - return PGNode(process_id="eq", arguments={"x": {"from_parameter": "value"}, "y": value, "case_sensitive": case_sensitive}) + return PGNode( + process_id="eq", + arguments={"x": {"from_parameter": "value"}, "y": value, "case_sensitive": case_sensitive} + ) def between(min, max) -> PGNode: return PGNode(process_id="between", arguments={"x": {"from_parameter": "value"}, "min": min, "max": max}) @@ -252,7 +259,6 @@ def test_load_collection_properties(con100): assert im.graph == expected - def test_apply_dimension_temporal_cumsum_with_target(con100): cumsum = con100.load_collection("S2").apply_dimension('cumsum', dimension="t", target_dimension="MyNewTime") actual_graph = cumsum.graph @@ -311,7 +317,10 @@ def test_filter_spatial_callbak(con100): }, 'runudf1': { 'arguments': { - 'data': {'features': [{'geometry': {'coordinates': [125.6,10.1],'type': 'Point'},'type': 'Feature'}],'type': 'FeatureCollection'}, + 'data': { + 'features': [{'geometry': {'coordinates': [125.6, 10.1], 'type': 'Point'}, 'type': 'Feature'}], + 'type': 'FeatureCollection' + }, 'runtime': 'Python', 'udf': 'def transform_point_into_bbox(data:UdfData): blabla' },
add tutorial for adding a new endpoint walks through the process of adding the new `fluview_meta` endpoint
@@ -7,6 +7,10 @@ This guide describes how to write and test code for the Epidata API. For preliminary steps, [install docker and create a virtual network](https://github.com/cmu-delphi/operations/blob/master/docs/frontend_development.md#setup). +After reading this guide, you may want to visit +[the `fluview_meta` tutorial](new_endpoint_tutorial.md) for an example of how +to add a new endpoint to the API. + # setup For working on the Epidata API, you'll need the following two Delphi
Fixes typo nozerconf -> nozeroconf Fixes
@@ -828,7 +828,7 @@ def _parse_network_settings(opts, current): _raise_error_network('hostname', ['server1.example.com']) if 'nozeroconf' in opts: - nozeroconf = salt.utils.dequote(opts['nozerconf']) + nozeroconf = salt.utils.dequote(opts['nozeroconf']) if nozeroconf in valid: if nozeroconf in _CONFIG_TRUE: result['nozeroconf'] = true_val
Refactor EnumNode classes creation Forward fields to the created node class, so that enum nodes can have fields and properties.
@@ -1206,7 +1206,7 @@ class StructMetaclass(CompiledTypeMetaclass): assert sum(1 for b in [is_astnode, is_struct] if b) == 1 assert sum(1 for b in [is_base, is_root_grammar_class] if b) <= 1 - # Get the fields this class define. Remove them as class members: we + # Get the fields this class defines. Remove them as class members: we # want them to be stored in their own dict (see "cls.fields" below). dct_fields = AbstractNodeData.filter_fields(dct) for f_n, _ in dct_fields: @@ -2448,14 +2448,14 @@ class EnumNodeMetaclass(type): if name == "__EnumNodeInternal": return type.__new__(mcs, name, bases, dct) - qualifier = dct.get("qualifier") + qualifier = dct.pop("qualifier", False) # If the class has True for the qualifier, then auto generate - # alternatives and node classes. - if qualifier: - dct.update({ - "alternatives": ["present", "absent"], - }) + # alternatives and node classes. Else, take them from the + # 'alternatives' field. + alternatives = ( + ["present", "absent"] if qualifier else dct.pop('alternatives') + ) from langkit.expressions import Property, AbstractProperty @@ -2465,13 +2465,16 @@ class EnumNodeMetaclass(type): "is_enum_node": True, }) + # Add other supplied fields to the base class dict + base_enum_dct.update(dct) + # Generate the abstract base node type basename = names.Name.from_camel(name) base_enum_node = abstract(type(name, (T.root_node, ), base_enum_dct)) base_enum_node.is_type_resolved = True base_enum_node._alternatives = [] - for alt in dct["alternatives"]: + for alt in alternatives: alt_name = basename + names.Name.from_lower(alt) attr_name = (names.Name("alt") + names.Name.from_lower(alt)).lower
Add InvalidAMRError exception to amr This makes it easy to capture the exception in containing scopes and dump the offending sentence-action pair into files for debugging
@@ -2,6 +2,10 @@ import sys from transition_amr_parser.utils import print_log +class InvalidAMRError(Exception): + pass + + class AMR: def __init__(self, tokens=None, root='', nodes=None, edges=None, alignments=None, score=0.0): @@ -134,7 +138,7 @@ class AMR: else: if len(completed) < len(self.nodes): - raise Exception("Tried to print an uncompleted AMR") + raise InvalidAMRError("Tried to print an uncompleted AMR") print_log('amr', 'Failed to print AMR, ' + str(len(completed)) + ' of ' + str(len(self.nodes)) + ' nodes printed:\n ' + amr_string) if amr_string.startswith('"') or amr_string[0].isdigit() or amr_string[0] == '-': amr_string = '(x / '+amr_string+')'
TST: Add unit test for kwarg of np.einsum Ensure that explicitly stating out=None does not raise an error in np.einsum, see and
@@ -605,6 +605,10 @@ def test_einsum_misc(self): [[[1, 3], [3, 9], [5, 15], [7, 21]], [[8, 16], [16, 32], [24, 48], [32, 64]]]) + # Ensure explicitly setting out=None does not cause an error + # see issue gh-15776 and issue gh-15256 + assert_equal(np.einsum('i,j', [1], [2], out=None), [[2]]) + def test_subscript_range(self): # Issue #7741, make sure that all letters of Latin alphabet (both uppercase & lowercase) can be used # when creating a subscript from arrays
TST: Avoid possible warning from unnecessary cast with uninitialized values This should be fixed in `choose` to not do the unnecessary cast, see
@@ -21,7 +21,7 @@ class SubClass(np.ndarray): ... B1 = np.empty((1,), dtype=np.int32).view(SubClass) B2 = np.empty((1, 1), dtype=np.int32).view(SubClass) C: np.ndarray[Any, np.dtype[np.int32]] = np.array([0, 1, 2], dtype=np.int32) -D = np.empty(3).view(SubClass) +D = np.ones(3).view(SubClass) i4.all() A.all()
Optimization: Also remove assignments to mutable constant values. * These can be removed if they have no references. * Unclear if this happens a lot, but it seemed to be missing. This ought to cover "has side effects" in the future too.
@@ -251,7 +251,7 @@ class StatementAssignmentVariable(StatementChildrenHavingBase): return self.getAssignSource().mayRaiseException(exception_type) def computeStatement(self, trace_collection): - # This is very complex stuff, pylint: disable=too-many-branches + # This is very complex stuff, pylint: disable=too-many-branches,too-many-return-statements # TODO: Way too ugly to have global trace kinds just here, and needs to # be abstracted somehow. But for now we let it live here. @@ -355,8 +355,35 @@ Removed assignment of %s from itself which is known to be defined.""" % variable if last_trace is not None: if source.isCompileTimeConstant(): + if not variable.isModuleVariable(): + # Can safely forward propagate only non-mutable constants. - if not source.isMutable(): + if source.isMutable(): + # Something might be possible still, lets check for + # ununused. + if not last_trace.hasPotentialUsages() and \ + not last_trace.hasDefiniteUsages() and \ + not last_trace.getNameUsageCount(): + if not last_trace.getPrevious().isUninitTrace(): + # TODO: We could well decide, if that's even necessary, but for now + # the "StatementDelVariable" is tasked with that. + result = StatementDelVariable( + variable = self.variable, + version = self.variable_version, + tolerant = True, + source_ref = self.getSourceReference() + ) + else: + result = None + + return ( + result, + "new_statements", + "Dropped dead assignment statement to '%s'." % ( + self.getVariableName() + ) + ) + else: if not last_trace.getNameUsageCount(): self.variable_trace.setReplacementNode( lambda _usage : source.makeClone() @@ -366,7 +393,6 @@ Removed assignment of %s from itself which is known to be defined.""" % variable else: propagated = False - if not variable.isModuleVariable(): if not last_trace.hasPotentialUsages() and not last_trace.getNameUsageCount(): if not last_trace.getPrevious().isUninitTrace(): # TODO: We could well decide, if that's even necessary, but for now @@ -388,10 +414,6 @@ Removed assignment of %s from itself which is known to be defined.""" % variable self.getVariableName() ) ) - else: - # Something might be possible still. - - pass else: # More cases thinkable. pass
fix degradation factor for one year It was returning 1 for single-year degradation
@@ -78,6 +78,8 @@ def annuity(analysis_period, rate_escalation, rate_discount): def degradation_factor(analysis_period, rate_degradation): if analysis_period == 0: return 0 + if analysis_period == 1: + return 1 - rate_degradation factor = 1 factors = [factor] for yr in range(1, int(analysis_period)):
Update gallery.py web trader and live wind on same row
@@ -417,8 +417,7 @@ layout = html.Div(className='gallery', children=[ This app continually queries a SQL database and displays live charts of wind speed and wind direction. In Dash, the [dcc.Interval](https://dash.plot.ly/live-upates) component can be used to update any element on a recurring interval. - ''', - width=12 + ''' ), AppSection( @@ -429,8 +428,7 @@ layout = html.Div(className='gallery', children=[ description=''' This app continually queries csv files and updates Ask and Bid prices for major currency pairs as well as Stock Charts. You can also virtually buy and sell stocks and see the profit updates. - ''', - width=12 + ''' ) ]),
css: Fix erroneous `bootstrap-focus-style` ID. The commit added a bug that breaks the new user invite. The CSS class `bootstrap-focus-style` was added to the `id`, hence breaking the value extraction. Fixes:
{{> help_link_widget link="/help/roles-and-permissions" }} </label> <div> - <select id="invite_as bootstrap-focus-style" class="invite-as"> + <select id="invite_as" class="invite-as bootstrap-focus-style"> <option name="invite_as" value="{{ invite_as_options.guest.code }}">{{t "Guests" }}</option> <option name="invite_as" selected="selected" value="{{ invite_as_options.member.code }}">{{t "Members" }}</option> {{#if is_admin}}
Extract .from_fluid_flow method to a class This is more in line with how we have all the Bearing classes, and makes it easier for the user to find it. Closes
"""Bearing Element module. This module defines the BearingElement classes which will be used to represent the rotor -bearings and seals. There're 6 different classes to represent bearings options, +bearings and seals. There are 7 different classes to represent bearings options, and 2 element options with 8 or 12 degrees of freedom. """ # fmt: off @@ -27,6 +27,7 @@ __all__ = [ "SealElement", "BallBearingElement", "RollerBearingElement", + "BearingFluidFlow", "BearingElement6DoF", "MagneticBearingElement", ] @@ -912,26 +913,8 @@ class BearingElement(Element): color=color, ) - @classmethod - def from_fluid_flow( - cls, - n, - nz, - ntheta, - length, - omega, - p_in, - p_out, - radius_rotor, - radius_stator, - visc, - rho, - eccentricity=None, - load=None, - tag=None, - n_link=None, - scale_factor=1.0, - ): + +class BearingFluidFlow(BearingElement): """Instantiate a bearing using inputs from its fluid flow. This method always creates elements with frequency-dependent coefficients. @@ -1018,12 +1001,33 @@ class BearingElement(Element): >>> load = 525 >>> visc = 0.1 >>> rho = 860. - >>> BearingElement.from_fluid_flow(0, nz, ntheta, length, omega, p_in, + >>> BearingFluidFlow(0, nz, ntheta, length, omega, p_in, ... p_out, radius_rotor, radius_stator, ... visc, rho, load=load) # doctest: +ELLIPSIS - BearingElement(n=0, n_link=None, - kxx=array([... + BearingFluidFlow(n=0, n_link=None, + kxx=array([14547442... """ + + def __init__( + self, + n, + nz, + ntheta, + length, + omega, + p_in, + p_out, + radius_rotor, + radius_stator, + visc, + rho, + eccentricity=None, + load=None, + tag=None, + n_link=None, + scale_factor=1.0, + color="#355d7a", + ): K = np.zeros((4, len(omega))) C = np.zeros((4, len(omega))) @@ -1044,7 +1048,7 @@ class BearingElement(Element): ) K[:, i], C[:, i] = calculate_stiffness_and_damping_coefficients(fluid_flow) - return cls( + super().__init__( n, kxx=K[0], kxy=K[1], @@ -1055,6 +1059,10 @@ class BearingElement(Element): cyx=C[2], cyy=C[3], frequency=omega, + tag=tag, + n_link=n_link, + scale_factor=scale_factor, + color=color, )
Upstream generic device test patch. Summary: So that XLA can run all tests by setting env `PYTORCH_TEST_PATH` instead of patching a diff. :) Pull Request resolved:
+import copy import inspect +import runpy import threading from functools import wraps import unittest @@ -275,6 +277,30 @@ device_type_test_bases.append(CPUTestBase) if torch.cuda.is_available(): device_type_test_bases.append(CUDATestBase) + +# Note [How to extend DeviceTypeTestBase to add new test device] +# The following logic optionally allows downstream projects like pytorch/xla to +# add more test devices. +# Instructions: +# - Add a python file (e.g. pytorch/xla/test/pytorch_test_base.py) in downstream project. +# - Inside the file, one should inherit from `DeviceTypeTestBase` class and define +# a new DeviceTypeTest class (e.g. `XLATestBase`) with proper implementation of +# `instantiate_test` method. +# - DO NOT import common_device_type inside the file. +# `runpy.run_path` with `globals()` already properly setup the context so that +# `DeviceTypeTestBase` is already available. +# - Set a top-level variable `TEST_CLASS` equal to your new class. +# E.g. TEST_CLASS = XLATensorBase +# - To run tests with new device type, set `TORCH_TEST_DEVICE` env variable to path +# to this file. Multiple paths can be separated by `:`. +# See pytorch/xla/test/pytorch_test_base.py for a more detailed example. +_TORCH_TEST_DEVICES = os.environ.get('TORCH_TEST_DEVICES', None) +if _TORCH_TEST_DEVICES: + for path in _TORCH_TEST_DEVICES.split(':'): + mod = runpy.run_path(path, init_globals=globals()) + device_type_test_bases.append(mod['TEST_CLASS']) + + PYTORCH_CUDA_MEMCHECK = os.getenv('PYTORCH_CUDA_MEMCHECK', '0') == '1' @@ -325,7 +351,7 @@ def instantiate_device_type_tests(generic_test_class, scope, except_for=None, on assert inspect.isfunction(test), "Couldn't extract function from '{0}'".format(name) # Instantiates the device-specific tests - device_type_test_class.instantiate_test(name, test) + device_type_test_class.instantiate_test(name, copy.deepcopy(test)) else: # Ports non-test member assert name not in device_type_test_class.__dict__, "Redefinition of directly defined member {0}".format(name)
ENH: added new time function Added a function to calculate decimal year from a datetime object.
@@ -43,6 +43,36 @@ def getyrdoy(date): return date.year, doy +def datetime_to_dec_year(dtime): + """Convert datetime timestamp to a decimal year. + + Parameters + ---------- + dtime : dt.datetime + Datetime timestamp + + Returns + ------- + year : float + Year with decimal containing time incriments of less than a year + + """ + + year = float(dtime.year) + day = float(dtime.strftime("%j")) - 1.0 + days_of_year = float(dt.datetime(dtime.year, 12, 31).strftime("%j")) + + # Add fraction of day to the day + day += (dtime.hour + (dtime.minute + + (dtime.second + dtime.microsecond * 1.0e-6) / 60.0) + / 60.0) / 24.0 + + # Determine the fraction of days in this year and add to year + year += (day / days_of_year) + + return year + + def parse_date(str_yr, str_mo, str_day, str_hr='0', str_min='0', str_sec='0', century=2000): """Convert string dates to dt.datetime. @@ -212,7 +242,7 @@ def freq_to_res(freq): def create_date_range(start, stop, freq='D'): - """Create array of datetime objects using input frequency from start to stop. + """Create array of datetime objects using input freq from start to stop. Parameters ----------
refactor(bump): Remove a redundant join call The list of lines is now joined unconditionally in _bump_with_regex, so there is never a need to join it in update_version_in_files. The removed join was hence a no-op because it was performed on a str.
@@ -158,7 +158,7 @@ def update_version_in_files( # Write the file out again with smart_open(filepath, "w") as file: - file.write("".join(version_file)) + file.write(version_file) def _bump_with_regex(
Fix spelling mistake Crown -> crowd
@@ -27,6 +27,6 @@ Police pepper sprays a young child who is seen crying, while protesters pour mil ### Police initiate violence | June 1 -Police pepper spray peacefully protesting crown +Police pepper spray peacefully protesting crowd * https://www.reddit.com/r/Seattle/comments/gv0ru3/this_is_the_moment_it_all_happened/
Update Changelog For newest Development version
@@ -3,9 +3,22 @@ Changelog All notable changes to this project are documented in this file. -[0.7.4-dev] in progress ------------------------ +[0.7.7-dev] in progress +------------------------ - Add support for Peewee 3.6.4 + + +[0.7.6] 2018-08-02 +------------------ +- Adds ability to attach a fee to a ``send`` transaction +- Update Node selection mechanism +- Store ``Transactions`` list items inside a ``Block`` in a consistent format. +- Improved peer connection maintenance + + +[0.7.5] 2018-07-19 +----------------------- +- Add CreateAddress Feature - Update NodeLeader peer monitoring system - Add ability to configure size of requests for blocks as well as block processing queue size - Update mainnet bootstrap files @@ -15,6 +28,7 @@ All notable changes to this project are documented in this file. - update notification endpoint to include ``total_pages`` in output, and allow ``pagesize`` paramater to be passed in - update seeds for mainnet + [0.7.3] 2018-07-12 ------------------ - Updated package requirements, removed ``pycrypto`` from all dependencies to fix install error(s) `#485 <https://github.com/CityOfZion/neo-python/issues/485>`_
[batch] update cost calculation add 0.004/instance-hr static IP cost add 0.01/core-hr service charge
@@ -8,12 +8,22 @@ log = logging.getLogger('utils') def cost_from_msec_mcpu(app, msec_mcpu): + worker_cores = app['worker_cores'] + # https://cloud.google.com/compute/all-pricing + + # per instance costs # persistent SSD: $0.17 GB/month # average number of days per month = 365.25 / 12 = 30.4375 - avg_n_days_per_month = 30.4375 + disk_cost_per_instance_hour = 0.17 * app['worker_disk_size_gb'] / avg_n_days_per_month / 24 + + ip_cost_per_instance_hour = 0.004 + + instance_cost_per_instance_hour = disk_cost_per_instance_hour + ip_cost_per_instance_hour + + # per core costs if app['worker_type'] == 'standard': cpu_cost_per_core_hour = 0.01 elif app['worker_type'] == 'highcpu': @@ -22,10 +32,14 @@ def cost_from_msec_mcpu(app, msec_mcpu): assert app['worker_type'] == 'highmem' cpu_cost_per_core_hour = 0.0125 - disk_cost_per_core_hour = 0.17 * app['worker_disk_size_gb'] / avg_n_days_per_month / 24 / app['worker_cores'] - cost_per_core_sec = (cpu_cost_per_core_hour + disk_cost_per_core_hour) / 3600 + service_cost_per_core_hour = 0.01 + + total_cost_per_core_hour = ( + cpu_cost_per_core_hour + + instance_cost_per_instance_hour / worker_cores + + service_cost_per_core_hour) - return msec_mcpu * cost_per_core_sec * 0.001 * 0.001 + return (msec_mcpu * 0.001 * 0.001) * (total_cost_per_core_hour / 3600) def parse_cpu_in_mcpu(cpu_string):
Fix empty email attribute in LDAP causes exception We were checking to see if the email attribute had no value, but not to see if it had a non-empty value! We were making the same mistake with username.
@@ -269,7 +269,7 @@ class LDAPDirectoryConnector(object): continue email, last_attribute_name = self.user_email_formatter.generate_value(record) - if email is None: + if not email: if last_attribute_name is not None: self.logger.warning('Skipping user with dn %s: empty email attribute (%s)', dn, last_attribute_name) continue @@ -296,10 +296,13 @@ class LDAPDirectoryConnector(object): username, last_attribute_name = self.user_username_formatter.generate_value(record) source_attributes['username'] = username - if last_attribute_name and not username: + if username: + user['username'] = username + else: + if last_attribute_name: self.logger.warning('No username attribute (%s) for user with dn: %s, default to email (%s)', last_attribute_name, dn, email) - user['username'] = username if username is not None else email + user['username'] = email domain, last_attribute_name = self.user_domain_formatter.generate_value(record) source_attributes['domain'] = domain
gui_guider_demo: fix pkg version Only one version is supported. Remove the check.
@@ -12,6 +12,6 @@ if PKG_USING_GUI_GUIDER_DEMO config PKG_GUI_GUIDER_DEMO_VER string - default "latest" if PKG_LVGL_VER_NUM = 0x99999 #LVGL latest version + default "latest" endif
utils/cpustates: Fix inverted `no_idle` check If there is no information about idle states then `no_idle` should be set to `True` instead of `False`.
@@ -151,7 +151,7 @@ class PowerStateProcessor(object): def __init__(self, cpus, wait_for_marker=True, no_idle=None): if no_idle is None: - no_idle = True if cpus[0].cpuidle else False + no_idle = False if cpus[0].cpuidle else True self.power_state = SystemPowerState(len(cpus), no_idle=no_idle) self.requested_states = {} # cpu_id -> requeseted state self.wait_for_marker = wait_for_marker
ebuild.ebd: run_generic_phase(): allow tmpdir param to be explicitly unset Instead of using $T all the time.
@@ -402,7 +402,7 @@ class setup_mixin(object): def run_generic_phase(pkg, phase, env, userpriv, sandbox, fd_pipes=None, - extra_handlers=None, failure_allowed=False, logging=None): + extra_handlers=None, failure_allowed=False, logging=None, **kwargs): """ :param phase: phase to execute :param env: environment mapping for the phase @@ -422,6 +422,7 @@ def run_generic_phase(pkg, phase, env, userpriv, sandbox, fd_pipes=None, userpriv = userpriv and is_userpriv_capable() sandbox = sandbox and is_sandbox_capable() + tmpdir = kwargs.get('tmpdir', env.get('T', None)) if env is None: env = expected_ebuild_env(pkg) @@ -434,7 +435,7 @@ def run_generic_phase(pkg, phase, env, userpriv, sandbox, fd_pipes=None, sys.stdout.flush() sys.stderr.flush() try: - if not ebd.run_phase(phase, env, env.get('T'), sandbox=sandbox, + if not ebd.run_phase(phase, env, tmpdir=tmpdir, sandbox=sandbox, logging=logging, additional_commands=extra_handlers): if not failure_allowed: raise format.GenericBuildError(
PANEL_COOKIE_SECRET not respected Some env variables such as PANEL_COOKIE_SECRET and PANEL_OAUTH_ENCRYPTION are not being verified correctly. This commit fixes that.
@@ -406,7 +406,7 @@ class Serve(_BkServe): "base64-encoded bytes." ) config.oauth_encryption_key = encryption_key - else: + elif not config.oauth_encryption_key: print("WARNING: OAuth has not been configured with an " "encryption key and will potentially leak " "credentials in cookies and a JWT token embedded " @@ -435,7 +435,7 @@ class Serve(_BkServe): ) elif args.cookie_secret: config.cookie_secret = args.cookie_secret - else: + elif not config.cookie_secret: raise ValueError( "When enabling an OAuth provider you must supply " "a valid cookie_secret either using the --cookie-secret "
Don't show traceback on 'q' if error view is shown Just exit silently with a return code. The useful error will be in ~/.cache/conjure-up/conjure-up.log anyway.
@@ -5,6 +5,7 @@ log output, and where to file a bug. from urwid import (Pile, Text, Filler, WidgetWrap, Divider) from ubuntui.widgets.buttons import cancel_btn from ubuntui.utils import Color, Padding +import sys class ErrorViewException(Exception): @@ -39,4 +40,4 @@ class ErrorView(WidgetWrap): return Pile(buttons) def cancel(self, button): - raise SystemExit("Install exited because of error.") + sys.exit(1)
When -vnone is enabled, don't say we stop compilation pipeline TN:
@@ -40,6 +40,7 @@ class PassManager(object): if p.disabled: continue if isinstance(p, StopPipeline): + if context.verbosity.info: printcol('Stopping pipeline execution: {}'.format(p.name), Colors.OKBLUE) return
Fix: avoid windows exe permission errors See:
@@ -9,7 +9,7 @@ install: - ps: appveyor DownloadFile "https://raw.githubusercontent.com/randy3k/UnitTesting/master/sbin/appveyor.ps1" - ps: .\appveyor.ps1 "bootstrap" -verbose - ps: .\appveyor.ps1 "install_package_control" -verbose - - ps: pip install --upgrade pip + - ps: python -m pip install --upgrade pip - ps: pip install pycodestyle build: off
Update botorch_and_ax.md Summary: Revised documentation / narrative on ax integration. Pull Request resolved:
@@ -3,41 +3,36 @@ id: botorch_and_ax title: Using botorch with Ax --- -[Ax](https://github.com/facebook/Ax) is a platform for optimizing experiments. +[Ax](https://github.com/facebook/Ax) is a platform for sequential +experimentation. It relies on botorch for implementing Bayesian Optimization algorithms, but -provides much higher-level APIs that make it easy and convenient to specify -problems. It also comes with powerful metadata management, storage of results, -different APIs that support a variety of use cases. Ax makes it convenient to -use botorch in many standard Bayesian Optimization settings. Simply put, if +provides higher-level APIs that make it easy and convenient to specify +problems, visualize results, and benchmark new algorithms. It also comes with powerful metadata management, storage of results, and deployment-related APIs. Ax makes it convenient to +use botorch in most standard Bayesian Optimization settings. Simply put, if botorch is the "un-framework", then Ax is the "framework". -Ax provides a `BotorchModel` that is a sensible default for modeling and -optimization, but that can easily be customized by specifying and passing in -custom model constructors, acquisition functions, and optimization strategies. - +Ax provides a `BotorchModel` (**TODO**: cross-link to Ax documentation) that is a sensible default for modeling and +optimization which can be customized by specifying and passing in +bespoke model constructors, acquisition functions, and optimization strategies. +This model bridge utilizes a number of built-in transformations (**TODO**: make sure these transformations are documented in Ax, and link to them here), such normalizing input spaces and outputs to ensure reasonable fitting of GPs. ## When to use botorch though Ax -*Short answer:* If it's simple to use botorch through Ax for your problem, then -use Ax. This should apply to most standard use cases. +If it's simple to use botorch through Ax for your problem, then +use Ax. It dramatically reduces the amount of bookkeeping one needs to do as a Bayesian optimization researcher, such as keeping track of results, and transforming inputs and outputs to ranges that will ensure sensible handling in (G)PyTorch. The functionality provided by Ax should apply to most standard use cases. -For instance, say you want to tinker around with some parameters of your botorch -model (e.g. the kind of kernel), but leave the rest of the the Bayesian +For instance, say you want to experiment with using a different kind of surrogate model, or a new type of acquisition function, but leave the rest of the the Bayesian Optimization loop untouched. It is then straightforward to plug your custom botorch -model into Ax to take advantage of Ax's various loop control APIs, +model or acquisition function into Ax to take advantage of Ax's various loop control APIs, as well as its powerful automated metadata management, data storage, etc. See the [Using a custom botorch model in Ax](../tutorials/custom_botorch_model_in_ax) -tutorial for how to do this. - -**TODO:** Link to docs on Ax's transformations. - +tutorial for more on how ot do this. ## When not to use Ax -If you're working in a non-standard setting, where you might deal with very -high-dimensional feature or parameter spaces, or where the model fitting process -requires interactive work, then using Ax may end up becoming cumbersome. 
In such +If you're working in a non-standard setting, such as those with high-dimensional or structured feature or design spaces, or where the model fitting process requires interactive work, then using Ax may not be the best solution for you. In such a situation, you might be better off writing your own full Bayesian Optimization -loop outside of Ax (hey, it's all just python after all...). The -[q-Noisy Constrained EI](../tutorials/closed_loop_botorch_only) tutorial shows -how this can be done. +loop outside of Ax. The +[q-Noisy Constrained EI](../tutorials/closed_loop_botorch_only) tutorial and +[variational auto-encoder](../tutorials/vae_mnist) tutorial give examples of how +this can be done.
Migrated from list to map for reading examples. I was using a list to store line numbers to read for sequences from text. Migrated to a map.
@@ -278,23 +278,24 @@ class LazyNERDataset(Dataset): def __init__(self, data_file, tokenizer, args): self.data_file = data_file self.data_start_line = args.data_start_line if args.data_start_line else 0 - self.example_lines = self._get_examples(self.data_file, self.data_start_line) - self.num_entries = len(self.example_lines) + self.example_lines, self.num_entries = self._get_examples(self.data_file, self.data_start_line) self.tokenizer = tokenizer self.args = args self.pad_token_label_id = CrossEntropyLoss().ignore_index @staticmethod def _get_examples(data_file, data_start_line): - example_lines = [] + example_lines = {} start = data_start_line + entry_num = 0 with open(data_file, encoding="utf-8") as f: for line_idx, _ in enumerate(f, 1): if _ == '\n' and line_idx>data_start_line: - example_lines.append((start,line_idx)) + example_lines[entry_num] = (start,line_idx) start = line_idx+1 + entry_num+=1 - return example_lines + return example_lines, entry_num def __getitem__(self, idx): start, end = self.example_lines[idx]
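A minimal sketch of the list-to-map change above; the blank-line-separated sample text, the function name, and the simplified parsing are illustrative assumptions, not the project's actual loader. Keying by entry number keeps lookup by dataset index an O(1) dict access, which is what `__getitem__(self, idx)` needs.

```python
from io import StringIO

def build_example_index(fileobj, data_start_line=0):
    # Map each example number to the (start, end) line range of a
    # blank-line-separated block, mirroring the dict built in _get_examples.
    example_lines = {}
    start = data_start_line
    entry_num = 0
    for line_idx, line in enumerate(fileobj, 1):
        if line == "\n" and line_idx > data_start_line:
            example_lines[entry_num] = (start, line_idx)
            start = line_idx + 1
            entry_num += 1
    return example_lines, entry_num

# Two blank-line-separated "examples" of token/label pairs.
sample = StringIO("EU B-ORG\nrejects O\n\nGerman B-MISC\ncall O\n\n")
index, total = build_example_index(sample)
print(total)     # 2
print(index[1])  # (4, 6): the line range of the second example
```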
Light : Drop support for `IECore::Light` We now simply use `IECore::Shader` to represent lights, including their network of input shaders.
#include "IECore/NullObject.h" #include "IECore/Shader.h" -#include "IECore/Light.h" #include "IECore/MessageHandler.h" #include "Gaffer/StringPlug.h" @@ -97,7 +96,6 @@ IECore::ConstObjectPtr Light::computeSource( const Context *context ) const return IECore::NullObject::defaultNullObject(); } - void Light::hashAttributes( const SceneNode::ScenePath &path, const Gaffer::Context *context, const ScenePlug *parent, IECore::MurmurHash &h ) const { hashLight( context, h ); @@ -116,19 +114,6 @@ IECore::ConstCompoundObjectPtr Light::computeAttributes( const SceneNode::SceneP { lightAttribute = shader->getType(); } - else if( const IECore::Light *light = IECore::runTimeCast<const IECore::Light>( lightShaders->members().back().get() ) ) - { - /// \todo We are phasing out the use of IECore::Light and replacing - /// it with IECore::Shader everywhere. Make sure no derived classes - /// are using it and then remove this special case code. - IECore::msg( IECore::Msg::Warning, "Light::computeAttributes", "The use of IECore::Light is deprecated - please use IECore::Shader instead." ); - const std::string &lightName = light->getName(); - size_t colon = lightName.find( ":" ); - if( colon != std::string::npos ) - { - lightAttribute = lightName.substr( 0, colon ) + ":light"; - } - } } result->members()[lightAttribute] = lightShaders;
Make running Gloo tests conditional on availability Summary: Pull Request resolved: Test Plan: Imported from OSS
@@ -82,11 +82,7 @@ ROCM_BLACKLIST = [ 'nccl', ] -DISTRIBUTED_TESTS_CONFIG = { - 'gloo': { - 'WORLD_SIZE': '2' if torch.cuda.device_count() == 2 else '3' - }, -} +DISTRIBUTED_TESTS_CONFIG = {} if dist.is_available(): @@ -98,7 +94,10 @@ if dist.is_available(): DISTRIBUTED_TESTS_CONFIG['nccl'] = { 'WORLD_SIZE': '2' if torch.cuda.device_count() == 2 else '3' } - + if dist.is_gloo_available(): + DISTRIBUTED_TESTS_CONFIG['gloo'] = { + 'WORLD_SIZE': '2' if torch.cuda.device_count() == 2 else '3' + } # https://stackoverflow.com/questions/2549939/get-signal-names-from-numbers-in-python SIGNALS_TO_NAMES_DICT = {getattr(signal, n): n for n in dir(signal)
Fix the Horovod benchmark Update the horovod installation before benchmark. Update openmpi to 4.1.
@@ -156,6 +156,7 @@ def PrepareHorovod(vm): vm.AuthenticateVm() vm.Install('google_cloud_sdk') + vm.Install('openmpi') vm.InstallPackages('wget git unzip') vm.Install('nccl') @@ -179,7 +180,10 @@ def PrepareHorovod(vm): f'sudo {pip} install ' '--extra-index-url https://developer.download.nvidia.com/compute/redist/ ' f'nvidia-dali-tf-plugin-cuda{cuda_version}') - + vm.RemoteCommand(f'sudo {pip} uninstall -y horovod') + vm.RemoteCommand( + f'sudo HOROVOD_GPU_OPERATIONS=NCCL HOROVOD_WITH_TENSORFLOW=1 HOROVOD_WITH_MPI=1 {pip} install -U --no-cache horovod' + ) vm.RemoteCommand( f'sudo {pip} install pynvml cython scipy \'opencv-python==3.4.2.17\'') vm.RemoteCommand( @@ -188,7 +192,6 @@ def PrepareHorovod(vm): vm.RemoteCommand( f'[ -d "tensorpack" ] || git clone https://github.com/tensorpack/tensorpack.git && sudo {pip} install ./tensorpack' ) - vm.RemoteCommand(f'sudo {pip} install pynvml') _CopyAndUpdateRunScripts(FLAGS.horovod_model, vm) @@ -413,8 +416,7 @@ def RunWithVMs(vms, extra_envs=None): 'warmup_steps': 101, 'results_dir': '/tmp/models', 'gpu_memory_fraction': 0.95, - 'use_static_loss_scaling': None, - 'loss_scale': 128, + 'static_loss_scale': 128, 'lr_init': 0.016, 'lr_warmup_epochs': 8, 'momentum': 0.875, @@ -422,10 +424,11 @@ def RunWithVMs(vms, extra_envs=None): 'iter_unit': 'batch' } run_flags.update({ - 'precision': FLAGS.horovod_precision, 'batch_size': FLAGS.horovod_batch_size, 'num_iter': FLAGS.horovod_num_steps, }) + if FLAGS.horovod_precision == 'fp16': + run_flags['amp'] = None # Load ImageNet training data from GCS if benchmark is not in synthetic mode if not FLAGS.horovod_synthetic:
fix error when deleting Ipv6. N:Ipv6Equipament:3839 , MSG:Failure to remove the Ipv6Equipament
@@ -3627,7 +3627,7 @@ class Ipv6(BaseModel): data_to_queue = serializer.data # Deletes Obj IP - super(Ip, self).delete() + super(Ipv6, self).delete() # Sends to Queue queue_manager = QueueManager()
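A standalone illustration of the bug fixed above: calling two-argument `super()` with a sibling class raises a `TypeError` instead of reaching the parent's `delete()`. The `BaseModel`/`Ip`/`Ipv6` classes here are stand-ins, not the real Django models.

```python
class BaseModel:
    def delete(self):
        print("base delete")

class Ip(BaseModel):
    pass

class Ipv6(BaseModel):
    def delete(self):
        try:
            # Buggy form: Ipv6 is not a subclass of Ip, so Python rejects the call
            # with "super(type, obj): obj must be an instance or subtype of type".
            super(Ip, self).delete()
        except TypeError as exc:
            print("buggy call failed:", exc)
        # Fixed form: name the class this method is defined on.
        super(Ipv6, self).delete()

Ipv6().delete()
```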
If already-loaded object is a Node, no need to look it up again. This will be the case most of the time. (An example of where this isn't true is the Node Files endpoint).
@@ -76,6 +76,9 @@ class AdminOrPublic(permissions.BasePermission): class ExcludeWithdrawals(permissions.BasePermission): def has_object_permission(self, request, view, obj): + if obj.__class__.__name__ == 'Node': + node = obj + else: context = request.parser_context['kwargs'] node = AbstractNode.load(context[view.node_lookup_url_kwarg]) if node.is_retracted:
DEV: change default stack level for discontinued and deprecated [CHANGED] now set at 3. level 1 just shows the call within cogent3.util.warning, level 2 shows where the function is used, level 3 shows where the user called it from.
@@ -13,7 +13,7 @@ __email__ = "[email protected]" __status__ = "Production" -def deprecated(_type, old, new, version, reason=None, stack_level=2): +def deprecated(_type, old, new, version, reason=None, stack_level=3): """a convenience function for deprecating classes, functions, arguments. Parameters @@ -39,10 +39,10 @@ def deprecated(_type, old, new, version, reason=None, stack_level=2): with catch_warnings(): simplefilter("always") - _warn(msg, DeprecationWarning, stack_level) + _warn(msg, DeprecationWarning, stacklevel=stack_level) -def discontinued(_type, name, version, reason=None, stack_level=2): +def discontinued(_type, name, version, reason=None, stack_level=3): """convenience func to warn about discontinued attributes Parameters @@ -58,7 +58,6 @@ def discontinued(_type, name, version, reason=None, stack_level=2): why, and what choices users have stack_level as per warnings.warn - """ msg = ( f"{_type} {name} is discontinued, support will be stopped in version {version}" @@ -68,4 +67,4 @@ def discontinued(_type, name, version, reason=None, stack_level=2): with catch_warnings(): simplefilter("always") - _warn(msg, DeprecationWarning, stack_level) + _warn(msg, DeprecationWarning, stacklevel=stack_level)
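A small, self-contained demonstration of why the default moved to 3 in the helpers above; the function names are invented for the sketch.

```python
import warnings

def deprecated(old, new, stack_level=3):
    # stacklevel=1 would attribute the warning to this warnings.warn() line,
    # stacklevel=2 to the library function that calls deprecated(),
    # stacklevel=3 to the user code that called that library function.
    warnings.warn(f"use {new} instead of {old}", DeprecationWarning,
                  stacklevel=stack_level)

def library_function():   # what stacklevel=2 would point at
    deprecated("old_arg", "new_arg")

def user_code():          # what stacklevel=3 (the new default) points at
    library_function()

warnings.simplefilter("always")
user_code()
```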
Update v_get_tbl_priv_by_group.sql added DROP and REFERENCES
@@ -10,6 +10,8 @@ select , decode(charindex('w',split_part(split_part(array_to_string(t.relacl, '|'),pu.groname,2 ) ,'/',1)),0,false,true) as upd , decode(charindex('a',split_part(split_part(array_to_string(t.relacl, '|'),pu.groname,2 ) ,'/',1)),0,false,true) as ins , decode(charindex('d',split_part(split_part(array_to_string(t.relacl, '|'),pu.groname,2 ) ,'/',1)),0,false,true) as del + , decode(charindex('D',split_part(split_part(array_to_string(t.relacl, '|'),pu.groname,2 ) ,'/',1)),0,false,true) as drp + , decode(charindex('R',split_part(split_part(array_to_string(t.relacl, '|'),pu.groname,2 ) ,'/',1)),0,false,true) as ref from (select use.usename as subject,
Update test_custom_rates.py test case with custom tou energy rate only (override urdb)
@@ -270,10 +270,10 @@ class TestBlendedRate(ResourceTestCaseMixin, TestCase): post["Scenario"]["Site"]["ElectricTariff"]["tou_energy_rates_us_dollars_per_kwh"] = [.1] * 8760 post["Scenario"]["Site"]["ElectricTariff"]["add_tou_energy_rates_to_urdb_rate"] = False - # response = self.get_response(post) - # tariff = ClassAttributes(response['outputs']['Scenario']['Site']['ElectricTariff']) - # self.assertEqual(tariff.year_one_bill_us_dollars, 1.0e5, places=1) - # self.assertEqual(tariff.year_one_bill_bau_us_dollars, 1.0e5, places=1) + response = self.get_response(post) + tariff = ClassAttributes(response['outputs']['Scenario']['Site']['ElectricTariff']) + self.assertAlmostEqual(tariff.year_one_bill_us_dollars, 1.0e5, places=1) + self.assertAlmostEqual(tariff.year_one_bill_bau_us_dollars, 1.0e5, places=1) post["Scenario"]["Site"]["ElectricTariff"]["add_tou_energy_rates_to_urdb_rate"] = True response = self.get_response(post)
fix the problem when some weights are zero and the required number of points can't be selected
@@ -1056,7 +1056,7 @@ class DynamicSampler(object): # in that case the sample technically won't be # uniform subset = self.rstate.choice(np.nonzero(subset)[0], - size=min(nblive, subset.sum()), + size=min(nblive, (cur_wt > 0).sum()), p=cur_wt, replace=False) cur_nblive = len(subset)
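The failure mode fixed above can be reproduced directly with numpy: with `replace=False`, `choice` can only draw as many items as there are entries with non-zero probability.

```python
import numpy as np

rstate = np.random.RandomState(0)
weights = np.array([0.5, 0.5, 0.0, 0.0])
probs = weights / weights.sum()

try:
    # Requesting 3 draws when only 2 entries have non-zero probability fails.
    rstate.choice(4, size=3, p=probs, replace=False)
except ValueError as exc:
    print("old behaviour:", exc)

# The fix caps the request at the number of positive-weight entries.
size = min(3, int((weights > 0).sum()))
print(rstate.choice(4, size=size, p=probs, replace=False))
```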
add crude GPIO read support bin/run.sh --dest \!2462abf84098 --gpiord 16
@@ -142,6 +142,9 @@ def onConnected(interface): interface.sendText(args.sendtext, args.destOrAll, wantAck=True, wantResponse=True) + if args.gpiowrb or args.gpiord: + rhc = remote_hardware.RemoteHardwareClient(interface) + if args.gpiowrb: bitmask = 0 bitval = 0 @@ -149,9 +152,13 @@ def onConnected(interface): bitmask |= 1 << int(wrpair[0]) bitval |= int(wrpair[1]) << int(wrpair[0]) print(f"Writing GPIO mask 0x{bitmask:x} with value 0x{bitval:x} to {args.dest}") - rhc = remote_hardware.RemoteHardwareClient(interface) rhc.writeGPIOs(args.dest, bitmask, bitval) + if args.gpiord: + bitmask = int(args.gpiord) + print(f"Reading GPIO mask 0x{bitmask:x} from {args.dest}") + rhc.readGPIOs(args.dest, bitmask) + if args.set or args.setstr or args.setchan or args.seturl or args.router != None: closeNow = True @@ -282,6 +289,9 @@ def main(): parser.add_argument( "--gpiowrb", nargs=2, help="Set a particlar GPIO # to 1 or 0", action='append') + parser.add_argument( + "--gpiord", help="Read from a GPIO mask") + parser.add_argument( "--settime", help="Set the real time clock on the device", action="store_true")
Add .get_participants() convenience method Closes and
@@ -56,7 +56,7 @@ from .tl.functions.messages import ( GetDialogsRequest, GetHistoryRequest, SendMediaRequest, SendMessageRequest, GetChatsRequest, GetAllDraftsRequest, CheckChatInviteRequest, ReadMentionsRequest, SendMultiMediaRequest, - UploadMediaRequest, EditMessageRequest + UploadMediaRequest, EditMessageRequest, GetFullChatRequest ) from .tl.functions import channels @@ -66,7 +66,7 @@ from .tl.functions.users import ( GetUsersRequest ) from .tl.functions.channels import ( - GetChannelsRequest, GetFullChannelRequest + GetChannelsRequest, GetFullChannelRequest, GetParticipantsRequest ) from .tl.types import ( DocumentAttributeAudio, DocumentAttributeFilename, @@ -81,7 +81,7 @@ from .tl.types import ( InputDocument, InputMediaDocument, Document, MessageEntityTextUrl, InputMessageEntityMentionName, DocumentAttributeVideo, UpdateEditMessage, UpdateEditChannelMessage, UpdateShort, Updates, - MessageMediaWebPage + MessageMediaWebPage, ChannelParticipantsSearch ) from .tl.types.messages import DialogsSlice from .extensions import markdown, html @@ -1014,6 +1014,59 @@ class TelegramClient(TelegramBareClient): raise TypeError('Invalid message type: {}'.format(type(message))) + def get_participants(self, entity, limit=None, search=''): + """ + Gets the list of participants from the specified entity + + Args: + entity (:obj:`entity`): + The entity from which to retrieve the participants list. + + limit (:obj: `int`): + Limits amount of participants fetched. + + search (:obj: `str`, optional): + Look for participants with this string in name/username. + + Returns: + A list of participants with an additional .total variable on the list + indicating the total amount of members in this group/channel. + """ + entity = self.get_input_entity(entity) + limit = float('inf') if limit is None else int(limit) + if isinstance(entity, InputPeerChannel): + offset = 0 + all_participants = {} + search = ChannelParticipantsSearch(search) + while True: + loop_limit = min(limit - offset, 200) + participants = self(GetParticipantsRequest( + entity, search, offset, loop_limit, hash=0 + )) + if not participants.users: + break + for user in participants.users: + if len(all_participants) < limit: + all_participants[user.id] = user + offset += len(participants.users) + if offset > limit: + break + + users = UserList(all_participants.values()) + users.total = self(GetFullChannelRequest( + entity)).full_chat.participants_count + + elif isinstance(entity, InputPeerChat): + users = self(GetFullChatRequest(entity.chat_id)).users + if len(users) > limit: + users = users[:limit] + users = UserList(users) + users.total = len(users) + else: + users = UserList([entity]) + users.total = 1 + return users + # endregion # region Uploading files
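A hedged usage sketch for the new `.get_participants()` helper above; `api_id`, `api_hash`, the session name, and `'somegroup'` are placeholders, and the connect/sign-in boilerplate of the synchronous client of that era is omitted.

```python
from telethon import TelegramClient

client = TelegramClient('session_name', api_id=12345, api_hash='0123456789abcdef')
# ... connect and sign in here ...

# Up to 50 members whose name/username matches "alice"; the returned list
# also carries a .total attribute with the overall member count.
users = client.get_participants('somegroup', limit=50, search='alice')
print(users.total)
for user in users:
    print(user.id, user.first_name)
```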
fix execution URL upon run start Summary: We now namespace all pipeline runs with `/p/:pipeline/` in the URL Fixes Test Plan: grepped for all Link, Route, window.open, and window.location.href references Clicked on Execute button Reviewers: schrockn
@@ -26,7 +26,7 @@ export function handleStartExecutionResult( const obj = result.data.startPipelineExecution; if (obj.__typename === "StartPipelineExecutionSuccess") { - const url = `/${obj.run.pipeline.name}/runs/${obj.run.runId}`; + const url = `/p/${obj.run.pipeline.name}/runs/${obj.run.runId}`; if (opts.openInNewWindow) { window.open(url, "_blank"); } else {
Fix py38 warnings in unit tests Fixes
@@ -922,7 +922,7 @@ def test_can_base64_encode_binary_multiple_media_types( def index_view(): return app.Response( status_code=200, - body=b'\u2713', + body=u'\u2713'.encode('utf-8'), headers={'Content-Type': content_type}) event = create_event('/index', 'GET', {}) @@ -930,7 +930,7 @@ def test_can_base64_encode_binary_multiple_media_types( response = demo(event, context=None) assert response['statusCode'] == 200 assert response['isBase64Encoded'] is True - assert response['body'] == 'XHUyNzEz' + assert response['body'] == '4pyT' assert response['headers']['Content-Type'] == content_type @@ -1148,7 +1148,7 @@ def test_cannot_receive_base64_string_with_binary_response( def bincat(): return app.Response( status_code=200, - body=b'\u2713', + body=u'\u2713'.encode('utf-8'), headers={'Content-Type': content_type}) event = create_event_with_body('', '/bincat', 'GET', content_type)
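The underlying pitfall is easy to check in isolation: `\u` is not an escape sequence inside a bytes literal, so `b'\u2713'` is the six literal characters `\u2713`, not the UTF-8 encoding of the check mark. The expected base64 strings below match the ones in the diff.

```python
import base64

# Python warns about the invalid escape, and the literal is just backslash-u-2-7-1-3.
assert b'\u2713' == b'\\u2713'

# What the old test effectively asserted (base64 of the six literal characters).
assert base64.b64encode(b'\\u2713') == b'XHUyNzEz'

# What the fixed test asserts (base64 of the UTF-8 bytes of the check mark).
assert base64.b64encode(u'\u2713'.encode('utf-8')) == b'4pyT'
print("ok")
```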
Removes Gunicorn Import In Debug mode Moves the gunicorn import below the start server command for debug mode to ensure it isn't imported if it isn't used. This solves issues with manage.py being unable to start on windows.
@@ -7,7 +7,6 @@ import time from typing import List import django -import gunicorn.app.wsgiapp from django.contrib.auth import get_user_model from django.core.management import call_command, execute_from_command_line @@ -156,6 +155,9 @@ class SiteManager: call_command("runserver", "0.0.0.0:8000") return + # Import gunicorn only if we aren't in debug mode. + import gunicorn.app.wsgiapp + # Patch the arguments for gunicorn sys.argv = [ "gunicorn",
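A stripped-down sketch of the deferred-import pattern used above; the WSGI module path is a placeholder and the debug branch stands in for Django's `runserver`.

```python
import sys

def run_server(debug: bool):
    if debug:
        print("starting development server")  # stands in for call_command("runserver", ...)
        return
    # Import gunicorn only on the code path that uses it, so platforms where
    # it cannot be installed (such as Windows) can still start in debug mode.
    import gunicorn.app.wsgiapp
    sys.argv = ["gunicorn", "myproject.wsgi:application"]  # placeholder module path
    gunicorn.app.wsgiapp.run()

run_server(debug=True)
```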
Fix error in max_blur_pool.py The example code, as is, would cause an error
@@ -37,7 +37,7 @@ class MaxBlurPool2d(nn.Module): Examples: >>> input = torch.rand(1, 4, 4, 8) - >>> pool = kornia.contrib.MaxblurPool2d(kernel_size=3) + >>> pool = kornia.contrib.MaxBlurPool2d(kernel_size=3) >>> output = pool(input) # 1x4x2x4 """
Fix for printing multiple reports at once (bug introduced in 79434bb)
@@ -257,7 +257,6 @@ class ReportPrintMixin: pages = [] try: - pdf = outputs[0].get_document().copy(pages).write_pdf() if len(outputs) > 1: # If more than one output is generated, merge them into a single file @@ -265,6 +264,8 @@ class ReportPrintMixin: doc = output.get_document() for page in doc.pages: pages.append(page) + + pdf = outputs[0].get_document().copy(pages).write_pdf() else: pdf = outputs[0].get_document().write_pdf()
Ignore error attempting to deploy unknown application Fixes
@@ -228,6 +228,10 @@ class DeployController: async def _do_deploy(self, application, msg_cb): "launches deploy in background for application" + if application not in self.undeployed_applications: + app.log.error('Skipping attempt to deploy unavailable ' + '{}'.format(application)) + return self.undeployed_applications.remove(application) default_series = app.metadata_controller.series
Release udiskie 1.7.6 add russian translations (thanks fixed deprecation warnings in setup.py (thanks
CHANGELOG --------- +1.7.6 +~~~~~ +Date: 17.02.2019 + +- add russian translations (thanks @mr-GreyWolf) +- fixed deprecation warnings in setup.py (thanks @sealj553) + 1.7.5 ~~~~~ Date: 24.05.2018
Adding the ability to write a .proj file Adding some spaces per PEP 8 Adding some comments and changing the naming of the file-opening section for clarity.
@@ -336,12 +336,30 @@ def _read_projection_information(asc_file): with open(proj_file, 'r') as f: projection_data_structure = f.readlines() - return projection_data_structure + return ''.join(projection_data_structure) else: return None +def _write_projection_information(asc_file, projection_string): + """Write .proj file if projection information exists on the grid. + + Parameters + ---------- + asc_file : file-like + File-like object of the data file pointing to the start of the data. + Assumption is that the projection information is in a file with the + same name as asc_file, but with the extention replaced with .proj + projection_string : string + The projection datastructure stored on the grid. + """ + proj_file = os.path.splitext(asc_file)[0] + '.proj' + + with open(proj_file, 'w') as f: + f.write(projection_string) + + def read_esri_ascii(asc_file, grid=None, reshape=False, name=None, halo=0): """Read :py:class:`~landlab.RasterModelGrid` from an ESRI ASCII file. @@ -414,6 +432,8 @@ def read_esri_ascii(asc_file, grid=None, reshape=False, name=None, halo=0): """ from ..grid import RasterModelGrid + # if the asc_file is provided as a string, open it and pass the pointer to + # _read_asc_header, and _read_asc_data if isinstance(asc_file, six.string_types): with open(asc_file, 'r') as f: header = read_asc_header(f) @@ -421,6 +441,7 @@ def read_esri_ascii(asc_file, grid=None, reshape=False, name=None, halo=0): file_name = asc_file + # otherwise, pass asc_file directly. else: header = read_asc_header(asc_file) data = _read_asc_data(asc_file) @@ -571,4 +592,9 @@ def write_esri_ascii(path, fields, names=None, clobber=False): np.savetxt(path, np.flipud(data), header=os.linesep.join(header_lines), comments='') + # if a proj file existed, duplicate it with the appropriate name. + if fields.projection: + _write_projection_information(path, fields.projection) + + return paths
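The companion-file convention is simple enough to sketch on its own: the projection text is written next to the grid file, with only the extension swapped for `.proj`. The projection string and file names below are placeholders.

```python
import os
import tempfile

def write_projection_information(asc_path, projection_string):
    # Write the projection text next to the ASCII grid, same base name, .proj extension.
    proj_path = os.path.splitext(asc_path)[0] + '.proj'
    with open(proj_path, 'w') as f:
        f.write(projection_string)
    return proj_path

asc_path = os.path.join(tempfile.mkdtemp(), 'dem.asc')
print(write_projection_information(asc_path, 'PROJCS["placeholder"]'))  # ...dem.proj
```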
tests: update collect-logs.yml playbook change `ceph -s` output to json-pretty. gather rgw logs add `health detail` command
failed_when: false changed_when: false with_items: - - "-s -f json" + - "-s -f json-pretty" - "osd tree" - "osd dump" - "pg dump" - "versions" + - "health detail -f json-pretty" - name: save ceph status to file copy: or (groups.get(mgr_group_name, []) | length == 0 and inventory_hostname in groups.get(mon_group_name, [])) + - name: get rgw log + shell: journalctl -l -u ceph-radosgw@rgw.{{ ansible_facts['hostname'] }}.{{ item.instance_name }} > /var/log/ceph/ceph-radosgw.{{ ansible_facts['hostname'] }}.{{ item.instance_name }}.log + changed_when: false + with_items: "{{ rgw_instances | default([]) }}" + when: inventory_hostname in groups.get(rgw_group_name, []) + - name: find ceph config file and logs find: paths:
Update test_fetchers_data_ftp.py Try to skip the ftp/data fetcher to check if CI tests take a shorter time!
@@ -31,6 +31,7 @@ import logging log = logging.getLogger("argopy.tests.data.ftp") +skip_for_debug = pytest.mark.skipif(True, reason="Taking too long !") """ List ftp hosts to be tested. @@ -92,6 +93,7 @@ def assert_fetcher(this_fetcher, cachable=False): assert is_list_of_strings(this_fetcher.cachepath) +@skip_for_debug @requires_ftp class Test_Backend: src = 'ftp' @@ -188,6 +190,7 @@ class Test_Backend: test(_fetcher) +@skip_for_debug @requires_ftp class Test_BackendParallel: src = 'ftp'
docs: Change 'loose' to 'lose' in tutorial 'loose' is often confused with 'lose'. This is a minor fix to the documentation.
@@ -251,5 +251,5 @@ First, we setup the solver and the data iterator for the training: Comparing the two processing times, we can observe that both schemes ("static" and "dynamic") takes the same executation time, i.e., although -we created the computation graph dynamically, we did not loose +we created the computation graph dynamically, we did not lose performance.
plugins: Fix daal4py Add daal4py back to the list of all plugins. It had been removed mistakenly. Make the dependency check for daal4py apply to all Python versions, not just 3.8. It is currently only available from conda. There is no PyPI release yet.
@@ -37,6 +37,7 @@ CORE_PLUGINS = [ ("model", "xgboost"), ("model", "pytorch"), ("model", "spacy"), + ("model", "daal4py"), ] # Models which currently don't support Windows or MacOS @@ -79,8 +80,6 @@ CORE_PLUGIN_DEPS = { else {}, } -# Plugins which currently don't support Python 3.8 -if sys.version_info.major == 3 and sys.version_info.minor < 8: CORE_PLUGIN_DEPS[("model", "daal4py")] = { # Must be installed already via conda, do not provide a pypi package yet "daal4py": lambda: python_package_installed("daal4py")
Minor doc updates in c10/core/Allocator.h Summary: Pull Request resolved:
@@ -169,7 +169,16 @@ struct C10_API Allocator { } }; -// Question: is this still needed? +// This context is used to generate DataPtr which have arbitrary +// std::function deleters associated with them. In some user facing +// functions, we give a (user-friendly) interface for constructing +// tensors from external data which take an arbitrary std::function +// deleter. Grep for InefficientStdFunctionContext to find these +// occurrences. +// +// This context is inefficient because we have to do a dynamic +// allocation InefficientStdFunctionContext, on top of the dynamic +// allocation which is implied by std::function itself. struct C10_API InefficientStdFunctionContext { std::unique_ptr<void, std::function<void(void*)>> ptr_; InefficientStdFunctionContext( @@ -187,7 +196,7 @@ struct C10_API InefficientStdFunctionContext { * to an allocator of a particular device from being invalidated when * SetAllocator is called.) * - * Also note that this is not thraed-safe, and we assume this function will + * Also note that this is not thread-safe, and we assume this function will * only be called during initialization. */ C10_API void SetAllocator(DeviceType t, Allocator* alloc);
Tweaked language Per
@@ -165,9 +165,9 @@ id: mi-grandrapids-2 ### Police assault peaceful protesters, among them Breonna Taylor's family members | July 12th -City of Grand Rapids Police Department officers seen pre-emptively pushing, pulling, and shoving peaceful protesters, as they circle around an officer arresting or detaining a man. +City of Grand Rapids Police Department officers seen pushing, pulling, and shoving peaceful protesters, as they circle around an officer arresting a man. -The detaining officer appears to force the man to his knees before the other officers finish encircling and view of the man is lost. +The arresting officer appears to force the man to his knees before the other officers finish encircling and view of the man is lost. Two different women, each in different videos, are shown falling or fallen over as a result of the shoving and pulling.
Update Nucleus pip package to rely on the same version of TensorFlow (1.11.0) as install.sh uses.
@@ -105,7 +105,7 @@ TensorFlow tfrecords file may be substituted. # redacted # these install_requires. install_requires=['contextlib2', 'intervaltree', 'absl-py', - 'mock', 'numpy', 'six', 'tensorflow>=1.7.0'], + 'mock', 'numpy', 'six', 'tensorflow>=1.11.0'], headers=headers,
Updates contributor details in README This commit updates the contributor email details in the README.
@@ -410,7 +410,7 @@ compliance-checker -t ncei-grid -f json -o ~/Documents/sample_grid_report.json ~ - [Dave Foster](https://github.com/daf) &lt;[email protected]&gt; - [Dan Maher](https://github.com/danieljmaher) &lt;[email protected]&gt; -- [Luke Campbell](https://github.com/lukecampbell) &lt;[email protected]&gt; +- [Luke Campbell](https://github.com/lukecampbell) &lt;[email protected]&gt; - [Kyle Wilcox](https://github.com/kwilcox) &lt;[email protected]&gt; - [Ben Adams](https://github.com/benjwadams) &lt;[email protected]&gt; - [Bob Fratantonio](https://github.com/bobfrat) &lt;[email protected]&gt;
Generate InChannelCheckFailure's message inside the exception The exception now expects channel IDs to be passed to it.
@@ -18,7 +18,11 @@ log = logging.getLogger(__name__) class InChannelCheckFailure(CheckFailure): - pass + def __init__(self, *channels: int): + self.channels = channels + channels_str = ', '.join(f"<#{c_id}>" for c_id in channels) + + super().__init__(f"Sorry, but you may only use this command within {channels_str}.") def in_channel(*channels: int, bypass_roles: typing.Container[int] = None): @@ -41,10 +45,7 @@ def in_channel(*channels: int, bypass_roles: typing.Container[int] = None): log.debug(f"{ctx.author} tried to call the '{ctx.command.name}' command. " f"The in_channel check failed.") - channels_str = ', '.join(f"<#{c_id}>" for c_id in channels) - raise InChannelCheckFailure( - f"Sorry, but you may only use this command within {channels_str}." - ) + raise InChannelCheckFailure(*channels) return commands.check(predicate)
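A standalone sketch of the pattern above; the real class derives from discord.py's `CheckFailure`, but plain `Exception` is used here so the snippet runs without the library installed.

```python
class InChannelCheckFailure(Exception):
    def __init__(self, *channels: int):
        # The exception builds its own user-facing message from the channel IDs.
        self.channels = channels
        channels_str = ', '.join(f"<#{c_id}>" for c_id in channels)
        super().__init__(f"Sorry, but you may only use this command within {channels_str}.")

try:
    raise InChannelCheckFailure(123, 456)
except InChannelCheckFailure as exc:
    print(exc)            # Sorry, but you may only use this command within <#123>, <#456>.
    print(exc.channels)   # (123, 456)
```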
Corrected link to Ubuntu docs removed the docs/ to make the link correct
@@ -19,4 +19,4 @@ see worked example [here](venv.md) `sudo pip install -e "git+https://github.com/jblance/mpp-solar.git#egg=mppsolar"` ### Ubuntu Install example ### -[Documented Ubuntu Install](docs/ubuntu_install.md) +[Documented Ubuntu Install](ubuntu_install.md)
Fix multiprocessing.DictProxy.values() Fixes
@@ -82,8 +82,8 @@ class DictProxy(BaseProxy, MutableMapping[_KT, _VT]): @overload def pop(self, __key: _KT, __default: _VT | _T) -> _VT | _T: ... def keys(self) -> list[_KT]: ... # type: ignore[override] - def values(self) -> list[tuple[_KT, _VT]]: ... # type: ignore[override] - def items(self) -> list[_VT]: ... # type: ignore[override] + def items(self) -> list[tuple[_KT, _VT]]: ... # type: ignore[override] + def values(self) -> list[_VT]: ... # type: ignore[override] class BaseListProxy(BaseProxy, MutableSequence[_T]): __builtins__: ClassVar[dict[str, Any]]
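A quick runtime check of the corrected stubs: on a manager dict, `.items()` yields key/value pairs and `.values()` yields bare values, which is what the swapped annotations now say.

```python
from multiprocessing import Manager

if __name__ == "__main__":
    with Manager() as manager:
        d = manager.dict({"a": 1, "b": 2})
        print(list(d.keys()))    # ['a', 'b']
        print(list(d.items()))   # [('a', 1), ('b', 2)]
        print(list(d.values()))  # [1, 2]
```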
indices corrected: indices in array_element are counted from 0.
@@ -344,8 +344,8 @@ or normal Python functions: from openeo.processes import array_element def my_bandmath(data): - band1 = array_element(data, index=1) - band2 = array_element(data, index=2) + band1 = array_element(data, index=0) + band2 = array_element(data, index=1) return band1 + 1.2 * band2
Calibration proto fixes Fix a comment noted by wcourtney on design document Fix cut-and-paste java outer classname.
@@ -6,7 +6,7 @@ import "cirq/google/api/v2/program.proto"; package cirq.google.api.v2; option java_package = "com.google.cirq.google.api.v2"; -option java_outer_classname = "BatchProto"; +option java_outer_classname = "FocusedCalibrationProto"; option java_multiple_files = true; // This message represents a request to execute a custom calibration routine. @@ -86,8 +86,10 @@ message CalibrationLayerResults { string error_message = 2; // A token identifying the calibration result. - // This could be used to tag focused circuits that use parameters + // If a token exists in the response, it can be used to tag + // focused circuits that use parameters // derived from this calibration. + // If no token exists, then the calibration was purely diagnostic. string token = 3; // Results, such as gate fidelities, gate angles, etc
Only vectorize xmap axes that have more than one element per resource To make the jaxpr much less noisy; size-1 vmaps are quite pointless.
@@ -562,10 +562,9 @@ def make_xmap_callable(fun: lu.WrappedFun, class EvaluationPlan(NamedTuple): """Encapsulates preprocessing common to top-level xmap invocations and its translation rule.""" - resource_env: ResourceEnv - axis_sizes: Dict[AxisName, int] physical_axis_resources: Dict[AxisName, Tuple[ResourceAxisName, ...]] axis_subst: Dict[AxisName, Tuple[ResourceAxisName, ...]] + axis_vmap_size: Dict[AxisName, Optional[int]] @classmethod def from_axis_resources(cls, @@ -574,21 +573,33 @@ class EvaluationPlan(NamedTuple): axis_sizes: Dict[AxisName, int]): # TODO: Support sequential resources physical_axis_resources = axis_resources # NB: We only support physical resources at the moment - axis_subst = {name: axes + (fresh_resource_name(name),) for name, axes in axis_resources.items()} - return cls(resource_env, axis_sizes, physical_axis_resources, axis_subst) + resource_shape = resource_env.shape + axis_subst = dict(axis_resources) + axis_vmap_size: Dict[AxisName, Optional[int]] = {} + for naxis, raxes in axis_resources.items(): + num_resources = int(np.prod([resource_shape[axes] for axes in raxes], dtype=np.int64)) + if axis_sizes[naxis] % num_resources != 0: + raise ValueError(f"Size of axis {naxis} ({axis_sizes[naxis]}) is not divisible " + f"by the total number of resources assigned to this axis ({raxes}, " + f"{num_resources} in total)") + tile_size = axis_sizes[naxis] // num_resources + # We have to vmap when there are no resources (to handle the axis name!) or + # when every resource gets chunks of values. + if not raxes or tile_size > 1: + axis_vmap_size[naxis] = tile_size + axis_subst[naxis] += (fresh_resource_name(naxis),) + else: + axis_vmap_size[naxis] = None + return cls(physical_axis_resources, axis_subst, axis_vmap_size) def vectorize(self, f: lu.WrappedFun, in_axes, out_axes): - resource_shape = self.resource_env.shape for naxis, raxes in self.axis_subst.items(): - paxes, vaxis = raxes[:-1], raxes[-1] + tile_size = self.axis_vmap_size[naxis] + if tile_size is None: + continue + vaxis = raxes[-1] map_in_axes = tuple(unsafe_map(lambda spec: spec.get(naxis, None), in_axes)) map_out_axes = tuple(unsafe_map(lambda spec: spec.get(naxis, None), out_axes)) - paxes_size = int(np.prod([resource_shape[paxis] for paxis in paxes], dtype=np.int64)) - if self.axis_sizes[naxis] % paxes_size != 0: - raise ValueError(f"Size of axis {naxis} ({self.axis_sizes[naxis]}) is not divisible " - f"by the total number of resources assigned to this axis ({paxes}, " - f"{paxes_size} in total)") - tile_size = self.axis_sizes[naxis] // paxes_size f = pxla.vtile(f, map_in_axes, map_out_axes, tile_size=tile_size, axis_name=vaxis) return f
[swarming] log as info when old style properties is used I'm reaching out to clients still using it, but keeping the log level as error makes the logs unusable to diagnose real failures.
@@ -250,7 +250,7 @@ def new_task_request_from_rpc(msg, now): raise ValueError('Specify one of properties or task_slices, not both') if msg.properties: - logging.error('Properties is still used') + logging.info('Properties is still used') if not msg.expiration_secs: raise ValueError('missing expiration_secs') props, secret_bytes = _taskproperties_from_rpc(msg.properties)
Order columns in mysql get_columns() implementation. Fixes
@@ -4194,7 +4194,8 @@ class MySQLDatabase(Database): sql = """ SELECT column_name, is_nullable, data_type, column_default FROM information_schema.columns - WHERE table_name = %s AND table_schema = DATABASE()""" + WHERE table_name = %s AND table_schema = DATABASE() + ORDER BY ordinal_position""" cursor = self.execute_sql(sql, (table,)) pks = set(self.get_primary_keys(table)) return [ColumnMetadata(name, dt, null == 'YES', name in pks, table, df)
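A hedged usage sketch of the introspection call the ordering matters for; the connection parameters and table name are placeholders.

```python
from peewee import MySQLDatabase

db = MySQLDatabase('my_db', user='user', password='secret', host='localhost')

# With ORDER BY ordinal_position, columns come back in table-definition order.
for col in db.get_columns('users'):
    # ColumnMetadata fields: name, data_type, null, primary_key, table, default
    print(col.name, col.data_type, col.primary_key)
```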