Columns: message (string, lengths 13-484); diff (string, lengths 38-4.63k)
Fix router hanging: break if no adjacent connections
@@ -284,7 +284,13 @@ class _GreedyRouter: ] if len(candidate_swap_sets) == 1: self.apply_swap(*candidate_swap_sets[0]) + + if list( + self.remaining_dag.findall_nodes_until_blocked( + self.acts_on_nonadjacent_qubits)): return + else: + break frontier_edges = sorted(time_slices[0].edges) self.bring_farthest_pair_together(frontier_edges)
Clarified installation process for development. Included the words "install from source" and links to documentation on pip -e and setuptools' "developer mode".
@@ -64,7 +64,7 @@ the structure of the code and of the repository. https://github.com/plotly/plotly.py/issues/1965. If you have writing skills, the wording of existing examples can also be improved in places. -Contributing code or documentation are not the only way to contribute! You can +Contributing code or documentation is not the only way to contribute! You can also contribute to the project by - reporting bugs (see below). @@ -133,15 +133,20 @@ conda activate plotly-dev $ pip install -r packages/python/plotly/requirements.txt $ pip install -r packages/python/plotly/optional-requirements.txt -### Editable install of plotly packages +### Editable install of plotly packages (install from source) $ pip install -e packages/python/plotly/ $ pip install -e packages/python/chart-studio/ $ pip install -e packages/python/plotly-geo/ -This will ensure that the installed packages links to your local development +This will ensure that the installed packages link to your local development directory, meaning that all changes you make reflect directly in your -environment (don't forget to restart the Jupyter kernel though!). +environment (don't forget to restart the Jupyter kernel though!). For more +information see the +[`setuptools`](https://setuptools.readthedocs.io/en/latest/setuptools.html#development-mode) +and +[`pip`](https://pip.pypa.io/en/stable/reference/pip_install/#install-editable) +documentation on _development mode_. ### ipywidgets development install
Fix serialization bug * In dataset base, remove __reduce_ex__ and override __getattr__. ``torchtext.Dataset.__getattr__`` is a generator. That doesn't play well with pickle. Returning a generator (when appropriate) seems to fix the issue without changing API.
@@ -50,16 +50,6 @@ class DatasetBase(Dataset): the same structure as in the fields argument passed to the constructor. """ - def __getstate__(self): - return self.__dict__ - - def __setstate__(self, _d): - self.__dict__.update(_d) - - def __reduce_ex__(self, proto): - # This is a hack. Something is broken with torch pickle. - return super(DatasetBase, self).__reduce_ex__() - def __init__(self, fields, src_examples_iter, tgt_examples_iter, filter_pred=None): @@ -90,6 +80,15 @@ class DatasetBase(Dataset): super(DatasetBase, self).__init__(examples, fields, filter_pred) + def __getattr__(self, attr): + # avoid infinite recursion when fields isn't defined + if 'fields' not in vars(self): + raise AttributeError + if attr in self.fields: + return (getattr(x, attr) for x in self.examples) + else: + raise AttributeError + def save(self, path, remove_fields=True): if remove_fields: self.fields = []
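A minimal standalone sketch (illustrative class names, not the actual onmt/torchtext code) of why the `__getattr__` guard in this diff matters: pickle probes special attributes such as `__setstate__` on a half-initialised instance during unpickling, so `__getattr__` must raise `AttributeError` cleanly instead of recursing, and returning a fresh generator per lookup keeps anything generator-shaped out of the pickled state.
```python
import pickle

class Example:
    def __init__(self, src):
        self.src = src

class Bucket:
    """Stand-in for a dataset holding examples and a set of field names."""
    def __init__(self, examples, fields):
        self.examples = examples
        self.fields = fields

    def __getattr__(self, attr):
        # During unpickling the instance dict is still empty, so guard
        # against infinite recursion before touching self.fields.
        if 'fields' not in vars(self):
            raise AttributeError(attr)
        if attr in self.fields:
            # Return a fresh generator; nothing is stored on the instance,
            # so pickling the object stays possible.
            return (getattr(x, attr) for x in self.examples)
        raise AttributeError(attr)

data = Bucket([Example("a"), Example("b")], fields={'src'})
restored = pickle.loads(pickle.dumps(data))
print(list(restored.src))  # ['a', 'b']
```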
Update Bolivia installed capacity Source unchanged. Added plants explicitly listed as biomass to both "unknown" and "biomass". Confirmed with IAEA PRIS that Bolivia has no nuclear power plants.
}, "BO": { "capacity": { - "hydro": 483.22, - "unknown": 1344.55, + "biomass": 46.22, + "hydro": 734.84, + "nuclear": 0, + "solar": 115.07, + "unknown": 2273.22, "wind": 27 }, "contributors": [
Fix complete indexing in bson serialization and remove <= 2^16 variable restriction
@@ -23,8 +23,10 @@ from dimod.binary_quadratic_model import BinaryQuadraticModel def bqm_bson_encoder(bqm): """todo""" num_variables = len(bqm) - if num_variables > 2**16: - raise ValueError + + index_dtype = np.uint32 + if num_variables <= 2**16: + index_dtype = np.uint16 variable_order = sorted(bqm.linear) num_possible_edges = num_variables*(num_variables - 1) // 2 @@ -33,13 +35,18 @@ def bqm_bson_encoder(bqm): lin, (i, j, _vals), off = bqm.to_numpy_vectors( dtype=np.float32, - index_dtype=np.uint16, + index_dtype=index_dtype, sort_indices=as_complete, variable_order=variable_order) if as_complete: vals = np.zeros(num_possible_edges, dtype=np.float32) - edge_idxs = i*(num_variables - 1) - i*(i+1)//2 + j - 1 + + def mul(a, b): + return np.multiply(a, b, dtype=np.int64) + + edge_idxs = (mul(i, num_variables - 1) - mul(i, (i+1)//2) - + ((i+1) % 2)*(i//2) + j - 1) vals[edge_idxs] = _vals else: @@ -52,6 +59,7 @@ def bqm_bson_encoder(bqm): "variable_type": "SPIN" if bqm.vartype == bqm.SPIN else "BINARY", "offset": off, "variable_order": variable_order, + "index_dtype": np.dtype(index_dtype).str, } if not as_complete: @@ -65,14 +73,15 @@ def bqm_bson_decoder(doc, cls=BinaryQuadraticModel): lin = np.frombuffer(doc["linear"], dtype=np.float32) num_variables = len(lin) vals = np.frombuffer(doc["quadratic_vals"], dtype=np.float32) + index_dtype = doc["index_dtype"] if doc["as_complete"]: i, j = zip(*itertools.combinations(range(num_variables), 2)) else: - i = np.frombuffer(doc["quadratic_head"], dtype=np.uint16) - j = np.frombuffer(doc["quadratic_tail"], dtype=np.uint16) + i = np.frombuffer(doc["quadratic_head"], dtype=index_dtype) + j = np.frombuffer(doc["quadratic_tail"], dtype=index_dtype) off = doc["offset"] - return cls.from_numpy_vectors(lin, (i, j, vals), doc["offset"], + return cls.from_numpy_vectors(lin, (i, j, vals), off, str(doc["variable_type"]), variable_order=doc["variable_order"])
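A small self-contained check (pure Python, assuming the decoder's `itertools.combinations` ordering of the complete graph's edges, as in the diff above) that the closed-form `edge_idxs` expression matches that ordering, including the parity split that keeps every intermediate value an integer:
```python
import itertools

num_variables = 7
pairs = list(itertools.combinations(range(num_variables), 2))

for expected, (i, j) in enumerate(pairs):
    # i*(n-1) - i*(i+1)//2 + j - 1, with i*(i+1)//2 split by the parity of
    # i+1 so that no term is rounded before the subtraction.
    idx = (i * (num_variables - 1) - i * ((i + 1) // 2)
           - ((i + 1) % 2) * (i // 2) + j - 1)
    assert idx == expected, (i, j, idx, expected)

print("edge index formula matches combinations order for n =", num_variables)
```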
Update install.rst Fix grammatical mistakes
@@ -15,7 +15,7 @@ Using Pipenv Using AUR --------- -*aiogram* is also available in Arch User Repository, so you can install this framework on any Arch-based distribution like ArchLinux, Antergos, Manjaro, etc. To do this, use your favorite AUR-helper and install `python-aiogram <https://aur.archlinux.org/packages/python-aiogram/>`_ package. +*aiogram* is also available in Arch User Repository, so you can install this framework on any Arch-based distribution like ArchLinux, Antergos, Manjaro, etc. To do this, use your favorite AUR-helper and install the `python-aiogram <https://aur.archlinux.org/packages/python-aiogram/>`_ package. From sources ------------ @@ -52,7 +52,7 @@ You can speedup your bots by following next instructions: $ pip install uvloop -- Use `ujson <https://github.com/esnme/ultrajson>`_ instead of default json module. +- Use `ujson <https://github.com/esnme/ultrajson>`_ instead of the default json module. *UltraJSON* is an ultra fast JSON encoder and decoder written in pure C with bindings for Python 2.5+ and 3. @@ -64,9 +64,9 @@ You can speedup your bots by following next instructions: - Use aiohttp speedups - - Use `cchardet <https://github.com/PyYoshi/cChardet>`_ instead of chardet module. + - Use `cchardet <https://github.com/PyYoshi/cChardet>`_ instead of the chardet module. - *cChardet* is high speed universal character encoding detector. + *cChardet* is a high speed universal character encoding detector. **Installation:** @@ -94,4 +94,4 @@ You can speedup your bots by following next instructions: $ pip install aiohttp[speedups] -In addition, you don't need do nothing, *aiogram* is automatically starts using that if is found in your environment. +In addition, you don't need do anything, *aiogram* automatically starts using that if it is found in your environment.
Fixed upload chunk retry. Fixed empty body when retrying a chunk upload.
@@ -811,14 +811,13 @@ class HostedNeptuneBackend(Backend): def _upload_loop(self, fun, data, progress_indicator, **kwargs): ret = None for part in data.generate(): - part_to_send = part.get_data() - ret = with_api_exceptions_handler(self._upload_loop_chunk)(fun, part, part_to_send, data, **kwargs) + ret = with_api_exceptions_handler(self._upload_loop_chunk)(fun, part, data, **kwargs) progress_indicator.progress(part.end - part.start) data.close() return ret - def _upload_loop_chunk(self, fun, part, part_to_send, data, **kwargs): + def _upload_loop_chunk(self, fun, part, data, **kwargs): if data.length is not None: binary_range = "bytes=%d-%d/%d" % (part.start, part.end - 1, data.length) else: @@ -830,7 +829,7 @@ class HostedNeptuneBackend(Backend): } if data.permissions is not None: headers["X-File-Permissions"] = data.permissions - response = fun(data=part_to_send, headers=headers, **kwargs) + response = fun(data=part.get_data(), headers=headers, **kwargs) response.raise_for_status() return response
Add clarification to salt ssh docs about key auto-generation. Fixes
@@ -64,7 +64,8 @@ Deploy ssh key for salt-ssh =========================== By default, salt-ssh will generate key pairs for ssh, the default path will be -/etc/salt/pki/master/ssh/salt-ssh.rsa +``/etc/salt/pki/master/ssh/salt-ssh.rsa``. The key generation happens when you run +``salt-ssh`` for the first time. You can use ssh-copy-id, (the OpenSSH key deployment tool) to deploy keys to your servers.
target: Force consistent logcat format On some devices the default logcat format was inconsistent with what was expected. This change explicitly sets the logcat format to be as expected.
@@ -113,7 +113,7 @@ class AndroidAssistant(object): if self.logcat_poller: self.logcat_poller.write_log(outfile) else: - self.target.dump_logcat(outfile) + self.target.dump_logcat(outfile, logcat_format='threadtime') def clear_logcat(self): if self.logcat_poller: @@ -226,7 +226,7 @@ class LogcatPoller(threading.Thread): def poll(self): self.last_poll = time.time() - self.target.dump_logcat(self.buffer_file, append=True, timeout=self.timeout) + self.target.dump_logcat(self.buffer_file, append=True, timeout=self.timeout, logcat_format='threadtime') self.target.clear_logcat() def insert_logcat_marker(self):
Fix Log Line for Vault Token Generation Debug Line Fixes This patch replaces an errant period with a comma to correctly log a debug statement.
@@ -40,7 +40,7 @@ def generate_token(minion_id, signature, impersonated_by_master=False): True. This happens when the master generates minion pillars. ''' log.debug( - 'Token generation request for %s (impersonated by master: %s)'. + 'Token generation request for %s (impersonated by master: %s)', minion_id, impersonated_by_master ) _validate_signature(minion_id, signature, impersonated_by_master)
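For context, a tiny standalone illustration (not Salt code) of what the one-character change does: with the trailing period, the format string became an attribute lookup on the string literal and the call failed at runtime; with the comma, the values are passed as arguments and the logging module interpolates them lazily.
```python
import logging

logging.basicConfig(level=logging.DEBUG)
log = logging.getLogger(__name__)

minion_id = 'minion01'
impersonated_by_master = False

# Correct: %-style placeholders are filled in by logging only if the
# record is actually emitted.
log.debug(
    'Token generation request for %s (impersonated by master: %s)',
    minion_id, impersonated_by_master
)
```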
bootstrap: Patch bootstrap.js to support contenteditable. If the lookup input is contenteditable, it should be searching for text rather than input.
, lookup: function (event) { var items - this.query = this.$element.val() + this.query = this.$element.is("[contenteditable]") ? this.$element.text() : this.$element.val(); if (!this.options.helpOnEmptyStrings) { if (!this.query || this.query.length < this.options.minLength) {
Avoid infinite loops with td with break-inside: avoid Related to
@@ -2753,3 +2753,21 @@ def test_table_break_children_margin(): </table> ''' assert len(render_pages(html)) == 3 + + +def test_table_td_break_inside_avoid(): + # Test regression: https://github.com/Kozea/WeasyPrint/issues/1547 + html = ''' + <style> + @page { size: 4cm } + td { break-inside: avoid; line-height: 3cm } + </style> + <table> + <tr> + <td> + a<br>a + </td> + </tr> + </table> + ''' + assert len(render_pages(html)) == 2
PathListingWidget : Fix GIL management bug This could trigger a hang when shift+clicking to expand a whole section of the HierarchyView.
@@ -927,6 +927,8 @@ void propagateExpandedWalk( QTreeView *treeView, PathModel *model, QModelIndex i void propagateExpanded( uint64_t treeViewAddress, uint64_t modelIndexAddress, bool expanded, int numLevels ) { + IECorePython::ScopedGILRelease gilRelease; + QTreeView *treeView = reinterpret_cast<QTreeView *>( treeViewAddress ); PathModel *model = dynamic_cast<PathModel *>( treeView->model() ); if( !model )
Update Readme Indicate Pretrained model link
@@ -111,6 +111,10 @@ matplotlib # visualization # Instructions +Pretrained Model can be download from below link or Step3 section: + + [http://bit.ly/result_mockingjay](http://bit.ly/result_mockingjay) + ***Before you start, make sure all the packages required listed above are installed correctly*** ### Step 0. Preprocessing - Acoustic Feature Extraction & Text Encoding
Gaffer startup : Remove compatibility for non-namespaced StringAlgo We will be removing StringAlgo entirely.
import Gaffer -for module in ( Gaffer.StringAlgo, Gaffer.MetadataAlgo, Gaffer.MonitorAlgo ) : +for module in ( Gaffer.MetadataAlgo, Gaffer.MonitorAlgo ) : for name in dir( module ) : if not name.startswith( "__" ) : setattr( Gaffer, name, getattr( module, name ) )
feat: API to fetch the latest backup available. Take a backup if one doesn't exist with the required expiry.
@@ -206,6 +206,31 @@ def get_backup(): recipient_list = odb.send_email() frappe.msgprint(_("Download link for your backup will be emailed on the following email address: {0}").format(', '.join(recipient_list))) + [email protected]() +def fetch_latest_backups(with_files=True, recent=3): + """Takes backup on-demand if doesnt exist satisfying the `recent` parameter + Only for: System Managers + + Args: + with_files (bool, optional): If set, files will backuped up. Defaults to True. + recent (int, optional): Won't take a new backup if backup exists within this paramter. Defaults to 3 hours + + Returns: + dict: relative Backup Paths + """ + frappe.only_for("System Manager") + odb = BackupGenerator(frappe.conf.db_name, frappe.conf.db_name, frappe.conf.db_password, db_host=frappe.db.host, db_type=frappe.conf.db_type, db_port=frappe.conf.db_port) + odb.get_backup(older_than=recent, ignore_files=not with_files) + + return { + "database": odb.backup_path_files, + "public": odb.backup_path_db, + "private": odb.backup_path_private_files, + "config": odb.site_config_backup_path + } + + def scheduled_backup(older_than=6, ignore_files=False, backup_path_db=None, backup_path_files=None, backup_path_private_files=None, force=False, verbose=False): """this function is called from scheduler deletes backups older than 7 days
[batch] add missing insert into to billing_project_users This was missing in /
@@ -30,6 +30,9 @@ CREATE TABLE IF NOT EXISTS `billing_project_users` ( FOREIGN KEY (`billing_project`) REFERENCES billing_projects(name) ON DELETE CASCADE ) ENGINE = InnoDB; +INSERT INTO `billing_project_users` (`billing_project`, `user`) +VALUES ('test', 'test-dev'); + CREATE TABLE IF NOT EXISTS `instances` ( `name` VARCHAR(100) NOT NULL, `state` VARCHAR(40) NOT NULL,
Scheduling should enable/disable systemd timer in addition to start/stop
@@ -56,7 +56,7 @@ class InsightsSchedulerSystemd(object): @property def active(self): try: - systemctl_status = run_command_get_output('systemctl is-active insights-client.timer') + systemctl_status = run_command_get_output('systemctl is-enabled insights-client.timer') return systemctl_status['status'] == 0 except OSError: logger.exception('Could not get systemd status') @@ -67,6 +67,7 @@ class InsightsSchedulerSystemd(object): try: # Start timers in the case of rhel 7 running systemd systemctl_timer = run_command_get_output('systemctl start insights-client.timer') + systemctl_timer = run_command_get_output('systemctl enable insights-client.timer') logger.debug("Starting Insights Client systemd timer.") logger.debug("Status: %s", systemctl_timer['status']) logger.debug("Output: %s", systemctl_timer['output']) @@ -79,6 +80,7 @@ class InsightsSchedulerSystemd(object): logger.debug('Stopping all systemd timers') try: # Stop timers in the case of rhel 7 running systemd + systemctl_timer = run_command_get_output('systemctl disable insights-client.timer') systemctl_timer = run_command_get_output('systemctl stop insights-client.timer') logger.debug("Stopping Insights Client systemd timer.") logger.debug("Status: %s", systemctl_timer['status'])
Improve dashboard redirect logic Use dashboard for location-restricted users, and send mobile workers to cloudcare by default
+from django.conf import settings from django.core.urlresolvers import reverse from django.http import HttpResponseRedirect from django.utils.translation import ugettext_noop, ugettext as _ @@ -14,17 +15,13 @@ from corehq.apps.domain.decorators import login_and_domain_required from corehq.apps.domain.views import DomainViewMixin, LoginAndDomainMixin, \ DefaultProjectSettingsView from corehq.apps.domain.utils import user_has_custom_top_menu -from corehq.apps.export.views import CaseExportListView, FormExportListView from corehq.apps.hqwebapp.view_permissions import user_can_view_reports -from corehq.apps.locations.views import LocationsListView from corehq.apps.hqwebapp.views import BasePageView -from corehq.apps.users.permissions import can_view_case_exports, can_view_form_exports from corehq.apps.users.views import DefaultProjectUserSettingsView from corehq.apps.locations.permissions import location_safe from corehq.apps.style.decorators import use_angular_js from corehq.apps.cloudcare.views import FormplayerMain from django_prbac.utils import has_privilege -from django.conf import settings @login_and_domain_required @@ -39,21 +36,14 @@ def default_dashboard_url(request, domain): if domain in settings.CUSTOM_DASHBOARD_PAGE_URL_NAMES: return reverse(settings.CUSTOM_DASHBOARD_PAGE_URL_NAMES[domain], args=[domain]) - if couch_user and not couch_user.has_permission(domain, 'access_all_locations'): - if couch_user.is_commcare_user(): + if (couch_user + and not couch_user.has_permission(domain, 'access_all_locations') + and couch_user.is_commcare_user()): if toggles.USE_FORMPLAYER_FRONTEND.enabled(domain): formplayer_view = FormplayerMain.urlname else: formplayer_view = "corehq.apps.cloudcare.views.default" return reverse(formplayer_view, args=[domain]) - if couch_user.has_permission(domain, 'view_reports'): - return reverse(CaseExportListView.urlname, args=[domain]) - else: - if can_view_case_exports(couch_user, domain): - return reverse(CaseExportListView.urlname, args=[domain]) - elif can_view_form_exports(couch_user, domain): - return reverse(FormExportListView.urlname, args=[domain]) - return reverse(LocationsListView.urlname, args=[domain]) if couch_user and user_has_custom_top_menu(domain, couch_user): return reverse('saved_reports', args=[domain])
Update target_encoder.py Changed "ith" to "i-th" for clarity.
@@ -27,7 +27,7 @@ class TargetEncoder(BaseEstimator, TransformerMixin): Categories (unique values) per feature: - 'auto' : Determine categories automatically from the training data. - - list : ``categories[i]`` holds the categories expected in the ith + - list : ``categories[i]`` holds the categories expected in the i-th column. The passed categories must be sorted and should not mix strings and numeric values.
Fix added docstrings to core.domain.value_generators_domain * Fix added docstrings to core.domain.value_generators_domain * Fix Addresses review comments for commit Adds type information in the docstrings. Also fixes indentation and typos. * Fix Fixes a minor typo * Fix Changes Throws to Raises
@@ -45,12 +45,23 @@ class BaseValueGenerator(object): @classmethod def get_html_template(cls): + """Returns the HTML template for the class. + + Returns: + str. The HTML template corresponding to the class. + """ return utils.get_file_contents(os.path.join( os.getcwd(), feconf.VALUE_GENERATORS_DIR, 'templates', '%s.html' % cls.__name__)) @classmethod def get_js_template(cls): + """Returns the JavaScript template for the class. + + Returns: + str. The JS template corresponding to the class. + """ + # NB: These generators should use only Angular templating. The # variables they have access to are generatorId, initArgs, # customizationArgs and objType. @@ -67,13 +78,19 @@ class BaseValueGenerator(object): class Registry(object): - """Registry of all value generators.""" + """Maintains a registry of all the value generators. - # Dict mapping value generator class names to their classes. + Attributes: + value_generators_dict: dict(str : BaseValueGenerator). Dictionary + mapping value generator class names to their classes. + """ value_generators_dict = {} @classmethod def _refresh_registry(cls): + """Refreshes the dictionary mapping between generator_id and the + corresponding generator classes. + """ cls.value_generators_dict.clear() # Assemble all generators in @@ -108,7 +125,18 @@ class Registry(object): """Gets a generator class by its id. Refreshes once if the generator is not found; subsequently, throws an - error.""" + error. + + Args: + generator_id: str. An id corresponding to a generator class. + + Returns: + class(BaseValueGenerator). A generator class mapping to the + generator id given. + + Raises: + KeyError: The given generator_id is invalid. + """ if generator_id not in cls.value_generators_dict: cls._refresh_registry() return cls.value_generators_dict[generator_id]
Fix typo & Minor changes Thanks for the fixes
PyTorch Recipes --------------------------------------------- -Recipes are bite-sized bite-sized, actionable examples of how to use specific PyTorch features, different from our full-length tutorials. +Recipes are bite-sized, actionable examples of how to use specific PyTorch features, different from our full-length tutorials. .. raw:: html @@ -40,14 +40,14 @@ Recipes are bite-sized bite-sized, actionable examples of how to use specific Py .. customcarditem:: :header: Defining a Neural Network - :card_description: Learn how to use PyTorch's torch.nn package to create and define a neural network the MNIST dataset. + :card_description: Learn how to use PyTorch's torch.nn package to create and define a neural network for the MNIST dataset. :image: ../_static/img/thumbnails/cropped/defining-a-network.PNG :link: ../recipes/recipes/defining_a_neural_network.html :tags: Basics .. customcarditem:: :header: What is a state_dict in PyTorch - :card_description: Learn how state_dict objects, Python dictionaries, are used in saving or loading models from PyTorch. + :card_description: Learn how state_dict objects and Python dictionaries are used in saving or loading models from PyTorch. :image: ../_static/img/thumbnails/cropped/what-is-a-state-dict.PNG :link: ../recipes/recipes/what_is_state_dict.html :tags: Basics @@ -90,7 +90,7 @@ Recipes are bite-sized bite-sized, actionable examples of how to use specific Py .. customcarditem:: :header: Zeroing out gradients in PyTorch - :card_description: Learn when you should zero out graidents and how doing so can help increase the accuracy of your model. + :card_description: Learn when you should zero out gradients and how doing so can help increase the accuracy of your model. :image: ../_static/img/thumbnails/cropped/zeroing-out-gradients.PNG :link: ../recipes/recipes/zeroing_out_gradients.html :tags: Basics
Improve device/firmware selection Remove iOS 10.0 which is rare and only exists on two models, limit iPhone SE to versions after 9.3, add iOS 10.3.3.
@@ -148,7 +148,8 @@ def get_device_info(account): def generate_device_info(account): ios8 = ('8.0', '8.0.1', '8.0.2', '8.1', '8.1.1', '8.1.2', '8.1.3', '8.2', '8.3', '8.4', '8.4.1') ios9 = ('9.0', '9.0.1', '9.0.2', '9.1', '9.2', '9.2.1', '9.3', '9.3.1', '9.3.2', '9.3.3', '9.3.4', '9.3.5') - ios10 = ('10.0', '10.0.1', '10.0.2', '10.0.3', '10.1', '10.1.1', '10.2', '10.2.1', '10.3', '10.3.1', '10.3.2') + # 10.0 was only for iPhone 7 and 7 Plus, and is rare + ios10 = ('10.0.1', '10.0.2', '10.0.3', '10.1', '10.1.1', '10.2', '10.2.1', '10.3', '10.3.1', '10.3.2', '10.3.3') devices = tuple(IPHONES.keys()) account['model'] = choice(devices) @@ -158,8 +159,11 @@ def generate_device_info(account): if account['model'] in ('iPhone9,1', 'iPhone9,2', 'iPhone9,3', 'iPhone9,4'): account['iOS'] = choice(ios10) - elif account['model'] in ('iPhone8,1', 'iPhone8,2', 'iPhone8,4'): + elif account['model'] in ('iPhone8,1', 'iPhone8,2'): account['iOS'] = choice(ios9 + ios10) + elif account['model'] == 'iPhone8,4': + # iPhone SE started on 9.3 + account['iOS'] = choice(('9.3', '9.3.1', '9.3.2', '9.3.3', '9.3.4', '9.3.5') + ios10) else: account['iOS'] = choice(ios8 + ios9 + ios10)
Uses latest pip for GitHub Actions. We resolved all dependency conflicts and we can now use the latest pip in GitHub Actions.
@@ -44,8 +44,7 @@ jobs: - name: Install dependencies run: | - # TODO(b/174469322): install the latest pip once resolved. - python -m pip install --upgrade 'pip<20.3' wheel + python -m pip install --upgrade pip wheel pip install -e .[all] - name: Run unit tests
fix: redirect to page with middleware using absolute URL See Closes
@@ -2,7 +2,9 @@ import { NextRequest, NextResponse } from "next/server"; export function middleware(req: NextRequest) { if (req.cookies["couchers-sesh"] && req.nextUrl.pathname === "/") { - return NextResponse.rewrite("/dashboard"); + const url = req.nextUrl.clone(); + url.pathname = "/dashboard"; + return NextResponse.rewrite(url); } return NextResponse.next(); }
Unify the "Log out" spelling * Replaces the "Log Out" with "Log out" text to avoid having 2 versions for translations
{% if redirect.data %} <input name="{{ redirect.field }}" type="hidden" value="{{ redirect.data }}"> {% endif %} - <input type="submit" value="{% trans %}Log Out{% endtrans %}"> + <input type="submit" value="{% trans %}Log out{% endtrans %}"> </form> {% endblock %}
calico: update libnetwork plugin v1.1.3-2-d2iq fixes an issue with crashing on ipv6 enabled hosts.
}, "calico-libnetwork-plugin": { "kind": "url", - "url": "https://github.com/mesosphere/libnetwork-plugin/releases/download/v1.1.3-1-d2iq/libnetwork-plugin-amd64", - "sha1": "f363d1d1fdacefac8d91fb4336495e5b5020920f" + "url": "https://github.com/mesosphere/libnetwork-plugin/releases/download/v1.1.3-2-d2iq/libnetwork-plugin-amd64", + "sha1": "2db71bf79ac47e7eba2a59ed81440b47defb29ce" } } }
rewrite Topology.select using Topology.f_index The original implementation of `Topology.select` uses `Sample.index` to determine the selected element indices. This patch uses `Topology.f_index` instead, which leads to a slightly simpler implementation, without loops in Python (list comprehension). Timings of the cylinderflow example before and after this patch show no difference.
@@ -423,9 +423,13 @@ class Topology(types.Singleton): return function.get(values, 0, self.f_index) def select(self, indicator, ischeme='bezier2', **kwargs): + # Select elements where `indicator` is strict positive at any of the + # integration points defined by `ischeme`. We sample `indicator > 0` + # together with the element index (`self.f_index`) and keep all indices + # with at least one positive result. sample = self.sample(*element.parse_legacy_ischeme(ischeme)) - isactive = numpy.greater(sample.eval(indicator, **kwargs), 0) - selected = types.frozenarray(tuple(i for i, index in enumerate(sample.index) if isactive[index].any()), dtype=int) + isactive, ielem = sample.eval([function.greater(indicator, 0), self.f_index], **kwargs) + selected = types.frozenarray(numpy.unique(ielem[isactive])) return self[selected] @log.withcontext
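A standalone NumPy sketch (illustrative arrays, not the nutils API) of the selection idiom the new implementation relies on: sample the boolean indicator and the owning element index at every integration point, then keep the unique element indices that have at least one positive point.
```python
import numpy as np

# One entry per integration point: is the indicator positive, and which
# element does the point belong to?
isactive = np.array([True, False, False, False, True, True])
ielem = np.array([0, 0, 1, 1, 2, 2])

selected = np.unique(ielem[isactive])
print(selected)  # [0 2]: element 1 has no positive point and is dropped
```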
CODEOWNERS for distributed optimizer. Summary: Pull Request resolved: ghstack-source-id: Test Plan: waitforbuildbot
/torch/csrc/distributed/autograd @mrshenli @pritamdamania87 @zhaojuanmao /torch/distributed/rpc @mrshenli @pritamdamania87 @zhaojuanmao /torch/distributed/autograd @mrshenli @pritamdamania87 @zhaojuanmao +/torch/distributed/optim @mrshenli @pritamdamania87 @zhaojuanmao @aazzolini
Adjust zeromq example Closes
@@ -13,5 +13,4 @@ if __name__ == '__main__': while True: request = socket.recv().decode() response = methods.dispatch(request) - if not response.is_notification: socket.send_string(str(response))
Add mariadb-client to prod image. Add mariadb client for easier debugging of deployments.
@@ -11,7 +11,7 @@ ENV PYTHONUNBUFFERED=1 RUN apt-get update && \ apt-get install -y --no-install-recommends \ - libmariadbclient18 optipng \ + libmariadbclient18 optipng mariadb-client \ libxslt1.1 && \ rm -rf /var/lib/apt/lists/*
fw/workload: Add attribute to control if package data should be cleared. Allow specifying that the package data should not be cleared before starting the workload.
@@ -175,6 +175,7 @@ class ApkWorkload(Workload): loading_time = 10 package_names = [] view = None + clear_data_on_reset = True # Set this to True to mark that this workload requires the target apk to be run # for initialisation purposes before the main run is performed. @@ -257,7 +258,8 @@ class ApkWorkload(Workload): install_timeout=self.install_timeout, uninstall=self.uninstall, exact_abi=self.exact_abi, - prefer_host_package=self.prefer_host_package) + prefer_host_package=self.prefer_host_package, + clear_data_on_reset=self.clear_data_on_reset) @once_per_instance def initialize(self, context): @@ -641,7 +643,7 @@ class PackageHandler(object): def __init__(self, owner, install_timeout=300, version=None, variant=None, package_name=None, strict=False, force_install=False, uninstall=False, - exact_abi=False, prefer_host_package=True): + exact_abi=False, prefer_host_package=True, clear_data_on_reset=True): self.logger = logging.getLogger('apk') self.owner = owner self.target = self.owner.target @@ -654,6 +656,7 @@ class PackageHandler(object): self.uninstall = uninstall self.exact_abi = exact_abi self.prefer_host_package = prefer_host_package + self.clear_data_on_reset = clear_data_on_reset self.supported_abi = self.target.supported_abi self.apk_file = None self.apk_info = None @@ -809,6 +812,7 @@ class PackageHandler(object): def reset(self, context): # pylint: disable=W0613 self.target.execute('am force-stop {}'.format(self.apk_info.package)) + if self.clear_data_on_reset: self.target.execute('pm clear {}'.format(self.apk_info.package)) def install_apk(self, context):
GlusterFS: Check for namespace if deploying a StorageClass Fixes:
oc_project: state: present name: "{{ glusterfs_namespace }}" - when: glusterfs_is_native or glusterfs_heketi_is_native + when: glusterfs_is_native or glusterfs_heketi_is_native or glusterfs_storageclass - name: Delete pre-existing heketi resources oc_obj:
Add coverage for raising ServiceNotValid if set_service is called on a service that is not defined
@@ -59,6 +59,15 @@ class TestBaseProjectKeychain(unittest.TestCase): self.assertEquals(keychain.project_config, self.project_config) self.assertEquals(keychain.key, self.key) + def test_set_non_existant_service(self): + self._test_set_non_existant_service() + + def _test_set_non_existant_service(self, project=False): + keychain = self.keychain_class(self.project_config, self.key) + with self.assertRaises(ServiceNotValid) as context: + keychain.set_service( + 'doesnotexist', ServiceConfig({'name': ''}), project) + def test_set_invalid_service(self): self._test_set_invalid_service() @@ -396,6 +405,13 @@ class TestEncryptedFileProjectKeychain(TestBaseProjectKeychain): os.chdir(self.tempdir_project) self._test_set_invalid_service() + def test_set_non_existant_service(self, mock_class): + self._mk_temp_home() + self._mk_temp_project() + mock_class.return_value = self.tempdir_home + os.chdir(self.tempdir_project) + self._test_set_non_existant_service() + def test_set_connected_app(self, mock_class): self._mk_temp_home() self._mk_temp_project()
TST: changed testing class Changed the class object in listify test. Also simplified pysat imports.
@@ -18,7 +18,6 @@ import tempfile import pysat from pysat.tests.registration_test_class import TestWithRegistration -from pysat.utils import testing def prep_dir(inst=None): @@ -229,7 +228,7 @@ class TestListify(): new_iterable = pysat.utils.listify(iterable) tst_iterable = ['test' for i in range(nitem)] - testing.assert_lists_equal(new_iterable, tst_iterable) + pysat.utils.testing.assert_lists_equal(new_iterable, tst_iterable) return @pytest.mark.parametrize('iterable', [np.nan, np.full((1, 1), np.nan), @@ -241,7 +240,8 @@ class TestListify(): new_iterable = pysat.utils.listify(iterable) tst_iterable = [np.nan for i in range(int(np.product(np.shape(iterable))))] - testing.assert_lists_equal(new_iterable, tst_iterable, test_nan=True) + pysat.utils.testing.assert_lists_equal(new_iterable, tst_iterable, + test_nan=True) return @pytest.mark.parametrize('iterable', [1, np.full((1, 1), 1), @@ -252,20 +252,20 @@ class TestListify(): new_iterable = pysat.utils.listify(iterable) tst_iterable = [1 for i in range(int(np.product(np.shape(iterable))))] - testing.assert_lists_equal(new_iterable, tst_iterable) + pysat.utils.testing.assert_lists_equal(new_iterable, tst_iterable) return @pytest.mark.parametrize('iterable', [ - pysat.Instrument(), np.full((1, 1), pysat.Instrument()), - np.full((2, 2), pysat.Instrument()), - np.full((3, 3, 3), pysat.Instrument())]) + np.timedelta64(1), np.full((1, 1), np.timedelta64(1)), + np.full((2, 2), np.timedelta64(1)), + np.full((3, 3, 3), np.timedelta64(1))]) def test_listify_class_arrays(self, iterable): """ Test listify with various np.arrays of classes.""" new_iterable = pysat.utils.listify(iterable) - tst_iterable = [pysat.Instrument() + tst_iterable = [np.timedelta64(1) for i in range(int(np.product(np.shape(iterable))))] - testing.assert_lists_equal(new_iterable, tst_iterable) + pysat.utils.testing.assert_lists_equal(new_iterable, tst_iterable) return @@ -353,8 +353,9 @@ class TestLoadNetCDF4(): sorted(self.loaded_inst.columns), axis=1) # Check that names are lower case when written - testing.assert_lists_equal(self.loaded_inst.columns, - self.testInst.data.columns, test_case=False) + pysat.utils.testing.assert_lists_equal(self.loaded_inst.columns, + self.testInst.data.columns, + test_case=False) # Test the loaded data self.eval_loaded_data() @@ -389,7 +390,7 @@ class TestLoadNetCDF4(): sorted(self.loaded_inst.columns), axis=1) # Check that names are in the expected case - testing.assert_lists_equal(self.loaded_inst.columns, + pysat.utils.testing.assert_lists_equal(self.loaded_inst.columns, self.testInst.data.columns) return
update linear_elastic_damping.py example for current TS solvers; use HDF5 for output
@@ -32,6 +32,7 @@ options = { 'ts' : 'ts', 'save_steps' : -1, 'post_process_hook_final' : print_times, + 'output_format' : 'h5', } variables = { @@ -57,6 +58,12 @@ equations = { + dw_lin_elastic.i.Omega( solid.D, v, u ) = 0""", } +def adapt_time_step(ts, status, adt, problem, verbose=False): + if ts.time > 0.5: + ts.set_time_step(0.1) + + return True + solvers = deepcopy(solvers) # Do not spoil linear_elastic.py namespace in tests. solvers.update({ 'ts' : ('ts.adaptive', { @@ -64,20 +71,14 @@ solvers.update({ 't1' : 1.0, 'dt' : None, 'n_step' : 101, - 'adapt_fun' : 'adapt_time_step', + 'adapt_fun' : adapt_time_step, + 'verbose' : 1, }), }) -def adapt_time_step(ts, status, adt, problem): - if ts.time > 0.5: - ts.set_time_step(0.1) - - return True - ls = solvers['ls'] ls[1].update({'presolve' : True}) functions = { 'ebc_sin' : (ebc_sin,), - 'adapt_time_step' : (adapt_time_step,), }
Placed subproblem solves into Celery tasks. Locally on Docker the speedup isn't all that evident, but the solves are now in the task format.
@@ -31,7 +31,7 @@ import julia import sys import traceback import os -from celery import shared_task, Task +from celery import shared_task, Task, group from reo.exceptions import REoptError, OptimizationTimeout, UnexpectedError, NotOptimal, REoptFailedToStartError from reo.models import ModelManager from reo.src.profiler import Profiler @@ -281,6 +281,7 @@ def run_decomposed_model(data, model, reopt_inputs, print("iter_ub: ", iter_ub) if iter_ub < ub: ub = iter_ub + best_result_dicts = copy.deepcopy() min_charge_adder = iter_min_charge_adder prod_incentives = iter_prod_incentives gap = (ub - lb) / lb @@ -309,10 +310,27 @@ def solve_subproblems(models, reopt_param, results_dicts, update): :param update: Boolean that is True if skipping the creation of output expressions, and False o.w. :return: results_dicts -- dictionary in which key=month and vals are submodel results dictionaries """ + inputs = [] for idx in range(1, 13): - results_dicts[idx] = julia.Main.reopt_solve(models[idx], reopt_param, results_dicts[idx], update) + inputs.append({"m": models[idx], + "p": reopt_param, + "r": results_dicts[idx], + "u": update, + "month": idx + }) + jobs = group(solve_subproblem.s(x) for x in inputs) + r = jobs.apply() + results = r.get() + for i, result in enumerate(results): + results_dicts[i+1] = result return results_dicts + +@shared_task(name='solve_subproblem') +def solve_subproblem(kwargs): + return julia.Main.reopt_solve(kwargs["m"], kwargs["p"], kwargs["r"], kwargs["u"]) + + def fix_sizing_decisions(ub_models, reopt_param, system_sizes): for i in range(1, 13): julia.Main.fix_sizing_decisions(ub_models[i], reopt_param, system_sizes) @@ -323,7 +341,7 @@ def get_objective_value(ub_result_dicts, reopt_inputs): Calculates the full-year problem objective value by adjusting year-long components as required. :param ub_result_dicts: subproblem results dictionaries - :param reopt_inputs: inputs dicrtionary from DataManager + :param reopt_inputs: inputs dictionary from DataManager :return obj: full-year objective value :return prod_incentives: list of production incentive by technology :return min_charge_adder: calculated annual minimum charge adder @@ -346,14 +364,21 @@ def get_objective_value(ub_result_dicts, reopt_inputs): def get_added_peak_tou_costs(ub_result_dicts, reopt_inputs): """ - Calculated added TOU costs to according to peak lookback months, - using individual + Calculated added TOU costs to according to peak lookback months. :param ub_result_dicts: :param reopt_inputs: :return: """ + if (reopt_inputs['DemandLookbackPercent'] == 0.0 + or len(reopt_inputs['DemandLookbackMonths']) == 0 + or len(reopt_inputs['Ratchets']) == 0 + ): return 0.0 + """ + + """ + def get_average_sizing_decisions(models, reopt_param): sizes = julia.Main.get_sizing_decisions(models[1], reopt_param) for i in range(2, 13):
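A rough, self-contained sketch of the group-of-tasks pattern the change introduces (hypothetical task body and app name; the real code calls `julia.Main.reopt_solve`, and actual speedups depend on the broker and worker configuration):
```python
from celery import Celery, group, shared_task

app = Celery('subproblems')  # broker/backend left at defaults for this sketch

@shared_task(name='solve_subproblem_sketch')
def solve_subproblem_sketch(kwargs):
    # Stand-in for julia.Main.reopt_solve(m, p, r, u).
    return {'month': kwargs['month'], 'objective': float(kwargs['month'])}

inputs = [{'month': m} for m in range(1, 13)]
jobs = group(solve_subproblem_sketch.s(x) for x in inputs)
results = jobs.apply().get()  # .apply() runs the group eagerly, in-process
print(len(results))  # 12
```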
Rename to inline_backend_fmt() Rename to inline_fmt() Tmp
@@ -32,7 +32,7 @@ logger = logging.getLogger('matplotlib.mathtext') logger.setLevel(logging.ERROR) # suppress warnings! __all__ = [ - 'rc', 'rc_configurator', 'inline_backend_config', + 'rc', 'rc_configurator', 'inline_backend_fmt', ] # Dictionaries used to track custom proplot settings @@ -426,7 +426,7 @@ def _get_synced_params(key, value): # Backend elif key == 'inlinefmt': - inline_backend_config(value) + inline_backend_fmt(value) # Cycler elif key in ('cycle', 'rgbcycle'): @@ -1127,7 +1127,7 @@ class rc_configurator(object): yield self[key] -def inline_backend_config(fmt=None): +def inline_backend_fmt(fmt=None): """ Set up the `ipython inline backend \ <https://ipython.readthedocs.io/en/stable/interactive/magics.html#magic-matplotlib>`__
User Manual: Avoid backslashes in mention of default onefile tempdir spec * We have been converting backward slashes to forward slashes for a while now, and it's good to use this on Windows too; especially for quoting in shells, it's much less of a problem.
@@ -484,7 +484,7 @@ Finding files`_ as well. For the unpacking, by default a unique user temporary path one is used, and then deleted, however this default -``--onefile-tempdir-spec="%TEMP%\\onefile_%PID%_%TIME%"`` can be +``--onefile-tempdir-spec="%TEMP%/onefile_%PID%_%TIME%"`` can be overridden with a path specification that is using then using a cached path, avoiding repeated unpacking, e.g. with ``--onefile-tempdir-spec="%CACHE_DIR%/%COMPANY%/%PRODUCT%/%VERSION"``
Update version 0.9.3 -> 0.9.4 New Features * `assert_consistent_bqm` to `dimod.testing` for testing different BQM implementations * Testing is now done with parameterized package - this does not affect installed packages * FileView version 2.0 with improved docs
# # ================================================================================================ -__version__ = '0.9.3' +__version__ = '0.9.4' __author__ = 'D-Wave Systems Inc.' __authoremail__ = '[email protected]' __description__ = 'A shared API for binary quadratic model samplers.'
Fix input test Updated with constraints as static method
@@ -41,4 +41,6 @@ class TestCRESTInput(PymatgenTest): cin = CRESTInput(molecule=mol, constraints=constraints) with open(os.path.join(expected_output_dir, "expected_constrains.txt"), "r") as f: exp_con = f.read() - self.assertEqual(exp_con.strip(), cin.constrains_template().strip()) + self.assertEqual(exp_con.strip(), cin.constrains_template( + molecule=mol,reference_fnm='crest_in.xyz', + constraints=constraints).strip())
Update .env. Removed old values from Servers 2 / 3 / 4. Added missing = sign.
@@ -71,14 +71,12 @@ HLL_HOST_2= HLL_PORT_2= HLL_PASSWORD_2= RCONWEB_PORT_2=8011 -RCONWEB_PASSWORD_2= -RCONWEB_USERNAME_2=rconuser DISCORD_WEBHOOK_AUDIT_LOG_2= SERVER_SHORT_NAME_2=MyServer2 DISCORD_CHAT_WEBHOOK_2= DISCORD_PING_TRIGGER_WORDS_2= DISCORD_PING_TRIGGER_ROLES_2= -DISCORD_PING_TRIGGER_WEBHOOK_2 +DISCORD_PING_TRIGGER_WEBHOOK_2= DISCORD_KILLS_WEBHOOK_2= DISCORD_SEND_KILL_UPDATES_2= DISCORD_SEND_TEAM_KILL_UPDATES_2=yes @@ -89,8 +87,6 @@ HLL_HOST_3= HLL_PORT_3= HLL_PASSWORD_3= RCONWEB_PORT_3=8012 -RCONWEB_PASSWORD_3= -RCONWEB_USERNAME_3=rconuser DISCORD_WEBHOOK_AUDIT_LOG_3= SERVER_SHORT_NAME_3=MyServer3 DISCORD_CHAT_WEBHOOK_3= @@ -107,8 +103,6 @@ HLL_HOST_4= HLL_PORT_4= HLL_PASSWORD_4= RCONWEB_PORT_4=8013 -RCONWEB_PASSWORD_4= -RCONWEB_USERNAME_4=rconuser DISCORD_WEBHOOK_AUDIT_LOG_4= SERVER_SHORT_NAME_4=MyServer4 DISCORD_CHAT_WEBHOOK_4=
Update docker-compose.yaml change version/tag of image (latest is not found)
@@ -7,6 +7,6 @@ services: - ~/OpenBBUserData:/home/python/OpenBBUserData - ~/.openbb_terminal:/home/python/.openbb_terminal platform: linux/amd64 - image: ghcr.io/openbb-finance/openbbterminal/openbb:latest + image: ghcr.io/openbb-finance/openbbterminal/openbb:2.0.0 stdin_open: true # docker run -i tty: true # docker run -t
Load gen_extra.calc at import time This allows downstream variants to import their gen_extra/calc.py module.
@@ -48,6 +48,12 @@ CLOUDCONFIG_KEYS = {'coreos', 'runcmd', 'apt_sources', 'root', 'mounts', 'disk_s PACKAGE_KEYS = {'package', 'root'} +# Allow overriding calculators with a `gen_extra/calc.py` if it exists +gen_extra_calc = None +if os.path.exists('gen_extra/calc.py'): + gen_extra_calc = importlib.machinery.SourceFileLoader('gen_extra.calc', 'gen_extra/calc.py').load_module() + + def stringify_configuration(configuration: dict): """Create a stringified version of the complete installer configuration to send to gen.generate()""" @@ -448,10 +454,8 @@ def get_dcosconfig_source_target_and_templates( base_source = gen.internals.Source(is_user=False) base_source.add_entry(gen.calc.entry, replace_existing=False) - # Allow overriding calculators with a `gen_extra/calc.py` if it exists - if os.path.exists('gen_extra/calc.py'): - mod = importlib.machinery.SourceFileLoader('gen_extra.calc', 'gen_extra/calc.py').load_module() - base_source.add_entry(mod.entry, replace_existing=True) + if gen_extra_calc: + base_source.add_entry(gen_extra_calc.entry, replace_existing=True) def add_builtin(name, value): base_source.add_must(name, json_prettyprint(value))
Update bug-fix-release.md Added ordering coaster to T-2 Logistics
@@ -205,7 +205,7 @@ The final release is cut - RC cuts and bug fixes should be completed by this dat - Ensure [Security Policies](https://docs.mattermost.com/process/security.html) page has been updated - Update dependancies after release branch is cut in `mattermost-server`, `mattermost-webapp`, `desktop`, `mattermost-mobile` and `mattermost-redux` 5. Logistics: - - Update [MVP page](https://www.mattermost.org/mvp/) with the most valuable contributor of the release + - Update [MVP page](https://www.mattermost.org/mvp/) with the most valued professional of the release and order the contributor's coaster 6. Docs: - Finalize docs - If reviews are not complete, hold a 30 minute doc review meeting with PMs and anyone else who has changed or reviewed docs this release and wants to join
6.4 migration: fix ordering of drop_drift calls First drop the trigger that uses the column, then drop the column
@@ -457,12 +457,6 @@ EXECUTE PROCEDURE recalc_drift_instance_counts_update(); def drop_drift_availability_columns(): - op.drop_column('node_instances', 'is_status_check_ok') - op.drop_column('node_instances', 'has_configuration_drift') - op.drop_column('deployments', 'unavailable_instances') - op.drop_column('deployments', 'drifted_instances') - op.drop_column('nodes', 'unavailable_instances') - op.drop_column('nodes', 'drifted_instances') op.execute(""" DROP TRIGGER recalc_drift_instance_counts_insert ON node_instances; DROP TRIGGER recalc_drift_instance_counts_update ON node_instances; @@ -470,3 +464,9 @@ DROP FUNCTION recalc_drift_instance_counts_insert(); DROP FUNCTION recalc_drift_instance_counts_update(); DROP FUNCTION recalc_drift_instance_counts(integer); """) + op.drop_column('node_instances', 'is_status_check_ok') + op.drop_column('node_instances', 'has_configuration_drift') + op.drop_column('deployments', 'unavailable_instances') + op.drop_column('deployments', 'drifted_instances') + op.drop_column('nodes', 'unavailable_instances') + op.drop_column('nodes', 'drifted_instances')
Fix bug Properly mark variables within XML The XML placeables must be marked before variable placeables to avoid marking variables, but leaving out XML attributes and tags.
@@ -90,20 +90,28 @@ def mark_placeables(text): NewlineEscapePlaceable.parse, TabEscapePlaceable.parse, EscapePlaceable.parse, + # The spaces placeable can match '\n ' and mask the newline, # so it has to come later. SpacesPlaceable.parse, - PythonFormatNamedPlaceable.parse, - PythonFormatPlaceable.parse, + + # The XML placeables must be marked before variable placeables + # to avoid marking variables, but leaving out tags. See: + # https://bugzilla.mozilla.org/show_bug.cgi?id=1334926 general.XMLTagPlaceable.parse, general.AltAttrPlaceable.parse, general.XMLEntityPlaceable.parse, + + PythonFormatNamedPlaceable.parse, + PythonFormatPlaceable.parse, general.PythonFormattingPlaceable.parse, general.JavaMessageFormatPlaceable.parse, general.FormattingPlaceable.parse, + # The Qt variables can consume the %1 in %1$s which will mask a printf # placeable, so it has to come later. general.QtFormattingPlaceable.parse, + general.UrlPlaceable.parse, general.FilePlaceable.parse, general.EmailPlaceable.parse,
Filter out crashes in fuzz_task which have an empty state. Should fix
@@ -231,6 +231,9 @@ class Crash(object): if self.is_archived() and not self.fuzzed_key: return 'Unable to store testcase in blobstore: %s' % self.crash_state + if not self.crash_state or not self.crash_type: + return 'Empty crash state or type' + return None @@ -996,8 +999,8 @@ def filter_crashes(crashes): for crash in crashes: if not crash.is_valid(): - logs.log('Ignore crash (reason=%s, state=%s).' % (crash.get_error(), - crash.crash_state)) + logs.log('Ignore crash (reason=%s, type=%s, state=%s).' % + (crash.get_error(), crash.crash_type, crash.crash_state)) continue filtered.append(crash)
fix sparse dqn Summary: Pull Request resolved: we need to unfold embeddings from different sparse features
@@ -72,8 +72,12 @@ class SparseDQN(ModelBase): dense_features = torch.cat( (state.float_features, action.float_features), dim=-1 ) + batch_size = dense_features.shape[0] sparse_features = self.fetch_id_list_features(state, action) + # shape: batch_size, num_sparse_features, embedding_dim embedded_sparse = self.sparse_arch(sparse_features) + # shape: batch_size, num_sparse_features * embedding_dim + embedded_sparse = embedded_sparse.reshape(batch_size, -1) concatenated_dense = torch.cat((dense_features, embedded_sparse), dim=-1) return self.q_network(concatenated_dense)
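A minimal shape check (illustrative sizes, plain PyTorch rather than the ReAgent model) of what the added reshape fixes: embeddings for the different sparse features arrive as a 3-D tensor and have to be flattened per example before they can be concatenated with the dense features.
```python
import torch

batch_size, num_sparse_features, embedding_dim = 4, 3, 8
dense_features = torch.randn(batch_size, 16)
embedded_sparse = torch.randn(batch_size, num_sparse_features, embedding_dim)

# Unfold the per-feature embeddings into one flat vector per example.
embedded_sparse = embedded_sparse.reshape(batch_size, -1)   # (4, 24)
concatenated = torch.cat((dense_features, embedded_sparse), dim=-1)
print(concatenated.shape)  # torch.Size([4, 40])
```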
Mention jupytext --check pytest in the documentation Closes
@@ -42,8 +42,13 @@ You may also find useful to `--pipe` the text representation of a notebook into jupytext --sync --pipe black notebook.ipynb # read most recent version of notebook, reformat with black, save ``` -Execute `jupytext --help` to access the full documentation. +For programs that don't accept pipes, use `{}` as a placeholder for the name of a temporary file that will contain the text representation of the notebook. For instance, run `pytest` on your notebook with: +```bash +jupytext --check 'pytest {}' notebook.ipynb # export the notebook in format py:percent in a temp file, run pytest +``` +(read more about running `pytest` on notebooks in our example [`Tests in a notebook.md`](https://github.com/mwouts/jupytext/blob/master/demo/Tests%20in%20a%20notebook.md)). +Execute `jupytext --help` to access the full documentation. ## Notebook and cell metadata filters
Ability to manually turn off pants pex creation for test running. Useful e.g., if running pants from sources in another repo via a script that invokes pants's own ./pants wrapper. In that case we know we don't have issues with pants's integration test rooting.
@@ -122,7 +122,9 @@ for arg in "$@"; do test_goal_used=true fi done -if [[ "${test_goal_used}" == 'true' && "${TRAVIS}" != 'true' ]]; then + +no_regen_pex="${NO_REGEN_PEX:-${TRAVIS}}" +if [[ "${test_goal_used}" == 'true' && "${no_regen_pex}" != 'true' ]]; then "$HERE/build-support/bin/bootstrap_pants_pex.sh" fi
Also register op schema when no kernels are registered Summary: Pull Request resolved:
@@ -34,6 +34,13 @@ private: #define C10_DEFINE_OP_SCHEMA(Name, Schema) \ C10_EXPORT const c10::OperatorHandle& Name() { \ + /* must be meyers singleton to make sure this is registered before any */ \ + /* kernels referencing it are registered. */ \ static ::c10::detail::OpSchemaRegistrar registrar(Schema); \ return registrar.opHandle(); \ + } \ + namespace { \ + /* to make sure the schema is registered even if it is not referenced by */ \ + /* a kernel registration, call it in a global object. */ \ + const c10::OperatorHandle& _c10_op_registration_instance_##Name = Name(); \ }
Update Travis builds to run on Trusty. Drops pypy builds.
sudo: false +dist: trusty + language: python python: - "2.7" @@ -6,16 +8,12 @@ python: - "3.4" - "3.5" - "3.6" - # allow failures on CPython dev and pypy + # allow failures on CPython dev # we want to be warned about these, but they aen't critical - "3.7-dev" - - "pypy" - - "pypy3" matrix: allow_failures: - python: "3.7-dev" - - python: "pypy" - - python: "pypy3" cache: pip script: - make travis
pkg_unparsing_impl_body_ada.mako: remove unused function TN:
@@ -35,11 +35,6 @@ package body ${ada_lib_name}.Unparsing.Implementation is subtype Present_Token_Sequence_Template is Token_Sequence_Template (True); - function Create_Token_Sequence - (First, Last : Token_Type) return Present_Token_Sequence_Template - with Pre => First /= No_Token and then Last /= No_Token; - -- Create a present sequence of tokens from the given token range - function Create_Token_Sequence (Unparser : Token_Sequence_Access; First_Token : in out Token_Type) return Present_Token_Sequence_Template @@ -149,16 +144,6 @@ package body ${ada_lib_name}.Unparsing.Implementation is -- Create_Token_Sequence -- --------------------------- - function Create_Token_Sequence - (First, Last : Token_Type) return Present_Token_Sequence_Template is - begin - return (Present => True, First => First, Last => Last); - end Create_Token_Sequence; - - --------------------------- - -- Create_Token_Sequence -- - --------------------------- - function Create_Token_Sequence (Unparser : Token_Sequence_Access; First_Token : in out Token_Type) return Present_Token_Sequence_Template
Python3.8: Improved compatibility with behaviour changes * These were causing errors to the test suite (keep GeneratorExit) and the other was derived from diff, closing sets async form of the stop iteration.
@@ -1219,6 +1219,17 @@ static int Nuitka_AsyncgenAthrow_traverse(struct Nuitka_AsyncgenAthrowObject *as } static PyObject *Nuitka_AsyncgenAthrow_send(struct Nuitka_AsyncgenAthrowObject *asyncgen_athrow, PyObject *arg) { +#if _DEBUG_ASYNCGEN + PRINT_STRING("Nuitka_AsyncgenAthrow_send: Enter with state:\asyncgen_athrow:"); + PRINT_ITEM((PyObject *)asyncgen_athrow); + PRINT_NEW_LINE(); + PRINT_FORMAT("State on entry is asyncgen_athrow->m_state = %d (%s)\n", asyncgen_athrow->m_state, + getAwaitableStateStr(asyncgen_athrow->m_state)); + PRINT_STRING("Nuitka_AsyncgenAthrow_send: arg:"); + PRINT_ITEM(arg); + PRINT_NEW_LINE(); +#endif + struct Nuitka_AsyncgenObject *asyncgen = asyncgen_athrow->m_gen; // If finished, just report StopIteration. @@ -1243,7 +1254,12 @@ static PyObject *Nuitka_AsyncgenAthrow_send(struct Nuitka_AsyncgenAthrowObject * // Can also close only once. if (asyncgen->m_closed) { +#if PYTHON_VERSION >= 380 + asyncgen_athrow->m_state = AWAITABLE_STATE_CLOSED; + PyErr_SetNone(PyExc_StopAsyncIteration); +#else PyErr_SetNone(PyExc_StopIteration); +#endif return NULL; } @@ -1342,14 +1358,31 @@ check_error: } else if (PyErr_ExceptionMatches(PyExc_GeneratorExit)) { asyncgen_athrow->m_state = AWAITABLE_STATE_CLOSED; +#if PYTHON_VERSION >= 380 + if (asyncgen_athrow->m_args == NULL) { +#endif CLEAR_ERROR_OCCURRED(); PyErr_SetNone(PyExc_StopIteration); +#if PYTHON_VERSION >= 380 + } +#endif } return NULL; } static PyObject *Nuitka_AsyncgenAthrow_throw(struct Nuitka_AsyncgenAthrowObject *asyncgen_athrow, PyObject *args) { +#if _DEBUG_ASYNCGEN + PRINT_STRING("Nuitka_AsyncgenAthrow_throw: Enter with state:\asyncgen_athrow:"); + PRINT_ITEM((PyObject *)asyncgen_athrow); + PRINT_NEW_LINE(); + PRINT_FORMAT("State on entry is asyncgen_athrow->m_state = %d (%s)\n", asyncgen_athrow->m_state, + getAwaitableStateStr(asyncgen_athrow->m_state)); + PRINT_STRING("Nuitka_AsyncgenAthrow_throw: args:"); + PRINT_ITEM(args); + PRINT_NEW_LINE(); +#endif + PyObject *retval; #if PYTHON_VERSION < 375
DOC: updated block comments Updated some block comment grammar.
@@ -582,9 +582,9 @@ def generate_instrument_list(inst_loc, user_info=None): if not travis_skip: instrument_download.append(inst_dict) elif not inst._password_req: - # we don't want to test download for this combo - # But we do want to test the download warnings - # for instruments without a password requirement + # We don't want to test download for this combo, but + # we do want to test the download warnings for + # instruments without a password requirement instrument_no_download.append(inst_dict) output = {'names': instrument_names,
Removed validation No longer necessary because select2 doesn't allow you to enter free text.
@@ -548,9 +548,6 @@ hqDefine("cloudcare/js/form_entry/entrycontrols_full", function () { self.options.subscribe(function () { self.renderSelect2(); - if (!self.isValid(self.rawAnswer())) { - self.question.error(gettext('Not a valid choice')); - } }); self.renderSelect2 = function () {
llvm, functions/TransferFunctions: Zero the output array before writing max value/indicator Fixes occasional result corruption.
@@ -2261,6 +2261,9 @@ class SoftMax(TransferFunction): with pnlvm.helpers.array_ptr_loop(builder, arg_in, "exp_div") as args: self.__gen_llvm_exp_div(*args, **kwargs) elif output_type == MAX_VAL: + # zero out the output array + with pnlvm.helpers.array_ptr_loop(builder, arg_in, "zero_output") as (b,i): + b.store(ctx.float_ty(0), b.gep(arg_out, [ctx.int32_ty(0), i])) ptri = builder.gep(arg_in, [ctx.int32_ty(0), index]) exp_f = ctx.get_builtin("exp", [ctx.float_ty]) orig_val = builder.load(ptri) @@ -2269,6 +2272,9 @@ class SoftMax(TransferFunction): val = builder.fdiv(val, exp_sum) builder.store(val, ptro) elif output_type == MAX_INDICATOR: + # zero out the output array + with pnlvm.helpers.array_ptr_loop(builder, arg_in, "zero_output") as (b,i): + b.store(ctx.float_ty(0), b.gep(arg_out, [ctx.int32_ty(0), i])) builder.store(ctx.float_ty(1), ptro) return builder
add instructions for setting up .env file for docs release Test Plan: inspection Reviewers: sashank, yuhan, bob
@@ -44,10 +44,17 @@ git push Once you have _confirmed_ that the new version of the site is up at `docs.dagster.io` (may take up to 5 min), clone the following repo and run: ``` -# This updates the search index against the live site +# If you haven't already, check out the doc scraper repo, which builds the search index against +# the live site. If you are not running from a fresh checkout, make sure you've picked up any +# new changes. git clone https://github.com/dagster-io/docsearch-scraper.git cd docsearch-scraper pipenv install pipenv shell + +# This command will update the search index against the live site. +# If this is your first time running this, you will be prompted to set up your `.env` file with the +# appropriate values for `APPLICATION_ID`, and `API_KEY` (see `.env.example`). These should be +# the same as NEXT_ALGOLIA_APP_ID and NEXT_ALGOLIA_ADMIN_KEY, respectively. ./docsearch docker:run config.json ```
WebUI: Remove SOURCE state See commit
* - Thomas Beermann, <[email protected]>, 2014-2015 * - Stefan Prenner, <[email protected]>, 2017-2018 * - Hannes Hansen, <[email protected]>, 2018 - * - Dimitrios Christidis, <[email protected]>, 2019 + * - Dimitrios Christidis, <[email protected]>, 2019-2020 */ html_replicas_base = '<div id="t_replicas" class="columns panel">' + @@ -28,7 +28,6 @@ html_replicas_table = '<table id="dt_replicas" class="compact stripe order-colum '<font color=orange>COPYING</font> ' + '<font color=black>BEING_DELETED</font> ' + '<font color=pink>BAD</font> ' + - '<font color=blue>SOURCE</font> ' + '</div>'; html_contents = '<div id="t_contents" class="columns panel">' + @@ -168,8 +167,6 @@ load_replicas = function(scope, name) { str_rses += "black>" + rse; } else if (state == 'BAD') { str_rses += "pink>" + rse; - } if (state == 'SOURCE') { - str_rses += "blue>" + rse; } str_rses += "</font><br>"; }); @@ -468,8 +465,6 @@ load_dataset_replicas = function(scope, name) { tmp['state'] += "black>" + state; } else if (state == 'BAD') { tmp['state'] += "pink>" + state; - } if (state == 'SOURCE') { - tmp['state'] += "blue>" + state; } tmp['created_at'] = replica['created_at']; tmp['accessed_at'] = replica['accessed_at'];
Add Circuit.zip method Handy to have around when building circuits up in tiled pieces and wanting to guarantee the moment structure comes out right.
@@ -3764,3 +3764,56 @@ def test_deprecated(): circuit = cirq.Circuit([cirq.H(q)]) with cirq.testing.assert_logs('final_state_vector', 'deprecated'): _ = circuit.final_wavefunction() + + +def test_zip(): + a, b, c, d = cirq.LineQubit.range(4) + + circuit1 = cirq.Circuit(cirq.H(a), cirq.CNOT(a, b)) + circuit2 = cirq.Circuit(cirq.X(c), cirq.Y(c), cirq.Z(c)) + circuit3 = cirq.Circuit(cirq.Moment(), cirq.Moment(cirq.S(d))) + + # Calling works both static-style and instance-style. + assert circuit1.zip(circuit2) == cirq.Circuit.zip(circuit1, circuit2) + + # Empty cases. + assert cirq.Circuit.zip() == cirq.Circuit() + assert cirq.Circuit.zip(cirq.Circuit()) == cirq.Circuit() + assert cirq.Circuit().zip(cirq.Circuit()) == cirq.Circuit() + assert circuit1.zip(cirq.Circuit()) == circuit1 + assert cirq.Circuit(cirq.Moment()).zip(cirq.Circuit()) == cirq.Circuit( + cirq.Moment()) + assert cirq.Circuit().zip(cirq.Circuit(cirq.Moment())) == cirq.Circuit( + cirq.Moment()) + + # Small cases. + assert circuit1.zip(circuit2) == circuit2.zip(circuit1) == cirq.Circuit( + cirq.Moment( + cirq.H(a), + cirq.X(c), + ), + cirq.Moment( + cirq.CNOT(a, b), + cirq.Y(c), + ), + cirq.Moment(cirq.Z(c),), + ) + assert circuit1.zip(circuit2, circuit3) == cirq.Circuit( + cirq.Moment( + cirq.H(a), + cirq.X(c), + ), + cirq.Moment( + cirq.CNOT(a, b), + cirq.Y(c), + cirq.S(d), + ), + cirq.Moment(cirq.Z(c),), + ) + + # Overlapping operations. + with pytest.raises(ValueError, match="moment index 1.*\n.*CNOT"): + _ = cirq.Circuit.zip( + cirq.Circuit(cirq.X(a), cirq.CNOT(a, b)), + cirq.Circuit(cirq.X(b), cirq.Z(b)), + )
Added dummy user data for cypress tests Added dummy doctor and staff users to the fixture data used by cypress tests
"groups": [], "user_permissions": [] } + }, + { + "model": "users.user", + "pk": 21, + "fields": { + "password": "argon2$argon2i$v=19$m=512,t=2,p=2$TTBSdHR5U2tlTHNT$YAw7zxAUVGlIUCWH6ejUtg", + "last_login": null, + "is_superuser": false, + "first_name": "Dev", + "last_name": "Doctor", + "email": "", + "is_staff": false, + "is_active": true, + "date_joined": "2022-10-28T06:48:51.373Z", + "username": "devdoctor", + "user_type": 15, + "created_by": 1, + "ward": null, + "local_body": null, + "district": 7, + "state": 1, + "phone_number": "+919876543219", + "alt_phone_number": "+919876543219", + "gender": 1, + "age": 20, + "home_facility": 1, + "verified": true, + "deleted": false, + "pf_endpoint": null, + "pf_p256dh": null, + "pf_auth": null, + "asset": null, + "groups": [], + "user_permissions": [] + } + }, + { + "model": "users.user", + "pk": 22, + "fields": { + "password": "argon2$argon2i$v=19$m=512,t=2,p=2$TTBSdHR5U2tlTHNT$YAw7zxAUVGlIUCWH6ejUtg", + "last_login": null, + "is_superuser": false, + "first_name": "Dev", + "last_name": "Staff", + "email": "", + "is_staff": false, + "is_active": true, + "date_joined": "2022-10-28T06:48:51.373Z", + "username": "devstaff2", + "user_type": 10, + "created_by": 1, + "ward": null, + "local_body": null, + "district": 7, + "state": 1, + "phone_number": "+919876543219", + "alt_phone_number": "+919876543219", + "gender": 1, + "age": 20, + "home_facility": 1, + "verified": true, + "deleted": false, + "pf_endpoint": null, + "pf_p256dh": null, + "pf_auth": null, + "asset": null, + "groups": [], + "user_permissions": [] + } } ]
Cleanup, use same C "bool" definition as CPython2 does. * They use an enum in their headers which, when included, clashes with our previous "int" typedef. * So we do the same, even for Python3, where they seem to have stopped doing that.
#define initstate system_initstate #endif -/* Include the Python C-API header files. */ +/* Include the relevant Python C-API header files. */ #include "Python.h" #include "methodobject.h" #include "frameobject.h" #include "pydebug.h" #include "marshal.h" +/* The bool type. From Python2 header or self defined for Python3. */ +#if PYTHON_VERSION < 300 +#include "asdl.h" +#else +#ifndef __cplusplus +typedef enum {false, true} bool; +#endif +#endif + /* See above. */ #if PYTHON_VERSION < 300 #undef initproc #include <malloc.h> #endif -/* C bool type */ -#ifndef __cplusplus -typedef int bool; -#define true 1 -#define false 0 -#endif - /* An idea I first saw used with Cython, hint the compiler about branches * that are more or less likely to be taken. And hint the compiler about * things that we assume to be normally true. If other compilers can do
Fix typo in schedule inference This was caught by the daceml test suite
@@ -180,7 +180,7 @@ class TilingType(aenum.AutoNumberEnum): # Maps from ScheduleType to default StorageType SCOPEDEFAULT_STORAGE = { - StorageType.Default: StorageType.Default, + ScheduleType.Default: StorageType.Default, None: StorageType.CPU_Heap, ScheduleType.Sequential: StorageType.Register, ScheduleType.MPI: StorageType.CPU_Heap,
more details on decoder in PE tutorial Describe what decoder.rnn_size and decoder.encoder_projection are used for.
@@ -357,6 +357,15 @@ decoder. Without further ado, here it goes:: As in the case of encoders, the decoder needs its RNN and embedding size settings, maximum output length, dropout parameter, and vocabulary settings. +The outputs of the individual encoders are by default simply concatenated +and projected to the decoder hidden state (of ``rnn_size``). Internally, +the code is ready to support arbitrary mappings by adding one more parameter +here: ``encoder_projection``. + +Note that you may set ``rnn_size`` to ``None``. Neural Monkey will then directly +use the concatenation of encoder states without any mapping. This is particularly +useful when you have just one encoder as in MT. + The line ``reuse_word_embeddings=True`` means that the embeddings (including embedding size) are shared with the from the first encoder in the list (here ``trans_encoder``).
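The added paragraphs describe concatenating the encoder outputs and projecting them to the decoder's rnn_size, or using the concatenation directly when rnn_size is None. A rough NumPy sketch of that initialization step (shapes and the projection matrix are made up for illustration; this is not Neural Monkey code):

```python
import numpy as np

trans_state = np.random.rand(600)      # hypothetical trans_encoder final state
src_state = np.random.rand(400)        # hypothetical src_encoder final state
concat = np.concatenate([trans_state, src_state])   # default: plain concatenation

rnn_size = 500                         # decoder hidden size, or None
if rnn_size is None:
    init_state = concat                # use the concatenation unchanged
else:
    W = np.random.rand(concat.shape[0], rnn_size)    # learned projection
    init_state = concat @ W            # project to the decoder state size

print(init_state.shape)                # (500,)
```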
Fix crypto_com default init parameters The default initialization parameters for the crypto_com_order_book_tracker have been modified so that shared_client defaults to None
@@ -31,7 +31,7 @@ class CryptoComOrderBookTracker(OrderBookTracker): def __init__( self, - shared_client: aiohttp.ClientSession, + shared_client: aiohttp.ClientSession = None, throttler: Optional[AsyncThrottler] = None, trading_pairs: Optional[List[str]] = None, ):
Don't autoescape when rendering Jinja templates Enabling autoescape leads to unusable CFN templates.
@@ -1111,7 +1111,11 @@ def render_template(template_str, params_dict, tags, config_version=None): :param params_dict: Template parameters dict """ try: - environment = Environment(loader=BaseLoader, autoescape=True) + # A nosec comment is appended to the following line in order to disable the B701 check. + # This is done because it's needed to enable the desired functionality. The current callers + # of this function pass a template_str representing either a custom template specified by + # the user or the default template. + environment = Environment(loader=BaseLoader) # nosec nosemgrep environment.filters["sha1"] = lambda value: hashlib.sha1(value.strip().encode()).hexdigest() # nosec nosemgrep environment.filters["bool"] = lambda value: value.lower() == "true" template = environment.from_string(template_str)
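The effect of autoescape on a CloudFormation-style template can be reproduced with a short Jinja2 comparison (the template and value below are made up):

```python
from jinja2 import Environment

tpl = '{"Key": "{{ value }}"}'
value = 'a "quoted" string & more'

escaped = Environment(autoescape=True).from_string(tpl).render(value=value)
plain = Environment(autoescape=False).from_string(tpl).render(value=value)

print(escaped)  # quotes and ampersands become &#34; / &amp; -> broken CFN output
print(plain)    # substituted verbatim, which is what the template needs
```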
Support pkgs kwarg in pkg.upgrade on FreeBSD The pkgs kwarg is passed by salt.states.pkg.uptodate. If it is not recognized by the pkg module, all packages are upgraded.
@@ -1154,6 +1154,7 @@ def upgrade(*names, **kwargs): force = kwargs.pop('force', False) local = kwargs.pop('local', False) dryrun = kwargs.pop('dryrun', False) + pkgs = kwargs.pop('pkgs', []) opts = '' if force: opts += 'f' @@ -1168,7 +1169,10 @@ def upgrade(*names, **kwargs): cmd.append('upgrade') if opts: cmd.append('-' + opts) + if names: cmd.extend(names) + if pkgs: + cmd.extend(pkgs) old = list_pkgs() result = __salt__['cmd.run_all'](cmd,
ebuild.processor: more rework and simplification Drop the old, unused forget_all_processors() function.
@@ -50,12 +50,6 @@ def _single_thread_allowed(functor): return _inner -@_single_thread_allowed -def forget_all_processors(): - active_ebp_list[:] = [] - inactive_ebp_list[:] = [] - - @_single_thread_allowed def shutdown_all_processors(): """Kill all known processors.""" @@ -336,17 +330,12 @@ class EbuildProcessor: "gid": os_data.portage_gid, "groups": [os_data.portage_gid], }) - else: - if spawn.is_userpriv_capable(): + elif spawn.is_userpriv_capable(): spawn_opts.update({ "gid": os_data.portage_gid, "groups": [0, os_data.portage_gid], }) - # open pipes used for communication - cread, cwrite = os.pipe() - dread, dwrite = os.pipe() - # force invalid bashrc env = {x: "/not/valid" for x in ("BASHRC", "BASH_ENV")} @@ -380,6 +369,10 @@ class EbuildProcessor: "PKGCORE_EBD_WRITE_FD": str(max_fd - 3), }) + # open pipes used for communication + cread, cwrite = os.pipe() + dread, dwrite = os.pipe() + # allow pipe overrides except ebd-related ebd_pipes = {0: 0, 1: 1, 2: 2} if fd_pipes:
[cmap] Document rationale for getBestCmap choice of subtable Fixes
@@ -91,6 +91,11 @@ class table__c_m_a_p(DefaultTable.DefaultTable): (0, 1), # Unicode 1.1 (0, 0) # Unicode 1.0 + This particular order matches what HarfBuzz uses to choose what + subtable to use by default. This order prefers the largest-repertoire + subtable, and among those, prefers the Windows-platform over the + Unicode-platform as the former has wider support. + This order can be customized via the ``cmapPreferences`` argument. """ for platformID, platEncID in cmapPreferences:
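The documented selection strategy amounts to scanning an ordered preference list and returning the first subtable that exists. A standalone sketch over a hypothetical {(platformID, platEncID): mapping} dict (not the library's actual implementation):

```python
CMAP_PREFERENCES = [
    (3, 10), (0, 6), (0, 4),                  # full-repertoire subtables first
    (3, 1), (0, 3), (0, 2), (0, 1), (0, 0),   # then BMP-only / legacy Unicode
]

def best_cmap(subtables, preferences=CMAP_PREFERENCES):
    """Return the first available subtable in preference order, else None."""
    for key in preferences:
        if key in subtables:
            return subtables[key]
    return None

# hypothetical font exposing only a Windows BMP cmap
print(best_cmap({(3, 1): {0x41: "A"}}))
```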
Typo I think this is supposed to be the type of the `text` param instead of the param itself, right?
@@ -119,7 +119,7 @@ def word_tokenize(text, language='english', preserve_line=False): for the specified language). :param text: text to split into words - :param text: str + :type text: str :param language: the model name in the Punkt corpus :type language: str :param preserve_line: An option to keep the preserve the sentence and not sentence tokenize it.
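For reference, the Sphinx field-list convention being fixed pairs each :param: description with a separate :type: line for the same name. A schematic example (not the original source):

```python
def word_count(text, language="english"):
    """Count the words in ``text``.

    :param text: text to split into words
    :type text: str
    :param language: tokenizer model to use
    :type language: str
    :return: number of words found
    :rtype: int
    """
    return len(text.split())
```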
pyglow check Debugged pyglow check
@@ -46,14 +46,27 @@ install: fi - pwd - ls - - if [[ -d "$pyglow_dir" && -e "$pyglow_dir/setup.py" && "$TRAVIS_PYTHON_VERSION" == "2.7" ]]; then - cd pyglow; + # check if there is a partial download of pyglow, remove if true + - export PYGLOW_DIR=./pyglow + - if [[ -d "$PYGLOW_DIR" && "$TRAVIS_PYTHON_VERSION" == "2.7" ]]; then + export PYGLOW_SIZE=($(du -s $PYGLOW_DIR)); + echo 'PYGLOW_SIZE' $PYGLOW_SIZE; + export CHECK_SIZE=50000; + if [[ "${PYGLOW_SIZE}" -lt "$CHECK_SIZE" ]]; then + rm -r $PYGLOW_DIR; + echo 'removing pyglow directory'; + fi + fi + # If pyglow exists, install it + - if [[ -d "$PYGLOW_DIR" && -e "$PYGLOW_DIR/setup.py" && "$TRAVIS_PYTHON_VERSION" == "2.7" ]]; then + cd $PYGLOW_DIR; python setup.py install; cd ..; fi - - if [[ ! -d "$pyglow_dir" || ! -e "$pyglow_dir/setup.py" && "$TRAVIS_PYTHON_VERSION" == "2.7" ]]; then + # If no pyglow, clone, then run script + - if [[ ! -d "$PYGLOW_DIR" || ! -e "$PYGLOW_DIR/setup.py" && "$TRAVIS_PYTHON_VERSION" == "2.7" ]]; then travis_wait 40 git clone https://github.com/timduly4/pyglow.git; - cd pyglow; + cd $PYGLOW_DIR; travis_wait 40 ./pyglow_install.sh >/dev/null; cd ..; fi
docs: Add the firestore_setup_client_create_with_project_id region tag The Firestore quickstart_new_instance sample can be used to demonstrate setting up client using a project id. With this region tag, we can include it into this doc:
@@ -21,12 +21,14 @@ from google.cloud import firestore def quickstart_new_instance(): # [START firestore_setup_client_create] + # [START firestore_setup_client_create_with_project_id] from google.cloud import firestore # The `project` parameter is optional and represents which project the client # will act on behalf of. If not supplied, the client falls back to the default # project inferred from the environment. db = firestore.Client(project='my-project-id') + # [END firestore_setup_client_create_with_project_id] # [END firestore_setup_client_create] return db
Renamed getChildView => childView getChildView was removed from CollectionView
@@ -111,7 +111,7 @@ hqDefine("cloudcare/js/formplayer/layout/views/settings", function () { var SettingsContainerView = Marionette.CollectionView.extend({ tagName: 'tbody', - getChildView: function (item) { + childView: function (item) { if (item.get('slug') === slugs.SET_LANG) { return LangSettingView; } else if (item.get('slug') === slugs.SET_DISPLAY) {
Update todos.css Cleaned up code to make it easier to read
@@ -4,13 +4,13 @@ body{ background: linear-gradient(to right, #96DEDA, #50C9C3); /* W3C, IE 10+/ Edge, Firefox 16+, Chrome 26+, Opera 12+, Safari 7+ */ } + #container{ width: 350px; margin: 10% auto; background: #f7f7f7; box-shadow: 0px 0px 1px rgba(0, 0, 0, 0.2); height: 350px; - } header @@ -37,12 +37,13 @@ h1{ font-size: 24px; letter-spacing: 1.1px; } + .fa-plus{ float:right; } + .fa-trash{ margin: right:20px; - background: #FA2C40; color: white; text-align: center; @@ -50,7 +51,6 @@ h1{ display: block; height: 25px; transition: 0.2s; - } li:hover .fa-trash{ @@ -64,6 +64,7 @@ ul{ margin: 0; padding: 0; } + li{ margin: 0px; padding-top: 0px; @@ -71,11 +72,12 @@ li{ height: 40px; color: #666; background-color: #f7f7f7; - } + li:nth-child(2n){ background-color: white; } + input{ border: 3px rgba(0, 0, 0, 0); width: 100%; @@ -85,11 +87,13 @@ input{ font-size: 18px; background-color: white; } + input:focus{ border: 2.5px solid steelblue; outline: none; - background-color::#f7f7f7; + background-color: #f7f7f7; } + .h11 { font-size: 20px; font-family: Arial;
Added "www.vdtrack.com" to "data/StevenBlack/hosts" Follow-up to the last one. 127.0.0.1 www.vdtrack.com
127.0.0.1 twitter.cm # common misspelling 127.0.0.1 ttwitter.com # common misspelling and, besides, scumbags there. 127.0.0.1 vdtrack.com +127.0.0.1 www.vdtrack.com 127.0.0.1 virtual.thewhig.com 127.0.0.1 www.dobre-programy.pl 127.0.0.1 www.quickcash-system.com
Pack token records This saves quite a lot of memory and has minimal performance impact.
@@ -47,7 +47,7 @@ package Langkit_Support.Token_Data_Handlers is -- this is either null or the symbolization of the token text. -- -- For instance: null for keywords but actual text for identifiers. - end record; + end record with Pack; -- Holder for per-token data to be stored in the token data handler -- Trivias are tokens that are not to be taken into account during parsing, @@ -58,7 +58,7 @@ package Langkit_Support.Token_Data_Handlers is type Trivia_Node is record T : aliased Stored_Token_Data; Has_Next : Boolean; - end record; + end record with Pack; -- This defines a node in a trivia linked list package Token_Vectors is new Langkit_Support.Vectors
config: fix ipv6 As of nautilus, if you set `ms bind ipv6 = True` you must explicitly set `ms bind ipv4 = False` too, otherwise OSDs will still try to pick up an IPv4 address. Closes:
@@ -10,6 +10,7 @@ auth supported = none {% endif %} {% if ip_version == 'ipv6' %} ms bind ipv6 = true +ms bind ipv4 = false {% endif %} {% if common_single_host_mode is defined and common_single_host_mode %} osd crush chooseleaf type = 0
Qt : Use legacy xcb tablet coordinates Works around an issue with Qt 5.12+ when using a Wacom tablet on Linux. This generally results in clicks being misplaced and drag operations behaving erratically. See: This can hopefully be removed once this patch is in:
@@ -56,6 +56,8 @@ fi export LC_NUMERIC=C +########################################################################## + # Find where this script is located, resolving any symlinks that were used # to invoke it. Set GAFFER_ROOT based on the script location. ########################################################################## @@ -193,6 +195,14 @@ if [[ -e $GAFFER_ROOT/qt/plugins ]] ; then export QT_QPA_PLATFORM_PLUGIN_PATH="$GAFFER_ROOT/qt/plugins" fi +# Work around issue with Qt 5.12+ when using a wacom tablet on linux. +# See https://bugreports.qt.io/browse/QTBUG-77826 +# This can hopefully be removed once this patch is in: +# https://codereview.qt-project.org/c/qt/qtbase/+/284141 +########################################################################## + +export QT_XCB_TABLET_LEGACY_COORDINATES=1 + # Get the executable path set up, for running child processes from Gaffer ##########################################################################
update: remove an old parameter in ceph_key module call The `containerized` parameter in the ceph_key module doesn't exist anymore. This was making the module fail, but the failure was hidden because of `ignore_errors: True`.
caps: mon: "allow profile {{ item.0 }}" cluster: "{{ cluster }}" - containerized: "{{ 'docker exec ceph-mon-' + hostvars[item.1]['ansible_hostname'] if containerized_deployment else None }}" when: - cephx delegate_to: "{{ item.1 }}"
Make ceph-ansible integration respect PythonInterpreter PythonInterpreter defaults to /usr/bin/python. If a user overrides this default, e.g. to something like python3, then we should use it. Modify ceph-base.yml to use the PythonInterpreter parameter. The variable will already be set to ansible_python_interpreter by the calling ansible execution. Closes-Bug:
@@ -469,6 +469,7 @@ outputs: - '{% if ansible_ssh_private_key_file is defined %}--private-key {{ansible_ssh_private_key_file}}{% endif %}' - '-i' - '{{playbook_dir}}/ceph-ansible/inventory.yml' + - '{% if ansible_python_interpreter is defined %}-e ansible_python_interpreter={{ansible_python_interpreter}}{% endif %}' - '{{playbook_dir}}/ceph-ansible/nodes_uuid_playbook.yml' - name: set ceph-ansible params from Heat set_fact: @@ -556,6 +557,7 @@ outputs: expression: $.data.items().select($[0] + '=' + $[1]).join(' ') - ansible-playbook - '{% if ansible_ssh_private_key_file is defined %}--private-key {{ansible_ssh_private_key_file}}{% endif %}' + - '{% if ansible_python_interpreter is defined %}-e ansible_python_interpreter={{ansible_python_interpreter}}{% endif %}' - '-{%- for number in range(0, ceph_ansible_playbook_verbosity) -%}v{% endfor %}' - if: - ceph_ansible_skip_tags_set
improve variable code Remove redundant variable, and rename for better understanding. Fix loading logic to handle cmd and env variables correctly.
@@ -45,12 +45,13 @@ def load_variables( if higher_level_variables is None: higher_level_variables = {} - env_variables = _load_from_env() current_variables: Dict[str, VariableEntry] = dict() if isinstance(higher_level_variables, list): + env_variables = _load_from_env() cmd_variables = _load_from_pairs(higher_level_variables) else: current_variables.update(higher_level_variables) + env_variables = {} cmd_variables = {} # current_variables uses to support variable in variable file path current_variables.update(env_variables) @@ -58,10 +59,11 @@ def load_variables( final_variables: Dict[str, VariableEntry] = dict() final_variables.update( - _load_from_runbook(runbook_data, current_variables=current_variables) + _load_from_runbook(runbook_data, higher_level_variables=current_variables) ) if isinstance(higher_level_variables, dict): final_variables.update(higher_level_variables) + else: final_variables.update(env_variables) final_variables.update(cmd_variables) @@ -103,11 +105,10 @@ def _load_from_env() -> Dict[str, VariableEntry]: def _load_from_runbook( - runbook_data: Any, current_variables: Dict[str, VariableEntry] + runbook_data: Any, higher_level_variables: Dict[str, VariableEntry] ) -> Dict[str, VariableEntry]: # make a copy to prevent modifying existing dict - current_variables = current_variables.copy() - results: Dict[str, VariableEntry] = {} + current_variables = higher_level_variables.copy() if constants.VARIABLE in runbook_data: variable_entries: List[ @@ -150,14 +151,14 @@ def _load_from_runbook( value, is_secret=entry.is_secret, ) - results.update(loaded_variables) current_variables.update(loaded_variables) + current_variables.update(higher_level_variables) is_current_updated = True left_variables.remove(entry) if undefined_variables: raise LisaException(f"variables are undefined: {undefined_variables}") - return results + return current_variables def _load_from_file(
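The precedence this rework enforces comes down to the order in which dict.update calls are applied: values merged later win. A toy sketch of that merge (names and the exact ordering are illustrative, not the LISA implementation):

```python
def merge_variables(runbook, env, cmd):
    """Later updates override earlier ones: runbook < env < cmd."""
    final = {}
    final.update(runbook)
    final.update(env)
    final.update(cmd)
    return final

print(merge_variables(
    {"location": "westus2", "user": "runbook"},
    {"user": "env"},
    {"location": "eastus"},
))  # {'location': 'eastus', 'user': 'env'}
```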
Don't build sdist/bdist_wheel with script of deploy phase Trying to fix diofant/diofant#777
@@ -64,9 +64,8 @@ matrix: - git clone https://github.com/diofant/diofant.github.io.git sphinx_docs - rm -rf sphinx_docs/.git - rsync -a --delete build/sphinx/html/ sphinx_docs/en/latest/ - - python setup.py sdist bdist_wheel - VERSION=$(python -c 'import diofant;print(diofant.__version__)') - - cp -a dist gdist + - mkdir gdist - cp -a build/sphinx/html diofant-docs-html-${VERSION} - zip -r gdist/diofant-docs-html-${VERSION}.zip diofant-docs-html-${VERSION} - cp -a build/sphinx/latex/diofant.pdf gdist/diofant-docs-${VERSION}.pdf @@ -97,7 +96,9 @@ matrix: skip_cleanup: true draft: true file_glob: true - file: gdist/* + file: + - dist/* + - gdist/* name: "Diofant ${VERSION}" body: "See [release notes](https://diofant.readthedocs.io/\ en/latest/release/notes-${SHORT_VERSION}.html)."
regression in page parsing causes baseline to be set to polygon shape. fixes
@@ -171,7 +171,7 @@ def parse_page(filename): baseline = None if base is not None and not base.get('points').isspace() and len(base.get('points')): try: - baseline = _parse_coords(pol.get('points')) + baseline = _parse_coords(base.get('points')) except: logger.info('TextLine {} without baseline'.format(line.get('id'))) continue
Fix overzealous html-escaping in Markup renderer HTML entities in link and image titles, urls, and image alts were being double-escaped. The following is legal Markdown ![&copy;](img.png) It should render to <img src="img.png" alt="&copy;"> rather than <img src="img.png" alt="&amp;copy;"> Ref:
@@ -2,7 +2,6 @@ import threading from weakref import ref as weakref import mistune -from markupsafe import escape from markupsafe import Markup from werkzeug.urls import url_parse @@ -12,6 +11,10 @@ from lektor.context import get_ctx _markdown_cache = threading.local() +def escape(text: str) -> str: + return mistune.escape(text, quote=True) + + class ImprovedRenderer(mistune.Renderer): def link(self, link, title, text): if self.record is not None:
Add order_by kwarg to DWaveSampler constructor Used for adjusting the feature based solver selection.
@@ -105,6 +105,12 @@ class DWaveSampler(dimod.Sampler, dimod.Structured): then it will instead propogate the `SolverNotFoundError` to the user. + order_by (callable/str/None): + Solver sorting key function or (or :class:`~dwave.cloud.Solver` + attribute/item dot-separated path). + See :class:`~dwave.cloud.Client.get_solvers` for a more detailed + description of the parameter. + config_file (str, optional): Path to a configuration file that identifies a D-Wave system and provides connection information. @@ -151,7 +157,7 @@ class DWaveSampler(dimod.Sampler, dimod.Structured): for explanations of technical terms in descriptions of Ocean tools. """ - def __init__(self, failover=False, retry_interval=-1, **config): + def __init__(self, failover=False, retry_interval=-1, order_by=None, **config): if config.get('solver_features') is not None: warn("'solver_features' argument has been renamed to 'solver'.", DeprecationWarning) @@ -162,7 +168,12 @@ class DWaveSampler(dimod.Sampler, dimod.Structured): config['solver'] = config.pop('solver_features') self.client = Client.from_config(**config) + + if order_by is None: + # use the default from the cloud-client self.solver = self.client.get_solver() + else: + self.solver = self.client.get_solver(order_by=order_by) self.failover = failover self.retry_interval = retry_interval
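Key-based solver selection of this kind reduces to sorting the candidates by the supplied key and taking the first one. A generic sketch with made-up solver records (this is not the dwave-cloud-client API):

```python
solvers = [
    {"name": "small", "num_active_qubits": 2000},
    {"name": "large", "num_active_qubits": 5000},
]

def pick_solver(solvers, order_by=None):
    """Return the first solver after sorting by the given key function."""
    if order_by is None:
        return solvers[0]                       # fall back to default ordering
    return sorted(solvers, key=order_by)[0]

best = pick_solver(solvers, order_by=lambda s: -s["num_active_qubits"])
print(best["name"])  # 'large'
```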
snapcraft: use 2.1 branch This allows us to pull in the next iteration of Juju including the upcoming bash completion fixes.
@@ -52,7 +52,7 @@ parts: juju: source: https://github.com/juju/juju.git source-type: git - source-tag: juju-2.1-beta5 + source-branch: "2.1" source-depth: 1 plugin: godeps go-importpath: github.com/juju/juju
[cleanup] Raise an exception if 'titles' is still used as where parameter 'titles' value for where parameter was deprecated 5 years ago.
@@ -1296,19 +1296,15 @@ class GeneratorsMixin: :raises TypeError: a namespace identifier has an inappropriate type such as NoneType or bool """ - where_types = ['nearmatch', 'text', 'title', 'titles'] + where_types = ['nearmatch', 'text', 'title'] if not searchstring: raise Error('search: searchstring cannot be empty') if where not in where_types: raise Error("search: unrecognized 'where' value: {}".format(where)) - if where in ('title', 'titles'): - if where == 'titles': - issue_deprecation_warning("where='titles'", "where='title'", - since='20160224') - where = 'title' - if self.has_extension('CirrusSearch') and \ - isinstance(self.family, pywikibot.family.WikimediaFamily): + if where == 'title' \ + and self.has_extension('CirrusSearch') \ + and isinstance(self.family, pywikibot.family.WikimediaFamily): # 'title' search was disabled, use intitle instead searchstring = 'intitle:' + searchstring issue_deprecation_warning(
Enable Cosmos HTTPS tunneling through a proxy secured by basic auth See
@@ -27,6 +27,7 @@ ExecStartPre=/bin/ping -c1 leader.mesos ExecStartPre=/opt/mesosphere/bin/bootstrap dcos-cosmos ExecStart=/opt/mesosphere/bin/java \\ -Xmx2G \\ + -Djdk.http.auth.tunneling.disabledSchemes="" \\ -classpath ${PKG_PATH}/usr/cosmos.jar \\ com.simontuffs.onejar.Boot \\ -admin.port=127.0.0.1:9990 \\
Update task.py reverting change
@@ -19,7 +19,6 @@ import bigbench.api.task as task from bigbench.benchmark_tasks.coqa_conversational_question_answering.coqa_official_evaluation_script import \ CoQAEvaluator import os -import random class CoQA(task.Task): @@ -62,13 +61,8 @@ class CoQA(task.Task): ) def evaluate_model(self, model, max_examples=-1, random_seed=None): - if random_seed: - random.seed(random_seed) - else: - random.seed(0) max_examples = max_examples if max_examples > 0 else self.num_examples turn_ids = list(self.evaluator.gold_data.keys()) - random.shuffle(turn_ids) batch_start_index = 0 predicted_answers = {}
Update Thanos to 0.25.0 Release notes:
%global debug_package %{nil} Name: thanos -Version: 0.24.0 -Release: 2%{?dist} +Version: 0.25.0 +Release: 1%{?dist} Summary: Highly available Prometheus setup with long term storage capabilities. License: ASL 2.0 URL: https://thanos.io
Ditch outdated tasks from deploy.json Server is now containerized, so we don't need to do minification or any of the root_web stuff from github-deploy-repo anymore
"actions": [ "// client - API", - { - "type": "minimize-js", - "src": "src/client/delphi_epidata.js", - "dst": "src/client/delphi_epidata.min.js" - }, { "type": "copy", "src": "src/client/delphi_epidata.py", "dst": "[[package]]/client/delphi_epidata.py", "add-header-comment": true }, - { - "type": "move", - "src": "src/client/", - "dst": "[[root_web]]/lib/", - "match": "^delphi_epidata.*\\.(js|py|coffee|R)$", - "add-header-comment": true - }, - - "// server - API (note: glob doesn't include dotfiles)", - { - "type": "move", - "src": "src/server/", - "dst": "[[root_web]]", - "match": "^.*\\.(html|php)$", - "add-header-comment": true - }, - { - "type": "move", - "src": "src/server/.htaccess", - "dst": "[[root_web]]/.htaccess", - "add-header-comment": true - }, "// server", {
update discord invite discordapp.com domain will go defunct very soon, everything has transitioned to discord.com now
@@ -268,4 +268,4 @@ At this point the rest of the resolution is straightforward since there is no mo * [Official Website](https://python-poetry.org) * [Issue Tracker](https://github.com/python-poetry/poetry/issues) -* [Discord](https://discordapp.com/invite/awxPgve) +* [Discord](https://discord.com/invite/awxPgve)
Update cyclesort.py Changed for Python 3, using exception handling for more robust code
@@ -44,7 +44,13 @@ def cycle_sort(array): # Main Code starts here -user_input = input('Enter numbers separated by a comma:\n') +if __name__ == '__main__': + try: + raw_input # Python 2 + except NameError: + raw_input = input # Python 3 + +user_input = raw_input('Enter numbers separated by a comma:\n') unsorted = [int(item) for item in user_input.split(',')] n = len(unsorted) cycle_sort(unsorted)
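For context, cycle sort writes each element directly into its final slot by counting how many elements are smaller, rotating one cycle at a time. A compact sketch of the standard in-place formulation (the diff above only touches the script's input handling):

```python
def cycle_sort(array):
    """In-place cycle sort; returns the number of writes performed."""
    writes = 0
    for cycle_start in range(len(array) - 1):
        item = array[cycle_start]
        # the item's final index is cycle_start plus the count of smaller elements after it
        pos = cycle_start + sum(1 for x in array[cycle_start + 1:] if x < item)
        if pos == cycle_start:
            continue                            # already in place
        while item == array[pos]:
            pos += 1                            # skip over duplicates
        array[pos], item = item, array[pos]     # drop item, pick up the displaced value
        writes += 1
        while pos != cycle_start:               # rotate the remainder of the cycle
            pos = cycle_start + sum(1 for x in array[cycle_start + 1:] if x < item)
            while item == array[pos]:
                pos += 1
            array[pos], item = item, array[pos]
            writes += 1
    return writes

data = [5, 3, 1, 4, 2]
cycle_sort(data)
print(data)  # [1, 2, 3, 4, 5]
```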
MAINT: Clarify sign of last iircomb coefficient There's no reason to create a variable and then overwrite it.
@@ -5278,18 +5278,20 @@ def iircomb(w0, Q, ftype='notch', fs=2.0): # b - cz^-N or b + cz^-N b = np.zeros(N + 1) b[0] = bx - b[-1] = cx if ftype == 'notch': b[-1] = -cx + else: + b[-1] = +cx # Compute denominator coefficients # Eq 11.5.1 (p. 590) or Eq 11.5.4 (p. 591) from reference [1] # 1 - az^-N or 1 + az^-N a = np.zeros(N + 1) a[0] = 1 - a[-1] = ax if ftype == 'notch': a[-1] = -ax + else: + a[-1] = +ax return b, a
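The sign convention matters because a notch comb needs its numerator to vanish exactly at the comb frequencies (where z**-N = 1), while the peaking variant should be maximal there. A quick numeric check with made-up coefficient magnitudes (not the exact values the filter-design code computes):

```python
import numpy as np

bx = cx = 0.95        # illustrative coefficient magnitudes
N = 10                # comb order: notches/peaks every fs / N

def numerator_gain(sign, w):
    """|bx + sign * cx * z**-N| evaluated on the unit circle at frequency w."""
    z_neg_n = np.exp(-1j * N * w)
    return abs(bx + sign * cx * z_neg_n)

w_comb = 2 * np.pi / N                 # first comb frequency (rad/sample)
print(numerator_gain(-1, w_comb))      # notch: ~0, the frequency is rejected
print(numerator_gain(+1, w_comb))      # peak: ~2*bx, the frequency is passed
```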
swarming: switch ts_mon metrics from seconds to milliseconds The problem isn't the data but the bucketing; they get bucketed at second resolution. Since most hooks run in a negligible amount of time, this leads to unactionable data. Review-Url:
@@ -105,13 +105,13 @@ DEFAULT_SETTINGS = { ### Monitoring -_bucketer = ts_mon.GeometricBucketer(growth_factor=10**0.05, +_bucketer = ts_mon.GeometricBucketer(growth_factor=10**0.07, num_finite_buckets=100) hooks_durations = ts_mon.CumulativeDistributionMetric( 'swarming/bots/hooks/durations', bucketer=_bucketer, - description='Duration of bot hook calls.', - units=ts_mon.MetricsDataUnits.SECONDS) + description='Duration of bot hook calls in ms', + units=ts_mon.MetricsDataUnits.MILLISECONDS) def _flatten_dimensions(dimensions): @@ -129,7 +129,7 @@ def monitor_call(func): try: return func(chained, botobj, name, *args, **kwargs) finally: - duration = max(0, time.time() - start) + duration = max(0, (time.time() - start) * 1000) if botobj and botobj.dimensions: flat_dims = _flatten_dimensions(botobj.dimensions) if flat_dims:
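The motivation: a geometric bucketer's boundaries grow as growth_factor**i, so second-valued samples from fast hooks all collapse into the lowest buckets, while millisecond values spread across many buckets. A standalone sketch of the bucket lower bounds (a hypothetical helper, not the ts_mon implementation):

```python
def bucket_lower_bounds(growth_factor, num_buckets):
    """Geometric bucket lower bounds: 1, g, g**2, ..."""
    return [growth_factor ** i for i in range(num_buckets)]

bounds = bucket_lower_bounds(10 ** 0.07, 100)
# A 5 ms hook reported in seconds (0.005) falls below the first bound;
# reported in milliseconds (5.0) it lands around the tenth bucket.
print(bounds[0], bounds[10], bounds[-1])
```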
Bugfix: validation always fails: 'str' object has no attribute 'get' The recently changed data validation function does its checks by treating attributes/telemetry values as if they were dictionaries; however, they are lists of dictionaries. data["telemetry"].get("ts") is bound to fail: the correct check would be data["telemetry"][i].get("ts"), for each element i.
@@ -43,8 +43,26 @@ class TBUtility: error = 'deviceName is empty in data: ' if error is None and not data.get("deviceType"): error = 'deviceType is empty in data: ' - if error is None and data.get("attributes") is None and (data.get("telemetry") is None or (data["telemetry"].get("ts") is not None and len(data["telemetry"].get("values")) == 0)): + + if error is None: + got_attributes = False + got_telemetry = False + + if data.get("attributes") is not None: + for entry in data.get("attributes"): + if entry.get("ts") is not None and len(entry.get("values")) > 0: + got_attributes = True + break + + if data.get("telemetry") is not None: + for entry in data.get("telemetry"): + if entry.get("ts") is not None and len(entry.get("values")) > 0: + got_telemetry = True + break + + if got_attributes == False and got_telemetry == False: error = 'No telemetry and attributes in data: ' + if error is not None: json_data = dumps(data) if isinstance(json_data, bytes):
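The corrected check walks each list and accepts the payload as soon as one entry carries a timestamp plus at least one value. A stripped-down sketch of that predicate (detached from the gateway's classes):

```python
def has_usable_entries(entries):
    """True if any entry has a ts and a non-empty values mapping."""
    return any(
        entry.get("ts") is not None and len(entry.get("values", {})) > 0
        for entry in entries or []
    )

data = {
    "attributes": [],
    "telemetry": [{"ts": 1000, "values": {"temp": 21.5}}],
}
valid = has_usable_entries(data.get("attributes")) or has_usable_entries(data.get("telemetry"))
print(valid)  # True
```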
Ensemble class modified Ensemble class changed in order to work with the TD3 algorithm: now, without an index, fit fits every model; added min target selection
@@ -28,8 +28,8 @@ class Ensemble(object): def fit(self, *z, **fit_params): """ - Fit the ``idx``-th model of the ensemble if ``idx`` is provided, a - random model otherwise. + Fit the ``idx``-th model of the ensemble if ``idx`` is provided, every + model otherwise. Args: *z (list): a list containing the inputs to use to predict with each @@ -39,7 +39,8 @@ class Ensemble(object): """ idx = fit_params.pop('idx', None) if idx is None: - self[np.random.choice(len(self))].fit(*z, **fit_params) + for i in range(len(self)): + self[i].fit(*z, **fit_params) else: self[idx].fit(*z, **fit_params) @@ -72,6 +73,8 @@ class Ensemble(object): results = np.mean(predictions, axis=0) elif self._prediction == 'sum': results = np.sum(predictions, axis=0) + elif self._prediction == 'min': + results = np.amin(predictions, axis=0) else: raise ValueError if predict_params.get('compute_variance', False):
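The new 'min' mode is the element-wise minimum over the ensemble's predictions, which is what TD3-style clipped double-Q targets use to curb overestimation. A NumPy sketch of just that aggregation step (independent of the project's Ensemble API):

```python
import numpy as np

predictions = np.stack([
    np.array([1.0, 2.5, 0.3]),   # targets from critic 1
    np.array([1.2, 2.0, 0.5]),   # targets from critic 2
])

aggregated = {
    "mean": predictions.mean(axis=0),
    "sum": predictions.sum(axis=0),
    "min": np.amin(predictions, axis=0),   # TD3 clipped double-Q target
}
print(aggregated["min"])  # [1.  2.  0.3]
```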
Adapted AnalogSignal test to the renaming of duplicate_with_new_array to duplicate_with_new_data
@@ -241,7 +241,7 @@ class TestAnalogSignalProperties(unittest.TestCase): signal1 = self.signals[1] signal2 = self.signals[2] data2 = self.data[2] - signal1b = signal1.duplicate_with_new_array(data2) + signal1b = signal1.duplicate_with_new_data(data2) assert_arrays_almost_equal(np.asarray(signal1b), np.asarray(signal2 / 1000.), 1e-12) self.assertEqual(signal1b.t_start, signal1.t_start)
ssh-add -k should be lowercase k Uppercase K errors out on Linux
@@ -99,7 +99,7 @@ def generate_instructions(chapter, platform): dcc.SyntaxHighlighter( ('$ ssh-add ~/.ssh/id_rsa' if platform == 'Windows' else - '$ ssh-add -K ~/.ssh/id_rsa'), + '$ ssh-add -k ~/.ssh/id_rsa'), customStyle=styles.code_container, language='python' ),