message (string, 13-484 chars) | diff (string, 38-4.63k chars)
---|---
llvm/autodiff/optimizer: Provide generic initialization routine
Cleanup. | @@ -79,9 +79,8 @@ class Optimizer():
return llvm_func
- # to be implemented by child classes - sets the initial values for the optim struct
def initialize_optimizer_struct(self, ctx, builder, optim_struct):
- raise Exception("Unimplemented method!")
+ builder.store(optim_struct.type.pointee(None), optim_struct)
# to be implemented by child classes - steps the optimizer
def step(self, ctx, builder, optim_struct, model_params):
@@ -115,22 +114,6 @@ class AdamOptimizer(Optimizer):
extra_types += [m_t, v_t, time_counter]
return super()._get_optimizer_struct_type(ctx, extra_types=extra_types)
- def initialize_optimizer_struct(self, ctx, builder, optim_struct):
- # set initial moments to 0
- delta_w = builder.gep(
- optim_struct, [ctx.int32_ty(0), ctx.int32_ty(self._DELTA_W_NUM)])
- m_t = builder.gep(
- optim_struct, [ctx.int32_ty(0), ctx.int32_ty(self._M_T_NUM)])
- v_t = builder.gep(
- optim_struct, [ctx.int32_ty(0), ctx.int32_ty(self._V_T_NUM)])
- t = builder.gep(
- optim_struct, [ctx.int32_ty(0), ctx.int32_ty(self._T_NUM)])
- self._gen_zero_gradient_struct(ctx, builder, delta_w)
- self._gen_zero_gradient_struct(ctx, builder, m_t)
- self._gen_zero_gradient_struct(ctx, builder, v_t)
- builder.store(ctx.float_ty(0), t)
-
-
# steps the adam optimizer (methodology: https://arxiv.org/pdf/1412.6980.pdf )
def step(self, ctx):
name = self._composition.name + "_ADAM_STEP"
@@ -294,12 +277,6 @@ class SGDOptimizer(Optimizer):
super().__init__(pytorch_model)
self.lr = lr
- def _get_optimizer_struct_type(self, ctx):
- return super()._get_optimizer_struct_type(ctx)
-
- def initialize_optimizer_struct(self, ctx, builder, optim_struct):
- pass
-
# steps the sgd optimizer (methodology: https://arxiv.org/pdf/1412.6980.pdf )
def step(self, ctx):
name = self._composition.name + "_SGD_STEP"
|
added `fluxcov` to aliases for covariances to fix
modified: photdata.py | @@ -25,7 +25,7 @@ PHOTDATA_ALIASES = OrderedDict([
('fluxerr', {'fluxerr', 'fe', 'fluxerror', 'flux_error', 'flux_err'}),
('zp', {'zp', 'zpt', 'zeropoint', 'zero_point'}),
('zpsys', {'zpsys', 'zpmagsys', 'magsys'}),
- ('fluxcov', {'cov', 'covar', 'covariance', 'covmat'})
+ ('fluxcov', {'cov', 'covar', 'covariance', 'covmat', 'fluxcov'})
])
PHOTDATA_REQUIRED_ALIASES = ('time', 'band', 'flux', 'fluxerr', 'zp', 'zpsys')
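For context, the alias table maps many spellings of a column name onto one canonical key, so the canonical name itself has to appear in its own alias set or a column literally called `fluxcov` is not recognized. A standalone sketch of that kind of lookup (illustration only, not sncosmo's actual resolver; `resolve_column` is a made-up helper):

```python
# Minimal sketch of alias-based column resolution (illustration only).
ALIASES = {
    'flux': {'flux', 'f'},
    'fluxcov': {'cov', 'covar', 'covariance', 'covmat', 'fluxcov'},
}

def resolve_column(name):
    """Map an input column name to its canonical key, case-insensitively."""
    lowered = name.lower()
    for canonical, spellings in ALIASES.items():
        if lowered in spellings:
            return canonical
    raise ValueError(f"unrecognized column: {name!r}")

print(resolve_column('covmat'))   # -> 'fluxcov'
print(resolve_column('FLUXCOV'))  # -> 'fluxcov'; fails without the added alias
```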
|
[dagit] Link to run config from Queued tab
Summary: On the "Queued" tab, display a small callout linking to the queue configuration section of the "Instance status" page.
Test Plan: View "Queued" tab, verify rendering and behavior.
Reviewers: johann, prha, dgibson | import {gql, NetworkStatus} from '@apollo/client';
-import {Colors, NonIdealState, Spinner, Tab, Tabs, Tag} from '@blueprintjs/core';
+import {Callout, Colors, Icon, NonIdealState, Spinner, Tab, Tabs, Tag} from '@blueprintjs/core';
import {IconNames} from '@blueprintjs/icons';
import {isEqual} from 'lodash';
import * as React from 'react';
import {RouteComponentProps} from 'react-router';
+import {Link} from 'react-router-dom';
import styled from 'styled-components';
import {CursorPaginationControls} from 'src/CursorControls';
@@ -187,6 +188,11 @@ export const RunsRoot: React.FunctionComponent<RouteComponentProps> = () => {
onChange={setFilterTokens}
loading={queryResult.loading}
/>
+ {selectedTab === 'queued' ? (
+ <Callout icon={<Icon icon="multi-select" iconSize={20} />}>
+ <Link to="/instance/config#run_coordinator">View queue configuration</Link>
+ </Callout>
+ ) : null}
<RunsQueryRefetchContext.Provider value={{refetch: queryResult.refetch}}>
<Loading queryResult={queryResult} allowStaleData={true}>
{({pipelineRunsOrError}) => {
|
Update _html.py
Remove duplicated docs | @@ -423,11 +423,6 @@ def write_html(
require an active internet connection in order to load the plotly.js
library.
- If 'directory', a script tag is included that references an external
- plotly.min.js bundle that is assumed to reside in the same
- directory as the HTML file. If `file` is a string to a local file path
- and `full_html` is True then
-
If 'directory', a script tag is included that references an external
plotly.min.js bundle that is assumed to reside in the same
directory as the HTML file. If `file` is a string to a local file
|
gui: fix xplore page with non-default config path
A couple of sqobjects were initialized without parameters in the xplore
page. This means that if the suzieq configuration was not in one of the
default paths, the page crashes.
Passing the config to the sqobjects fixes the problem. | @@ -127,7 +127,9 @@ class XplorePage(SqGuiPage):
st.form_submit_button('Get', on_click=self._fetch_data)
if state.table:
- state.tables_obj = get_sqobject(state.table)()
+ state.tables_obj = get_sqobject(state.table)(
+ config_file=self._config_file
+ )
fields = state.tables_obj.describe()
colist = sorted((filter(lambda x: x not in ['index', 'sqvers'],
fields.name.tolist())))
@@ -558,7 +560,8 @@ class XplorePage(SqGuiPage):
dfcols = sorted((filter(lambda x: x not in ['index', 'sqvers'],
dfcols)))
- uniq_clicked = get_sqobject(state.table)().unique_default_column[0]
+ uniq_clicked = get_sqobject(state.table)(
+ config_file=self._config_file).unique_default_column[0]
if 'state' in dfcols:
uniq_clicked = 'state'
elif 'status' in dfcols:
|
Increase the prec of test_baddbmm
Summary:
This test is flaky on my computer, the error is:
```
AssertionError: tensor(1.3351e-05) not less than or equal to 1e-05
```
Pull Request resolved: | @@ -14539,7 +14539,7 @@ scipy_lobpcg | {:10.2e} | {:10.2e} | {:6} | N/A
self.assertEqual(torch.baddbmm(1, res2, 0, b1, b2), res2)
res4 = torch.baddbmm(res2, b1, b2, beta=1, alpha=.5)
- self.assertEqual(res4, res * 3)
+ self.assertEqual(res4, res * 3, prec=2e-5)
res5 = torch.baddbmm(res2, b1, b2, beta=0, alpha=1)
self.assertEqual(res5, res)
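For reference, `torch.baddbmm(input, b1, b2, beta=..., alpha=...)` computes `beta * input + alpha * (b1 @ b2)` batch-wise, and two float32 evaluations of that expression that accumulate in different orders can disagree by a little more than 1e-5, which is what the looser `prec=2e-5` allows for. A rough numpy sketch of the size of that rounding gap (not the actual test):

```python
import numpy as np

rng = np.random.default_rng(0)
inp = rng.standard_normal((10, 3, 5)).astype(np.float32)
b1 = rng.standard_normal((10, 3, 4)).astype(np.float32)
b2 = rng.standard_normal((10, 4, 5)).astype(np.float32)

beta, alpha = 1.0, 0.5
# Same formula as baddbmm: beta * input + alpha * (b1 @ b2)
res_f32 = beta * inp + alpha * (b1 @ b2)                       # float32 accumulation
res_f64 = beta * inp.astype(np.float64) + alpha * (b1.astype(np.float64) @ b2.astype(np.float64))

# The gap is tiny but nonzero; for larger matrices it can creep past a strict 1e-5.
print(np.abs(res_f32 - res_f64).max())
```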
|
libmanage.py: install static libraries first
TN: | @@ -795,11 +795,13 @@ class ManageScript(object):
argv.extend(self.gpr_scenario_vars(args, 'prod', library_type))
self.check_call(args, 'Install', argv)
+ # Install the static libraries first, so that in the resulting project
+ # files, "static" is the default library type.
build_shared, build_static = self.what_to_build(args, is_library)
- if build_shared:
- run('relocatable')
if build_static:
run('static')
+ if build_shared:
+ run('relocatable')
def do_build(self, args):
"""
|
Add ApiSetu API
Add ApiSetu to OpenData API | @@ -856,6 +856,7 @@ API | Description | Auth | HTTPS | CORS |
API | Description | Auth | HTTPS | CORS |
|---|---|---|---|---|
| [18F](http://18f.github.io/API-All-the-X/) | Unofficial US Federal Government API Development | No | No | Unknown |
+| [Apisetu.gov.in](https://www.apisetu.gov.in/) | An Indian Government platform that provides a lot of APIS for KYC, business, education & employment | No | Yes | Yes |
| [Archive.org](https://archive.readme.io/docs) | The Internet Archive | No | Yes | Unknown |
| [Callook.info](https://callook.info) | United States ham radio callsigns | No | Yes | Unknown |
| [CARTO](https://carto.com/) | Location Information Prediction | `apiKey` | Yes | Unknown |
|
Correct /index.html for subdirectories on S3
S3 server may not error on serving a directory. Adjust /index.html to
include trailing slash on directory links. | <body>
<h2>{{ title }}</h2>
<h3><a href="channeldata.json">channeldata.json</a></h3>
- {% for subdir in subdirs %}<a href="{{ subdir }}">{{ subdir }}</a> {% endfor %}
+ {% for subdir in subdirs %}<a href="{{ subdir }}/">{{ subdir }}</a> {% endfor %}
<table>
<tr>
<th style="padding-right:18px;">Package</th>
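The trailing slash matters because the relative links in the generated page are resolved against the current URL; without it, a file linked from inside `subdir` resolves one level too high. A quick standalone check with `urllib.parse.urljoin` (bucket and file names are made up):

```python
from urllib.parse import urljoin

base = "https://example-bucket.s3.amazonaws.com/channel/index.html"

# Without a trailing slash the subdir behaves like a sibling file ...
hop = urljoin(base, "linux-64")
print(urljoin(hop, "repodata.json"))
# https://example-bucket.s3.amazonaws.com/channel/repodata.json  (wrong level)

# ... with the trailing slash, relative links resolve inside the subdirectory.
hop = urljoin(base, "linux-64/")
print(urljoin(hop, "repodata.json"))
# https://example-bucket.s3.amazonaws.com/channel/linux-64/repodata.json
```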
|
Remove outdated warning from tutorial text.
The tutorial has been updated and now uses options not available
before 3.7.0. It is misleading to state that the tutorial was
written for v3.5.4. | @@ -6,12 +6,6 @@ Author: Jessica Bruhn, `NanoImaging Services <https://www.nanoimagingservices.co
.. highlight:: none
-.. warning::
-
- This tutorial was prepared using DIALS version 3.5.4, downloaded
- from :doc:`this site <../../../installation>`. Results may differ with other
- versions of the software.
-
General Notes
=============
|
Make error message more informative
Summary:
Pull Request resolved:
I am debugging a failed workflow and found the original error message to be uninformative. | @@ -1718,7 +1718,7 @@ class Net(object):
OrderedDict(inputs) if input_is_pair_list else
OrderedDict(zip(inputs, inputs)))
for output in outputs:
- assert self.BlobIsDefined(output)
+ assert self.BlobIsDefined(output), "{} is not defined".format(output)
input_names = {str(k): str(v) for k, v in viewitems(inputs)}
output_names = [str(o) for o in outputs]
proto = self._net
@@ -1901,7 +1901,7 @@ class Net(object):
def AddExternalOutput(self, *outputs):
for output in outputs:
assert isinstance(output, BlobReference)
- assert self.BlobIsDefined(output)
+ assert self.BlobIsDefined(output), "{} is not defined".format(output)
for output in outputs:
self.Proto().external_output.extend([str(output)])
@@ -1988,7 +1988,7 @@ class Net(object):
'Tried to append to missing output record'
)
for blob in record.field_blobs():
- assert self.BlobIsDefined(blob)
+ assert self.BlobIsDefined(blob), "{} is not defined".format(blob)
for blob in record.field_blobs():
self.AddExternalOutput(blob)
self._output_record = self._output_record + schema.Struct(
|
update Group Id attribute with examples
Add some hints for which Group Id Attribute to use for LDAP and AD. | @@ -2099,7 +2099,7 @@ Group Display Name Attribute
Group Id Attribute
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-(Required) Enter an AD/LDAP Group ID attribute to use as a unique identifier for Groups. This should be an AD/LDAP value that does not change.
+(Required) Enter an AD/LDAP Group ID attribute to use as a unique identifier for Groups. This should be an AD/LDAP value that does not change. For LDAP this is usually `entryUUID`, and for AD it is `objectGUID`.
.. note::
This attribute is used only when AD/LDAP Group Sync is enabled. See `AD/LDAP Group Sync documentation <https://docs.mattermost.com/deployment/ldap-group-sync.html>`_ for more information on enabling and configuring AD/LDAP Group Sync (*Available in Enterprise Edition E20 and higher*).
|
Add more node-exporter fixes
These options changed between 0.12 and 0.15. | @@ -310,8 +310,8 @@ coreos:
--name node-exporter \
--restart always \
prom/node-exporter:v0.15.2 \
- --collector.procfs /host/proc \
- --collector.sysfs /host/sys \
+ --path.procfs /host/proc \
+ --path.sysfs /host/sys \
--collector.filesystem.ignored-mount-points ^/(sys|proc|dev|host|etc)($|/)
ssh_authorized_keys:
|
Change csp_policy to be a function
It depends on map_tiles_src which depends on the asset_url setting.
Better to make it a function.
If we discover it's really important to cache the output, then we can do
that then. | @@ -23,6 +23,7 @@ from ichnaea.content.stats import global_stats, histogram, regions
from ichnaea.models.content import StatKey
from ichnaea import util
+
HERE = os.path.dirname(__file__)
IMAGE_PATH = os.path.join(HERE, "static", "images")
FAVICON_PATH = os.path.join(IMAGE_PATH, "favicon.ico")
@@ -55,7 +56,12 @@ def configure_tiles_url(asset_url):
MAP_TILES_SRC, MAP_TILES_URL = configure_tiles_url(settings("asset_url"))
-CSP_POLICY = CSP_POLICY.format(base=CSP_BASE, tiles=MAP_TILES_SRC)
+
+
+def get_csp_policy():
+ """Return value for Content-Security-Policy HTTP header."""
+ map_tiles_src, _ = configure_tiles_url(settings("asset_url"))
+ return CSP_POLICY.format(base=CSP_BASE, tiles=map_tiles_src)
def configure_content(config):
@@ -93,7 +99,7 @@ def security_headers(event):
response.headers.add("X-Content-Type-Options", "nosniff")
# Headers for HTML responses.
if response.content_type == "text/html":
- response.headers.add("Content-Security-Policy", CSP_POLICY)
+ response.headers.add("Content-Security-Policy", get_csp_policy())
response.headers.add("X-Frame-Options", "DENY")
response.headers.add("X-XSS-Protection", "1; mode=block")
|
Add reading from .uns colors for sc.pl.violin
Also throw an error when using non-categorical columns | @@ -679,6 +679,16 @@ def violin(
keys = [keys]
if groupby is not None:
obs_df = get.obs_df(adata, keys=[groupby] + keys, layer=layer, use_raw=use_raw)
+ if kwds.get('palette', None) is None:
+ if not is_categorical_dtype(adata.obs[groupby]):
+ raise ValueError(
+ f'The column `adata.obs[{groupby!r}]` needs to be categorical, '
+ f'but is of dtype {adata.obs[groupby].dtype}.'
+ )
+ _utils.add_colors_for_categorical_sample_annotation(adata, groupby)
+ kwds['palette'] = dict(
+ zip(obs_df[groupby].cat.categories, adata.uns[f'{groupby}_colors'])
+ )
else:
obs_df = get.obs_df(adata, keys=keys, layer=layer, use_raw=use_raw)
if groupby is None:
|
llvm, function/Linear: Convert to new get_params() helper method
Workaround inconsistent parameter shapes | @@ -3162,14 +3162,11 @@ class Linear(TransferFunction): # ---------------------------------------------
# self.functionOutputType = None
- def get_param_struct_type(self):
- #TODO: convert this to use get_param_initializer
- with pnlvm.LLVMBuilderContext() as ctx:
- return pnlvm._convert_python_struct_to_llvm_ir(ctx, (0,0))
-
- def get_param_initializer(self):
- return (self.get_current_function_param(SLOPE),
- self.get_current_function_param(INTERCEPT))
+ def get_params(self):
+ # WORKAROUND: get_current_function_param sometimes returns [x],
+ # sometimes x
+ return (np.atleast_1d(self.get_current_function_param(SLOPE))[0],
+ np.atleast_1d(self.get_current_function_param(INTERCEPT))[0])
def _gen_llvm_transfer(self, builder, index, ctx, vi, vo, params):
ptri = builder.gep(vi, [ctx.int32_ty(0), index])
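The `np.atleast_1d(...)[0]` idiom works because it normalizes both shapes mentioned in the comment: a bare scalar and a length-1 sequence both become a 1-D array whose first element is the scalar. A quick standalone check:

```python
import numpy as np

# get_current_function_param sometimes returns x and sometimes [x];
# atleast_1d makes both cases indexable the same way.
for value in (2.0, [2.0], np.array(2.0), np.array([2.0])):
    normalized = np.atleast_1d(value)[0]
    print(type(value).__name__, '->', normalized)   # always the scalar 2.0
```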
|
Linter
I had flake8 turned off in my dpy env -_- | @@ -36,7 +36,8 @@ class SourceConverter(commands.Converter):
return argument.lower()
raise commands.BadArgument(
- f"Unable to convert `{utils.escape_markdown(argument)}` to valid command{', tag,' if show_tag else ''} or Cog."
+ f"Unable to convert `{utils.escape_markdown(argument)}` to valid\
+ command{', tag,' if show_tag else ''} or Cog."
)
|
Format-The-Codebase
try reverting dtype change | @@ -337,6 +337,12 @@ def _convert(image, dtype, force_copy=False, uniform=False):
"""
kind = a.dtype.kind
if n > m and a.max() < 2 ** m:
+ mnew = int(np.ceil(m / 2) * 2)
+ if mnew > m:
+ dtype = "int{}".format(mnew)
+ else:
+ dtype = "uint{}".format(mnew)
+ n = int(np.ceil(n / 2) * 2)
return a.astype(_dtype_bits(kind, m))
elif n == m:
return a.copy() if copy else a
|
Fix noveltranslate cover
When loading the page, the cover `src` is a blank placeholder that then gets replaced with the cover from `data-lazy-src`. The crawler was downloading the placeholder instead of the actual cover. | @@ -42,7 +42,7 @@ class NovelTranslateCrawler(Crawler):
logger.info("Novel title: %s", self.novel_title)
self.novel_cover = self.absolute_url(
- soup.select_one(".summary_image a img")["src"]
+ soup.select_one(".summary_image a img")["data-lazy-src"]
)
logger.info("Novel cover: %s", self.novel_cover)
|
[tests] remove 7-year-old code
See | @@ -3,13 +3,6 @@ skip_tags: true
version: 6.0.{build}
environment:
- APPVEYOR_PYTHON_URL: "https://raw.githubusercontent.com/dvorapa/python-appveyor-demo/master/appveyor/"
-
- # SDK v7.0 MSVC Express 2008's SetEnv.cmd script will fail if the
- # /E:ON and /V:ON options are not enabled in the batch script interpreter
- # See: http://stackoverflow.com/a/13751649/163740
- CMD_IN_ENV: "cmd /E:ON /V:ON /C .\\appveyor\\run_with_env.cmd"
-
PYWIKIBOT_DIR: "%appdata%\\Pywikibot"
PYWIKIBOT_USER_CONFIG: "%appdata%\\Pywikibot\\user-config.py"
PYWIKIBOT_PASSWD_FILE: "%appdata%\\Pywikibot\\passwordfile"
@@ -81,8 +74,6 @@ install:
# Download the Appveyor Python build accessories into subdirectory .\appveyor
- mkdir appveyor
- ps: $wc = new-object net.webclient
- - ps: $run = $wc.DownloadString($env:APPVEYOR_PYTHON_URL + 'run_with_env.cmd')
- - ps: $run | Out-File -Encoding ascii -FilePath appveyor\run_with_env.cmd
- python -m venv env
- env\Scripts\activate.bat
|
Add a comment
Partly to re-trigger the CI because pypi failed. | @@ -59,6 +59,8 @@ class DaskExecutor(Executor):
meta_data: Dict[str, str],
process_func: Callable,
):
+ '''Create a dask future for a dask task to run the analysis.
+ '''
data_result = self.dask.submit(
run_coffea_processor,
events_url=file_url,
|
add mix to the list of audio sources for composing the preview output.
there is probably a better way to do this | @@ -16,6 +16,7 @@ class AVPreviewOutput(TCPMultiConnection):
self.source = source
self.audio_streams = Config.getAudioStreams().get_stream_source()
+ self.audio_streams.append('mix')
self.bin = "" if Args.no_bins else """
bin.(
|
Change collision detection for IntervalVehicles
They now collide with vehicles within their state interval | import copy
import numpy as np
+from highway_env import utils
from highway_env.vehicle.behavior import LinearVehicle
@@ -250,3 +251,26 @@ class IntervalVehicle(LinearVehicle):
interval_gain = -np.array([k[0], k[0]])
return interval_gain*x # Note: no flip of x, contrary to using intervals_product(k,interval_minus(x))
+ def check_collision(self, other):
+ """
+ Check for collision with another vehicle.
+
+ :param other: the other vehicle
+ """
+ if not self.COLLISIONS_ENABLED or self.crashed or other is self:
+ return
+
+ # Fast rectangular pre-check
+ if not utils.point_in_rectangle(other.position,
+ self.interval_observer.position[0]-self.LENGTH,
+ self.interval_observer.position[1]+self.LENGTH):
+ return
+
+ # Projection of other vehicle to uncertainty rectangle. This is the possible position of this vehicle which is
+ # the most likely to collide with other vehicle
+ projection = np.minimum(np.maximum(other.position, self.interval_observer.position[0]),
+ self.interval_observer.position[1])
+ # Accurate elliptic check
+ if utils.point_in_ellipse(other.position, projection, self.heading, self.LENGTH, self.WIDTH):
+ self.velocity = other.velocity = min(self.velocity, other.velocity)
+ self.crashed = other.crashed = True
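The projection step above is an axis-aligned clamp: `np.minimum(np.maximum(p, lo), hi)` returns the point of the box `[lo, hi]` closest to `p`, which is then fed to the elliptic check. A standalone numpy illustration of that clamp:

```python
import numpy as np

# Uncertainty rectangle: lower-left and upper-right corners of the position interval.
lo = np.array([10.0, 2.0])
hi = np.array([14.0, 4.0])

for p in (np.array([12.0, 3.0]),   # inside the box  -> unchanged
          np.array([20.0, 3.0]),   # right of the box -> clamped to x = 14
          np.array([8.0, 0.0])):   # below-left       -> clamped to the corner (10, 2)
    projection = np.minimum(np.maximum(p, lo), hi)
    print(p, '->', projection)
```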
|
Fix tox issues
This patch fixes issues with the openstack-tox-molecule
and openstack-tox-linters Zuul jobs. | @@ -27,6 +27,8 @@ commands =
bash -c "set -e; for config in $(ls conf/); do \
echo conf/$config; pykwalify -d conf/$config -s browbeat/schema/browbeat.yml; done"
{[testenv:dist]commands}
+allowlist_externals =
+ bash
[testenv:dist]
basepython = python3
@@ -80,6 +82,8 @@ commands =
[testenv:molecule]
commands = {toxinidir}/ci-scripts/molecule/test-molecule.sh
+allowlist_externals =
+ {toxinidir}/ci-scripts/molecule/test-molecule.sh
[flake8]
# E123, E125 skipped as they are invalid PEP-8.
|
(fix) add test for conversion rate mode not set
No orders should be placed if the conversion rate mode is not set (or is
the default rate oracle and a conversion rate is not available).
Meet >80% coverage, before: 74.68%, after: 92% | @@ -614,6 +614,37 @@ class HedgedMarketMakingUnitTest(unittest.TestCase):
self.assertAlmostEqual(Decimal("1.0104"), taker_fill2.price)
self.assertAlmostEqual(Decimal("3.0"), taker_fill2.amount)
+ def test_with_conversion_rate_mode_not_set(self):
+ self.clock.remove_iterator(self.strategy)
+ self.market_pair: MakerTakerMarketPair = MakerTakerMarketPair(
+ MarketTradingPairTuple(self.maker_market, *["COINALPHA-QETH", "COINALPHA", "QETH"]),
+ MarketTradingPairTuple(self.taker_market, *self.trading_pairs_taker),
+ )
+ self.maker_market.set_balanced_order_book("COINALPHA-QETH", 1.05, 0.55, 1.55, 0.01, 10)
+
+ config_map_with_conversion_rate_mode_not_set = ClientConfigAdapter(
+ CrossExchangeMarketMakingConfigMap(
+ maker_market=self.exchange_name_maker,
+ taker_market=self.exchange_name_taker,
+ maker_market_trading_pair=self.trading_pairs_maker[0],
+ taker_market_trading_pair=self.trading_pairs_taker[0],
+ min_profitability=Decimal("1"),
+ order_amount = Decimal("1"),
+ )
+ )
+
+ self.strategy: CrossExchangeMarketMakingStrategy = CrossExchangeMarketMakingStrategy()
+ self.strategy.init_params(
+ config_map=config_map_with_conversion_rate_mode_not_set,
+ market_pairs=[self.market_pair],
+ logging_options=self.logging_options,
+ )
+ self.clock.add_iterator(self.strategy)
+ self.clock.backtest_til(self.start_timestamp + 5)
+ self.ev_loop.run_until_complete(asyncio.sleep(0.5))
+ self.assertEqual(0, len(self.strategy.active_maker_bids))
+ self.assertEqual(0, len(self.strategy.active_maker_asks))
+
def test_with_conversion(self):
self.clock.remove_iterator(self.strategy)
self.market_pair: MakerTakerMarketPair = MakerTakerMarketPair(
|
DOC: Fix type of `codes` in docstring of `_vq._vq()`
The codes array that is written out contains entries of type `int32_t`, not `vq_type`---changed the docstring to reflect that definition. | @@ -75,7 +75,7 @@ cdef int _vq(vq_type *obs, vq_type *code_book,
The number of features of each observation.
nobs : int
The number of observations.
- codes : vq_type*
+ codes : int32_t*
The pointer to the new codes array.
low_dist : vq_type*
low_dist[i] is the Euclidean distance from obs[i] to the corresponding
|
Update to Readme
Additional clarification for what DVC aims to do. | @@ -46,7 +46,7 @@ learning projects. Key features:
#. it helps manage experiments with Git tags or branches and **metrics** tracking;
-It aims to replace tools like Excel and Docs that are being commonly used as a knowledge repo and
+**DVC** aims to replace tools like Excel and Google Docs that are being commonly used as a knowledge repo and
a ledger for the team, ad-hoc scripts to track and move deploy different model versions, ad-hoc
data file suffixes and prefixes.
|
fix deterministicpol
if self.noise is None, the previous version doesn't work | @@ -44,7 +44,10 @@ class DeterministicPol(BasePol):
def forward(self, obs):
mean = self.net(obs)
+ if self.noise is not None:
action_noise = self.noise()
+ else:
+ action_noise = self.noise
apply_noise = self.apply_noise
ac = mean
if action_noise is not None and apply_noise:
|
Various fixes for ISO
Fix | @@ -319,6 +319,7 @@ class MD_DataIdentification(object):
self.distance = []
self.uom = []
self.resourcelanguage = []
+ self.resourcelanguagecode = []
self.creator = []
self.publisher = []
self.contributor = []
@@ -349,7 +350,8 @@ class MD_DataIdentification(object):
self.aggregationinfo = util.testXMLValue(val)
self.uricode = []
- for i in md.findall(util.nspath_eval('gmd:citation/gmd:CI_Citation/gmd:identifier/gmd:RS_Identifier/gmd:code/gco:CharacterString', namespaces)):
+ for i in md.findall(util.nspath_eval('gmd:citation/gmd:CI_Citation/gmd:identifier/gmd:RS_Identifier/gmd:code/gco:CharacterString', namespaces)) + \
+ md.findall(util.nspath_eval('gmd:citation/gmd:CI_Citation/gmd:identifier/gmd:MD_Identifier/gmd:code/gco:CharacterString', namespaces)):
val = util.testXMLValue(i)
if val is not None:
self.uricode.append(val)
@@ -405,7 +407,7 @@ class MD_DataIdentification(object):
self.securityconstraints = []
for i in md.findall(util.nspath_eval('gmd:resourceConstraints/gmd:MD_SecurityConstraints/gmd:classification/gmd:MD_ClassificationCode', namespaces)):
- val = util.testXMLValue(i)
+ val = _testCodeListValue(i)
if val is not None:
self.securityconstraints.append(val)
@@ -429,9 +431,15 @@ class MD_DataIdentification(object):
self.distance.append(val)
self.uom.append(i.get("uom"))
- self.resourcelanguage = []
+ self.resourcelanguagecode = []
for i in md.findall(util.nspath_eval('gmd:language/gmd:LanguageCode', namespaces)):
val = _testCodeListValue(i)
+ if val is not None:
+ self.resourcelanguagecode.append(val)
+
+ self.resourcelanguage = []
+ for i in md.findall(util.nspath_eval('gmd:language/gco:CharacterString', namespaces)):
+ val = util.testXMLValue(i)
if val is not None:
self.resourcelanguage.append(val)
@@ -458,6 +466,7 @@ class MD_DataIdentification(object):
val = md.find(util.nspath_eval('gmd:abstract/gmx:Anchor', namespaces))
+ self.abstract_url = None
if val is not None:
self.abstract = util.testXMLValue(val)
self.abstract_url = val.attrib.get(util.nspath_eval('xlink:href', namespaces))
@@ -500,7 +509,7 @@ class MD_DataIdentification(object):
mdkw['thesaurus']['date'] = util.testXMLValue(val)
val = i.find(util.nspath_eval('gmd:MD_Keywords/gmd:thesaurusName/gmd:CI_Citation/gmd:date/gmd:CI_Date/gmd:dateType/gmd:CI_DateTypeCode', namespaces))
- mdkw['thesaurus']['datetype'] = util.testXMLValue(val)
+ mdkw['thesaurus']['datetype'] = util.testXMLAttribute(val, 'codeListValue')
mdkw['keywords'] = []
|
[dagit] Delete commented code in AssetView
Test Plan: Buildkite
Reviewers: prha | @@ -11,20 +11,6 @@ export const AssetView: React.FC<{assetKey: AssetKey}> = ({assetKey}) => {
const assetPath = assetKey.path.join(' \u203A ');
useDocumentTitle(`Asset: ${assetPath}`);
- // return (
- // <Loading queryResult={queryResult}>
- // {({assetOrError}) => {
- // if (assetOrError.__typename !== 'Asset') {
- // return null;
- // }
- // if (!assetOrError.assetMaterializations.length) {
- // return <p>This asset has never been materialized.</p>;
- // }
- // return <AssetViewWithData asset={assetOrError} />;
- // }}
- // </Loading>
- // );
-
return (
<Group spacing={24} direction="column">
<AssetDetails assetKey={assetKey} />
|
Move argparse parsing of CLI args back to cmdloop() from __init__()
This is so unit tests pass | @@ -472,24 +472,11 @@ class Cmd(cmd.Cmd):
self._startup_commands.append("load '{}'".format(startup_script))
# Transcript files to run instead of interactive command loop
- self._transcript_files = None
-
- # Check for command line args
- if allow_cli_args:
- parser = argparse.ArgumentParser()
- parser.add_argument('-t', '--test', action="store_true",
- help='Test against transcript(s) in FILE (wildcards OK)')
- callopts, callargs = parser.parse_known_args()
-
- # If transcript testing was called for, use other arguments as transcript files
- if callopts.test:
- self._transcript_files = callargs
- # If commands were supplied at invocation, then add them to the command queue
- elif callargs:
- self._startup_commands.extend(callargs)
- elif transcript_files:
self._transcript_files = transcript_files
+ # Should commands at invocation and -t/--test transcript test running be allowed
+ self._allow_cli_args = allow_cli_args
+
# The default key for sorting tab completion matches. This only applies when the matches are not
# already marked as sorted by setting self.matches_sorted to True. Its default value performs a
# case-insensitive alphabetical sort. If natural sorting preferred, then set this to NATURAL_SORT_KEY.
@@ -4026,6 +4013,20 @@ class Cmd(cmd.Cmd):
original_sigint_handler = signal.getsignal(signal.SIGINT)
signal.signal(signal.SIGINT, self.sigint_handler)
+ # Check for command line args
+ if self._allow_cli_args:
+ parser = argparse.ArgumentParser()
+ parser.add_argument('-t', '--test', action="store_true",
+ help='Test against transcript(s) in FILE (wildcards OK)')
+ callopts, callargs = parser.parse_known_args()
+
+ # If transcript testing was called for, use other arguments as transcript files
+ if callopts.test:
+ self._transcript_files = callargs
+ # If commands were supplied at invocation, then add them to the command queue
+ elif callargs:
+ self._startup_commands.extend(callargs)
+
# Grab terminal lock before the prompt has been drawn by readline
self.terminal_lock.acquire()
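`parse_known_args()` is what lets this coexist with the application's own arguments: it consumes `-t/--test` when present and hands back everything it does not recognize, which then becomes either the transcript list or the startup command queue. A tiny standalone demonstration:

```python
import argparse

parser = argparse.ArgumentParser()
parser.add_argument('-t', '--test', action="store_true",
                    help='Test against transcript(s) in FILE (wildcards OK)')

# Unrecognized arguments are returned instead of raising an error.
opts, leftover = parser.parse_known_args(['-t', 'transcript1.txt', 'transcript2.txt'])
print(opts.test, leftover)   # True ['transcript1.txt', 'transcript2.txt']

opts, leftover = parser.parse_known_args(['help', 'history'])
print(opts.test, leftover)   # False ['help', 'history']
```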
|
Update lammps_check.py
Maintainers list for LAMMPS checks | @@ -41,7 +41,7 @@ class LAMMPSBaseCheck(rfm.RunOnlyRegressionTest):
}
self.tags = {'scs', 'external-resources'}
- self.maintainers = ['TR', 'VH']
+ self.maintainers = ['VH']
@rfm.parameterized_test(*([s, v]
|
[ENH] Fix incorrect `update_predict` arg default and docstring on `cv` arg
Fixes `update_predict` default `cv` param and docstring. `None` was raising an exception, so it should not default to it. No deprecation necessary due to erroneous default and docstring. | @@ -767,7 +767,7 @@ class BaseForecaster(BaseEstimator):
def update_predict(
self,
y,
- cv=None,
+ cv,
X=None,
update_params=True,
reset_forecaster=True,
@@ -812,7 +812,8 @@ class BaseForecaster(BaseEstimator):
For further details:
on usage, see forecasting tutorial examples/01_forecasting.ipynb
on specification of formats, examples/AA_datatypes_and_datasets.ipynb
- cv : temporal cross-validation generator, optional (default=None)
+ cv : temporal cross-validation generator, e.g. a splitter like
+ SlidingWindowSplitter or ExpandingWindowSplitter
X : time series in sktime compatible format, optional (default=None)
Exogeneous time series for updating and forecasting
Should be of same scitype (Series, Panel, or Hierarchical) as y
|
When in staging, compute measures in dedicated dataset
Overwriting production measures data isn't a good idea as we sometimes
use these in analyses. | @@ -100,6 +100,9 @@ LOGGING = {
}
}
+# BigQuery project name
+BQ_MEASURES_DATASET = 'staging_{}'.format(BQ_MEASURES_DATASET)
+
# For grabbing images that we insert into alert emails
GRAB_HOST = "http://staging.openprescribing.net"
|
[MoreUtils] [p]color now uses another site in embeds link
Fixed embed permissions checks | @@ -219,7 +219,7 @@ class MoreUtils:
if len(allowed_roles) > 0:
em.add_field(name="Roles", value="\n".join([str(x) for x in allowed_roles]))
em.set_image(url=emoji.url)
- if ctx.message.channel.permissions_for(ctx.message.author).embed_links:
+ if ctx.message.channel.permissions_for(ctx.message.server.me).embed_links:
await self.bot.say(embed=em)
else:
await self.bot.say("```\n" +
@@ -252,11 +252,11 @@ class MoreUtils:
description="Provided HEX: " + color + "\nRGB: " + str(colorrgb) + "\nCMYK: " + str(
colorcmyk) + "\nHSV: " + str(colorhsv) + "\nHLS: " + str(colorhls) + "\nYIQ: " + str(
coloryiq) + "\nint: " + str(colorint),
- url='http://www.colorpicker.com/' + str(color.lstrip('#')), colour=colorint,
+ url='http://www.color-hex.com/color/' + str(color.lstrip('#')), colour=colorint,
timestamp=ctx.message.timestamp)
em.set_thumbnail(url="https://xenforo.com/rgba.php?r={}&g={}&b={}&a=255"
.format(colorrgb[0], colorrgb[1], colorrgb[2]))
- if ctx.message.channel.permissions_for(ctx.message.author).embed_links:
+ if ctx.message.channel.permissions_for(ctx.message.server.me).embed_links:
await self.bot.say(embed=em)
else:
await self.bot.say("```\n" +
|
publish soft_delete_{forms,cases} to kafka
otherwise they remain effectively deleted in ES | @@ -453,6 +453,8 @@ class FormAccessorSQL(AbstractFormAccessor):
@staticmethod
def soft_undelete_forms(domain, form_ids):
+ from corehq.form_processor.change_publishers import publish_form_saved
+
assert isinstance(form_ids, list)
problem = 'Restored on {}'.format(datetime.utcnow())
with get_cursor(XFormInstanceSQL) as cursor:
@@ -461,7 +463,14 @@ class FormAccessorSQL(AbstractFormAccessor):
[domain, form_ids, problem]
)
results = fetchall_as_namedtuple(cursor)
- return sum([result.affected_count for result in results])
+ return_value = sum([result.affected_count for result in results])
+
+ for form_ids_chunk in chunked(form_ids, 500):
+ forms = FormAccessorSQL.get_forms(form_ids_chunk)
+ for form in forms:
+ publish_form_saved(form)
+
+ return return_value
@staticmethod
def soft_delete_forms(domain, form_ids, deletion_date=None, deletion_id=None):
@@ -971,6 +980,8 @@ class CaseAccessorSQL(AbstractCaseAccessor):
@staticmethod
def soft_undelete_cases(domain, case_ids):
+ from corehq.form_processor.change_publishers import publish_case_saved
+
assert isinstance(case_ids, list)
with get_cursor(CommCareCaseSQL) as cursor:
@@ -979,7 +990,14 @@ class CaseAccessorSQL(AbstractCaseAccessor):
[domain, case_ids]
)
results = fetchall_as_namedtuple(cursor)
- return sum([result.affected_count for result in results])
+ return_value = sum([result.affected_count for result in results])
+
+ for case_ids_chunk in chunked(case_ids, 500):
+ cases = CaseAccessorSQL.get_cases(case_ids_chunk)
+ for case in cases:
+ publish_case_saved(case)
+
+ return return_value
@staticmethod
def get_deleted_case_ids_by_owner(domain, owner_id):
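Publishing in chunks of 500 keeps memory bounded instead of loading every restored form or case at once. A generic sketch of the chunking pattern (the project's actual `chunked` helper may differ in detail; the ids are made up):

```python
from itertools import islice

def chunked(iterable, size):
    """Yield successive lists of at most `size` items from `iterable`."""
    iterator = iter(iterable)
    while True:
        chunk = list(islice(iterator, size))
        if not chunk:
            return
        yield chunk

form_ids = ['form-{}'.format(i) for i in range(1203)]   # hypothetical ids
for batch in chunked(form_ids, 500):
    print(len(batch))   # 500, 500, 203 -- each batch is fetched and re-published together
```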
|
fw/instrument: Add log signal maps
Add method name mappings for ERROR_LOGGED and WARNING_LOGGED signals. | @@ -135,6 +135,9 @@ SIGNAL_MAP = OrderedDict([
('on_successful_job', signal.SUCCESSFUL_JOB),
('after_job', signal.AFTER_JOB),
+ ('on_error', signal.ERROR_LOGGED),
+ ('on_warning', signal.WARNING_LOGGED),
+
# ('on_run_start', signal.RUN_START),
# ('on_run_end', signal.RUN_END),
# ('on_workload_spec_start', signal.WORKLOAD_SPEC_START),
@@ -172,9 +175,6 @@ SIGNAL_MAP = OrderedDict([
# ('before_overall_results_processing', signal.BEFORE_OVERALL_RESULTS_PROCESSING),
# ('on_successful_overall_results_processing', signal.SUCCESSFUL_OVERALL_RESULTS_PROCESSING),
# ('after_overall_results_processing', signal.AFTER_OVERALL_RESULTS_PROCESSING),
-
- # ('on_error', signal.ERROR_LOGGED),
- # ('on_warning', signal.WARNING_LOGGED),
])
|
[modules] dunst: try to handle errors gracefully
Try to handle dunst pause/unpause errors "gracefully" (ignore them).
fixes | @@ -14,7 +14,10 @@ class Module(bumblebee.engine.Module):
)
self._paused = False
# Make sure that dunst is currently not paused
+ try:
bumblebee.util.execute("killall -SIGUSR2 dunst")
+ except:
+ pass
engine.input.register_callback(self, button=bumblebee.input.LEFT_MOUSE,
cmd=self.toggle_status
)
@@ -22,10 +25,13 @@ class Module(bumblebee.engine.Module):
def toggle_status(self, event):
self._paused = not self._paused
+ try:
if self._paused:
bumblebee.util.execute("killall -SIGUSR1 dunst")
else:
bumblebee.util.execute("killall -SIGUSR2 dunst")
+ except:
+ self._paused = not self._paused # toggling failed
def state(self, widget):
if self._paused:
|
User Interface: Corrected implied options description for standalone
mode. | @@ -69,9 +69,9 @@ parser.add_option(
help = """\
Enable standalone mode in build. This allows you to transfer the created binary
to other machines without it relying on an existing Python installation. It
-implies these options: "--recurse-all --recurse-stdlib". You may also want
-to use "--python-flag=no_site" to avoid the "site.py" module. Defaults to
-off.""",
+implies this option: "--recurse-all". You may also want to use
+"--python-flag=no_site" to avoid the "site.py" module, which can save a lot
+of code dependencies. Defaults to off.""",
)
parser.add_option(
|
Reduce levels of shells from 2 to 1
From cmd.exe -> rez_shell.bat -> you
To cmd.exe -> you | @@ -188,22 +188,6 @@ class CMD(Shell):
else:
_record_shell(executor, files=startup_sequence["files"], print_msg=(not quiet))
- if shell_command:
- # Launch the provided command in the configured shell and wait
- # until it exits.
- executor.command(shell_command)
-
- # Test for None specifically because resolved_context.execute_rex_code
- # passes '' and we do NOT want to keep a shell open during a rex code
- # exec operation.
- elif shell_command is None:
- # Launch the configured shell itself and wait for user interaction
- # to exit.
- executor.command('cmd /Q /K')
-
- # Exit the configured shell.
- executor.command('exit %errorlevel%')
-
code = executor.get_output()
target_file = os.path.join(tmpdir, "rez-shell.%s"
% self.file_extension())
@@ -227,6 +211,10 @@ class CMD(Shell):
cmd_flags = ['/Q', '/K']
cmd = cmd + [self.executable] + cmd_flags + ['call {}'.format(target_file)]
+
+ if shell_command:
+ cmd += ["& " + shell_command]
+
is_detached = (cmd[0] == 'START')
p = popen(cmd, env=env, shell=is_detached, **Popen_args)
|
ENH: integrate: ensure points are unique in quad
When invoking QUADPACK's QAGPE with `quad(f, a, b, points=(c,d,e))`
make sure that a,b,c,d,e are unique to prevent integrating
zero-width intervals and invoking `f()` at bad points. | @@ -445,9 +445,11 @@ def _quad(func,a,b,args,full_output,epsabs,epsrel,limit,points):
if infbounds != 0:
raise ValueError("Infinity inputs cannot be used with break points.")
else:
- nl = len(points)
- the_points = numpy.zeros((nl+2,), float)
- the_points[:nl] = points
+ # Duplicates force function evaluation at singular points
+ the_points = numpy.unique(points)
+ the_points = the_points[a < the_points]
+ the_points = the_points[the_points < b]
+ the_points = numpy.concatenate((the_points, (0., 0.)))
return _quadpack._qagpe(func,a,b,the_points,args,full_output,epsabs,epsrel,limit)
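For reference, `points` flags known breakpoints or singularities of the integrand on a finite interval; with this change, points that duplicate each other or coincide with the integration limits are dropped instead of creating zero-width subintervals. A small usage sketch:

```python
from scipy.integrate import quad

# |x - 1| has a kink at x = 1; telling QUADPACK about it helps convergence.
f = lambda x: abs(x - 1.0)

value, err = quad(f, 0.0, 2.0, points=[1.0])
print(value)   # ~1.0, the exact integral of |x - 1| over [0, 2]

# With the filtering above, repeated breakpoints or the endpoints themselves are harmless:
value, err = quad(f, 0.0, 2.0, points=[0.0, 1.0, 1.0, 2.0])
print(value)   # still ~1.0
```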
|
Use user key for genesis and proposal devmode
This kubernetes devmode file previously used different keys for genesis
and proposal. | @@ -65,7 +65,7 @@ items:
&& sawtooth keygen my_key \
&& sawset genesis -k /root/.sawtooth/keys/my_key.priv \
&& sawset proposal create \
- -k /etc/sawtooth/keys/validator.priv \
+ -k /root/.sawtooth/keys/my_key.priv \
sawtooth.consensus.algorithm.name=Devmode \
sawtooth.consensus.algorithm.version=0.1 \
-o config.batch \
|
Exclude another presentation where `quantity` is unreliable
Fixes | @@ -57,6 +57,10 @@ ON
rx.month = dt.date
AND rx.bnf_code = dt.bnf_code
WHERE
- -- lantanaprost quantities are broken in data
- rx.bnf_code <> '1106000L0AAAAAA'
+-- These can be prescribed fractionally, but BSA round quantity down,
+-- making quantity unreliable. See #1764
+ rx.bnf_code <> '1106000L0AAAAAA' -- latanoprost
+AND
+ rx.bnf_code <> '1308010Z0AAABAB' -- Ingenol Mebutate_Gel
+
-- trivial savings / costs are discounted in the measure definition
|
AC: fix add_extension()
Ticket 94472 | @@ -362,7 +362,7 @@ class OpenVINOLauncher(Launcher):
if cpu_extensions:
selection_mode = self.config.get('_cpu_extensions_mode')
cpu_extensions = get_cpu_extension(cpu_extensions, selection_mode)
- self.ie_core.add_extension(str(cpu_extensions), 'CPU')
+ self.ie_core.add_extension(str(cpu_extensions))
ov_set_config(
self.ie_core, {'CPU_BIND_THREAD': 'YES' if not self._is_multi() else 'NO'}, device='CPU')
gpu_extensions = self.config.get('gpu_extensions')
|
[instancer] Fix bug in _instantiateFeatureVariations()
Fixes | @@ -805,12 +805,12 @@ def _instantiateFeatureVariationRecord(
return applies, shouldKeep
-def _limitFeatureVariationRecord(record, axisRanges, fvarAxes):
+def _limitFeatureVariationRecord(record, axisRanges, axisOrder):
newConditions = []
for i, condition in enumerate(record.ConditionSet.ConditionTable):
if condition.Format == 1:
axisIdx = condition.AxisIndex
- axisTag = fvarAxes[axisIdx].axisTag
+ axisTag = axisOrder[axisIdx]
if axisTag in axisRanges:
axisRange = axisRanges[axisTag]
newRange = _limitFeatureVariationConditionRange(condition, axisRange)
@@ -854,7 +854,7 @@ def _instantiateFeatureVariations(table, fvarAxes, axisLimits):
record, i, location, fvarAxes, axisIndexMap
)
if shouldKeep:
- shouldKeep = _limitFeatureVariationRecord(record, axisRanges, fvarAxes)
+ shouldKeep = _limitFeatureVariationRecord(record, axisRanges, axisOrder)
if shouldKeep and _featureVariationRecordIsUnique(record, uniqueRecords):
newRecords.append(record)
|
Make sure the output ufo has a kerningGroupConversionRenameMap, otherwise UFOs generated in version 2 will have invalid kerning group names.
Add a description and example of the instance name localisation. | @@ -256,6 +256,7 @@ class DesignSpaceProcessor(DesignSpaceDocument):
for sourceDescriptor in self.sources:
loc = Location(sourceDescriptor.location)
sourceFont = self.fonts[sourceDescriptor.name]
+ # this makes assumptions about the groups of all sources being the same.
kerningItems.append((loc, self.mathKerningClass(sourceFont.kerning, sourceFont.groups)))
bias, self._kerningMutator = buildMutator(kerningItems, axes=self._preppedAxes, bias=self.defaultLoc)
return self._kerningMutator
@@ -283,6 +284,7 @@ class DesignSpaceProcessor(DesignSpaceDocument):
for sourceDescriptor in self.sources:
if not sourceDescriptor.name in self.fonts:
self.fonts[sourceDescriptor.name] = self._instantiateFont(sourceDescriptor.path)
+ self.problems.append("loaded master from %s, format %d"%(sourceDescriptor.path, getUFOVersion(sourceDescriptor.path)))
names = names | set(self.fonts[sourceDescriptor.name].keys())
self.glyphNames = list(names)
@@ -292,6 +294,13 @@ class DesignSpaceProcessor(DesignSpaceDocument):
self._preppedAxes = self._prepAxesForBender()
# make fonty things here
loc = Location(instanceDescriptor.location)
+ # groups,
+ if hasattr(self.fonts[self.default.name], "kerningGroupConversionRenameMaps"):
+ renameMap = self.fonts[self.default.name].kerningGroupConversionRenameMaps
+ self.problems.append("renameMap %s"%renameMap)
+ else:
+ renameMap = {}
+ font.kerningGroupConversionRenameMaps = renameMap
# make the kerning
if instanceDescriptor.kerning:
try:
@@ -339,6 +348,9 @@ class DesignSpaceProcessor(DesignSpaceDocument):
selectedGlyphNames = glyphNames
else:
selectedGlyphNames = self.glyphNames
+ # add the glyphnames to the font.lib['public.glyphOrder']
+ if not 'public.glyphOrder' in font.lib.keys():
+ font.lib['public.glyphOrder'] = selectedGlyphNames
for glyphName in selectedGlyphNames:
try:
glyphMutator = self.getGlyphMutator(glyphName)
|
docs: add note for public network
* Change for public network
According to
* Revert "Change for public network"
This reverts commit
* Mentioning the difference of the example and the title with its reasoning | @@ -35,3 +35,9 @@ account of your own, here's an example of how to do so:
.. literalinclude:: ../../examples/create_account.py
:language: python
:linenos:
+
+
+Note: To avoid risks, TESTNET is used in the example above. In order to use the
+Stellar Live Network you will have to change the network passphrase to
+Network.PUBLIC_NETWORK_PASSPHRASE and the server URL to point to
+https://horizon.stellar.org too.
|
Correct lifecycle shutdown procedure.
The set of available services changes while services are being shut down. | @@ -1838,7 +1838,7 @@ class Kernel:
)
for domain, services in self.services_available():
- for service in services:
+ for service in list(services):
self.set_service_lifecycle(service, LIFECYCLE_SHUTDOWN)
def shutdown(self):
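Shutting a service down removes it from the available-services collection, so iterating over that collection directly mutates it mid-loop; `list(...)` snapshots it first. A generic illustration of the failure mode (not the kernel's actual data structures):

```python
# Mutating a collection while iterating over it raises at the next step;
# iterate over a list() copy instead.
services = {'camera': object(), 'spooler': object(), 'gui': object()}

try:
    for name in services:
        services.pop(name)      # simulates a service disappearing on shutdown
except RuntimeError as exc:
    print('direct iteration failed:', exc)

services = {'camera': object(), 'spooler': object(), 'gui': object()}
for name in list(services):     # frozen snapshot, safe to mutate the original
    services.pop(name)
print('remaining:', services)   # {}
```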
|
STY: contains variable names
Changed the __contains__ variable names to be descriptive. | @@ -544,13 +544,12 @@ class Meta(object):
"{}; ".format(key.__repr__()),
"expected tuple, list, or str"]))
- # QUESTION: DOES THIS NEED TO CHANGE???
- def __contains__(self, other):
+ def __contains__(self, data_var):
"""case insensitive check for variable name
Parameters
----------
- other : str
+ data_var : str
Variable name to check if present within the Meta object.
Returns
@@ -562,11 +561,11 @@ class Meta(object):
"""
does_contain = False
- if other.lower() in [i.lower() for i in self.keys()]:
+ if data_var.lower() in [ikey.lower() for ikey in self.keys()]:
does_contain = True
if not does_contain:
- if other.lower() in [i.lower() for i in self.keys_nD()]:
+ if data_var.lower() in [ikey.lower() for ikey in self.keys_nD()]:
does_contain = True
return does_contain
|
Fix typos in kubernetes db migration guide
Summary: Resolves
Test Plan: eyes
Reviewers: #docs, catherinewu, sashank | @@ -34,9 +34,9 @@ export DAEMON_DEPLOYMENT_NAME=`kubectl get deploy \
--selector=component=dagster-daemon -o jsonpath="{.items[0].metadata.name}"`
# Save each deployment's replica count to scale back up after migrating
-export DAGIT_DEPLOYMENT_REPLICA_COUNT=`k get deploy \
+export DAGIT_DEPLOYMENT_REPLICA_COUNT=`kubectl get deploy \
--selector=component=dagit -o jsonpath="{.items[0].status.replicas}"`
-export DAEMON_DEPLOYMENT_REPLICA_COUNT=`k get deploy \
+export DAEMON_DEPLOYMENT_REPLICA_COUNT=`kubectl get deploy \
--selector=component=dagster-daemon -o jsonpath="{.items[0].status.replicas}"`
# Scale down the Deployments
@@ -52,7 +52,7 @@ kubectl scale deploy $DAEMON_DEPLOYMENT_NAME --replicas=0
export HELM_DAGSTER_RELEASE_NAME=<DAGSTER_RELEASE_NAME>
helm template $HELM_DAGSTER_RELEASE_NAME dagster/dagster \
- --set "migrate.enabled=true"
+ --set "migrate.enabled=true" \
--show-only templates/job-instance-migrate.yaml \
--values values.yaml \
| kubectl apply -f -
|
Remove `invest_status`
Since the new parameter `invest_non_convex` exists in the `NonConvexInvestFlow`, there is no need for `invest_status`. Moreover, usage of the `NonConvexInvestFlow` class indicates that `invest` always exists. Therefore, `invest_status` can be removed, and removing it decreases the computation time. | @@ -523,9 +523,6 @@ class NonConvexInvestFlowBlock(SimpleBlock):
bounds=_investvar_bound_rule,
)
- # create status variable for the nonconvex investment flow
- self.invest_status = Var(self.NON_CONVEX_INVEST_FLOWS, within=Binary)
-
# New nonconvex-investment-related variable defined in the
# <class 'oemof.solph.flows.NonConvexInvestFlow'> class.
@@ -706,8 +703,7 @@ class NonConvexInvestFlowBlock(SimpleBlock):
def _min_invest_rule(block, i, o):
"""Rule definition for applying a minimum investment"""
expr = (
- m.flows[i, o].investment.minimum * self.invest_status[i, o]
- <= self.invest[i, o]
+ m.flows[i, o].investment.minimum <= self.invest[i, o]
)
return expr
@@ -718,7 +714,7 @@ class NonConvexInvestFlowBlock(SimpleBlock):
def _max_invest_rule(block, i, o):
"""Rule definition for applying a minimum investment"""
expr = self.invest[i, o] <= (
- m.flows[i, o].investment.maximum * self.invest_status[i, o]
+ m.flows[i, o].investment.maximum
)
return expr
@@ -869,7 +865,7 @@ class NonConvexInvestFlowBlock(SimpleBlock):
for i, o in self.NON_CONVEX_INVEST_FLOWS:
investment_costs += (
self.invest[i, o] * m.flows[i, o].investment.ep_costs
- + self.invest_status[i, o] * m.flows[i, o].investment.offset
+ + m.flows[i, o].investment.offset
)
self.investment_costs = Expression(expr=investment_costs)
|
Update requirementslib with retry for failed wheels
Fixes | @@ -1102,8 +1102,12 @@ build-backend = "{1}"
def build(self):
# type: () -> "SetupInfo"
dist_path = None
+ metadata = None
try:
dist_path = self.build_wheel()
+ metadata = self.get_metadata_from_wheel(
+ os.path.join(self.extra_kwargs["build_dir"], dist_path)
+ )
except Exception:
try:
dist_path = self.build_sdist()
@@ -1112,10 +1116,6 @@ build-backend = "{1}"
self.populate_metadata(metadata)
except Exception:
pass
- else:
- metadata = self.get_metadata_from_wheel(
- os.path.join(self.extra_kwargs["build_dir"], dist_path)
- )
if metadata:
self.populate_metadata(metadata)
if not self.metadata or not self.name:
|
reduce input shapes for matmul
Summary:
Pull Request resolved:
as title
Test Plan:
```
buck run //caffe2/benchmarks/operator_benchmark/pt:matmul_test -- --iteration 1
``` | @@ -23,9 +23,9 @@ mm_short_configs = op_bench.config_list(
mm_long_configs = op_bench.cross_product_configs(
- M=[64, 128, 256],
- N=range(2, 10, 3),
- K=[128, 512, 1024],
+ M=[64, 128],
+ N=[64, 128],
+ K=[512],
trans_a=[True, False],
trans_b=[True, False],
device=['cpu', 'cuda'],
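The motivation is combinatorial: a cross-product config benchmarks every combination of the listed values, so trimming each axis shrinks the run multiplicatively. A quick count with `itertools.product` (assuming the cross-product semantics shown above):

```python
from itertools import product

before = dict(M=[64, 128, 256], N=list(range(2, 10, 3)), K=[128, 512, 1024],
              trans_a=[True, False], trans_b=[True, False], device=['cpu', 'cuda'])
after = dict(M=[64, 128], N=[64, 128], K=[512],
             trans_a=[True, False], trans_b=[True, False], device=['cpu', 'cuda'])

print(len(list(product(*before.values()))))   # 3*3*3*2*2*2 = 216 benchmark cases
print(len(list(product(*after.values()))))    # 2*2*1*2*2*2 =  32 benchmark cases
```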
|
symbiotic-build.sh: support newer klee
Klee merged clang's extra flags as LLVMCC.ExtraFlags not
EXTRA_LLVMCC.Flags. Fix that. | @@ -477,9 +477,14 @@ if [ $FROM -le 4 ]; then
|| exitmsg "Failed building klee 32-bit runtime library"
rm -f Release+Asserts/lib/kleeRuntimeIntrinsic.bc*
rm -f Release+Asserts/lib/klee-libc.bc*
- make -C runtime/Intrinsic -f Makefile.cmake.bitcode EXTRA_LLVMCC.Flags=-m32 \
+ # EXTRA_LLVMCC.Flags is obsolete and to be removed soon
+ make -C runtime/Intrinsic -f Makefile.cmake.bitcode \
+ LLVMCC.ExtraFlags=-m32 \
+ EXTRA_LLVMCC.Flags=-m32 \
|| exitmsg "Failed building 32-bit klee runtime library"
- make -C runtime/klee-libc -f Makefile.cmake.bitcode EXTRA_LLVMCC.Flags=-m32 \
+ make -C runtime/klee-libc -f Makefile.cmake.bitcode \
+ LLVMCC.ExtraFlags=-m32 \
+ EXTRA_LLVMCC.Flags=-m32 \
|| exitmsg "Failed building 32-bit klee runtime library"
# copy 32-bit library version to prefix
|
Fix field names in TT system stats report
HG--
branch : feature/microservices | @@ -83,9 +83,9 @@ class ReportTTSystemStatApplication(SimpleReport):
ts_to_date = time.mktime(to_date.timetuple())
# Manged Object block
- q1 = """select server, service, count(), round(quantile(0.75)(duration), 0)/1000 as q1,
+ q1 = """select server, service, count(), round(quantile(0.25)(duration), 0)/1000 as q1,
round(quantile(0.5)(duration), 0)/1000 as q2,
- round(quantile(0.25)(duration), 0)/1000 as q3,
+ round(quantile(0.75)(duration), 0)/1000 as q3,
round(quantile(0.95)(duration),0)/1000 as p95 from span where %s
group by server, service"""
|
Update description of valid whitelist for non-admin user
Non-admin users can filter instances by instance-uuid and other
filter keys when configured using the "os_compute_api:
servers:allow_all_filters" policy rule. The policy rule was
added with
Closes-Bug:1819425 | @@ -154,7 +154,9 @@ There is whitelist for valid filter keys. Any filter key other than from
whitelist will be silently ignored.
- For non-admin users, whitelist is different from admin users whitelist.
- Valid whitelist for non-admin users includes
+ The valid whitelist can be configured using the
+ ``os_compute_api:servers:allow_all_filters`` policy rule. By default,
+ the valid whitelist for non-admin users includes
- ``all_tenants``
- ``changes-since``
|
update api-keys
update link | @@ -4,9 +4,9 @@ In order to trade on a centralized exchange and IDEX, you will need to import yo
Please see below for instructions to find your API keys for the exchanges that Hummingbot currently supports:
-* [Binance](/connectors/binance)
+* [Binance](/connectors/binance/#creating-binance-api-keys)
-* [Coinbase Pro](/connectors/coinbase)
+* [Coinbase Pro](/connectors/coinbase/#creating-coinbase-pro-api-keys)
* [Huobi Global](/connectors/huobi/#creating-huobi-api-keys)
|
Fix the doc version.
The doc version was wrongly set to 0.0.1 | @@ -22,7 +22,7 @@ copyright = '2021, QuTiP Community'
author = 'QuTiP Community'
# The full version, including alpha/beta/rc tags
-release = '0.0.1'
+release = '0.1.0'
# -- General configuration ---------------------------------------------------
|
$.Analysis.Token: properly wrap No_Token_Index into No_Token
TN: | @@ -2472,7 +2472,9 @@ package body ${ada_lib_name}.Analysis is
(Node : access ${root_node_value_type}'Class;
Index : Token_Index) return Token_Type
is
- ((TDH => Token_Data (Node.Unit),
+ (if Index = No_Token_Index
+ then No_Token
+ else (TDH => Token_Data (Node.Unit),
Token => Index,
Trivia => No_Token_Index));
|
Adding time filter and refactoring fields mapping
Deleting console.log(...) | @@ -198,13 +198,10 @@ script:
var dt = new Date();
dt.setMinutes(dt.getMinutes() - dt.getTimezoneOffset());
dt.setHours(dt.getHours() - parseInt(args[LAST_HOURS]))
- console.log("Date Time = " + dt);
-
var buildDateFormat = function () {
return dt.toISOString().slice(0,19).replace('T',' ') + '"-"' + now.toISOString().slice(0,19).replace('T',' ') + '"';
};
//if query is empry string
- console.log(args)
if(!args || !args[QUERY]) {
args[QUERY] = 'select * where time = "' + buildDateFormat();
}
@@ -227,7 +224,6 @@ script:
using: true
};
handleTimeFilter(p);
- console.log(p[QUERY])
var keys = Object.keys(p);
if (keys.length > 0) {
q = '&';
|
Adding UI and Class for Default roles per issue
Added Default to Sec.Role | @@ -49,6 +49,7 @@ class Role(db.Model, RoleMixin):
id = db.Column(db.Integer(), primary_key=True)
name = db.Column(db.String(80), unique=True)
description = db.Column(db.String(255))
+ default = db.Column(db.Boolean)
class User(db.Model, UserMixin):
id = db.Column(db.Integer, primary_key=True)
|
tools: Update run-dev.py to output right subdomain if on Zulip droplet.
I have updated `tools/run-dev.py` to output the correct subdomain such as
`http://zulip.username.zulipdev.org` so that the user knows the correct
subdomain to access the Zulip Dev realm on. | @@ -370,7 +370,10 @@ def print_listeners() -> None:
# EXTERNAL_HOST logic from dev_settings.py.
IS_DEV_DROPLET = pwd.getpwuid(os.getuid()).pw_name == "zulipdev"
if IS_DEV_DROPLET:
- default_hostname = os.uname()[1].lower()
+ # Technically, the `zulip.` is a subdomain of the server, so
+ # this is kinda misleading, but 99% of development is done on
+ # the default/zulip subdomain.
+ default_hostname = "zulip." + os.uname()[1].lower()
else:
default_hostname = "localhost"
external_host = os.getenv("EXTERNAL_HOST", f"{default_hostname}:{proxy_port}")
|
Update irs990.yaml
Changing description to say that dataset contains data from 2013 to present. | Name: IRS 990 Filings
-Description: Machine-readable data from certain electronic 990 forms filed with the IRS from 2011 to present.
+Description: Machine-readable data from certain electronic 990 forms filed with the IRS from 2013 to present.
Documentation: https://docs.opendata.aws/irs-990/readme.html
Contact: https://www.irs.gov/e-file-providers/e-file-for-charities-and-non-profits
UpdateFrequency: New filings are added regularly
|
exceptions raised if passed ball_tree or kd_tree
KNeighborsTimeSeriesClassifier cannot be used with algorithms kd_tree or ball_tree in the base class KNeighborsClassifier. | @@ -114,6 +114,18 @@ class KNeighborsTimeSeriesClassifier(_KNeighborsClassifier, BaseClassifier):
metric_params=None,
**kwargs
):
+ if algorithm == "kd_tree":
+ raise ValueError(
+ "KNeighborsTimeSeriesClassifier cannot work with kd_tree since kd_tree "
+ "cannot be used with a callable distance metric and we do not support "
+ "precalculated distances as yet."
+ )
+ if algorithm == "ball_tree":
+ raise ValueError(
+ "KNeighborsTimeSeriesClassifier cannot work with ball_tree since "
+ "ball_tree has a list of hard coded distances it can use, and cannot "
+ "work with 3-D arrays"
+ )
self._cv_for_params = False
# TODO: add in capacity for euclidean
|
Fix empty constraint
When using the empty constraint without this patch, I was getting an error [here](https://github.com/scipy/scipy/blob/5c342cd4335aab4835390fb36e4405b1a64407e5/scipy/optimize/_trustregion_constr/tr_interior_point.py#L93)
```
IndexError: arrays used as indices must be of integer (or boolean) type
``` | @@ -88,7 +88,7 @@ class CanonicalConstraint(object):
def hess(x, v_eq, v_ineq):
return empty_hess
- return cls(0, 0, fun, jac, hess, np.empty(0))
+ return cls(0, 0, fun, jac, hess, np.empty(0, dtype=np.bool))
@classmethod
def concatenate(cls, canonical_constraints, sparse_jacobian):
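The root cause is the dtype: `np.empty(0)` defaults to float64, and a float array cannot be used as an index, which is exactly the `IndexError` quoted above. A standalone reproduction (using the builtin `bool`; the `np.bool` spelling in the patch was simply an alias for it at the time):

```python
import numpy as np

empty_values = np.empty(0)              # e.g. an empty set of inequality constraints

mask_float = np.empty(0)                # dtype float64 by default -> invalid index
mask_bool = np.empty(0, dtype=bool)     # what the fix produces

print(empty_values[mask_bool])          # [] -- empty boolean mask on empty data is fine
try:
    empty_values[mask_float]
except IndexError as exc:
    print('IndexError:', exc)           # arrays used as indices must be of integer (or boolean) type
```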
|
update __init__
Try to fix the conflict between my branch and the master | @@ -85,17 +85,17 @@ from plantcv.plantcv import transform
# add new functions to end of lists
-__all__ = ['fatal_error', 'print_image', 'plot_image', 'color_palette', 'plot_colorbar', 'apply_mask', 'readimage', 'readbayer',
- 'laplace_filter', 'sobel_filter', 'scharr_filter', 'hist_equalization', 'plot_hist', 'image_add',
- 'image_subtract', 'erode', 'dilate', 'watershed', 'rectangle_mask', 'rgb2gray_hsv', 'rgb2gray_lab',
+__all__ = ['fatal_error', 'print_image', 'plot_image', 'color_palette', 'plot_colorbar', 'apply_mask', 'readimage',
+ 'readbayer', 'laplace_filter', 'sobel_filter', 'scharr_filter', 'hist_equalization', 'plot_hist', 'erode',
+ 'image_add', 'image_subtract', 'dilate', 'watershed', 'rectangle_mask', 'rgb2gray_hsv', 'rgb2gray_lab',
'rgb2gray', 'median_blur', 'fill', 'invert', 'logical_and', 'logical_or', 'logical_xor',
'find_objects', 'roi_objects', 'transform', 'object_composition', 'analyze_object',
'analyze_bound_horizontal', 'analyze_bound_vertical', 'analyze_color', 'analyze_nir_intensity',
'fluor_fvfm', 'print_results', 'resize', 'flip', 'crop_position_mask', 'get_nir', 'report_size_marker_area',
'white_balance', 'acute_vertex', 'scale_features', 'landmark_reference_pt_dist',
- 'x_axis_pseudolandmarks', 'y_axis_pseudolandmarks', 'gaussian_blur', 'cluster_contours', 'pseudocolor',
- 'cluster_contour_splitimg', 'rotate', 'shift_img', 'output_mask', 'auto_crop',
- 'background_subtraction', 'naive_bayes_classifier', 'acute', 'distance_transform', 'params']
+ 'x_axis_pseudolandmarks', 'y_axis_pseudolandmarks', 'gaussian_blur', 'cluster_contours',
+ 'cluster_contour_splitimg', 'rotate', 'shift_img', 'output_mask', 'auto_crop', 'canny_edge_detect',
+ 'background_subtraction', 'naive_bayes_classifier', 'acute', 'distance_transform', 'params', 'pseudocolor']
from ._version import get_versions
__version__ = get_versions()['version']
|
Update dipyridamole.json
add comment to SQL | ],
"numerator_from": "{hscic}.normalised_prescribing_standard ",
"numerator_where": [
- "(bnf_code LIKE '0209000L0%')",
- " OR (bnf_code LIKE '0209000V0%') "
+ "(",
+ "bnf_code LIKE '0209000L0%' OR --Dipyridamole \n",
+ "bnf_code LIKE '0209000V0%' --Dipyridamole & Aspirin \n",
+ ")"
],
"denominator_columns": [
"SUM(items) AS denominator, "
],
"denominator_from": "{hscic}.normalised_prescribing_standard ",
"denominator_where": [
- "(bnf_code LIKE '0209000L0%' OR bnf_code LIKE '0209000V0%' OR bnf_code LIKE '0209000C0%') "
+ "(",
+ "bnf_code LIKE '0209000C0%' OR --Clopidogrel \n",
+ "bnf_code LIKE '0209000L0%' OR --Dipyridamole \n",
+ "bnf_code LIKE '0209000V0%' --Dipyridamole & Aspirin \n",
+ ")"
]
}
|
Don't pass along null values as labels
The initial lazy migration of this feature inadvertently set these
translation dicts to something like {"en": None}, when they should've
been left blank (the empty string is also an acceptable value).
This causes issues in the translation code: | @@ -2287,7 +2287,8 @@ class CaseSearch(DocumentSchema):
def get_search_title_label(self, app, lang, for_default=False):
if for_default:
lang = app.default_language
- return self.title_label.get(lang, '')
+ # Some apps have undefined labels incorrectly set to None, normalize here
+ return self.title_label.get(lang) or ''
def overwrite_attrs(self, src_config, slugs):
if 'search_properties' in slugs:
|
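A self-contained sketch of the behavioural difference behind the one-line change above: `dict.get(key, default)` only falls back for *missing* keys, while `... or ''` also normalizes an explicit `None`.

```python
labels = {"en": None}            # what the lazy migration accidentally produced

print(labels.get("en", ""))      # None -- the default applies only when the key is absent
print(labels.get("en") or "")    # ''   -- also covers keys stored as None (or empty)
```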
Update README.md
Add Bible api in Books Category | @@ -111,8 +111,8 @@ API | Description | Auth | HTTPS | CORS |
### Books
API | Description | Auth | HTTPS | CORS |
|---|---|---|---|---|
-| [Bible](https://bibleapi.co/) | RESTful Bible API with 7 versions, 4 languages and multiple features | `apiKey` | Yes | Unknown |
| [Bhagavad Gita](https://bhagavadgita.io/api) | Bhagavad Gita text | `OAuth` | Yes | Yes |
+| [Bible](https://bibleapi.co/) | RESTful Bible API with 7 versions, 4 languages and multiple features | `apiKey` | Yes | Unknown |
| [British National Bibliography](http://bnb.data.bl.uk/) | Books | No | No | Unknown |
| [Goodreads](https://www.goodreads.com/api) | Books | `apiKey` | Yes | Unknown |
| [Google Books](https://developers.google.com/books/) | Books | `OAuth` | Yes | Unknown |
|
Fix label deletion
Updated `gam delete labels` to process labels in reverse hierarchial order to avoid deleting a parent label before all of its child labels are deleted. | @@ -6325,20 +6325,17 @@ def doDeleteLabel(users):
labels = callGAPI(gmail.users().labels(), 'list', userId=user, fields='labels(id,name,type)')
del_labels = []
if label == '--ALL_LABELS--':
- for del_label in labels['labels']:
- if del_label['type'] == 'system':
- continue
+ for del_label in sorted(labels['labels'], key=lambda k: k['name'], reverse=True):
+ if del_label['type'] != 'system':
del_labels.append(del_label)
elif label[:6].lower() == 'regex:':
regex = label[6:]
p = re.compile(regex)
- for del_label in labels['labels']:
- if del_label['type'] == 'system':
- continue
- elif p.match(del_label['name']):
+ for del_label in sorted(labels['labels'], key=lambda k: k['name'], reverse=True):
+ if del_label['type'] != 'system' and p.match(del_label['name']):
del_labels.append(del_label)
else:
- for del_label in labels['labels']:
+ for del_label in sorted(labels['labels'], key=lambda k: k['name'], reverse=True):
if label_name_lower == del_label['name'].lower():
del_labels.append(del_label)
break
|
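A small illustration (with made-up label names) of why sorting in reverse order deletes child labels before their parents:

```python
labels = ["Projects", "Projects/2021", "Projects/2021/Q4"]

# Reverse lexicographic order visits "Projects/2021/Q4" first and "Projects" last,
# so no parent label is ever deleted while one of its children still exists.
for name in sorted(labels, reverse=True):
    print("delete", name)
```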
Updated link in Note
* Updated link in Note
Replaced the link to the Troubleshooting guide in the "Note". Minor grammatical fixes.
* Update source/install/docker-local-machine.rst | @@ -5,13 +5,13 @@ Local Machine Setup using Docker
The following instructions use Docker to install Mattermost in *Preview Mode* for exploring product functionality on a single machine.
-**Note:** This configuration should not be used in production, as it's using a known password string, contains other non-production configuration settings, and it does not support upgrade.
+**Note:** This configuration should not be used in production, as it uses a known password string, contains other non-production configuration settings, and does not support upgrade.
If you're looking for a production installation with Docker, please see the `Mattermost Production Docker Deployment Guide <http://docs.mattermost.com/install/prod-docker.html>`__.
.. note::
If you have any problems installing Mattermost, see
- the `troubleshooting guide <https://www.mattermost.org/troubleshoot/>`__. For help with inviting users to your system, see `inviting team members <https://docs.mattermost.com/help/getting-started/managing-members.html#inviting-team-members>`__ and other `getting started information <https://docs.mattermost.com/guides/user.html#getting-started>`__. To submit an improvement or correction, click **Edit** at the top of this page.
+ the `troubleshooting guide <https://docs.mattermost.com/install/troubleshooting.html>`__. For help with inviting users to your system, see `inviting team members <https://docs.mattermost.com/help/getting-started/managing-members.html#inviting-team-members>`__ and other `getting started information <https://docs.mattermost.com/guides/user.html#getting-started>`__. To submit an improvement or correction, click **Edit** at the top of this page.
One-line Docker Install
-----------------------
@@ -22,7 +22,7 @@ If you have Docker set up, Mattermost installs in one-line:
docker run --name mattermost-preview -d --publish 8065:8065 --add-host dockerhost:127.0.0.1 mattermost/mattermost-preview
-Otherwise, see step-by-step instructions:
+Otherwise, follow the step-by-step instructions:
macOS
^^^^^^^^
@@ -135,4 +135,3 @@ Accessing Your Container
.. code:: bash
docker exec -ti mattermost-preview /bin/bash
-
|
Update classes.py
Message-Id: | def new_alien_list(positions):
"""
+ Function that takes a list of positions and creates one alien
+ instance per position
:param positions: list - a list of tuples of (x, y) coordinates
:return: list - a list of alien objects
-
- Function that takes a list of positions and creates one alien
- instance per position
"""
pass
|
fix: rename cancelled docs patch query
use backquotes in queries where column names are dynamic (to avoid query
issues incase reserved keywords used as a table columns.) | @@ -129,9 +129,9 @@ def update_linked_doctypes(doctype, cancelled_doc_names):
update
`tab{linked_dt}`
set
- {column}=CONCAT({column}, '-CANC')
+ `{column}`=CONCAT(`{column}`, '-CANC')
where
- {column} in %(cancelled_doc_names)s;
+ `{column}` in %(cancelled_doc_names)s;
""".format(linked_dt=linked_dt, column=field),
{'cancelled_doc_names': cancelled_doc_names})
else:
@@ -151,9 +151,9 @@ def update_dynamic_linked_doctypes(doctype, cancelled_doc_names):
update
`tab{linked_dt}`
set
- {column}=CONCAT({column}, '-CANC')
+ `{column}`=CONCAT(`{column}`, '-CANC')
where
- {column} in %(cancelled_doc_names)s and {doctype_fieldname}=%(dt)s;
+ `{column}` in %(cancelled_doc_names)s and {doctype_fieldname}=%(dt)s;
""".format(linked_dt=linked_dt, column=fieldname, doctype_fieldname=doctype_fieldname),
{'cancelled_doc_names': cancelled_doc_names, 'dt': doctype})
else:
|
Fix a test compilation failure due to bad skip flag in
Rust tests were accidentally skipped in the top commit of
[ci skip-build-wheels] | @@ -348,13 +348,13 @@ async fn jdk_symlink() {
#[tokio::test]
#[cfg(unix)]
-async fn test_update_env() {
+async fn test_apply_chroot() {
let mut env: BTreeMap<String, String> = BTreeMap::new();
env.insert("PATH".to_string(), "/usr/bin:{chroot}/bin".to_string());
let work_dir = TempDir::new().unwrap();
let mut req = Process::new(owned_string_vec(&["/usr/bin/env"])).env(env.clone());
- local::update_env(&work_dir.path(), &mut req);
+ local::apply_chroot(work_dir.path().to_str().unwrap(), &mut req);
let path = format!("/usr/bin:{}/bin", work_dir.path().to_str().unwrap());
|
Separate / distinguish API docs for different API versions.
Closes | .. include:: /../tasks/README.rst
-Api Reference
+API Reference
-------------
+
+This package includes clients for multiple versions of the Tasks
+API. By default, you will get ``v2beta3``, the latest version.
+
.. toctree::
:maxdepth: 2
- gapic/v2beta2/api
- gapic/v2beta2/types
gapic/v2beta3/api
gapic/v2beta3/types
+
+The previous beta release, spelled ``v2beta2``, is provided to continue to
+support code previously written against them. In order to use them, you
+will want to import from e.g. ``google.cloud.tasks_v2beta2`` in lieu of
+``google.cloud.tasks`` (or the equivalent ``google.cloud.tasks_v2beta3``).
+
+An API and type reference is provided for this beta as well:
+
+.. toctree::
+ :maxdepth: 2
+
+ gapic/v2beta2/api
+ gapic/v2beta2/types
|
Move me/self check on get_input_entity to the beginning
It would otherwise fail since the addition of getting entity
by exact name if someone had 'me' or 'self' as their name. | @@ -2425,6 +2425,9 @@ class TelegramClient(TelegramBareClient):
Returns:
:tl:`InputPeerUser`, :tl:`InputPeerChat` or :tl:`InputPeerChannel`.
"""
+ if peer in ('me', 'self'):
+ return InputPeerSelf()
+
try:
# First try to get the entity from cache, otherwise figure it out
return self.session.get_input_entity(peer)
@@ -2432,8 +2435,6 @@ class TelegramClient(TelegramBareClient):
pass
if isinstance(peer, str):
- if peer in ('me', 'self'):
- return InputPeerSelf()
return utils.get_input_peer(self._get_entity_from_string(peer))
original_peer = peer
|
fix arg
[fixes COMMCAREHQ-1JD3] | @@ -69,7 +69,7 @@ class OtherCasesReassignmentProcessor():
def process(self):
from custom.icds.location_reassignment.utils import reassign_cases
new_site_codes = set()
- reassignments_by_location_id = defaultdict([])
+ reassignments_by_location_id = defaultdict(list)
for case_id, details in self.reassignments.items():
new_site_codes.add(details['new_site_code'])
new_locations_by_site_code = {
|
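For context on the one-argument fix above, a quick sketch of why `defaultdict` needs a callable factory rather than a list instance:

```python
from collections import defaultdict

good = defaultdict(list)   # the factory is called to build a fresh [] for each missing key
good["a"].append(1)
print(good["a"])            # [1]

try:
    defaultdict([])         # a list object is not callable
except TypeError as err:
    print(err)              # first argument must be callable or None
```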
Add Member.get_role
Adds an efficient way to check if a member has a role by ID.
This is done in a way consistent with the existing user API of the
library.
The more debated Member.has_role_id/has_role is intentionally not
included for review at this time given the heavy bikeshedding of it. | @@ -830,3 +830,20 @@ class Member(discord.abc.Messageable, _BaseUser):
user_id = self.id
for role in roles:
await req(guild_id, user_id, role.id, reason=reason)
+
+ def get_role(self, role_id: int) -> Optional[discord.Role]:
+ """Returns a role with the given ID from roles which the member has.
+
+ .. versionadded:: 2.0
+
+ Parameters
+ -----------
+ role_id: :class:`int`
+ The ID to search for.
+
+ Returns
+ --------
+ Optional[:class:`Role`]
+ The role or ``None`` if not found in the member's roles.
+ """
+ return self.guild.get_role(role_id) if self._roles.has(role_id) else None
|
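A hypothetical usage sketch for the new helper (the role ID and the `is_moderator` function below are placeholders, not part of the library):

```python
import discord

MODERATOR_ROLE_ID = 123456789012345678  # placeholder ID

def is_moderator(member: discord.Member) -> bool:
    # Checks the member's cached role IDs without building the full roles list first.
    return member.get_role(MODERATOR_ROLE_ID) is not None
```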
Expands coverage in test_to_tensor to 1 and 4 image channels
expands coverage on test_to_tensor to 1 and 4 image channels | @@ -248,9 +248,11 @@ class Tester(unittest.TestCase):
assert (y.equal(x))
def test_to_tensor(self):
- channels = 3
+ test_channels = [1, 3, 4]
height, width = 4, 4
trans = transforms.ToTensor()
+
+ for channels in test_channels:
input_data = torch.ByteTensor(channels, height, width).random_(0, 255).float().div_(255)
img = transforms.ToPILImage()(input_data)
output = trans(img)
|
Increase NY width
Increasing width for both primary and secondary | @@ -56,6 +56,12 @@ primary:
height: 5000
message: using viewport height 5000 for NE
+ NY:
+ renderSettings:
+ viewport:
+ width: 5000
+ message: using viewport width 5000 for NY
+
OK:
overseerScript: page.manualWait(); await page.waitForDelay(60000); page.done();
message: waiting 60 sec to load OK
@@ -191,6 +197,12 @@ secondary:
overseerScript: page.manualWait(); await page.waitForDelay(30000); page.done();
message: waiting 30 sec to load NJ secondary
+ NY:
+ renderSettings:
+ viewport:
+ width: 5000
+ message: using viewport width 5000 for NY
+
OK:
file: pdf
message: downloading pdf for OK - daily name is autogenerated by formula
|
Update detection-testing.yml
For a quotation mark | @@ -257,7 +257,7 @@ jobs:
with:
python-version: '3.9' #Available versions here - https://github.com/actions/python-versions/releases easy to change/make a matrix/use pypy
architecture: 'x64' # optional x64 or x86. Defaults to x64 if not specified
- cache: 'pip
+ cache: 'pip'
- name: Install Python Dependencies
run: |
|
Fix warning when creating ndarray from list of DSA with mismatched lengths
./packages/syft/src/syft/core/adp/data_subject_list.py:626: VisibleDeprecationWarning: Creating an ndarray from ragged nested sequences (which is a list-or-tuple of lists-or-tuples-or ndarrays with different lengths or shapes) is deprecated. If you meant to do this, you must specify 'dtype=object' when creating the ndarray.
input_subjects = np.array(input_subjects) | @@ -623,7 +623,7 @@ class DataSubjectArray:
# per data point, we should make sure that we implement in such a way we expand
# the datasubjects automatically for row to data point mapping.
if not isinstance(input_subjects, np.ndarray):
- input_subjects = np.array(input_subjects)
+ input_subjects = np.array(input_subjects, dtype=DataSubjectArray)
data_map = (
lambda x: DataSubjectArray([str(x)])
|
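A generic NumPy sketch of the deprecation warning and the explicit-dtype fix; the patch itself passes the library's own `DataSubjectArray` as the dtype rather than plain `object`.

```python
import numpy as np

ragged = [[1, 2, 3], [4, 5]]             # nested sequences of different lengths

# np.array(ragged) warns (and fails on newer NumPy); an explicit dtype keeps each row intact.
arr = np.array(ragged, dtype=object)
print(arr.shape, arr[1])                 # (2,) [4, 5]
```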
ci: include a waitfordebug label
This commit adds a feature to pause the CI job
after the cluster is deployed for debugging purposes. | @@ -310,6 +310,29 @@ if [[ "$LAUNCH_FROM" == "h" ]]; then
echo "(launch_e2e.sh) ==> The deployment failed, we still need to run the cleanup tasks"
FAILED="1"
}
+
+ if [[ "$JOB_TYPE" == "pr" ]]; then
+ #
+ # This while true will provide the feature of adding the label 'waitfordebug'
+ # to any PR in the main repository, if this label is found, then we will wait for
+ # 10 minutes until the label is removed from the pull request, after the label is
+ # removed the cleanup tasks will be executed.
+ #
+ while true; do
+ waitfordebug=$(curl \
+ --silent \
+ --location \
+ --request GET "https://api.github.com/repos/kubeinit/kubeinit/issues/${PULL_REQUEST}/labels" | \
+ jq -c '.[] | select(.name | contains("waitfordebug")).name' | tr -d '"')
+ if [ "$waitfordebug" == "waitfordebug" ]; then
+ echo "Wait for debugging the environment for 10 minutes"
+ sleep 600
+ else
+ break
+ fi
+ done
+ fi
+
for SPEC in $KUBEINIT_SPEC; do
echo "(launch_e2e.sh) ==> Cleaning ${SPEC}"
ansible-playbook \
|
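For readers who prefer Python to shell, a rough equivalent of the polling loop above (the repository and PR number are placeholders; the GitHub labels endpoint is the same one the script curls):

```python
import time
import requests

def wait_while_labeled(repo: str, pr_number: int, label: str = "waitfordebug") -> None:
    """Sleep in 10-minute slices while the debug label is still on the pull request."""
    url = f"https://api.github.com/repos/{repo}/issues/{pr_number}/labels"
    while True:
        names = {item["name"] for item in requests.get(url, timeout=30).json()}
        if label not in names:
            break
        time.sleep(600)   # re-check every 10 minutes, mirroring the shell loop
```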
Restore "Salt Community" doc section
This was moved to the `topics/index.rst` page in PR but
seems to have been accidentally removed in PR | @@ -89,3 +89,98 @@ the Salt project so that we can all benefit together as Salt grows.
Please feel free to sprinkle Salt around your systems and let the
deliciousness come forth.
+Salt Community
+==============
+
+Join the Salt!
+
+There are many ways to participate in and communicate with the Salt community.
+
+Salt has an active IRC channel and a mailing list.
+
+Mailing List
+============
+
+Join the `salt-users mailing list`_. It is the best place to ask questions
+about Salt and see whats going on with Salt development! The Salt mailing list
+is hosted by Google Groups. It is open to new members.
+
+.. _`salt-users mailing list`: https://groups.google.com/forum/#!forum/salt-users
+
+
+IRC
+===
+
+The ``#salt`` IRC channel is hosted on the popular `Freenode`_ network. You
+can use the `Freenode webchat client`_ right from your browser.
+
+`Logs of the IRC channel activity`_ are being collected courtesy of Moritz Lenz.
+
+.. _Freenode:: http://freenode.net/irc_servers.shtml
+.. _Freenode webchat client:: http://webchat.freenode.net/?channels=salt&uio=Mj10cnVlJjk9dHJ1ZSYxMD10cnVl83
+.. _Logs of the IRC channel activity:: http://irclog.perlgeek.de/salt/
+
+If you wish to discuss the development of Salt itself join us in
+``#salt-devel``.
+
+
+Follow on Github
+================
+
+The Salt code is developed via Github. Follow Salt for constant updates on what
+is happening in Salt development:
+
+|saltrepo|
+
+
+Blogs
+=====
+
+SaltStack Inc. keeps a `blog`_ with recent news and advancements:
+
+http://www.saltstack.com/blog/
+
+.. _`blog`: http://www.saltstack.com/blog/
+
+
+Example Salt States
+===================
+
+The official ``salt-states`` repository is:
+https://github.com/saltstack/salt-states
+
+A few examples of salt states from the community:
+
+* https://github.com/blast-hardcheese/blast-salt-states
+* https://github.com/kevingranade/kevingranade-salt-state
+* https://github.com/uggedal/states
+* https://github.com/mattmcclean/salt-openstack/tree/master/salt
+* https://github.com/rentalita/ubuntu-setup/
+* https://github.com/brutasse/states
+* https://github.com/bclermont/states
+* https://github.com/pcrews/salt-data
+
+Follow on ohloh
+===============
+
+https://www.ohloh.net/p/salt
+
+Other community links
+=====================
+
+- `Salt Stack Inc. <http://www.saltstack.com>`_
+- `Subreddit <http://www.reddit.com/r/saltstack>`_
+- `Google+ <https://plus.google.com/114449193225626631691/posts>`_
+- `YouTube <http://www.youtube.com/user/SaltStack>`_
+- `Facebook <https://www.facebook.com/SaltStack>`_
+- `Twitter <https://twitter.com/SaltStackInc>`_
+- `Wikipedia page <http://en.wikipedia.org/wiki/Salt_(software)>`_
+
+Hack the Source
+===============
+
+If you want to get involved with the development of source code or the
+documentation efforts, please review the :ref:`contributing documentation
+<contributing>`!
+
+.. _`Apache 2.0 license`: http://www.apache.org/licenses/LICENSE-2.0.html
|
Update nested_inputs.py
Added CO2_cost_us_dollars_per_tonne and CO2_cost_escalation_pct | @@ -276,7 +276,23 @@ nested_input_definitions = {
"max": 1.0,
"default": 0.3,
"description": "Additional cost, in percent of non-islandable capital costs, to make a distributed energy system islandable from the grid and able to serve critical loads. Includes all upgrade costs such as additional laber and critical load panels."
- }
+ },
+ "CO2_cost_us_dollars_per_tonne": {
+ "type": "float",
+ "min": 0.0,
+ "max": 1.0e3,
+ "default": 51.0,
+ "description": "Social Cost of CO2 in the first year of the analysis. Units are US dollars per metric ton of CO2. The default of $51/t is the 2020 value (using a 3 pct discount rate) estimated by the U.S. Interagency Working Group on Social Cost of Greenhouse Gases."
+ },
+ "CO2_cost_escalation_pct": {
+ "type": "float",
+ "min": -1,
+ "max": 1,
+ "default": 0.01778,
+ "description": "Annual nominal Social Cost of CO2 escalation rate."
+ },
+ # TODO: add in PM2.5, SO2, NOx here
+ # TODO: potentially add in CO2_discount_rate
},
"LoadProfile": {
@@ -554,6 +570,7 @@ nested_input_definitions = {
"type": ["list_of_float", "float"],
"description": "Carbon Dioxide emissions factor over all hours in one year. Can be provided as either a single constant fraction that will be applied across all timesteps, or an annual timeseries array at an hourly (8,760 samples), 30 minute (17,520 samples), or 15 minute (35,040 samples) resolution.",
},
+ # TODO: emissions_factor_series_pct_decrease - % annual decrease in the marginal emissions rates
"emissions_factor_series_lb_NOx_per_kwh": {
"type": ["list_of_float", "float"],
"description": "NOx emissions factor over all hours in one year. Can be provided as either a single constant fraction that will be applied across all timesteps, or an annual timeseries array at an hourly (8,760 samples), 30 minute (17,520 samples), or 15 minute (35,040 samples) resolution.",
|
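A quick arithmetic sketch of how the two new defaults interact, assuming the escalation is applied geometrically year over year (the exact application inside the model may differ):

```python
base_cost = 51.0        # $/tonne CO2, default for the first analysis year
escalation = 0.01778    # default nominal annual escalation rate

costs = [round(base_cost * (1 + escalation) ** n, 2) for n in range(4)]
print(costs)            # [51.0, 51.91, 52.83, 53.77]
```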
BranchCreator : Fix dependency tracking bug
In practice this was harmless because the dirtying performed by `inPlug()->childNamesPlug()` was equivalent, but it's better to be precise. | @@ -160,7 +160,7 @@ void BranchCreator::affects( const Plug *input, AffectedPlugsContainer &outputs
{
FilteredSceneProcessor::affects( input, outputs );
- if( input == parentPlug() || input == filteredPathsPlug() )
+ if( input == parentPlug() || input == filteredPathsPlug() || input == inPlug()->existsPlug() )
{
outputs.push_back( parentPathsPlug() );
}
|
Fix for function path
AttributeError: module 'urllib' has no attribute 'urlencode' | @@ -60,7 +60,7 @@ For an information on what the ssl_options can be set to reference the `official
The following example demonstrates how to generate the ssl_options string with `Python's urllib <http://docs.python.org/2/library/urllib.html>`_::
import urllib
- urllib.urlencode({'ssl_options': {'certfile': '/etc/ssl/mycert.pem', 'keyfile': '/etc/ssl/mykey.pem'}})
+ urllib.parse.urlencode({'ssl_options': {'certfile': '/etc/ssl/mycert.pem', 'keyfile': '/etc/ssl/mykey.pem'}})
.. rubric:: Footnotes
|
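A Python 3 sketch of the corrected call (in Python 2 the same function lived at `urllib.urlencode`):

```python
from urllib.parse import urlencode

params = {"ssl_options": {"certfile": "/etc/ssl/mycert.pem", "keyfile": "/etc/ssl/mykey.pem"}}
# The nested dict is converted to its string form and then percent-encoded.
print(urlencode(params))
```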
refactoring and fixing refrigeration loads
moving supply and return temperatures for refrigeration to constants
switching to the correct supply and return temperatures for refrigeration (previously the wrong data-center temperatures were used)
refactoring function and np.vectorize call to simple, understandable for loop | @@ -7,6 +7,7 @@ import numpy as np
import pandas as pd
from cea.technologies import heatpumps
from cea.constants import HOURS_IN_YEAR
+from cea.demand.constants import T_C_REF_SUP_0, T_C_REF_RE_0
__author__ = "Jimeno A. Fonseca"
__copyright__ = "Copyright 2016, Architecture and Building Systems - ETH Zurich"
@@ -33,32 +34,28 @@ def has_refrigeration_load(bpr):
else:
return False
+
def calc_Qcre_sys(bpr, tsd, schedules):
# calculate refrigeration loads
- tsd['Qcre'] = schedules['Qcre'] * bpr.internal_loads['Qcre_Wm2']
+ tsd['Qcre'] = schedules['Qcre'] * bpr.internal_loads['Qcre_Wm2'] * -1.0 # cooling loads are negative
# calculate distribution losses for refrigeration loads analogously to space cooling distribution losses
Y = bpr.building_systems['Y'][0]
Lv = bpr.building_systems['Lv']
- Qcre_d_ls = ((tsd['Tcdata_sys_sup'] + tsd['Tcdata_sys_re']) / 2 - tsd['T_ext']) * (
+ Qcre_d_ls = ((T_C_REF_SUP_0 + T_C_REF_RE_0) / 2 - tsd['T_ext']) * (
tsd['Qcre'] / np.nanmin(tsd['Qcre'])) * (Lv * Y)
# calculate system loads for data center
tsd['Qcre_sys'] = tsd['Qcre'] + Qcre_d_ls
- def function(Qcre_sys):
- if Qcre_sys > 0:
- Tcref_re_0 = 5
- Tcref_sup_0 = 1
- mcpref = Qcre_sys/(Tcref_re_0-Tcref_sup_0)
- else:
- mcpref = 0.0
- Tcref_re_0 = 0.0
- Tcref_sup_0 = 0.0
- return mcpref, Tcref_re_0, Tcref_sup_0
-
- tsd['mcpcre_sys'], tsd['Tcre_sys_re'], tsd['Tcre_sys_sup'] = np.vectorize(function)(tsd['Qcre_sys'])
+ # writing values to tsd, replacing function and np.vectorize call with simple for loop
+ for h in range(HOURS_IN_YEAR):
+ if tsd['Qcre_sys'][h] > 0:
+ tsd['mcpcre_sys'][h] = tsd['Qcre_sys'][h] / (T_C_REF_RE_0 - T_C_REF_SUP_0)
+ tsd['Tcre_sys_re'][h] = T_C_REF_RE_0
+ tsd['Tcre_sys_sup'][h] = T_C_REF_SUP_0
return tsd
+
def calc_Qref(locator, bpr, tsd):
"""
it calculates final loads
|
Create Invoice popup: Mark memo field as required
Both amount and memo are required by the API, but only the amount field is marked as such in the UI. This commit also marks the memo field as required. | filled
dense
v-model.trim="receive.data.memo"
- label="Memo"
+ label="Memo *"
placeholder="LNbits invoice"
></q-input>
<div v-if="receive.status == 'pending'" class="row q-mt-lg">
|
[revert]: since the UCR is mid-migration, values are already stored as
strings, which are difficult to migrate
Github Issue:
Authored-by: Shubham Bansal | },
{
"column_id": "migration_status",
- "datatype": "small_integer",
+ "datatype": "string",
"type": "expression",
"expression": {
- "type": "switch",
- "switch_on": {
+ "type": "root_doc",
+ "expression": {
+ "datatype": "string",
"type": "property_path",
- "property_path": ["form", "migration_status"]
- },
- "cases": {
- "migrated": {
- "type": "constant",
- "constant": 1
+ "property_path": [
+ "form",
+ "migration_status"
+ ]
}
},
- "default": {
- "type": "constant",
- "constant": 0
- }
- }
+ "is_nullable": true,
+ "is_primary_key": false
}
],
"named_expressions": {
|
validators: Fix dependentSchemas when instance is not an object
Just like 'dependentRequired' and 'dependencies', 'dependentSchemas'
needs to handle instance not being an object. | @@ -265,6 +265,9 @@ def dependentRequired(validator, dependentRequired, instance, schema):
def dependentSchemas(validator, dependentSchemas, instance, schema):
+ if not validator.is_type(instance, "object"):
+ return
+
for property, dependency in dependentSchemas.items():
if property not in instance:
continue
|
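A quick check of the intended behaviour, assuming a jsonschema release that already contains this guard (the validator class name follows the current package; the schema is illustrative):

```python
from jsonschema import Draft202012Validator

schema = {"dependentSchemas": {"foo": {"required": ["bar"]}}}

# dependentSchemas only constrains objects, so a non-object instance must pass untouched.
Draft202012Validator(schema).validate(12)
print("non-object instance accepted")
```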
Updating the `read_yaml` to use the file cache.
Also stores the raw pre-untagged parsed data to make cross locale usage easier. | @@ -55,6 +55,7 @@ class PodDoesNotExistError(Error, IOError):
# Pods can create temp directories. Need to track temp dirs for cleanup.
_POD_TEMP_DIRS = []
+
@atexit.register
def goodbye_pods():
for tmp_dir in _POD_TEMP_DIRS:
@@ -63,6 +64,7 @@ def goodbye_pods():
# TODO(jeremydw): A handful of the properties of "pod" should be moved to the
# "podspec" class.
+
class Pod(object):
DEFAULT_EXTENSIONS_DIR_NAME = 'extensions'
FEATURE_UI = 'ui'
@@ -797,14 +799,24 @@ class Pod(object):
return json.load(fp)
def read_yaml(self, path, locale=None):
- with self.profile.timer('Pod.read_yaml', label=path, meta={'path': path}):
+ """Read, parse, and untag a yaml file."""
+ contents = self.podcache.file_cache.get(path, locale=locale)
+ if contents is None:
+ label = '{} ({})'.format(path, locale)
+ meta = {'path': path, 'locale': locale}
+ with self.profile.timer('Pod.read_yaml', label=label, meta=meta):
+ fields = self.podcache.file_cache.get(path, locale='__raw__')
+ if fields is None:
fields = utils.parse_yaml(self.read_file(path), pod=self)
+ self.podcache.file_cache.add(path, fields, locale='__raw__')
try:
- return document_fields.DocumentFields.untag(
+ contents = document_fields.DocumentFields.untag(
fields, locale=locale, params={'env': self.env.name})
- except Exception as e:
+ self.podcache.file_cache.add(path, contents, locale=locale)
+ except Exception:
logging.error('Error parsing -> {}'.format(path))
raise
+ return contents
def render_paths(self, paths, routes, suffix=None, append_slashes=False):
"""Renders the given paths and yields each path and content."""
|
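The caching idea above, reduced to a standalone sketch (function names and cache keys here are illustrative, not the grow API): parse each file once under a `__raw__` key, then cache each locale's untagged view separately.

```python
_cache = {}

def read_yaml(path, locale, parse, untag):
    key = (path, locale)
    if key not in _cache:
        raw_key = (path, "__raw__")
        if raw_key not in _cache:
            _cache[raw_key] = parse(path)               # the expensive parse happens once per file
        _cache[key] = untag(_cache[raw_key], locale)    # each per-locale result is cached as well
    return _cache[key]
```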
gatherkeys: no need to decode - already done by remoto
Resolves: rm#39489 | @@ -143,7 +143,7 @@ def gatherkeys_missing(args, distro, rlogger, keypath, keytype, dest_dir):
keyring_path_local = os.path.join(dest_dir, keyring_name_local)
with open(keyring_path_local, 'wb') as f:
for line in out:
- f.write(line + b'\n')
+ f.write(line.decode('utf-8') + '\n')
return True
@@ -183,7 +183,7 @@ def gatherkeys_with_mon(args, host, dest_dir):
rlogger.debug(line)
return False
try:
- mon_status = json.loads(b''.join(out).decode('utf-8'))
+ mon_status = json.loads(''.join(out))
except ValueError:
rlogger.error('"ceph mon_status %s" output was not json', host)
for line in out:
|
Fix typo in comment about kill()
Fix typo in comment about kill method in PopenWorker class used to kill
child processes created by the worker. | @@ -124,7 +124,7 @@ class PopenWorker:
self._reader.close()
except IOError:
pass
- # kill all child processes recurisvely
+ # kill all child processes recursively
try:
kill_child_processes(self._proc.pid)
except TypeError:
|
Use pexpect.TIMEOUT instead of pexpect.exceptions.TIMEOUT
All pexpect submodules have been moved into the pexpect package as of version 3.0. | @@ -16,7 +16,7 @@ def expect_exact(context, expected, timeout):
timedout = False
try:
context.cli.expect_exact(expected, timeout=timeout)
- except pexpect.exceptions.TIMEOUT:
+ except pexpect.TIMEOUT:
timedout = True
if timedout:
# Strip color codes out of the output.
|
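A minimal sketch of catching the exception through its stable top-level name (assumes a Unix system with `bash` available, since `pexpect.spawn` is POSIX-only):

```python
import pexpect

child = pexpect.spawn("bash", ["-c", "sleep 5"])
try:
    child.expect("never-appears", timeout=1)
except pexpect.TIMEOUT:      # the top-level alias exists across pexpect versions
    print("timed out as expected")
```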
[DOCS] Fix tvm.build API doc layout
The newline in the pydoc breaks the layout of parameter inputs in API `tvm.build` | @@ -153,8 +153,7 @@ def build(
Parameters
----------
- inputs : Union[tvm.te.schedule.Schedule,
- tvm.tir.PrimFunc, IRModule, Mapping[str, IRModule]]
+ inputs : Union[tvm.te.schedule.Schedule, tvm.tir.PrimFunc, IRModule, Mapping[str, IRModule]]
The input to be built
args : Optional[List[Union[tvm.tir.Buffer, tensor.Tensor, Var]]]
|
Update create_dir_if_not_there.py
added os.path.jon() for making a full path safely and it's python3 compatible | @@ -13,7 +13,7 @@ try:
home = os.path.expanduser("~") # Set the variable home by expanding the users set home directory
print(home) # Print the location
- if not os.path.exists(os.path.join(home, 'testdir')):
+    if not os.path.exists(os.path.join(home, 'testdir')): # os.path.join() for making a full path safely
os.makedirs(os.path.join(home, 'testdir')) # If not create the directory, inside their home directory
except Exception as e:
print(e)
|
Improve help text for network create --external
Closes-Bug: | @@ -271,14 +271,16 @@ class CreateNetwork(common.NetworkAndComputeShowOne,
'--external',
action='store_true',
help=self.enhance_help_neutron(
- _("Set this network as an external network "
+ _("The network has an external routing facility that's not "
+ "managed by Neutron and can be used as in: "
+ "openstack router set --external-gateway NETWORK "
"(external-net extension required)"))
)
external_router_grp.add_argument(
'--internal',
action='store_true',
help=self.enhance_help_neutron(
- _("Set this network as an internal network (default)"))
+ _("Opposite of '--external' (default)"))
)
default_router_grp = parser.add_mutually_exclusive_group()
default_router_grp.add_argument(
@@ -690,13 +692,15 @@ class SetNetwork(common.NeutronCommandWithExtraArgs):
external_router_grp.add_argument(
'--external',
action='store_true',
- help=_("Set this network as an external network "
+ help=_("The network has an external routing facility that's not "
+ "managed by Neutron and can be used as in: "
+ "openstack router set --external-gateway NETWORK "
"(external-net extension required)")
)
external_router_grp.add_argument(
'--internal',
action='store_true',
- help=_("Set this network as an internal network")
+ help=_("Opposite of '--external'")
)
default_router_grp = parser.add_mutually_exclusive_group()
default_router_grp.add_argument(
|
[skip ci][ci] Add missing guard to skip CI check
This was failing in the docker image validation since the `BRANCH_NAME` environment variable wasn't set. The part in question still runs even with the `skip ci`
cc | @@ -132,7 +132,7 @@ def cancel_previous_build() {
}
def should_skip_ci(pr_number) {
- if (!env.BRANCH_NAME.startsWith('PR-')) {
+ if (env.BRANCH_NAME == null || !env.BRANCH_NAME.startsWith('PR-')) {
// never skip CI on build sourced from a branch
return false
}
|
Feature/251 better logging in retrain script:
add logging verbosity flag to run arguments in retrain.py script
don't log each created bottleneck file
pull request amendments | @@ -351,7 +351,7 @@ def create_bottleneck_file(bottleneck_path, image_lists, label_name, index,
decoded_image_tensor, resized_input_tensor,
bottleneck_tensor):
"""Create a single bottleneck file."""
- tf.logging.info('Creating bottleneck at ' + bottleneck_path)
+ tf.logging.debug('Creating bottleneck at ' + bottleneck_path)
image_path = get_image_path(image_lists, label_name, index,
image_dir, category)
if not tf.gfile.Exists(image_path):
@@ -964,10 +964,33 @@ def export_model(module_spec, class_count, saved_model_dir):
)
+def logging_level_verbosity(logging_verbosity):
+ """Converts logging_level into TensorFlow logging verbosity value
+
+ Args:
+    logging_verbosity: String value representing logging level: 'DEBUG', 'INFO',
+ 'WARN', 'ERROR', 'FATAL'
+ """
+ name_to_level = {
+ 'FATAL': tf.logging.FATAL,
+ 'ERROR': tf.logging.ERROR,
+ 'WARN': tf.logging.WARN,
+ 'INFO': tf.logging.INFO,
+ 'DEBUG': tf.logging.DEBUG
+ }
+
+ try:
+ return name_to_level[logging_verbosity]
+ except Exception as e:
+ raise RuntimeError('Not supported logs verbosity (%s). Use one of %s.' %
+ (str(e), [*name_to_level]))
+
+
def main(_):
# Needed to make sure the logging output is visible.
# See https://github.com/tensorflow/tensorflow/issues/3047
- tf.logging.set_verbosity(tf.logging.INFO)
+ logging_verbosity = logging_level_verbosity(FLAGS.logging_verbosity)
+ tf.logging.set_verbosity(logging_verbosity)
if not FLAGS.image_dir:
tf.logging.error('Must set flag --image_dir.')
@@ -1309,5 +1332,11 @@ if __name__ == '__main__':
type=str,
default='',
help='Where to save the exported graph.')
+ parser.add_argument(
+ '--logging_verbosity',
+ type=str,
+ default='INFO',
+ choices=['DEBUG', 'INFO', 'WARN', 'ERROR', 'FATAL'],
+ help='How much logging output should be produced.')
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
|